author     Nick Vatamaniuc <vatamane@gmail.com>   2022-06-07 16:37:52 -0400
committer  Nick Vatamaniuc <vatamane@gmail.com>   2022-06-07 16:37:52 -0400
commit     3edf4b30bdf1e369e9be177e934609e5ea73f02f (patch)
tree       a5ca74debaefedeef638c95b921d83ca3824d62b
parent     a1fc8075f3e86ec2242eedd2b1bbbd15758515e7 (diff)
download   couchdb-3.x.tar.gz

Replace 3.x branch with a moved README marker file (3.x)

Use main and fdbmain from now on
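For anyone whose local clone still tracks the retired 3.x branch, moving over is a small git exercise. The commands below are only a sketch based on the branch names given in this commit message (main and fdbmain); the remote name origin is an assumption, adjust it to match your setup:

    git fetch origin        # pick up the new default branches
    git checkout main       # primary development branch from now on
    git branch -D 3.x       # optionally drop the stale local 3.x branch (force needed, it no longer merges into main)

The diffstat that follows lists every file removed from the 3.x branch as part of this replacement.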
-rw-r--r--.credo.exs174
-rw-r--r--.devcontainer/Dockerfile24
-rw-r--r--.devcontainer/devcontainer.json19
-rw-r--r--.formatter.exs9
-rw-r--r--.github/ISSUE_TEMPLATE/bug_report.md36
-rw-r--r--.github/ISSUE_TEMPLATE/enhancement.md27
-rw-r--r--.github/ISSUE_TEMPLATE/rfc.md85
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md35
-rw-r--r--.gitignore132
-rw-r--r--.mailmap13
-rw-r--r--BUGS.md13
-rw-r--r--COMMITTERS.md11
-rw-r--r--CONTRIBUTING.md290
-rw-r--r--CONTRIBUTORS.in97
-rw-r--r--INSTALL.Unix.md264
-rw-r--r--INSTALL.Windows.md21
-rw-r--r--LICENSE2269
-rw-r--r--Makefile511
-rw-r--r--Makefile.win450
-rw-r--r--NOTICE199
-rw-r--r--README-DEV.rst256
-rw-r--r--README.md3
-rw-r--r--README.rst108
-rw-r--r--bin/erlang-version.escript3
-rw-r--r--build-aux/Jenkinsfile.full466
-rw-r--r--build-aux/Jenkinsfile.pr203
-rw-r--r--build-aux/README.md131
-rwxr-xr-xbuild-aux/couchdb-build-release.sh56
-rwxr-xr-xbuild-aux/dist-error28
-rwxr-xr-xbuild-aux/introspect73
-rwxr-xr-xbuild-aux/logfile-uploader.py138
-rwxr-xr-xbuild-aux/print-committerlist.sh68
-rwxr-xr-xbuild-aux/show-test-results.py412
-rwxr-xr-xbuild-aux/sphinx-build34
-rwxr-xr-xbuild-aux/sphinx-touch24
-rw-r--r--config/config.exs30
-rw-r--r--config/dev.exs1
-rw-r--r--config/integration.exs9
-rw-r--r--config/prod.exs1
-rw-r--r--config/test.exs12
-rwxr-xr-xconfigure360
-rw-r--r--configure.ps1250
-rw-r--r--dev/format_all.py35
-rw-r--r--dev/format_check.py48
-rw-r--r--dev/format_lib.py54
-rwxr-xr-xdev/make_boot_script9
-rw-r--r--dev/monitor_parent.erl41
-rw-r--r--dev/pbkdf2.py201
-rwxr-xr-xdev/remsh28
-rwxr-xr-xdev/remsh-tls29
-rwxr-xr-xdev/run862
-rw-r--r--dev/run.cmd15
-rw-r--r--erlang_ls.config5
-rw-r--r--make.cmd3
-rw-r--r--mix.exs162
-rw-r--r--mix.lock19
-rw-r--r--rebar.config.script220
-rw-r--r--rel/apps/config.config4
-rw-r--r--rel/apps/couch_epi.config22
-rwxr-xr-xrel/boot_dev_cluster.sh40
-rw-r--r--rel/files/README18
-rw-r--r--rel/files/couchdb.cmd.in37
-rwxr-xr-xrel/files/couchdb.in52
-rw-r--r--rel/files/eunit.config16
-rw-r--r--rel/files/eunit.ini38
-rw-r--r--rel/files/sys.config13
-rw-r--r--rel/files/vm.args11
-rw-r--r--rel/haproxy.cfg45
-rwxr-xr-xrel/overlay/bin/remsh130
-rw-r--r--rel/overlay/etc/default.d/README11
-rw-r--r--rel/overlay/etc/default.ini749
-rw-r--r--rel/overlay/etc/local.d/README8
-rw-r--r--rel/overlay/etc/local.ini95
-rw-r--r--rel/overlay/etc/vm.args97
-rw-r--r--rel/plugins/eunit_plugin.erl59
-rw-r--r--rel/reltool.config152
-rw-r--r--setup_eunit.template20
-rw-r--r--share/server/60/escodegen.js1
-rw-r--r--share/server/60/esprima.js6711
-rw-r--r--share/server/60/rewrite_fun.js56
-rw-r--r--share/server/coffee-script.js12
-rw-r--r--share/server/dreyfus.js62
-rw-r--r--share/server/filter.js46
-rw-r--r--share/server/json2.js482
-rw-r--r--share/server/loop.js167
-rw-r--r--share/server/mimeparse.js158
-rw-r--r--share/server/render.js400
-rw-r--r--share/server/rewrite_fun.js20
-rw-r--r--share/server/state.js31
-rw-r--r--share/server/util.js157
-rw-r--r--share/server/validate.js25
-rw-r--r--share/server/views.js137
-rw-r--r--src/chttpd/LICENSE202
-rw-r--r--src/chttpd/include/chttpd.hrl28
-rw-r--r--src/chttpd/include/chttpd_cors.hrl81
-rw-r--r--src/chttpd/priv/stats_descriptions.cfg24
-rw-r--r--src/chttpd/rebar.config2
-rw-r--r--src/chttpd/src/chttpd.app.src33
-rw-r--r--src/chttpd/src/chttpd.erl1622
-rw-r--r--src/chttpd/src/chttpd_app.erl21
-rw-r--r--src/chttpd/src/chttpd_auth.erl98
-rw-r--r--src/chttpd/src/chttpd_auth_cache.erl267
-rw-r--r--src/chttpd/src/chttpd_auth_request.erl156
-rw-r--r--src/chttpd/src/chttpd_cors.erl414
-rw-r--r--src/chttpd/src/chttpd_db.erl2696
-rw-r--r--src/chttpd/src/chttpd_epi.erl52
-rw-r--r--src/chttpd/src/chttpd_external.erl218
-rw-r--r--src/chttpd/src/chttpd_handlers.erl88
-rw-r--r--src/chttpd/src/chttpd_httpd_handlers.erl46
-rw-r--r--src/chttpd/src/chttpd_misc.erl328
-rw-r--r--src/chttpd/src/chttpd_node.erl385
-rw-r--r--src/chttpd/src/chttpd_plugin.erl64
-rw-r--r--src/chttpd/src/chttpd_prefer_header.erl61
-rw-r--r--src/chttpd/src/chttpd_rewrite.erl553
-rw-r--r--src/chttpd/src/chttpd_show.erl331
-rw-r--r--src/chttpd/src/chttpd_stats.erl96
-rw-r--r--src/chttpd/src/chttpd_sup.erl178
-rw-r--r--src/chttpd/src/chttpd_test_util.erl26
-rw-r--r--src/chttpd/src/chttpd_util.erl112
-rw-r--r--src/chttpd/src/chttpd_view.erl215
-rw-r--r--src/chttpd/src/chttpd_xframe_options.erl90
-rw-r--r--src/chttpd/test/eunit/chttpd_auth_tests.erl127
-rw-r--r--src/chttpd/test/eunit/chttpd_cors_test.erl582
-rw-r--r--src/chttpd/test/eunit/chttpd_csp_tests.erl281
-rw-r--r--src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl203
-rw-r--r--src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl366
-rw-r--r--src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl372
-rw-r--r--src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl225
-rw-r--r--src/chttpd/test/eunit/chttpd_db_test.erl618
-rw-r--r--src/chttpd/test/eunit/chttpd_dbs_info_test.erl334
-rw-r--r--src/chttpd/test/eunit/chttpd_delayed_test.erl72
-rw-r--r--src/chttpd/test/eunit/chttpd_error_info_tests.erl171
-rw-r--r--src/chttpd/test/eunit/chttpd_external_test.erl122
-rw-r--r--src/chttpd/test/eunit/chttpd_handlers_tests.erl88
-rw-r--r--src/chttpd/test/eunit/chttpd_open_revs_error_test.erl124
-rw-r--r--src/chttpd/test/eunit/chttpd_plugin_tests.erl200
-rw-r--r--src/chttpd/test/eunit/chttpd_prefer_header_test.erl114
-rw-r--r--src/chttpd/test/eunit/chttpd_purge_tests.erl491
-rw-r--r--src/chttpd/test/eunit/chttpd_revs_diff_tests.erl238
-rw-r--r--src/chttpd/test/eunit/chttpd_security_tests.erl521
-rw-r--r--src/chttpd/test/eunit/chttpd_session_tests.erl81
-rw-r--r--src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl116
-rw-r--r--src/chttpd/test/eunit/chttpd_test.hrl35
-rw-r--r--src/chttpd/test/eunit/chttpd_util_test.erl114
-rw-r--r--src/chttpd/test/eunit/chttpd_view_test.erl154
-rw-r--r--src/chttpd/test/eunit/chttpd_welcome_test.erl101
-rw-r--r--src/chttpd/test/eunit/chttpd_xframe_test.erl95
-rw-r--r--src/couch/.gitignore23
-rw-r--r--src/couch/LICENSE201
-rw-r--r--src/couch/include/couch_db.hrl244
-rw-r--r--src/couch/include/couch_eunit.hrl77
-rw-r--r--src/couch/include/couch_eunit_proper.hrl33
-rw-r--r--src/couch/include/couch_js_functions.hrl163
-rw-r--r--src/couch/priv/couch_ejson_compare/couch_ejson_compare.c603
-rw-r--r--src/couch/priv/couch_js/1.8.5/help.h79
-rw-r--r--src/couch/priv/couch_js/1.8.5/main.c307
-rw-r--r--src/couch/priv/couch_js/1.8.5/utf8.c297
-rw-r--r--src/couch/priv/couch_js/1.8.5/utf8.h19
-rw-r--r--src/couch/priv/couch_js/1.8.5/util.c296
-rw-r--r--src/couch/priv/couch_js/1.8.5/util.h35
-rw-r--r--src/couch/priv/couch_js/60/help.h79
-rw-r--r--src/couch/priv/couch_js/60/main.cpp336
-rw-r--r--src/couch/priv/couch_js/60/util.cpp355
-rw-r--r--src/couch/priv/couch_js/60/util.h38
-rw-r--r--src/couch/priv/couch_js/68/help.h79
-rw-r--r--src/couch/priv/couch_js/68/main.cpp337
-rw-r--r--src/couch/priv/couch_js/68/util.cpp348
-rw-r--r--src/couch/priv/couch_js/68/util.h41
-rw-r--r--src/couch/priv/couch_js/86/help.h79
-rw-r--r--src/couch/priv/couch_js/86/main.cpp344
-rw-r--r--src/couch/priv/couch_js/86/util.cpp348
-rw-r--r--src/couch/priv/couch_js/86/util.h41
-rwxr-xr-xsrc/couch/priv/spawnkillable/couchspawnkillable.sh20
-rw-r--r--src/couch/priv/spawnkillable/couchspawnkillable_win.c145
-rw-r--r--src/couch/priv/stats_descriptions.cfg332
-rw-r--r--src/couch/rebar.config.script253
-rw-r--r--src/couch/src/couch.app.src86
-rw-r--r--src/couch/src/couch.erl62
-rw-r--r--src/couch/src/couch_app.erl40
-rw-r--r--src/couch/src/couch_att.erl970
-rw-r--r--src/couch/src/couch_auth_cache.erl172
-rw-r--r--src/couch/src/couch_base32.erl156
-rw-r--r--src/couch/src/couch_bt_engine.erl1229
-rw-r--r--src/couch/src/couch_bt_engine.hrl27
-rw-r--r--src/couch/src/couch_bt_engine_compactor.erl767
-rw-r--r--src/couch/src/couch_bt_engine_header.erl451
-rw-r--r--src/couch/src/couch_bt_engine_stream.erl60
-rw-r--r--src/couch/src/couch_btree.erl1175
-rw-r--r--src/couch/src/couch_changes.erl777
-rw-r--r--src/couch/src/couch_compress.erl95
-rw-r--r--src/couch/src/couch_db.erl2374
-rw-r--r--src/couch/src/couch_db_engine.erl1034
-rw-r--r--src/couch/src/couch_db_epi.erl51
-rw-r--r--src/couch/src/couch_db_header.erl408
-rw-r--r--src/couch/src/couch_db_int.hrl76
-rw-r--r--src/couch/src/couch_db_plugin.erl96
-rw-r--r--src/couch/src/couch_db_split.erl523
-rw-r--r--src/couch/src/couch_db_updater.erl1029
-rw-r--r--src/couch/src/couch_debug.erl1067
-rw-r--r--src/couch/src/couch_doc.erl588
-rw-r--r--src/couch/src/couch_ejson_compare.erl128
-rw-r--r--src/couch/src/couch_ejson_size.erl92
-rw-r--r--src/couch/src/couch_emsort.erl347
-rw-r--r--src/couch/src/couch_event_sup.erl74
-rw-r--r--src/couch/src/couch_file.erl956
-rw-r--r--src/couch/src/couch_flags.erl138
-rw-r--r--src/couch/src/couch_flags_config.erl309
-rw-r--r--src/couch/src/couch_hash.erl45
-rw-r--r--src/couch/src/couch_hotp.erl31
-rw-r--r--src/couch/src/couch_httpd.erl1492
-rw-r--r--src/couch/src/couch_httpd_auth.erl697
-rw-r--r--src/couch/src/couch_httpd_db.erl1449
-rw-r--r--src/couch/src/couch_httpd_handlers.erl21
-rw-r--r--src/couch/src/couch_httpd_misc_handlers.erl313
-rw-r--r--src/couch/src/couch_httpd_multipart.erl359
-rw-r--r--src/couch/src/couch_httpd_rewrite.erl555
-rw-r--r--src/couch/src/couch_httpd_vhost.erl457
-rw-r--r--src/couch/src/couch_io_logger.erl97
-rw-r--r--src/couch/src/couch_key_tree.erl603
-rw-r--r--src/couch/src/couch_lru.erl68
-rw-r--r--src/couch/src/couch_multidb_changes.erl859
-rw-r--r--src/couch/src/couch_native_process.erl488
-rw-r--r--src/couch/src/couch_os_process.erl274
-rw-r--r--src/couch/src/couch_partition.erl155
-rw-r--r--src/couch/src/couch_passwords.erl200
-rw-r--r--src/couch/src/couch_primary_sup.erl34
-rw-r--r--src/couch/src/couch_proc_manager.erl576
-rw-r--r--src/couch/src/couch_query_servers.erl934
-rw-r--r--src/couch/src/couch_rand.erl24
-rw-r--r--src/couch/src/couch_secondary_sup.erl79
-rw-r--r--src/couch/src/couch_server.erl1097
-rw-r--r--src/couch/src/couch_server_int.hrl23
-rw-r--r--src/couch/src/couch_stream.erl302
-rw-r--r--src/couch/src/couch_sup.erl170
-rw-r--r--src/couch/src/couch_task_status.erl154
-rw-r--r--src/couch/src/couch_totp.erl24
-rw-r--r--src/couch/src/couch_users_db.erl228
-rw-r--r--src/couch/src/couch_util.erl817
-rw-r--r--src/couch/src/couch_uuids.erl188
-rw-r--r--src/couch/src/couch_work_queue.erl174
-rw-r--r--src/couch/src/test_request.erl110
-rw-r--r--src/couch/src/test_util.erl429
-rw-r--r--src/couch/test/eunit/chttpd_endpoints_tests.erl108
-rw-r--r--src/couch/test/eunit/couch_auth_cache_tests.erl382
-rw-r--r--src/couch/test/eunit/couch_base32_tests.erl28
-rw-r--r--src/couch/test/eunit/couch_bt_engine_compactor_ev.erl101
-rw-r--r--src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl336
-rw-r--r--src/couch/test/eunit/couch_bt_engine_compactor_tests.erl124
-rw-r--r--src/couch/test/eunit/couch_bt_engine_tests.erl18
-rw-r--r--src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl255
-rw-r--r--src/couch/test/eunit/couch_btree_tests.erl693
-rw-r--r--src/couch/test/eunit/couch_changes_tests.erl1093
-rw-r--r--src/couch/test/eunit/couch_db_doc_tests.erl117
-rw-r--r--src/couch/test/eunit/couch_db_mpr_tests.erl127
-rw-r--r--src/couch/test/eunit/couch_db_plugin_tests.erl233
-rw-r--r--src/couch/test/eunit/couch_db_props_upgrade_tests.erl77
-rw-r--r--src/couch/test/eunit/couch_db_split_tests.erl358
-rw-r--r--src/couch/test/eunit/couch_db_tests.erl212
-rw-r--r--src/couch/test/eunit/couch_doc_json_tests.erl526
-rw-r--r--src/couch/test/eunit/couch_doc_tests.erl179
-rw-r--r--src/couch/test/eunit/couch_ejson_compare_tests.erl289
-rw-r--r--src/couch/test/eunit/couch_ejson_size_tests.erl99
-rw-r--r--src/couch/test/eunit/couch_etag_tests.erl31
-rw-r--r--src/couch/test/eunit/couch_file_tests.erl553
-rw-r--r--src/couch/test/eunit/couch_flags_config_tests.erl147
-rw-r--r--src/couch/test/eunit/couch_flags_tests.erl158
-rw-r--r--src/couch/test/eunit/couch_hotp_tests.erl28
-rw-r--r--src/couch/test/eunit/couch_index_tests.erl273
-rw-r--r--src/couch/test/eunit/couch_js_tests.erl200
-rw-r--r--src/couch/test/eunit/couch_key_tree_prop_tests.erl531
-rw-r--r--src/couch/test/eunit/couch_key_tree_tests.erl576
-rw-r--r--src/couch/test/eunit/couch_passwords_tests.erl65
-rw-r--r--src/couch/test/eunit/couch_query_servers_tests.erl154
-rw-r--r--src/couch/test/eunit/couch_server_tests.erl290
-rw-r--r--src/couch/test/eunit/couch_stream_tests.erl128
-rw-r--r--src/couch/test/eunit/couch_task_status_tests.erl243
-rw-r--r--src/couch/test/eunit/couch_totp_tests.erl55
-rw-r--r--src/couch/test/eunit/couch_util_tests.erl152
-rw-r--r--src/couch/test/eunit/couch_uuids_tests.erl109
-rw-r--r--src/couch/test/eunit/couch_work_queue_tests.erl416
-rw-r--r--src/couch/test/eunit/couchdb_attachments_tests.erl851
-rw-r--r--src/couch/test/eunit/couchdb_auth_tests.erl132
-rwxr-xr-xsrc/couch/test/eunit/couchdb_cookie_domain_tests.erl88
-rw-r--r--src/couch/test/eunit/couchdb_cors_tests.erl431
-rw-r--r--src/couch/test/eunit/couchdb_db_tests.erl88
-rw-r--r--src/couch/test/eunit/couchdb_design_doc_tests.erl99
-rw-r--r--src/couch/test/eunit/couchdb_file_compression_tests.erl251
-rw-r--r--src/couch/test/eunit/couchdb_location_header_tests.erl83
-rw-r--r--src/couch/test/eunit/couchdb_mrview_cors_tests.erl142
-rw-r--r--src/couch/test/eunit/couchdb_mrview_tests.erl272
-rw-r--r--src/couch/test/eunit/couchdb_os_proc_pool.erl390
-rw-r--r--src/couch/test/eunit/couchdb_update_conflicts_tests.erl348
-rw-r--r--src/couch/test/eunit/couchdb_vhosts_tests.erl346
-rw-r--r--src/couch/test/eunit/couchdb_views_tests.erl1125
-rw-r--r--src/couch/test/eunit/fixtures/15a5cb17365a99cd9ddc7327c82bbd0d.viewbin12388 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/1f2c24bc334d701c2048f85e7438eef1.viewbin4230 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/6cf2c2f766f87b618edf6630b00f8736.viewbin8310 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/colltest1.couchbin24768 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg19
-rw-r--r--src/couch/test/eunit/fixtures/couch_stats_aggregates.ini20
-rw-r--r--src/couch/test/eunit/fixtures/db321.couchbin28864 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_non_partitioned.couchbin12479 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couchbin12470 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couchbin16557 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couchbin16566 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_v6_without_purge_req.couchbin61644 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couchbin16617 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couchbin20705 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couchbin20713 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/db_v7_without_purge_req.couchbin65781 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/logo.pngbin3010 -> 0 bytes
-rw-r--r--src/couch/test/eunit/fixtures/multipart.http13
-rw-r--r--src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh17
-rwxr-xr-xsrc/couch/test/eunit/fixtures/os_daemon_can_reboot.sh15
-rwxr-xr-xsrc/couch/test/eunit/fixtures/os_daemon_configer.escript97
-rwxr-xr-xsrc/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh15
-rwxr-xr-xsrc/couch/test/eunit/fixtures/os_daemon_die_quickly.sh15
-rwxr-xr-xsrc/couch/test/eunit/fixtures/os_daemon_looper.escript26
-rw-r--r--src/couch/test/eunit/fixtures/test.couchbin28878 -> 0 bytes
-rw-r--r--src/couch/test/eunit/global_changes_tests.erl164
-rw-r--r--src/couch/test/eunit/json_stream_parse_tests.erl157
-rw-r--r--src/couch/test/eunit/test_web.erl114
-rw-r--r--src/couch/test/exunit/couch_compress_tests.exs113
-rw-r--r--src/couch/test/exunit/fabric_test.exs101
-rw-r--r--src/couch/test/exunit/same_site_cookie_tests.exs44
-rw-r--r--src/couch/test/exunit/test_helper.exs2
-rw-r--r--src/couch_dist/LICENSE177
-rw-r--r--src/couch_dist/rebar.config2
-rw-r--r--src/couch_dist/src/couch_dist.app.src19
-rw-r--r--src/couch_dist/src/couch_dist.erl149
-rw-r--r--src/couch_dist/test/eunit/couch_dist_tests.erl95
-rw-r--r--src/couch_epi/.gitignore4
-rw-r--r--src/couch_epi/LICENSE203
-rw-r--r--src/couch_epi/README.md166
-rw-r--r--src/couch_epi/rebar.config7
-rw-r--r--src/couch_epi/src/couch_epi.app.src.script28
-rw-r--r--src/couch_epi/src/couch_epi.erl199
-rw-r--r--src/couch_epi/src/couch_epi.hrl15
-rw-r--r--src/couch_epi/src/couch_epi_app.erl23
-rw-r--r--src/couch_epi/src/couch_epi_codechange_monitor.erl69
-rw-r--r--src/couch_epi/src/couch_epi_codegen.erl94
-rw-r--r--src/couch_epi/src/couch_epi_data.erl120
-rw-r--r--src/couch_epi/src/couch_epi_data_gen.erl307
-rw-r--r--src/couch_epi/src/couch_epi_functions.erl53
-rw-r--r--src/couch_epi/src/couch_epi_functions_gen.erl492
-rw-r--r--src/couch_epi/src/couch_epi_module_keeper.erl174
-rw-r--r--src/couch_epi/src/couch_epi_plugin.erl395
-rw-r--r--src/couch_epi/src/couch_epi_sup.erl163
-rw-r--r--src/couch_epi/src/couch_epi_util.erl29
-rw-r--r--src/couch_epi/test/eunit/couch_epi_basic_test.erl167
-rw-r--r--src/couch_epi/test/eunit/couch_epi_tests.erl724
-rw-r--r--src/couch_epi/test/eunit/fixtures/app_data1.cfg4
-rw-r--r--src/couch_epi/test/eunit/fixtures/app_data2.cfg8
-rw-r--r--src/couch_event/.gitignore2
-rw-r--r--src/couch_event/LICENSE202
-rw-r--r--src/couch_event/README.md3
-rw-r--r--src/couch_event/rebar.config1
-rw-r--r--src/couch_event/src/couch_event.app.src22
-rw-r--r--src/couch_event/src/couch_event.erl56
-rw-r--r--src/couch_event/src/couch_event_app.erl25
-rw-r--r--src/couch_event/src/couch_event_int.hrl19
-rw-r--r--src/couch_event/src/couch_event_listener.erl218
-rw-r--r--src/couch_event/src/couch_event_listener_mfa.erl96
-rw-r--r--src/couch_event/src/couch_event_os_listener.erl67
-rw-r--r--src/couch_event/src/couch_event_server.erl150
-rw-r--r--src/couch_event/src/couch_event_sup2.erl36
-rw-r--r--src/couch_index/.gitignore3
-rw-r--r--src/couch_index/LICENSE202
-rw-r--r--src/couch_index/rebar.config2
-rw-r--r--src/couch_index/src/couch_index.app.src19
-rw-r--r--src/couch_index/src/couch_index.erl618
-rw-r--r--src/couch_index/src/couch_index_app.erl21
-rw-r--r--src/couch_index/src/couch_index_compactor.erl122
-rw-r--r--src/couch_index/src/couch_index_debug.erl171
-rw-r--r--src/couch_index/src/couch_index_epi.erl50
-rw-r--r--src/couch_index/src/couch_index_plugin.erl51
-rw-r--r--src/couch_index/src/couch_index_plugin_couch_db.erl24
-rw-r--r--src/couch_index/src/couch_index_server.erl396
-rw-r--r--src/couch_index/src/couch_index_sup.erl23
-rw-r--r--src/couch_index/src/couch_index_updater.erl231
-rw-r--r--src/couch_index/src/couch_index_util.erl74
-rw-r--r--src/couch_index/test/eunit/couch_index_compaction_tests.erl123
-rw-r--r--src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl177
-rw-r--r--src/couch_log/.gitignore3
-rw-r--r--src/couch_log/LICENSE202
-rw-r--r--src/couch_log/include/couch_log.hrl22
-rw-r--r--src/couch_log/priv/stats_descriptions.cfg48
-rw-r--r--src/couch_log/rebar.config2
-rw-r--r--src/couch_log/src/couch_log.app.src19
-rw-r--r--src/couch_log/src/couch_log.erl65
-rw-r--r--src/couch_log/src/couch_log_app.erl23
-rw-r--r--src/couch_log/src/couch_log_config.erl120
-rw-r--r--src/couch_log/src/couch_log_config_dyn.erl28
-rw-r--r--src/couch_log/src/couch_log_error_logger_h.erl48
-rw-r--r--src/couch_log/src/couch_log_formatter.erl438
-rw-r--r--src/couch_log/src/couch_log_monitor.erl69
-rw-r--r--src/couch_log/src/couch_log_server.erl92
-rw-r--r--src/couch_log/src/couch_log_sup.erl93
-rw-r--r--src/couch_log/src/couch_log_trunc_io.erl1105
-rw-r--r--src/couch_log/src/couch_log_trunc_io_fmt.erl593
-rw-r--r--src/couch_log/src/couch_log_util.erl147
-rw-r--r--src/couch_log/src/couch_log_writer.erl73
-rw-r--r--src/couch_log/src/couch_log_writer_file.erl131
-rw-r--r--src/couch_log/src/couch_log_writer_journald.erl63
-rw-r--r--src/couch_log/src/couch_log_writer_stderr.erl49
-rw-r--r--src/couch_log/src/couch_log_writer_syslog.erl201
-rw-r--r--src/couch_log/test/eunit/couch_log_config_listener_test.erl79
-rw-r--r--src/couch_log/test/eunit/couch_log_config_test.erl170
-rw-r--r--src/couch_log/test/eunit/couch_log_error_logger_h_test.erl36
-rw-r--r--src/couch_log/test/eunit/couch_log_formatter_test.erl966
-rw-r--r--src/couch_log/test/eunit/couch_log_monitor_test.erl56
-rw-r--r--src/couch_log/test/eunit/couch_log_server_test.erl110
-rw-r--r--src/couch_log/test/eunit/couch_log_test.erl76
-rw-r--r--src/couch_log/test/eunit/couch_log_test_util.erl170
-rw-r--r--src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl91
-rw-r--r--src/couch_log/test/eunit/couch_log_util_test.erl91
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_ets.erl44
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_file_test.erl155
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_stderr_test.erl49
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_syslog_test.erl144
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_test.erl46
-rw-r--r--src/couch_mrview/LICENSE202
-rw-r--r--src/couch_mrview/include/couch_mrview.hrl112
-rw-r--r--src/couch_mrview/priv/stats_descriptions.cfg24
-rw-r--r--src/couch_mrview/rebar.config2
-rw-r--r--src/couch_mrview/src/couch_mrview.app.src18
-rw-r--r--src/couch_mrview/src/couch_mrview.erl748
-rw-r--r--src/couch_mrview/src/couch_mrview_cleanup.erl68
-rw-r--r--src/couch_mrview/src/couch_mrview_compactor.erl317
-rw-r--r--src/couch_mrview/src/couch_mrview_debug.erl50
-rw-r--r--src/couch_mrview/src/couch_mrview_http.erl674
-rw-r--r--src/couch_mrview/src/couch_mrview_index.erl362
-rw-r--r--src/couch_mrview/src/couch_mrview_show.erl515
-rw-r--r--src/couch_mrview/src/couch_mrview_test_util.erl136
-rw-r--r--src/couch_mrview/src/couch_mrview_update_notifier.erl51
-rw-r--r--src/couch_mrview/src/couch_mrview_updater.erl383
-rw-r--r--src/couch_mrview/src/couch_mrview_util.erl1292
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl145
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl236
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl130
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl157
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl557
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl144
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_http_tests.erl37
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl99
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl158
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl152
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl314
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl607
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl97
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_util_tests.erl37
-rw-r--r--src/couch_peruser/.gitignore9
-rw-r--r--src/couch_peruser/LICENSE202
-rw-r--r--src/couch_peruser/README.md34
-rw-r--r--src/couch_peruser/src/couch_peruser.app.src20
-rw-r--r--src/couch_peruser/src/couch_peruser.erl505
-rw-r--r--src/couch_peruser/src/couch_peruser_app.erl23
-rw-r--r--src/couch_peruser/src/couch_peruser_sup.erl26
-rw-r--r--src/couch_peruser/test/eunit/couch_peruser_test.erl569
-rw-r--r--src/couch_plugins/LICENSE202
-rw-r--r--src/couch_plugins/Makefile.am40
-rw-r--r--src/couch_plugins/README.md159
-rw-r--r--src/couch_plugins/src/couch_plugins.app.src22
-rw-r--r--src/couch_plugins/src/couch_plugins.erl322
-rw-r--r--src/couch_plugins/src/couch_plugins_httpd.erl69
-rw-r--r--src/couch_prometheus/src/couch_prometheus.app.src20
-rw-r--r--src/couch_prometheus/src/couch_prometheus.hrl15
-rw-r--r--src/couch_prometheus/src/couch_prometheus_app.erl23
-rw-r--r--src/couch_prometheus/src/couch_prometheus_http.erl112
-rw-r--r--src/couch_prometheus/src/couch_prometheus_server.erl189
-rw-r--r--src/couch_prometheus/src/couch_prometheus_sup.erl40
-rw-r--r--src/couch_prometheus/src/couch_prometheus_util.erl169
-rw-r--r--src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl151
-rw-r--r--src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl75
-rw-r--r--src/couch_pse_tests/src/couch_pse_tests.app.src20
-rw-r--r--src/couch_pse_tests/src/cpse_gather.erl95
-rw-r--r--src/couch_pse_tests/src/cpse_test_attachments.erl98
-rw-r--r--src/couch_pse_tests/src/cpse_test_compaction.erl331
-rw-r--r--src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl88
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_changes.erl182
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_docs.erl414
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl179
-rw-r--r--src/couch_pse_tests/src/cpse_test_get_set_props.erl90
-rw-r--r--src/couch_pse_tests/src/cpse_test_open_close_delete.erl70
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl83
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_docs.erl453
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_replication.erl206
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_seqs.erl125
-rw-r--r--src/couch_pse_tests/src/cpse_test_read_write_docs.erl303
-rw-r--r--src/couch_pse_tests/src/cpse_test_ref_counting.erl105
-rw-r--r--src/couch_pse_tests/src/cpse_util.erl667
-rw-r--r--src/couch_replicator/.gitignore4
-rw-r--r--src/couch_replicator/LICENSE202
-rw-r--r--src/couch_replicator/README.md285
-rw-r--r--src/couch_replicator/include/couch_replicator_api_wrap.hrl31
-rw-r--r--src/couch_replicator/priv/stats_descriptions.cfg152
-rw-r--r--src/couch_replicator/src/couch_replicator.app.src37
-rw-r--r--src/couch_replicator/src/couch_replicator.erl419
-rw-r--r--src/couch_replicator/src/couch_replicator.hrl59
-rw-r--r--src/couch_replicator/src/couch_replicator_api_wrap.erl1057
-rw-r--r--src/couch_replicator/src/couch_replicator_app.erl17
-rw-r--r--src/couch_replicator/src/couch_replicator_auth.erl95
-rw-r--r--src/couch_replicator/src/couch_replicator_auth_noop.erl44
-rw-r--r--src/couch_replicator/src/couch_replicator_auth_session.erl719
-rw-r--r--src/couch_replicator/src/couch_replicator_changes_reader.erl158
-rw-r--r--src/couch_replicator/src/couch_replicator_clustering.erl269
-rw-r--r--src/couch_replicator/src/couch_replicator_connection.erl281
-rw-r--r--src/couch_replicator/src/couch_replicator_db_changes.erl103
-rw-r--r--src/couch_replicator/src/couch_replicator_doc_processor.erl934
-rw-r--r--src/couch_replicator/src/couch_replicator_doc_processor_worker.erl295
-rw-r--r--src/couch_replicator/src/couch_replicator_docs.erl952
-rw-r--r--src/couch_replicator/src/couch_replicator_fabric.erl158
-rw-r--r--src/couch_replicator/src/couch_replicator_fabric_rpc.erl97
-rw-r--r--src/couch_replicator/src/couch_replicator_filters.erl220
-rw-r--r--src/couch_replicator/src/couch_replicator_httpc.erl538
-rw-r--r--src/couch_replicator/src/couch_replicator_httpc_pool.erl216
-rw-r--r--src/couch_replicator/src/couch_replicator_httpd.erl190
-rw-r--r--src/couch_replicator/src/couch_replicator_httpd_util.erl201
-rw-r--r--src/couch_replicator/src/couch_replicator_ids.erl282
-rw-r--r--src/couch_replicator/src/couch_replicator_job_sup.erl34
-rw-r--r--src/couch_replicator/src/couch_replicator_js_functions.hrl177
-rw-r--r--src/couch_replicator/src/couch_replicator_notifier.erl60
-rw-r--r--src/couch_replicator/src/couch_replicator_rate_limiter.erl239
-rw-r--r--src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl56
-rw-r--r--src/couch_replicator/src/couch_replicator_scheduler.erl1690
-rw-r--r--src/couch_replicator/src/couch_replicator_scheduler_job.erl1182
-rw-r--r--src/couch_replicator/src/couch_replicator_scheduler_sup.erl59
-rw-r--r--src/couch_replicator/src/couch_replicator_share.erl762
-rw-r--r--src/couch_replicator/src/couch_replicator_stats.erl87
-rw-r--r--src/couch_replicator/src/couch_replicator_sup.erl40
-rw-r--r--src/couch_replicator/src/couch_replicator_utils.erl572
-rw-r--r--src/couch_replicator/src/couch_replicator_worker.erl546
-rw-r--r--src/couch_replicator/src/json_stream_parse.erl425
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl99
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl523
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl237
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl144
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl260
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl262
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl178
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl86
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl127
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl229
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl159
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl123
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl79
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl287
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl122
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl184
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_test.hrl35
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_test_helper.erl147
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl201
-rw-r--r--src/couch_stats/.gitignore6
-rw-r--r--src/couch_stats/LICENSE201
-rw-r--r--src/couch_stats/README.md29
-rw-r--r--src/couch_stats/priv/sample_descriptions.cfg15
-rw-r--r--src/couch_stats/src/couch_stats.app.src20
-rw-r--r--src/couch_stats/src/couch_stats.erl130
-rw-r--r--src/couch_stats/src/couch_stats.hrl14
-rw-r--r--src/couch_stats/src/couch_stats_aggregator.erl162
-rw-r--r--src/couch_stats/src/couch_stats_app.erl23
-rw-r--r--src/couch_stats/src/couch_stats_httpd.erl115
-rw-r--r--src/couch_stats/src/couch_stats_process_tracker.erl80
-rw-r--r--src/couch_stats/src/couch_stats_sup.erl34
-rw-r--r--src/couch_tests/.gitignore6
-rw-r--r--src/couch_tests/include/couch_tests.hrl28
-rw-r--r--src/couch_tests/rebar.config20
-rw-r--r--src/couch_tests/setups/couch_epi_dispatch.erl98
-rw-r--r--src/couch_tests/src/couch_tests.app.src18
-rw-r--r--src/couch_tests/src/couch_tests.erl233
-rw-r--r--src/couch_tests/src/couch_tests_combinatorics.erl136
-rw-r--r--src/couch_tests/test/couch_tests_app_tests.erl117
-rw-r--r--src/custodian/README8
-rw-r--r--src/custodian/rebar.config.script35
-rw-r--r--src/custodian/src/custodian.app.src.script48
-rw-r--r--src/custodian/src/custodian.erl21
-rw-r--r--src/custodian/src/custodian_app.erl28
-rw-r--r--src/custodian/src/custodian_db_checker.erl139
-rw-r--r--src/custodian/src/custodian_monitor.erl26
-rw-r--r--src/custodian/src/custodian_noop_monitor.erl30
-rw-r--r--src/custodian/src/custodian_server.erl244
-rw-r--r--src/custodian/src/custodian_sup.erl45
-rw-r--r--src/custodian/src/custodian_util.erl284
-rw-r--r--src/ddoc_cache/LICENSE202
-rw-r--r--src/ddoc_cache/README.md4
-rw-r--r--src/ddoc_cache/priv/stats_descriptions.cfg12
-rw-r--r--src/ddoc_cache/src/ddoc_cache.app.src31
-rw-r--r--src/ddoc_cache/src/ddoc_cache.erl53
-rw-r--r--src/ddoc_cache/src/ddoc_cache.hrl40
-rw-r--r--src/ddoc_cache/src/ddoc_cache_app.erl22
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry.erl327
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_custom.erl32
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl39
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl39
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl42
-rw-r--r--src/ddoc_cache/src/ddoc_cache_lru.erl337
-rw-r--r--src/ddoc_cache/src/ddoc_cache_sup.erl35
-rw-r--r--src/ddoc_cache/src/ddoc_cache_value.erl24
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl159
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl71
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl56
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl156
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_ev.erl20
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl74
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl245
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl81
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl41
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl102
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl150
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl221
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_test.hrl26
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl110
-rw-r--r--src/dreyfus/.gitignore4
-rw-r--r--src/dreyfus/LICENSE.txt202
-rw-r--r--src/dreyfus/README.md78
-rw-r--r--src/dreyfus/include/dreyfus.hrl74
-rw-r--r--src/dreyfus/priv/stats_descriptions.cfg65
-rw-r--r--src/dreyfus/src/clouseau_rpc.erl108
-rw-r--r--src/dreyfus/src/dreyfus.app.src22
-rw-r--r--src/dreyfus/src/dreyfus.erl31
-rw-r--r--src/dreyfus/src/dreyfus_app.erl23
-rw-r--r--src/dreyfus/src/dreyfus_bookmark.erl88
-rw-r--r--src/dreyfus/src/dreyfus_config.erl16
-rw-r--r--src/dreyfus/src/dreyfus_epi.erl47
-rw-r--r--src/dreyfus/src/dreyfus_fabric.erl270
-rw-r--r--src/dreyfus/src/dreyfus_fabric_cleanup.erl105
-rw-r--r--src/dreyfus/src/dreyfus_fabric_group1.erl153
-rw-r--r--src/dreyfus/src/dreyfus_fabric_group2.erl185
-rw-r--r--src/dreyfus/src/dreyfus_fabric_info.erl112
-rw-r--r--src/dreyfus/src/dreyfus_fabric_search.erl334
-rw-r--r--src/dreyfus/src/dreyfus_httpd.erl709
-rw-r--r--src/dreyfus/src/dreyfus_httpd_handlers.erl28
-rw-r--r--src/dreyfus/src/dreyfus_index.erl432
-rw-r--r--src/dreyfus/src/dreyfus_index_manager.erl160
-rw-r--r--src/dreyfus/src/dreyfus_index_updater.erl184
-rw-r--r--src/dreyfus/src/dreyfus_plugin_couch_db.erl24
-rw-r--r--src/dreyfus/src/dreyfus_rpc.erl134
-rw-r--r--src/dreyfus/src/dreyfus_sup.erl30
-rw-r--r--src/dreyfus/src/dreyfus_util.erl486
-rw-r--r--src/dreyfus/test/dreyfus_blacklist_await_test.erl86
-rw-r--r--src/dreyfus/test/dreyfus_blacklist_request_test.erl164
-rw-r--r--src/dreyfus/test/dreyfus_config_test.erl74
-rw-r--r--src/dreyfus/test/dreyfus_purge_test.erl1118
-rw-r--r--src/dreyfus/test/dreyfus_test_util.erl15
-rw-r--r--src/dreyfus/test/elixir/mix.exs30
-rw-r--r--src/dreyfus/test/elixir/mix.lock5
-rwxr-xr-xsrc/dreyfus/test/elixir/run4
-rw-r--r--src/dreyfus/test/elixir/test/partition_search_test.exs247
-rw-r--r--src/dreyfus/test/elixir/test/search_test.exs226
-rw-r--r--src/dreyfus/test/elixir/test/test_helper.exs4
-rw-r--r--src/fabric/LICENSE202
-rw-r--r--src/fabric/README.md18
-rw-r--r--src/fabric/include/fabric.hrl46
-rw-r--r--src/fabric/priv/stats_descriptions.cfg28
-rw-r--r--src/fabric/rebar.config14
-rw-r--r--src/fabric/src/fabric.app.src27
-rw-r--r--src/fabric/src/fabric.erl840
-rw-r--r--src/fabric/src/fabric_db_create.erl237
-rw-r--r--src/fabric/src/fabric_db_delete.erl95
-rw-r--r--src/fabric/src/fabric_db_doc_count.erl59
-rw-r--r--src/fabric/src/fabric_db_info.erl191
-rw-r--r--src/fabric/src/fabric_db_meta.erl200
-rw-r--r--src/fabric/src/fabric_db_partition_info.erl148
-rw-r--r--src/fabric/src/fabric_db_update_listener.erl183
-rw-r--r--src/fabric/src/fabric_db_uuids.erl64
-rw-r--r--src/fabric/src/fabric_design_doc_count.erl59
-rw-r--r--src/fabric/src/fabric_dict.erl60
-rw-r--r--src/fabric/src/fabric_doc_atts.erl177
-rw-r--r--src/fabric/src/fabric_doc_missing_revs.erl116
-rw-r--r--src/fabric/src/fabric_doc_open.erl611
-rw-r--r--src/fabric/src/fabric_doc_open_revs.erl766
-rw-r--r--src/fabric/src/fabric_doc_purge.erl591
-rw-r--r--src/fabric/src/fabric_doc_update.erl762
-rw-r--r--src/fabric/src/fabric_group_info.erl164
-rw-r--r--src/fabric/src/fabric_ring.erl560
-rw-r--r--src/fabric/src/fabric_rpc.erl706
-rw-r--r--src/fabric/src/fabric_streams.erl283
-rw-r--r--src/fabric/src/fabric_util.erl477
-rw-r--r--src/fabric/src/fabric_view.erl561
-rw-r--r--src/fabric/src/fabric_view_all_docs.erl361
-rw-r--r--src/fabric/src/fabric_view_changes.erl1042
-rw-r--r--src/fabric/src/fabric_view_map.erl292
-rw-r--r--src/fabric/src/fabric_view_reduce.erl181
-rw-r--r--src/fabric/test/eunit/fabric_db_create_tests.erl49
-rw-r--r--src/fabric/test/eunit/fabric_db_info_tests.erl62
-rw-r--r--src/fabric/test/eunit/fabric_db_uuids_tests.erl55
-rw-r--r--src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl111
-rw-r--r--src/fabric/test/eunit/fabric_rpc_purge_tests.erl288
-rw-r--r--src/fabric/test/eunit/fabric_rpc_tests.erl187
-rw-r--r--src/fabric/test/eunit/fabric_tests.erl59
-rw-r--r--src/global_changes/.gitignore2
-rw-r--r--src/global_changes/LICENSE203
-rw-r--r--src/global_changes/README.md27
-rw-r--r--src/global_changes/priv/stats_descriptions.cfg20
-rw-r--r--src/global_changes/src/global_changes.app.src32
-rw-r--r--src/global_changes/src/global_changes_app.erl25
-rw-r--r--src/global_changes/src/global_changes_epi.erl50
-rw-r--r--src/global_changes/src/global_changes_httpd.erl298
-rw-r--r--src/global_changes/src/global_changes_httpd_handlers.erl22
-rw-r--r--src/global_changes/src/global_changes_listener.erl175
-rw-r--r--src/global_changes/src/global_changes_plugin.erl39
-rw-r--r--src/global_changes/src/global_changes_server.erl227
-rw-r--r--src/global_changes/src/global_changes_sup.erl82
-rw-r--r--src/global_changes/src/global_changes_util.erl25
-rw-r--r--src/global_changes/test/eunit/global_changes_hooks_tests.erl164
-rw-r--r--src/ioq/.gitignore2
-rw-r--r--src/ioq/src/ioq.app.src21
-rw-r--r--src/ioq/src/ioq.erl208
-rw-r--r--src/ioq/src/ioq_app.erl21
-rw-r--r--src/ioq/src/ioq_sup.erl24
-rw-r--r--src/jwtf/.gitignore4
-rw-r--r--src/jwtf/LICENSE176
-rw-r--r--src/jwtf/README.md18
-rw-r--r--src/jwtf/rebar.config2
-rw-r--r--src/jwtf/src/jwtf.app.src32
-rw-r--r--src/jwtf/src/jwtf.erl414
-rw-r--r--src/jwtf/src/jwtf_app.erl28
-rw-r--r--src/jwtf/src/jwtf_keystore.erl152
-rw-r--r--src/jwtf/src/jwtf_sup.erl38
-rw-r--r--src/jwtf/test/jwtf_keystore_tests.erl61
-rw-r--r--src/jwtf/test/jwtf_tests.erl425
-rw-r--r--src/ken/README.md12
-rw-r--r--src/ken/rebar.config.script28
-rw-r--r--src/ken/src/ken.app.src.script38
-rw-r--r--src/ken/src/ken.erl29
-rw-r--r--src/ken/src/ken_app.erl28
-rw-r--r--src/ken/src/ken_event_handler.erl55
-rw-r--r--src/ken/src/ken_server.erl605
-rw-r--r--src/ken/src/ken_sup.erl32
-rw-r--r--src/ken/test/config.ini2
-rw-r--r--src/ken/test/ken_server_test.erl90
-rw-r--r--src/mango/.gitignore5
-rw-r--r--src/mango/LICENSE.txt202
-rw-r--r--src/mango/README.md372
-rw-r--r--src/mango/TODO.md9
-rw-r--r--src/mango/rebar.config.script26
-rw-r--r--src/mango/requirements.txt4
-rw-r--r--src/mango/src/mango.app.src26
-rw-r--r--src/mango/src/mango.hrl13
-rw-r--r--src/mango/src/mango_app.erl21
-rw-r--r--src/mango/src/mango_crud.erl171
-rw-r--r--src/mango/src/mango_cursor.erl253
-rw-r--r--src/mango/src/mango_cursor.hrl31
-rw-r--r--src/mango/src/mango_cursor_special.erl65
-rw-r--r--src/mango/src/mango_cursor_text.erl334
-rw-r--r--src/mango/src/mango_cursor_view.erl504
-rw-r--r--src/mango/src/mango_doc.erl543
-rw-r--r--src/mango/src/mango_epi.erl48
-rw-r--r--src/mango/src/mango_error.erl380
-rw-r--r--src/mango/src/mango_execution_stats.erl86
-rw-r--r--src/mango/src/mango_execution_stats.hrl20
-rw-r--r--src/mango/src/mango_fields.erl55
-rw-r--r--src/mango/src/mango_httpd.erl336
-rw-r--r--src/mango/src/mango_httpd_handlers.erl24
-rw-r--r--src/mango/src/mango_idx.erl513
-rw-r--r--src/mango/src/mango_idx.hrl21
-rw-r--r--src/mango/src/mango_idx_special.erl98
-rw-r--r--src/mango/src/mango_idx_text.erl459
-rw-r--r--src/mango/src/mango_idx_view.erl523
-rw-r--r--src/mango/src/mango_idx_view.hrl13
-rw-r--r--src/mango/src/mango_json.erl112
-rw-r--r--src/mango/src/mango_json_bookmark.erl70
-rw-r--r--src/mango/src/mango_native_proc.erl346
-rw-r--r--src/mango/src/mango_opts.erl360
-rw-r--r--src/mango/src/mango_selector.erl985
-rw-r--r--src/mango/src/mango_selector_text.erl423
-rw-r--r--src/mango/src/mango_sort.erl68
-rw-r--r--src/mango/src/mango_sup.erl23
-rw-r--r--src/mango/src/mango_util.erl405
-rw-r--r--src/mango/test/01-index-crud-test.py384
-rw-r--r--src/mango/test/02-basic-find-test.py302
-rw-r--r--src/mango/test/03-operator-test.py203
-rw-r--r--src/mango/test/04-key-tests.py158
-rw-r--r--src/mango/test/05-index-selection-test.py336
-rw-r--r--src/mango/test/06-basic-text-test.py602
-rw-r--r--src/mango/test/06-text-default-field-test.py67
-rw-r--r--src/mango/test/07-text-custom-field-list-test.py209
-rw-r--r--src/mango/test/08-text-limit-test.py135
-rw-r--r--src/mango/test/09-text-sort-test.py115
-rw-r--r--src/mango/test/10-disable-array-length-field-test.py44
-rw-r--r--src/mango/test/11-ignore-design-docs-test.py27
-rw-r--r--src/mango/test/12-use-correct-index-test.py133
-rw-r--r--src/mango/test/13-stable-update-test.py51
-rw-r--r--src/mango/test/13-users-db-find-test.py74
-rw-r--r--src/mango/test/14-json-pagination-test.py269
-rw-r--r--src/mango/test/15-execution-stats-test.py79
-rw-r--r--src/mango/test/16-index-selectors-test.py265
-rw-r--r--src/mango/test/17-multi-type-value-test.py69
-rw-r--r--src/mango/test/18-json-sort.py122
-rw-r--r--src/mango/test/19-find-conflicts.py33
-rw-r--r--src/mango/test/20-no-timeout-test.py32
-rw-r--r--src/mango/test/21-empty-selector-tests.py91
-rw-r--r--src/mango/test/README.md29
-rw-r--r--src/mango/test/friend_docs.py280
-rw-r--r--src/mango/test/limit_docs.py105
-rw-r--r--src/mango/test/mango.py364
-rw-r--r--src/mango/test/user_docs.py383
-rw-r--r--src/mango/unittest.cfg3
-rw-r--r--src/mem3/LICENSE202
-rw-r--r--src/mem3/README.md43
-rw-r--r--src/mem3/README_reshard.md93
-rw-r--r--src/mem3/include/mem3.hrl59
-rw-r--r--src/mem3/priv/stats_descriptions.cfg12
-rw-r--r--src/mem3/rebar.config14
-rw-r--r--src/mem3/rebar.config.script22
-rw-r--r--src/mem3/src/mem3.app.src40
-rw-r--r--src/mem3/src/mem3.erl508
-rw-r--r--src/mem3/src/mem3_app.erl21
-rw-r--r--src/mem3/src/mem3_bdu.erl111
-rw-r--r--src/mem3/src/mem3_cluster.erl149
-rw-r--r--src/mem3/src/mem3_epi.erl49
-rw-r--r--src/mem3/src/mem3_hash.erl64
-rw-r--r--src/mem3/src/mem3_httpd.erl118
-rw-r--r--src/mem3/src/mem3_httpd_handlers.erl25
-rw-r--r--src/mem3/src/mem3_nodes.erl177
-rw-r--r--src/mem3/src/mem3_plugin_couch_db.erl20
-rw-r--r--src/mem3/src/mem3_rep.erl1094
-rw-r--r--src/mem3/src/mem3_reshard.erl851
-rw-r--r--src/mem3/src/mem3_reshard.hrl74
-rw-r--r--src/mem3/src/mem3_reshard_api.erl229
-rw-r--r--src/mem3/src/mem3_reshard_dbdoc.erl255
-rw-r--r--src/mem3/src/mem3_reshard_httpd.erl319
-rw-r--r--src/mem3/src/mem3_reshard_index.erl184
-rw-r--r--src/mem3/src/mem3_reshard_job.erl669
-rw-r--r--src/mem3/src/mem3_reshard_job_sup.erl46
-rw-r--r--src/mem3/src/mem3_reshard_store.erl269
-rw-r--r--src/mem3/src/mem3_reshard_sup.erl36
-rw-r--r--src/mem3/src/mem3_reshard_validate.erl119
-rw-r--r--src/mem3/src/mem3_rpc.erl797
-rw-r--r--src/mem3/src/mem3_seeds.erl176
-rw-r--r--src/mem3/src/mem3_shards.erl789
-rw-r--r--src/mem3/src/mem3_sup.erl41
-rw-r--r--src/mem3/src/mem3_sync.erl367
-rw-r--r--src/mem3/src/mem3_sync_event.erl89
-rw-r--r--src/mem3/src/mem3_sync_event_listener.erl356
-rw-r--r--src/mem3/src/mem3_sync_nodes.erl98
-rw-r--r--src/mem3/src/mem3_sync_security.erl125
-rw-r--r--src/mem3/src/mem3_util.erl755
-rw-r--r--src/mem3/test/eunit/mem3_bdu_test.erl259
-rw-r--r--src/mem3/test/eunit/mem3_cluster_test.erl129
-rw-r--r--src/mem3/test/eunit/mem3_hash_test.erl23
-rw-r--r--src/mem3/test/eunit/mem3_rep_test.erl318
-rw-r--r--src/mem3/test/eunit/mem3_reshard_api_test.erl984
-rw-r--r--src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl398
-rw-r--r--src/mem3/test/eunit/mem3_reshard_test.erl990
-rw-r--r--src/mem3/test/eunit/mem3_ring_prop_tests.erl160
-rw-r--r--src/mem3/test/eunit/mem3_seeds_test.erl82
-rw-r--r--src/mem3/test/eunit/mem3_shards_test.erl130
-rw-r--r--src/mem3/test/eunit/mem3_sync_security_test.erl57
-rw-r--r--src/mem3/test/eunit/mem3_util_test.erl152
-rw-r--r--src/rexi/README.md23
-rw-r--r--src/rexi/include/rexi.hrl20
-rw-r--r--src/rexi/priv/stats_descriptions.cfg24
-rw-r--r--src/rexi/rebar.config2
-rw-r--r--src/rexi/src/rexi.app.src28
-rw-r--r--src/rexi/src/rexi.erl330
-rw-r--r--src/rexi/src/rexi_app.erl21
-rw-r--r--src/rexi/src/rexi_buffer.erl107
-rw-r--r--src/rexi/src/rexi_monitor.erl67
-rw-r--r--src/rexi/src/rexi_server.erl207
-rw-r--r--src/rexi/src/rexi_server_mon.erl164
-rw-r--r--src/rexi/src/rexi_server_sup.erl26
-rw-r--r--src/rexi/src/rexi_sup.erl65
-rw-r--r--src/rexi/src/rexi_utils.erl105
-rw-r--r--src/setup/.gitignore4
-rw-r--r--src/setup/LICENSE203
-rw-r--r--src/setup/README.md210
-rw-r--r--src/setup/src/setup.app.src27
-rw-r--r--src/setup/src/setup.erl395
-rw-r--r--src/setup/src/setup_app.erl28
-rw-r--r--src/setup/src/setup_epi.erl48
-rw-r--r--src/setup/src/setup_httpd.erl186
-rw-r--r--src/setup/src/setup_httpd_handlers.erl22
-rw-r--r--src/setup/src/setup_sup.erl44
-rwxr-xr-xsrc/setup/test/t-frontend-setup.sh71
-rwxr-xr-xsrc/setup/test/t-single-node-auto-setup.sh24
-rwxr-xr-xsrc/setup/test/t-single-node.sh46
-rwxr-xr-xsrc/setup/test/t.sh63
-rw-r--r--src/smoosh/README.md140
-rw-r--r--src/smoosh/operator_guide.md398
-rw-r--r--src/smoosh/rebar.config2
-rw-r--r--src/smoosh/recovery_process_diagram.jpegbin51388 -> 0 bytes
-rw-r--r--src/smoosh/src/smoosh.app.src28
-rw-r--r--src/smoosh/src/smoosh.erl84
-rw-r--r--src/smoosh/src/smoosh_app.erl28
-rw-r--r--src/smoosh/src/smoosh_channel.erl548
-rw-r--r--src/smoosh/src/smoosh_priority_queue.erl177
-rw-r--r--src/smoosh/src/smoosh_server.erl640
-rw-r--r--src/smoosh/src/smoosh_sup.erl38
-rw-r--r--src/smoosh/src/smoosh_utils.erl108
-rw-r--r--src/smoosh/test/exunit/scheduling_window_test.exs79
-rw-r--r--src/smoosh/test/exunit/test_helper.exs2
-rw-r--r--src/smoosh/test/smoosh_priority_queue_tests.erl167
-rw-r--r--src/smoosh/test/smoosh_tests.erl129
-rw-r--r--src/weatherreport/.gitignore13
-rw-r--r--src/weatherreport/.manifest5
-rw-r--r--src/weatherreport/LICENSE178
-rw-r--r--src/weatherreport/README.md81
-rw-r--r--src/weatherreport/how_to_add_a_check.md113
-rw-r--r--src/weatherreport/rebar.config31
-rw-r--r--src/weatherreport/src/weatherreport.app.src39
-rw-r--r--src/weatherreport/src/weatherreport.erl203
-rw-r--r--src/weatherreport/src/weatherreport_check.erl113
-rw-r--r--src/weatherreport/src/weatherreport_check_custodian.erl84
-rw-r--r--src/weatherreport/src/weatherreport_check_disk.erl195
-rw-r--r--src/weatherreport/src/weatherreport_check_internal_replication.erl59
-rw-r--r--src/weatherreport/src/weatherreport_check_ioq.erl101
-rw-r--r--src/weatherreport/src/weatherreport_check_mem3_sync.erl57
-rw-r--r--src/weatherreport/src/weatherreport_check_membership.erl68
-rw-r--r--src/weatherreport/src/weatherreport_check_memory_use.erl69
-rw-r--r--src/weatherreport/src/weatherreport_check_message_queues.erl60
-rw-r--r--src/weatherreport/src/weatherreport_check_node_stats.erl68
-rw-r--r--src/weatherreport/src/weatherreport_check_nodes_connected.erl64
-rw-r--r--src/weatherreport/src/weatherreport_check_process_calls.erl168
-rw-r--r--src/weatherreport/src/weatherreport_check_process_memory.erl60
-rw-r--r--src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl121
-rw-r--r--src/weatherreport/src/weatherreport_check_search.erl60
-rw-r--r--src/weatherreport/src/weatherreport_check_tcp_queues.erl92
-rw-r--r--src/weatherreport/src/weatherreport_config.erl200
-rw-r--r--src/weatherreport/src/weatherreport_getopt.erl655
-rw-r--r--src/weatherreport/src/weatherreport_log.erl78
-rw-r--r--src/weatherreport/src/weatherreport_node.erl221
-rw-r--r--src/weatherreport/src/weatherreport_runner.erl96
-rw-r--r--src/weatherreport/src/weatherreport_util.erl115
-rw-r--r--support/build_js.escript90
-rwxr-xr-xtest/bench/benchbulk.sh69
-rwxr-xr-xtest/build/test-configure-distclean.sh15
-rwxr-xr-xtest/build/test-configure.sh372
-rwxr-xr-xtest/build/test-make-clean.sh20
-rw-r--r--test/elixir/.formatter.exs6
-rw-r--r--test/elixir/.gitignore2
-rw-r--r--test/elixir/Makefile4
-rw-r--r--test/elixir/README.md256
-rw-r--r--test/elixir/config/config.exs30
-rw-r--r--test/elixir/config/test.exs3
-rw-r--r--test/elixir/lib/couch.ex190
-rw-r--r--test/elixir/lib/couch/db_test.ex557
-rw-r--r--test/elixir/lib/couch_raw.ex105
-rw-r--r--test/elixir/lib/ex_unit.ex48
-rw-r--r--test/elixir/lib/setup.ex97
-rw-r--r--test/elixir/lib/setup/common.ex27
-rw-r--r--test/elixir/lib/step.ex44
-rw-r--r--test/elixir/lib/step/config.ex33
-rw-r--r--test/elixir/lib/step/create_db.ex53
-rw-r--r--test/elixir/lib/step/start.ex85
-rw-r--r--test/elixir/lib/step/user.ex103
-rw-r--r--test/elixir/lib/suite.ex222
-rw-r--r--test/elixir/lib/utils.ex61
-rw-r--r--test/elixir/run.cmd7
-rw-r--r--test/elixir/test/all_docs_test.exs317
-rw-r--r--test/elixir/test/attachment_names_test.exs112
-rw-r--r--test/elixir/test/attachment_paths_test.exs177
-rw-r--r--test/elixir/test/attachment_ranges_test.exs143
-rw-r--r--test/elixir/test/attachment_views_test.exs142
-rw-r--r--test/elixir/test/attachments_multipart_test.exs476
-rw-r--r--test/elixir/test/attachments_test.exs506
-rw-r--r--test/elixir/test/auth_cache_test.exs197
-rw-r--r--test/elixir/test/basics_test.exs384
-rw-r--r--test/elixir/test/batch_save_test.exs42
-rw-r--r--test/elixir/test/bulk_docs_test.exs154
-rw-r--r--test/elixir/test/changes_async_test.exs442
-rw-r--r--test/elixir/test/changes_test.exs509
-rw-r--r--test/elixir/test/cluster_with_quorum_test.exs185
-rw-r--r--test/elixir/test/cluster_without_quorum_test.exs184
-rw-r--r--test/elixir/test/coffee_test.exs73
-rw-r--r--test/elixir/test/compact_test.exs88
-rw-r--r--test/elixir/test/config/skip.elixir33
-rw-r--r--test/elixir/test/config/suite.elixir713
-rw-r--r--test/elixir/test/config/test-config.ini2
-rw-r--r--test/elixir/test/config_test.exs184
-rw-r--r--test/elixir/test/conflicts_test.exs110
-rw-r--r--test/elixir/test/cookie_auth_test.exs395
-rw-r--r--test/elixir/test/copy_doc_test.exs71
-rw-r--r--test/elixir/test/data/lorem.txt103
-rw-r--r--test/elixir/test/data/lorem_b64.txt1
-rw-r--r--test/elixir/test/design_docs_query_test.exs273
-rw-r--r--test/elixir/test/design_docs_test.exs488
-rw-r--r--test/elixir/test/design_options_test.exs74
-rw-r--r--test/elixir/test/design_paths_test.exs76
-rw-r--r--test/elixir/test/erlang_views_test.exs117
-rw-r--r--test/elixir/test/etags_head_test.exs151
-rw-r--r--test/elixir/test/form_submit_test.exs29
-rw-r--r--test/elixir/test/helper_test.exs31
-rw-r--r--test/elixir/test/http_test.exs81
-rw-r--r--test/elixir/test/invalid_docids_test.exs85
-rw-r--r--test/elixir/test/jsonp_test.exs116
-rw-r--r--test/elixir/test/jwtauth_test.exs217
-rw-r--r--test/elixir/test/large_docs_text.exs40
-rw-r--r--test/elixir/test/list_views_test.exs581
-rw-r--r--test/elixir/test/local_docs_test.exs110
-rw-r--r--test/elixir/test/lots_of_docs_test.exs116
-rw-r--r--test/elixir/test/method_override_test.exs55
-rw-r--r--test/elixir/test/multiple_rows_test.exs136
-rw-r--r--test/elixir/test/partition_all_docs_test.exs204
-rw-r--r--test/elixir/test/partition_crud_test.exs369
-rw-r--r--test/elixir/test/partition_ddoc_test.exs179
-rw-r--r--test/elixir/test/partition_design_docs_test.exs16
-rw-r--r--test/elixir/test/partition_helpers.exs76
-rw-r--r--test/elixir/test/partition_mango_test.exs736
-rw-r--r--test/elixir/test/partition_size_limit_test.exs293
-rw-r--r--test/elixir/test/partition_size_test.exs361
-rw-r--r--test/elixir/test/partition_view_test.exs374
-rw-r--r--test/elixir/test/partition_view_update_test.exs160
-rw-r--r--test/elixir/test/proxyauth_test.exs167
-rw-r--r--test/elixir/test/purge_test.exs150
-rw-r--r--test/elixir/test/reader_acl_test.exs254
-rw-r--r--test/elixir/test/recreate_doc_test.exs165
-rw-r--r--test/elixir/test/reduce_builtin_test.exs282
-rw-r--r--test/elixir/test/reduce_false_test.exs50
-rw-r--r--test/elixir/test/reduce_test.exs632
-rw-r--r--test/elixir/test/replication_test.exs1773
-rw-r--r--test/elixir/test/reshard_all_docs_test.exs79
-rw-r--r--test/elixir/test/reshard_basic_test.exs174
-rw-r--r--test/elixir/test/reshard_changes_feed.exs81
-rw-r--r--test/elixir/test/reshard_helpers.exs114
-rw-r--r--test/elixir/test/rev_stemming_test.exs157
-rw-r--r--test/elixir/test/rewrite_js_test.exs411
-rw-r--r--test/elixir/test/rewrite_test.exs526
-rw-r--r--test/elixir/test/security_validation_test.exs324
-rw-r--r--test/elixir/test/show_documents_test.exs448
-rw-r--r--test/elixir/test/support/couch_test_case.ex29
-rw-r--r--test/elixir/test/test_helper.exs3
-rw-r--r--test/elixir/test/update_documents_test.exs324
-rw-r--r--test/elixir/test/users_db_security_test.exs520
-rw-r--r--test/elixir/test/users_db_test.exs426
-rw-r--r--test/elixir/test/utf8_test.exs65
-rw-r--r--test/elixir/test/uuids_test.exs96
-rw-r--r--test/elixir/test/view_collation_raw_test.exs159
-rw-r--r--test/elixir/test/view_collation_test.exs144
-rw-r--r--test/elixir/test/view_compaction_test.exs105
-rw-r--r--test/elixir/test/view_conflicts_test.exs74
-rw-r--r--test/elixir/test/view_errors_test.exs290
-rw-r--r--test/elixir/test/view_include_docs_test.exs263
-rw-r--r--test/elixir/test/view_multi_key_all_docs_test.exs191
-rw-r--r--test/elixir/test/view_multi_key_design_test.exs346
-rw-r--r--test/elixir/test/view_offsets_test.exs100
-rw-r--r--test/elixir/test/view_pagination_test.exs189
-rw-r--r--test/elixir/test/view_sandboxing_test.exs191
-rw-r--r--test/elixir/test/view_test.exs155
-rw-r--r--test/elixir/test/view_update_seq_test.exs142
-rw-r--r--test/javascript/tests/list_views.js502
-rw-r--r--test/javascript/tests/proxyauth.js137
-rw-r--r--test/javascript/tests/replicator_db_bad_rep_id.js103
-rw-r--r--test/javascript/tests/replicator_db_by_doc_id.js128
-rw-r--r--test/javascript/tests/rewrite.js513
-rw-r--r--test/javascript/tests/rewrite_js.js366
-rw-r--r--test/javascript/tests/security_validation.js330
-rw-r--r--test/javascript/tests/show_documents.js376
-rw-r--r--test/javascript/tests/users_db_security.js418
-rw-r--r--test/random_port.ini19
-rw-r--r--test/view_server/query_server_spec.rb885
-rwxr-xr-xtest/view_server/run_native_process.es59
-rw-r--r--version.mk3
1053 files changed, 3 insertions, 215792 deletions
diff --git a/.credo.exs b/.credo.exs
deleted file mode 100644
index 59e5550fe..000000000
--- a/.credo.exs
+++ /dev/null
@@ -1,174 +0,0 @@
-# This file contains the configuration for Credo and you are probably reading
-# this after creating it with `mix credo.gen.config`.
-#
-# If you find anything wrong or unclear in this file, please report an
-# issue on GitHub: https://github.com/rrrene/credo/issues
-#
-%{
- #
- # You can have as many configs as you like in the `configs:` field.
- configs: [
- %{
- #
- # Run any exec using `mix credo -C <name>`. If no exec name is given
- # "default" is used.
- #
- name: "default",
- #
- # These are the files included in the analysis:
- files: %{
- #
- # You can give explicit globs or simply directories.
- # In the latter case `**/*.{ex,exs}` will be used.
- #
- included: ["lib/", "src/", "test/", "web/", "apps/"],
- excluded: [
- ~r"/_build/",
- ~r"/node_modules/",
- ~r"/src/certifi/",
- ~r"/src/excoveralls/",
- ~r"/src/jason",
- ~r"/src/hackney",
- ~r"/src/httpotion",
- ~r"/src/file_system",
- ~r"/src/credo",
- ~r"/src/idna",
- ~r"/src/junit_formatter",
- ~r"/src/bunt",
- ~r"/src/metrics",
- ~r"/src/minerl",
- ~r"/src/parse_trans",
- ~r"/src/ssl_verify_fun",
- ~r"/test/elixir/deps/"
- ]
- },
- #
- # If you create your own checks, you must specify the source files for
- # them here, so they can be loaded by Credo before running the analysis.
- #
- requires: [],
- #
- # If you want to enforce a style guide and need a more traditional linting
- # experience, you can change `strict` to `true` below:
- #
- strict: false,
- #
- # If you want to use uncolored output by default, you can change `color`
- # to `false` below:
- #
- color: true,
- #
- # You can customize the parameters of any check by adding a second element
- # to the tuple.
- #
- # To disable a check put `false` as second element:
- #
- # {Credo.Check.Design.DuplicatedCode, false}
- #
- checks: [
- #
- ## Consistency Checks
- #
- {Credo.Check.Consistency.ExceptionNames, []},
- {Credo.Check.Consistency.LineEndings, []},
- {Credo.Check.Consistency.ParameterPatternMatching, false},
- {Credo.Check.Consistency.SpaceAroundOperators, []},
- {Credo.Check.Consistency.SpaceInParentheses, []},
- {Credo.Check.Consistency.TabsOrSpaces, []},
-
- #
- ## Design Checks
- #
- # You can customize the priority of any check
- # Priority values are: `low, normal, high, higher`
- #
- {Credo.Check.Design.AliasUsage,
- [priority: :low, if_nested_deeper_than: 2, if_called_more_often_than: 0]},
- # You can also customize the exit_status of each check.
- # If you don't want TODO comments to cause `mix credo` to fail, just
- # set this value to 0 (zero).
- #
- {Credo.Check.Design.TagTODO, false},
- {Credo.Check.Design.TagFIXME, []},
-
- #
- ## Readability Checks
- #
- {Credo.Check.Readability.AliasOrder, []},
- {Credo.Check.Readability.FunctionNames, []},
- {Credo.Check.Readability.LargeNumbers, []},
- {Credo.Check.Readability.MaxLineLength, [priority: :normal, max_length: 90]},
- {Credo.Check.Readability.ModuleAttributeNames, []},
- {Credo.Check.Readability.ModuleDoc, []},
- {Credo.Check.Readability.ModuleNames, []},
- {Credo.Check.Readability.ParenthesesInCondition, []},
- {Credo.Check.Readability.ParenthesesOnZeroArityDefs, []},
- {Credo.Check.Readability.PredicateFunctionNames, []},
- {Credo.Check.Readability.PreferImplicitTry, []},
- {Credo.Check.Readability.RedundantBlankLines, []},
- {Credo.Check.Readability.Semicolons, []},
- {Credo.Check.Readability.SpaceAfterCommas, []},
- {Credo.Check.Readability.StringSigils, []},
- {Credo.Check.Readability.TrailingBlankLine, []},
- {Credo.Check.Readability.TrailingWhiteSpace, []},
- {Credo.Check.Readability.VariableNames, []},
-
- #
- ## Refactoring Opportunities
- #
- {Credo.Check.Refactor.CondStatements, []},
- {Credo.Check.Refactor.CyclomaticComplexity, false},
- {Credo.Check.Refactor.FunctionArity, []},
- {Credo.Check.Refactor.LongQuoteBlocks, false},
- {Credo.Check.Refactor.MapInto, []},
- {Credo.Check.Refactor.MatchInCondition, []},
- {Credo.Check.Refactor.NegatedConditionsInUnless, []},
- {Credo.Check.Refactor.NegatedConditionsWithElse, []},
- {Credo.Check.Refactor.Nesting, false},
- {Credo.Check.Refactor.PipeChainStart,
- [
- excluded_argument_types: [:atom, :binary, :fn, :keyword],
- excluded_functions: []
- ]},
- {Credo.Check.Refactor.UnlessWithElse, []},
-
- #
- ## Warnings
- #
- {Credo.Check.Warning.BoolOperationOnSameValues, []},
- {Credo.Check.Warning.ExpensiveEmptyEnumCheck, []},
- {Credo.Check.Warning.IExPry, []},
- {Credo.Check.Warning.IoInspect, []},
- {Credo.Check.Warning.LazyLogging, []},
- {Credo.Check.Warning.OperationOnSameValues, []},
- {Credo.Check.Warning.OperationWithConstantResult, []},
- {Credo.Check.Warning.RaiseInsideRescue, []},
- {Credo.Check.Warning.UnusedEnumOperation, []},
- {Credo.Check.Warning.UnusedFileOperation, []},
- {Credo.Check.Warning.UnusedKeywordOperation, []},
- {Credo.Check.Warning.UnusedListOperation, []},
- {Credo.Check.Warning.UnusedPathOperation, []},
- {Credo.Check.Warning.UnusedRegexOperation, []},
- {Credo.Check.Warning.UnusedStringOperation, []},
- {Credo.Check.Warning.UnusedTupleOperation, []},
-
- #
- # Controversial and experimental checks (opt-in, just remove `, false`)
- #
- {Credo.Check.Consistency.MultiAliasImportRequireUse, false},
- {Credo.Check.Design.DuplicatedCode, false},
- {Credo.Check.Readability.Specs, false},
- {Credo.Check.Refactor.ABCSize, false},
- {Credo.Check.Refactor.AppendSingleItem, false},
- {Credo.Check.Refactor.DoubleBooleanNegation, false},
- {Credo.Check.Refactor.VariableRebinding, false},
- {Credo.Check.Warning.MapGetUnsafePass, false},
- {Credo.Check.Warning.UnsafeToAtom, false}
-
- #
- # Custom checks can be created using `mix credo.gen.check`.
- #
- ]
- }
- ]
-}
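-# Usage sketch (assumed commands, not generated by `mix credo.gen.config`):
-# `mix credo` analyzes the project with the "default" config above, and
-# `mix credo --strict` also reports low-priority issues.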
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
deleted file mode 100644
index 04a117cb2..000000000
--- a/.devcontainer/Dockerfile
+++ /dev/null
@@ -1,24 +0,0 @@
-ARG ELIXIR_VERSION
-FROM elixir:${ELIXIR_VERSION}
-
-# Install SpiderMonkey 60 and tell CouchDB to use it in configure
-ENV SM_VSN=60
-
-# Use NodeSource binaries for Node.js (Fauxton dependency)
-RUN set -ex; \
- curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -; \
- echo "deb https://deb.nodesource.com/node_10.x buster main" | tee /etc/apt/sources.list.d/nodesource.list; \
- echo "deb-src https://deb.nodesource.com/node_10.x buster main" | tee -a /etc/apt/sources.list.d/nodesource.list
-
-RUN set -ex; \
- apt-get update; \
- apt-get install -y --no-install-recommends \
- libmozjs-${SM_VSN}-dev \
- libicu-dev \
- python3-venv \
- python3-pip \
- python3-sphinx \
- nodejs
-
-# Documentation theme
-RUN pip3 install sphinx_rtd_theme
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
deleted file mode 100644
index 666f9fa16..000000000
--- a/.devcontainer/devcontainer.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "build": {
- "dockerfile": "Dockerfile",
- "args": {
- // Useful choices include:
- // 1.11 -> Erlang 23, Debian Buster
- // 1.10 -> Erlang 22, Debian Buster
- // 1.9 -> Erlang 22, Debian Buster
- //
- // Older versions based on Debian Stretch will not include
- // SpiderMonkey 60, which the Dockerfile expects to be able
- // to install via apt-get.
- "ELIXIR_VERSION": "1.10"
- }
- },
- "extensions": [
- "erlang-ls.erlang-ls"
- ]
-}
diff --git a/.formatter.exs b/.formatter.exs
deleted file mode 100644
index 28b883d54..000000000
--- a/.formatter.exs
+++ /dev/null
@@ -1,9 +0,0 @@
-# Used by "mix format"
-[
- inputs: [
- "{mix,.formatter}.exs",
- "{config,src}/*/test/exunit/*.{ex,exs}"
- ],
- line_length: 90,
- rename_deprecated_at: "1.5.0"
-]
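-# Usage sketch (assumed commands): `mix format` applies these settings to the
-# inputs listed above, while `mix format --check-formatted` only verifies the
-# formatting, which is useful in CI.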
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 360d4fa62..000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,36 +0,0 @@
----
-name: Bug report
-about: Describe a scenario in which CouchDB behaves unexpectedly
-title: ''
-labels: bug, needs-triage
-assignees: ''
-
----
-
-[NOTE]: # ( ^^ Provide a general summary of the issue in the title above. ^^ )
-
-## Description
-
-[NOTE]: # ( Describe the problem you're encountering. )
-[TIP]: # ( Do NOT give us access or passwords to your actual CouchDB! )
-
-## Steps to Reproduce
-
-[NOTE]: # ( Include commands to reproduce, if possible. curl is preferred. )
-
-## Expected Behaviour
-
-[NOTE]: # ( Tell us what you expected to happen. )
-
-## Your Environment
-
-[TIP]: # ( Include as many relevant details about your environment as possible. )
-[TIP]: # ( You can paste the output of curl http://YOUR-COUCHDB:5984/ here. )
-
-* CouchDB version used:
-* Browser name and version:
-* Operating system and version:
-
-## Additional Context
-
-[TIP]: # ( Add any other context about the problem here. )
diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md
deleted file mode 100644
index ca92725a6..000000000
--- a/.github/ISSUE_TEMPLATE/enhancement.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-name: Enhancement request
-about: Suggest an idea for a future version of CouchDB
-title: ''
-labels: enhancement, needs-triage
-assignees: ''
-
----
-
-[NOTE]: # ( ^^ Provide a general summary of the request in the title above. ^^ )
-
-## Summary
-
-[NOTE]: # ( Provide a brief overview of what the new feature is all about. )
-
-## Desired Behaviour
-
-[NOTE]: # ( Tell us how the new feature should work. Be specific. )
-[TIP]: # ( Do NOT give us access or passwords to your actual CouchDB! )
-
-## Possible Solution
-
-[NOTE]: # ( Not required. Suggest how to implement the addition or change. )
-
-## Additional context
-
-[TIP]: # ( Why does this feature matter to you? What unique circumstances do you have? )
diff --git a/.github/ISSUE_TEMPLATE/rfc.md b/.github/ISSUE_TEMPLATE/rfc.md
deleted file mode 100644
index 08bd0549e..000000000
--- a/.github/ISSUE_TEMPLATE/rfc.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-name: Formal RFC
-about: Submit a formal Request For Comments for consideration by the team.
-title: ''
-labels: rfc, discussion
-assignees: ''
-
----
-
-[NOTE]: # ( ^^ Provide a general summary of the RFC in the title above. ^^ )
-
-# Introduction
-
-## Abstract
-
-[NOTE]: # ( Provide a 1-to-3 paragraph overview of the requested change. )
-[NOTE]: # ( Describe what problem you are solving, and the general approach. )
-
-## Requirements Language
-
-[NOTE]: # ( Do not alter the section below. Follow its instructions. )
-
-The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
-"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
-document are to be interpreted as described in
-[RFC 2119](https://www.rfc-editor.org/rfc/rfc2119.txt).
-
-## Terminology
-
-[TIP]: # ( Provide a list of any unique terms or acronyms, and their definitions here.)
-
----
-
-# Detailed Description
-
-[NOTE]: # ( Describe the solution being proposed in greater detail. )
-[NOTE]: # ( Assume your audience has knowledge of, but not necessarily familiarity )
-[NOTE]: # ( with, the CouchDB internals. Provide enough context so that the reader )
-[NOTE]: # ( can make an informed decision about the proposal. )
-
-[TIP]: # ( Artwork may be attached to the submission and linked as necessary. )
-[TIP]: # ( ASCII artwork can also be included in code blocks, if desired. )
-
-# Advantages and Disadvantages
-
-[NOTE]: # ( Briefly, list the benefits and drawbacks that would be realized should )
-[NOTE]: # ( the proposal be accepted for inclusion into Apache CouchDB. )
-
-# Key Changes
-
-[TIP]: # ( If the changes will affect how a user interacts with CouchDB, explain. )
-
-## Applications and Modules affected
-
-[NOTE]: # ( List the OTP applications or functional modules in CouchDB affected by the proposal. )
-
-## HTTP API additions
-
-[NOTE]: # ( Provide *exact* detail on each new API endpoint, including: )
-[NOTE]: # ( HTTP methods [HEAD, GET, PUT, POST, DELETE, etc.] )
-[NOTE]: # ( Synopsis of functionality )
-[NOTE]: # ( Headers and parameters accepted )
-[NOTE]: # ( JSON in [if a PUT or POST type] )
-[NOTE]: # ( JSON out )
-[NOTE]: # (   Valid status codes and their definitions )
-[NOTE]: # ( A proposed Request and Response block )
-
-## HTTP API deprecations
-
-[NOTE]: # ( Provide *exact* detail on the API endpoints to be deprecated. )
-[NOTE]: # ( If these endpoints are replaced by new endpoints, list those as well. )
-[NOTE]: # ( State the proposed version in which the deprecation and removal will occur. )
-
-# Security Considerations
-
-[NOTE]: # ( Include any impact to the security of CouchDB here. )
-
-# References
-
-[TIP]: # ( Include any references to CouchDB documentation, mailing list discussion, )
-[TIP]: # ( external standards or other links here. )
-
-# Acknowledgements
-
-[TIP]: # ( Who helped you write this RFC? )
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 0d3aef603..000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,35 +0,0 @@
-<!-- Thank you for your contribution!
-
-     Please fill in this form by replacing the Markdown comments
-     with your text. If a section needs no action, remove it.
-
-    Also remember that CouchDB uses the Review-Then-Commit (RTC) model
-    of code collaboration. Positive feedback is represented by a +1 from
-    committers and negative feedback by a -1. A -1 also means veto, and it
-    needs to be addressed before the change can proceed. Once there are no
-    objections, the PR can be merged by a CouchDB committer.
-
- See: http://couchdb.apache.org/bylaws.html#decisions for more info. -->
-
-## Overview
-
-<!-- Please give a brief overview of the pull request:
-     what problem it solves or how it makes things better. -->
-
-## Testing recommendations
-
-<!-- Describe how we can test your changes.
-     Does it provide any behaviour that the end users
- could notice? -->
-
-## Related Issues or Pull Requests
-
-<!-- If your changes affect multiple components in different
- repositories please put links to those issues or pull requests here. -->
-
-## Checklist
-
-- [ ] Code is written and works correctly
-- [ ] Changes are covered by tests
-- [ ] Any new configurable parameters are documented in `rel/overlay/etc/default.ini`
-- [ ] A PR for documentation changes has been made in https://github.com/apache/couchdb-documentation
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index dd3cb9442..000000000
--- a/.gitignore
+++ /dev/null
@@ -1,132 +0,0 @@
-*.o
-*.asc
-*.sha256
-*.sha512
-*.snap
-*.so
-*.pyc
-*.swp
-*.pdb
-*~
-.venv
-.DS_Store
-.vscode
-.rebar/
-.eunit/
-cover/
-core
-debian/
-log
-apache-couchdb-*/
-bin/
-config.erl
-*.tar.gz
-*.tar.bz2
-dev/*.beam
-dev/devnode.*
-dev/lib/
-dev/logs/
-dev/erlserver.pem
-dev/couch_ssl_dist.conf
-ebin/
-erl_crash.dump
-erln8.config
-install.mk
-rel/*.config
-rel/couchdb
-rel/dev*
-rel/tmpdata
-share/server/main-coffee.js
-share/server/main.js
-share/www
-src/b64url/
-src/bear/
-src/certifi/
-src/config/
-src/couch/priv/couch_js/**/config.h
-src/couch/priv/couchjs
-src/couch/priv/couchspawnkillable
-src/couch/priv/couch_ejson_compare/couch_ejson_compare.d
-src/couch/priv/couch_js/**/*.d
-src/couch/priv/icu_driver/couch_icu_driver.d
-src/mango/src/mango_cursor_text.nocompile
-src/docs/
-src/ets_lru/
-src/excoveralls/
-src/fauxton/
-src/folsom/
-src/hackney/
-src/hqueue/
-src/hyper/
-src/ibrowse/
-src/idna/
-src/jiffy/
-src/khash/
-src/meck/
-src/metrics/
-src/mimerl/
-src/mochiweb/
-src/oauth/
-src/parse_trans/
-src/proper/
-src/rebar/
-src/recon/
-src/snappy/
-src/ssl_verify_fun/
-src/triq/
-src/unicode_util_compat/
-src/file_system/
-src/rebar3/
-src/erlfmt/
-tmp/
-
-src/couch/*.o
-src/couch/*.so
-src/couch/ebin/
-src/couch/priv/couch_js/config.h
-src/couch/priv/couchjs
-src/couch/priv/couchspawnkillable
-src/couch/priv/*.exp
-src/couch/priv/*.lib
-src/couch/priv/*.dll
-src/couch/priv/*.exe
-src/couch/vc120.pdb
-src/couch_epi/ebin
-src/couch_epi/erl_crash.dump
-src/couch_event/deps/
-src/couch_event/ebin/
-src/couch_index/ebin
-src/couch_log/ebin
-src/couch_peruser/doc
-src/couch_peruser/ebin
-src/couch_peruser/deps
-src/couch_peruser/couchperuser-*
-src/couch_peruser/erl_crash.dump
-src/couch_peruser/TEST-*.xml
-src/couch_peruser/*.beam
-src/couch_replicator/*.beam
-src/couch_replicator/ebin/replicator.app
-src/couch_replicator/.DS_Store
-src/couch_stats/*~
-src/couch_stats/*.beam
-src/couch_stats/deps
-src/couch_stats/ebin
-src/couch_stats/doc
-src/couch_stats/.project
-src/couch_tests/*.o
-src/couch_tests/*.so
-src/couch_tests/ebin/
-src/global_changes/ebin/
-src/mango/ebin/
-src/mango/test/*.pyc
-src/mango/nosetests.xml
-src/mango/venv/
-src/jwtf/.rebar3/
-test/javascript/junit.xml
-
-/_build/
-/src/bunt
-/src/credo/
-/src/httpotion/
-/src/jason/
-/src/junit_formatter/
diff --git a/.mailmap b/.mailmap
deleted file mode 100644
index a51c763dc..000000000
--- a/.mailmap
+++ /dev/null
@@ -1,13 +0,0 @@
-Benoit Chesneau <benoitc@apache.org> <bchesneau@gmail.com>
-Benoit Chesneau <benoitc@apache.org> benoitc <benoitc@apache.org>
-
-Jason Smith <jhs@apache.org> Jason Smith (air) <jhs@iriscouch.com>
-Jason Smith <jhs@apache.org> Jason Smith (air) <jhs@apache.org>
-
-Filipe David Borba Manana <fdmanana@apache.org>
-
-Randall Leeds <randall@apache.org> <randall.leeds@gmail.com>
-
-Paul Joseph Davis <davisp@apache.org> Paul J. Davis <davisp@apache.org>
-
-Bob Dionne <bitdiddle@apache.org> bitdiddle <bitdiddle@apache.org>
diff --git a/BUGS.md b/BUGS.md
deleted file mode 100644
index 235b634d6..000000000
--- a/BUGS.md
+++ /dev/null
@@ -1,13 +0,0 @@
-Apache CouchDB BUGS
-===================
-
-Visit our issue tracker:
-
- https://github.com/apache/couchdb/issues
-
-You can use this to report bugs, request features, or suggest enhancements.
-
-Our JIRA system no longer accepts new issues, but may have important historical
-information:
-
- https://issues.apache.org/jira/browse/CouchDB
diff --git a/COMMITTERS.md b/COMMITTERS.md
deleted file mode 100644
index 3b25283e3..000000000
--- a/COMMITTERS.md
+++ /dev/null
@@ -1,11 +0,0 @@
-Apache CouchDB COMMITTERS
-=========================
-
-Committers are given a binding vote in certain project decisions, as well as
-write access to public project infrastructure. Committers are elected to the
-project in recognition of their commitment to Apache CouchDB. We mean this in
-the sense of being loyal to the project and its interests.
-
-A full list of committers elected to the project is available at:
-
- https://people.apache.org/committers-by-project.html#couchdb
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index cd3a4437c..000000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,290 +0,0 @@
-# Contributing to CouchDB
-
-Please take a moment to review this document in order to make the contribution
-process easy and effective for everyone involved.
-
-Following these guidelines helps to communicate that you respect the time of
-the developers managing and developing this open source project. In return,
-they should reciprocate that respect in addressing your issue, assessing
-changes, and helping you finalize your pull requests.
-
-Contributions to CouchDB are governed by our [Code of Conduct][6] and a set of
-[Project Bylaws][7]. Come join us!
-
-
-## Using the issue tracker
-
-First things first: **Do NOT report security vulnerabilities in public issues!**
-Please disclose responsibly by letting [the Apache CouchDB Security team](mailto:security@couchdb.apache.org?subject=Security)
-know upfront. We will assess the issue as soon as possible on a best-effort
-basis and will give you an estimate for when we have a fix and release available
-for an eventual public disclosure.
-
-The GitHub issue tracker is the preferred channel for [bug reports](#bugs),
-[feature requests](#features) and [submitting pull requests](#pull-requests),
-but please respect the following restrictions:
-
-* Please **do not** use the issue tracker for personal support requests. Use
- [CouchDB Chat][8] instead. Alternately, help us to help more people by
- using our publicly archived [user][1] or [developer][5] mailing lists.
-
-* Please **do not** derail or troll issues. Keep the discussion on topic and
- respect the opinions of others.
-
-
-## Bug reports
-
-A bug is a _demonstrable problem_ that is caused by the code in our
-repositories. Good bug reports are extremely helpful - thank you!
-
-Guidelines for bug reports:
-
-1. **Use the GitHub issue search** &mdash; check if the issue has already been
- reported.
-
-2. **Check if the issue has been fixed** &mdash; try to reproduce it using the
- latest `master` or `next` branch in the repository.
-
-3. **Isolate the problem** &mdash; ideally create a reduced test case.
-
-A good bug report shouldn't leave others needing to chase you up for more
-information. Please try to be as detailed as possible in your report. What is
-your environment? What steps will reproduce the issue? What OS experiences the
-problem? What would you expect to be the outcome? All these details will help
-people to fix any potential bugs. Our issue template will help you include all
-of the relevant detail.
-
-Example:
-
-> Short and descriptive example bug report title
->
-> A summary of the issue and the browser/OS environment in which it occurs. If
-> suitable, include the steps required to reproduce the bug.
->
-> 1. This is the first step
-> 2. This is the second step
-> 3. Further steps, etc.
->
-> `<url>` - a link to the reduced test case
->
-> Any other information you want to share that is relevant to the issue being
-> reported. This might include the lines of code that you have identified as
-> causing the bug, and potential solutions (and your opinions on their
-> merits).
-
-
-## Feature requests
-
-Feature requests are welcome. But take a moment to find out whether your idea
-fits with the scope and aims of the project. It's up to *you* to make a strong
-case to convince the project's developers of the merits of this feature. Please
-provide as much detail and context as possible.
-
-
-## Pull requests
-
-Good pull requests - patches, improvements, new features - are a fantastic
-help. They should remain focused in scope and avoid containing unrelated
-commits.
-
-**Please ask first** before embarking on any significant pull request (e.g.
-implementing features, refactoring code), otherwise you risk spending a lot of
-time working on something that the project's developers might not want to merge
-into the project. You can talk with the community on our
-[developer mailing list][5]. We're always open to suggestions and will get
-back to you as soon as we can!
-
-
-### For new Contributors
-
-If you have never created a pull request before, welcome! :tada: :smile: [Here is a great tutorial](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github)
-on how to send one :)
-
-1. [Fork](http://help.github.com/fork-a-repo/) the project, clone your fork,
- and configure the remotes:
-
- ```bash
- # Clone your fork of the repo into the current directory
- git clone https://github.com/<your-username>/<repo-name>
- # Navigate to the newly cloned directory
- cd <repo-name>
- # Assign the original repo to a remote called "upstream"
- git remote add upstream https://github.com/apache/<repo-name>
- ```
-
-2. If you cloned a while ago, get the latest changes from upstream:
-
- ```bash
- git checkout master
- git pull upstream master
- ```
-
-3. Create a new topic branch (off the main project development branch) to
- contain your feature, change, or fix:
-
- ```bash
- git checkout -b <topic-branch-name>
- ```
-
-4. Make sure to update, or add to, the tests when appropriate. Patches and
- features will not be accepted without tests. Run `make check` to check that
- all tests pass after you've made changes. Look for a `Testing` section in
- the project’s README for more information.
-
-5. If you added or changed a feature, make sure to document it accordingly in
- the [CouchDB documentation](https://github.com/apache/couchdb-documentation)
- repository.
-
-6. Push your topic branch up to your fork:
-
- ```bash
- git push origin <topic-branch-name>
- ```
-
-7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/)
- with a clear title and description.
-
-
-### For Apache CouchDB Committers
-
-1. Be sure to set up [GitHub two-factor authentication](https://help.github.com/articles/about-two-factor-authentication/),
- then [link your Apache account to your GitHub account](https://gitbox.apache.org/setup/).
-    You will need to wait about 30 minutes after completing this process
-    for it to take effect. Follow the instructions in the organisational
- invite email you receive. Alternately, you can use the Apache mirror
- of the repository at `https://gitbox.apache.org/repos/asf/couchdb.git`
- if you do not agree to the GitHub Terms of Service.
-
-2. Clone the repo and create a branch.
-
- ```bash
-    git clone https://github.com/apache/couchdb
- # or git clone https://gitbox.apache.org/repos/asf/couchdb.git
- cd couchdb
- git checkout -b <topic-branch-name>
- ```
-
-3. Make sure to update, or add to, the tests when appropriate. Patches and
- features will not be accepted without tests. Run `make check` to check that
- all tests pass after you've made changes. Look for a `Testing` section in
- the project’s README for more information.
-
-4. If you added or changed a feature, make sure to document it accordingly in
- the [CouchDB documentation](https://github.com/apache/couchdb-documentation)
- repository.
-
-5. Push your topic branch up to our repo:
-
- ```bash
- git push origin <topic-branch-name>
- ```
-
-6. Open a Pull Request using your branch with a clear title and description.
- Please also add any appropriate labels to the pull request for clarity.
-
-Optionally, you can help us with these things. But don’t worry if they are too
-complicated; we can help you out and teach you as we go :)
-
-1. Update your branch to the latest changes in the upstream master branch. You
- can do that locally with
-
- ```bash
- git pull --rebase upstream master
- ```
-
- Afterwards force push your changes to your remote feature branch.
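-
-    For example (a sketch; substitute your own branch name, and prefer
-    `--force-with-lease` over plain `--force` so you do not overwrite work
-    you have not yet seen):
-
-    ```bash
-    git push --force-with-lease origin <topic-branch-name>
-    ```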
-
-2. Once a pull request is good to go, you can tidy up your commit messages using
- Git's [interactive rebase](https://help.github.com/articles/interactive-rebase).
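-
-   For example, a minimal sketch that rebases your commits interactively onto
-   the upstream master branch:
-
-   ```bash
-   git rebase -i upstream/master
-   ```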
-
-**IMPORTANT**: By submitting a patch, you agree to license your work under the
-Apache License, per your signed Apache CLA.
-
-
-## Triagers
-
-Apache CouchDB committers who have completed the GitHub account linking
-process may triage issues. This helps to speed up releases and minimises both
-user and developer pain in working through our backlog.
-
-Briefly, to triage an issue, review the report, validate that it is an actual
-issue (reproducing if possible), and add one or more labels. We have a
-[summary of our label taxonomy](https://github.com/apache/couchdb/issues/499)
-for your reference.
-
-If you are not an official committer, please reach out to our [mailing list][5]
-or [chat][8] to learn how you can assist with triaging indirectly.
-
-
-## Maintainers
-
-If you have commit access, please follow this process for merging patches and cutting new releases.
-
-### Reviewing changes
-
-1. Check that a change is within the scope and philosophy of the component.
-2. Check that a change has any necessary tests.
-3. Check that a change has any necessary documentation.
-4. If there is anything you don’t like, leave a comment below the respective
- lines and submit a "Request changes" review. Repeat until everything has
- been addressed.
-5. If you are not sure about something, mention specific people for help in a
- comment.
-6. If there is only a tiny change left before you can merge it and you think
- it’s best to fix it yourself, you can directly commit to the author’s fork.
- Leave a comment about it so the author and others will know.
-7. Once everything looks good, add an "Approve" review. Don’t forget to say
- something nice 👏🐶💖✨
-8. If the commit messages follow [our conventions](@commit-message-conventions)
-
- 1. If the pull request fixes one or more open issues, please include the
- text "Fixes #472" or "Fixes apache/couchdb#472".
- 2. Use the "Rebase and merge" button to merge the pull request.
- 3. Done! You are awesome! Thanks so much for your help 🤗
-
-9. If the commit messages _do not_ follow our conventions
-
- 1. Use the "squash and merge" button to clean up the commits and merge at
- the same time: ✨🎩
- 2. If the pull request fixes one or more open issues, please include the
- text "Fixes #472" or "Fixes apache/couchdb#472".
-
-Sometimes there might be a good reason to merge changes locally. The process
-looks like this:
-
-### Reviewing and merging changes locally
-
-```
-git checkout master # or the main branch configured on github
-git pull # get latest changes
-git checkout feature-branch # replace name with your branch
-git rebase master
-git checkout master
-git merge feature-branch # replace name with your branch
-git push
-```
-
-When merging PRs from forked repositories, we recommend you install the
-[hub](https://github.com/github/hub) command line tools.
-
-This allows you to do:
-
-```
-hub checkout link-to-pull-request
-```
-
-meaning that you will automatically check out the branch for the pull request,
-without needing any other steps like setting git upstreams! :sparkles:
-
-
-## Thanks
-
-Special thanks to [Hoodie](https://github.com/hoodiehq/hoodie) for the great
-CONTRIBUTING.md template.
-
-[1]: http://mail-archives.apache.org/mod_mbox/couchdb-user/
-[5]: http://mail-archives.apache.org/mod_mbox/couchdb-dev/
-[6]: http://couchdb.apache.org/conduct.html
-[7]: http://couchdb.apache.org/bylaws.html
-[8]: http://couchdb.apache.org/#chat
-
diff --git a/CONTRIBUTORS.in b/CONTRIBUTORS.in
deleted file mode 100644
index 6edf71d8c..000000000
--- a/CONTRIBUTORS.in
+++ /dev/null
@@ -1,97 +0,0 @@
-Apache CouchDB CONTRIBUTORS
-===========================
-
-A number of people have made contributions to the Apache CouchDB community,
-project, documentation, or code. Some of these people are listed here.
-
- * William Beh <willbeh@gmail.com>
- * Dirk Schalge <dirk@epd-me.net>
- * Roger Leigh <rleigh@debian.org>
- * Sam Ruby <rubys@intertwingly.net>
- * Carlos Valiente <superdupont@gmail.com>
- * Till Klampaeckel <till@klampaeckel.de>
- * Jim Lindley <web@jimlindley.com>
- * Yoan Blanc <yoan.blanc@gmail.com>
- * Michael Gottesman <gottesmm@reed.edu>
- * Mark Baran <mebaran@gmail.com>
- * Michael Hendricks <michael@ndrix.org>
- * Antony Blakey <antony.blakey@gmail.com>
- * Paul Carey <paul.p.carey@gmail.com>
- * Hunter Morris <huntermorris@gmail.com>
- * Brian Palmer <jira@brian.codekitchen.net>
- * Maximillian Dornseif <md@hudora.de>
- * Eric Casteleijn <eric.casteleijn@canonical.com>
- * Maarten Thibaut <mthibaut@cisco.com>
- * Florian Ebeling <florian.ebeling@gmail.com>
- * Volker Mische <volker.mische@gmail.com>
- * Brian Candler <B.Candler@pobox.com>
- * Brad Anderson <brad@sankatygroup.com>
- * Nick Gerakines <nick@gerakines.net>
- * Kevin Ilchmann Jørgensen <kijmail@gmail.com>
- * Sebastian Cohnen <sebastian.cohnen@gmx.net>
- * Sven Helmberger <sven.helmberger@gmx.de>
- * Dan Walters <dan@danwalters.net>
- * Curt Arnold <carnold@apache.org>
- * Gustavo Niemeyer
- * Joshua Bronson <jabronson@gmail.com>
- * Kostis Sagonas <kostis@cs.ntua.gr>
- * Matthew Hooker <mwhooker@gmail.com>
- * Ilia Cheishvili <ilia.cheishvili@gmail.com>
- * Lena Herrmann <lena@zeromail.org>
- * Jack Moffit <metajack@gmail.com>
- * Damjan Georgievski <gdamjan@gmail.com>
- * Jan Kassens <jan@kassens.net>
- * James Marca <jmarca@translab.its.uci.edu>
- * Matt Goodall <matt.goodall@gmail.com>
- * Joel Clark <unsigned_char@yahoo.com>
- * Matt Lyon <matt@flowerpowered.com>
- * mikeal <mikeal.rogers@gmail.com>
- * Joscha Feth <joscha@feth.com>
- * Jarrod Roberson <jarrod@vertigrated.com>
- * Jae Kwon <jkwon.work@gmail.com>
- * Gavin Sherry <swm@alcove.com.au>
- * Timothy Smith <tim@couch.io>
- * Martin Haaß <MartinHaass@gmx.net>
- * Hans Ulrich Niedermann <hun@n-dimensional.de>
- * Dmitry Unkovsky <oil.crayons@gmail.com>
- * Zachary Zolton <zachary.zolton@gmail.com>
- * Brian Jenkins <bonkydog@bonkydog.com>
- * Paul Bonser <pib@paulbonser.com>
- * Caleb Land <caleb.land@gmail.com>
- * Juhani Ränkimies <juhani@juranki.com>
- * Kev Jackson <foamdino@gmail.com>
- * Jonathan D. Knezek <jdknezek@gmail.com>
- * David Rose <doppler@gmail.com>
- * Lim Yue Chuan <shasderias@gmail.com>
- * David Davis <xantus@xantus.org>
- * Juuso Väänänen <juuso@vaananen.org>
- * Jeff Zellner <jeff.zellner@gmail.com>
- * Gabriel Farrell <gsf747@gmail.com>
- * Mike Leddy <mike@loop.com.br>
- * Wayne Conrad <wayne@databill.com>
- * Thomas Vander Stichele <thomas@apestaart.org>
- * Felix Hummel <apache@felixhummel.de>
- * Tim Smith <tim@couchbase.com>
- * Dipesh Patel <dipthegeezer.opensource@googlemail.com>
- * Sam Bisbee <sam@sbisbee.com>
- * Nathan Vander Wilt <natevw@yahoo.com>
- * Caolan McMahon <caolan.mcmahon@googlemail.com>
- * Andrey Somov <trophybase@gmail.com>
- * Chris Coulson <chrisccoulson.googlemail.com>
- * Trond Norbye <trond.norbye@gmail.com>
- * Christopher Bonhage <queezey@me.com>
- * Christian Carter <cdcarter@gmail.com>
- * Lukasz Mielicki <mielicki@gmail.com>
- * Omar Yasin <omarkj@gmail.com>
- * Matt Cooley <matt@mattcooley.net>
- * Simon Leblanc <sim.leblanc+apache@gmail.com>
- * Rogutės Sparnuotos <rogutes@googlemail.com>
- * Gavin McDonald <gmcdonald@apache.org>
- * Fedor Indutny <fedor@indutny.com>
- * Tim Blair
- * Tady Walsh <hello@tady.me>
- * Sam Rijs <recv@awesam.de>
- * Benjamin Anderson <b@banjiewen.net>
-# Authors from commit 6c976bd and onwards are auto-inserted. If you are merging
-# a commit from a non-committer, you should not add an entry to this file. When
-# `bootstrap` is run, the actual CONTRIBUTORS file will be generated.
diff --git a/INSTALL.Unix.md b/INSTALL.Unix.md
deleted file mode 100644
index 6a37f3de4..000000000
--- a/INSTALL.Unix.md
+++ /dev/null
@@ -1,264 +0,0 @@
-# Apache CouchDB INSTALL.Unix
-
-A high-level guide to Unix-like systems, inc. Mac OS X and Ubuntu.
-
-Community installation guides are available on the wiki:
-
- http://wiki.apache.org/couchdb/Installation
-
-If you are trying to build CouchDB from a git checkout rather than
-a .tar.gz, see the `DEVELOPERS` file.
-
-This document is the canonical source of installation
-information. However, many systems have gotchas that you need to be
-aware of. In addition, dependencies frequently change as distributions
-update their archives. If you're running into trouble, be sure to
-check out the wiki. If you have any tips to share, please also update
-the wiki so that others can benefit from your experience.
-
-## Troubleshooting
-
-There is a troubleshooting guide:
-
- http://wiki.apache.org/couchdb/Troubleshooting
-
-There is a wiki for general documentation:
-
- http://wiki.apache.org/couchdb/
-
-There is a collection of friendly mailing lists:
-
- http://couchdb.apache.org/community/lists.html
-
-Please work through these in order if you experience any problems.
-
-## Dependencies
-
-You should have the following installed:
-
- * Erlang OTP (>= 19.x) (http://erlang.org/)
- * ICU (http://icu-project.org/)
- * OpenSSL (http://www.openssl.org/)
- * Mozilla SpiderMonkey - either 1.8.5 or 60
- * 60 is not supported on ARM 64-bit (aarch64) at this time.
- * https://developer.mozilla.org/en/docs/Mozilla/Projects/SpiderMonkey/Releases/1.8.5
- * https://archive.mozilla.org/pub/firefox/releases/60.9.0esr/source/ (src/js)
- * GNU Make (http://www.gnu.org/software/make/)
- * GNU Compiler Collection (http://gcc.gnu.org/)
- * Python (>=3.5) (http://python.org/)
-
-To build Fauxton, you should have the following installed:
- * Node.JS (>=10.x) (https://nodejs.org/)
- -- obtainable from NodeSource (https://github.com/nodesource/distributions)
-
-To build the documentation, you should have the following installed:
- * Python Sphinx (>=1.5) (http://pypi.python.org/pypi/Sphinx)
- * Sphinx RT theme (https://github.com/readthedocs/sphinx_rtd_theme)
-
-It is recommended that you install Erlang OTP 20.3.8.11 or above where
-possible. Sphinx and the RTD theme are only required for building the online
-documentation. You can disable Fauxton and/or the documentation builds by
-adding the --disable-fauxton and/or --disable-docs flag(s) to the configure script.
-
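-For example, to skip both optional builds (a sketch combining the flags above):
-
-    ./configure --disable-fauxton --disable-docs
-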
-### Debian-based Systems
-
-You can install the dependencies by running:
-
- sudo apt-get --no-install-recommends -y install \
- build-essential pkg-config erlang erlang-reltool \
- libicu-dev libmozjs-60-dev python3
-
-Your distribution may have libmozjs-68-dev instead of 60. Both are supported.
-
-You can install Node.JS via [NodeSource](https://github.com/nodesource/distributions#installation-instructions).
-
-You can install the documentation dependencies by running:
-
- sudo apt-get --no-install-recommends -y install \
- python-sphinx
-
- sudo pip install --upgrade sphinx_rtd_theme nose requests hypothesis
-
-
-Be sure to update the version numbers to match your system's available
-packages.
-
-### RedHat-based (Fedora, Centos, RHEL) Systems
-
-You can install the dependencies by running:
-
- sudo yum install autoconf autoconf-archive automake \
- erlang-asn1 erlang-erts erlang-eunit erlang-xmerl \
- libmozjs-60-dev libicu-devel libtool perl-Test-Harness \
- python3
-
-You can install Node.JS via [NodeSource](https://github.com/nodesource/distributions#rpminstall).
-
-The built-in packages for Sphinx in RHEL repositories are too old
-to run the documentation build process. Instead, use pip:
-
- sudo yum install python-pip
- sudo pip install --upgrade sphinx nose requests hypothesis
-
-### Mac OS X
-
-To build CouchDB from source on Mac OS X, you will need to install
-the Command Line Tools:
-
- xcode-select --install
-
-You can then install the other dependencies by running:
-
- brew install autoconf autoconf-archive automake libtool \
- erlang icu4c spidermonkey pkg-config
-
-You can install Node.JS via the
-[official Macintosh installer](https://nodejs.org/en/download/).
-
-You can install the documentation dependencies by running:
-
- sudo easy_install pip
- sudo pip install --upgrade sphinx nose requests hypothesis
-
-You will need Homebrew installed to use the brew command.
-
-Learn more about Homebrew at:
-
- http://mxcl.github.com/homebrew/
-
-Some versions of Mac OS X ship a problematic OpenSSL library. If
-you're experiencing troubles with CouchDB crashing intermittently with
-a segmentation fault or a bus error, you will need to install your own
-version of OpenSSL. See the wiki, mentioned above, for more information.
-
-### FreeBSD
-
-FreeBSD requires the use of GNU Make. Where `make` is specified in this
-documentation, substitute `gmake`.
-
-You can install this by running:
-
- pkg install gmake
-
-You can install the remaining dependencies by running:
-
- pkg install openssl icu git bash autoconf \
- www/node npm libtool spidermonkey60 \
- erlang lang/python py37-sphinx py37-pip
- pip install --upgrade sphinx_rtd_theme nose requests hypothesis
-
-## Installing
-
-Once you have satisfied the dependencies you should run:
-
- ./configure
-
-If you wish to customize the installation, pass `--help` to this
-script.
-
-If everything was successful you should see the following message:
-
- You have configured Apache CouchDB, time to relax.
-
-Relax.
-
-To build CouchDB you should run:
-
- make release
-
-Try `gmake` if `make` is giving you any problems.
-
-If everything was successful you should see the following message:
-
- ... done
- You can now copy the rel/couchdb directory anywhere on your system.
- Start CouchDB with ./bin/couchdb from within that directory.
-
-Relax.
-
-## User Registration
-
-For OS X, in the steps below, substitute `/Users/couchdb` for `/home/couchdb`.
-
-You should create a special `couchdb` user for CouchDB.
-
-On many Unix-like systems you can run:
-
- adduser --system \
- --home /opt/couchdb \
- --no-create-home \
- --shell /bin/bash \
- --group --gecos \
- "CouchDB Administrator" couchdb
-
-On Mac OS X you can use the Workgroup Manager to create users up to version
-10.9, and dscl or sysadminctl after version 10.9. Search Apple's support
-site to find the documentation appropriate for your system. As of recent
-versions of OS X, this functionality is also included in Server.app,
-available through the App Store only as part of OS X Server.
-
-You must make sure that the user has a working POSIX shell.
-
-You can test this by:
-
- * Trying to log in as the `couchdb` user
-
- * Running `pwd` and checking the present working directory
-
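-For example, both checks can be combined into one command (a sketch using the
-same `sudo` invocation as in the First Run section below):
-
-    sudo -i -u couchdb pwd
-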
-Copy the built couchdb release to the new user's home directory:
-
- cp -R /path/to/couchdb/rel/couchdb /opt/couchdb
-
-Change the ownership of the CouchDB directories by running:
-
- chown -R couchdb:couchdb /opt/couchdb
-
-Change the permission of the CouchDB directories by running:
-
- find /opt/couchdb -type d -exec chmod 0770 {} \;
-
-Update the permissions for your ini files:
-
- chmod 0644 /opt/couchdb/etc/*
-
-## First Run
-
-You can start the CouchDB server by running:
-
- sudo -i -u couchdb couchdb/bin/couchdb
-
-This uses the `sudo` command to run the `couchdb` command as the
-`couchdb` user.
-
-When CouchDB starts it should eventually display the following
-message:
-
- Apache CouchDB has started, time to relax.
-
-Relax.
-
-To check that everything has worked, point your web browser to:
-
- http://127.0.0.1:5984/_utils/
-
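-You can also check the server from the command line (a sketch; adjust the host
-and port if you changed the defaults):
-
-    curl http://127.0.0.1:5984/
-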
-From here you should verify your installation by pointing your web browser to:
-
- http://localhost:5984/_utils/#/verifyinstall
-
-## Running as a daemon
-
-The CouchDB team recommends [runit](http://smarden.org/runit/) to
-run CouchDB persistently and reliably. Configuration of runit is
-straightforward; if you have questions, reach out to the CouchDB
-user mailing list.
-
-Naturally, you can configure systemd, launchd or SysV-init daemons to
-launch CouchDB and keep it running using standard configuration files.
-Sample scripts are in the couchdb-pkg repository:
-
-* SysV-init (Debian-style): https://github.com/apache/couchdb-pkg/blob/master/debian/couchdb.init
-* SysV-init (RHEL-style): https://github.com/apache/couchdb-pkg/blob/master/rpm/SOURCES/couchdb.init
-* upstart: Use the Debian-style sysvinit script instead.
-* systemd: https://github.com/apache/couchdb-pkg/blob/master/debian/couchdb.service
-
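-For example, with the systemd unit linked above installed as `couchdb.service`
-(a sketch; exact file locations vary by distribution):
-
-    sudo systemctl daemon-reload
-    sudo systemctl enable --now couchdb
-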
-Consult your system documentation for more information.
diff --git a/INSTALL.Windows.md b/INSTALL.Windows.md
deleted file mode 100644
index 6cf148c90..000000000
--- a/INSTALL.Windows.md
+++ /dev/null
@@ -1,21 +0,0 @@
-Apache CouchDB INSTALL.Windows
-==============================
-
-Due to the complexity of building CouchDB on the Windows platform,
-full build documentation and all necessary support files are in
-the couchdb-glazier repository.
-
-Be sure to find the branch that matches the release you are building, for
-example `couchdb_2.0`.
-
-Build & Test
-------------
-Once all dependencies are built and installed per the documentation in
-couchdb-glazier, these commands will configure and build CouchDB:
-
- powershell -ExecutionPolicy Bypass .\configure.ps1
- make -f Makefile.win check
-
-This will build CouchDB, as well as run the EUnit and JavaScript tests.
-
-As of CouchDB 2.0 RC1, all tests should pass.
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 048ee41a5..000000000
--- a/LICENSE
+++ /dev/null
@@ -1,2269 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2020 The Apache Foundation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-Apache CouchDB Subcomponents
-
-The Apache CouchDB project includes a number of subcomponents with separate
-copyright notices and license terms. Your use of the code for these
-subcomponents is subject to the terms and conditions of the following licenses.
-
-
-For the share/server/json2.js component:
-
- Public Domain
-
- No warranty expressed or implied. Use at your own risk.
-
-
-For the share/www/favicon.ico component from https://github.com/BigBlueHat/futon2:
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-For the src/mochiweb component:
-
- Copyright (c) 2007 Mochi Media, Inc.
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-For the src/ibrowse component:
-
- Copyright (c) 2006, Chandrashekhar Mullaparthi
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- * Neither the name of the T-Mobile nor the names of its contributors may be
- used to endorse or promote products derived from this software without
- specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
- ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-For the src/couch_log/src/couch_log_trunc_io.erl and
- the src/couch_log/src/couch_log_trunc_io_fmt.erl components
-
-ERLANG PUBLIC LICENSE
-Version 1.1
-
-1. Definitions.
-
-1.1. ``Contributor'' means each entity that creates or contributes to
-the creation of Modifications.
-
-1.2. ``Contributor Version'' means the combination of the Original
-Code, prior Modifications used by a Contributor, and the Modifications
-made by that particular Contributor.
-
-1.3. ``Covered Code'' means the Original Code or Modifications or the
-combination of the Original Code and Modifications, in each case
-including portions thereof.
-
-1.4. ``Electronic Distribution Mechanism'' means a mechanism generally
-accepted in the software development community for the electronic
-transfer of data.
-
-1.5. ``Executable'' means Covered Code in any form other than Source
-Code.
-
-1.6. ``Initial Developer'' means the individual or entity identified
-as the Initial Developer in the Source Code notice required by Exhibit
-A.
-
-1.7. ``Larger Work'' means a work which combines Covered Code or
-portions thereof with code not governed by the terms of this License.
-
-1.8. ``License'' means this document.
-
-1.9. ``Modifications'' means any addition to or deletion from the
-substance or structure of either the Original Code or any previous
-Modifications. When Covered Code is released as a series of files, a
-Modification is:
-
-A. Any addition to or deletion from the contents of a file containing
- Original Code or previous Modifications.
-
-B. Any new file that contains any part of the Original Code or
- previous Modifications.
-
-1.10. ``Original Code'' means Source Code of computer software code
-which is described in the Source Code notice required by Exhibit A as
-Original Code, and which, at the time of its release under this
-License is not already Covered Code governed by this License.
-
-1.11. ``Source Code'' means the preferred form of the Covered Code for
-making modifications to it, including all modules it contains, plus
-any associated interface definition files, scripts used to control
-compilation and installation of an Executable, or a list of source
-code differential comparisons against either the Original Code or
-another well known, available Covered Code of the Contributor's
-choice. The Source Code can be in a compressed or archival form,
-provided the appropriate decompression or de-archiving software is
-widely available for no charge.
-
-1.12. ``You'' means an individual or a legal entity exercising rights
-under, and complying with all of the terms of, this License. For legal
-entities,``You'' includes any entity which controls, is controlled by,
-or is under common control with You. For purposes of this definition,
-``control'' means (a) the power, direct or indirect, to cause the
-direction or management of such entity, whether by contract or
-otherwise, or (b) ownership of fifty percent (50%) or more of the
-outstanding shares or beneficial ownership of such entity.
-
-2. Source Code License.
-
-2.1. The Initial Developer Grant.
-The Initial Developer hereby grants You a world-wide, royalty-free,
-non-exclusive license, subject to third party intellectual property
-claims:
-
-(a) to use, reproduce, modify, display, perform, sublicense and
- distribute the Original Code (or portions thereof) with or without
- Modifications, or as part of a Larger Work; and
-
-(b) under patents now or hereafter owned or controlled by Initial
- Developer, to make, have made, use and sell (``Utilize'') the
- Original Code (or portions thereof), but solely to the extent that
- any such patent is reasonably necessary to enable You to Utilize
- the Original Code (or portions thereof) and not to any greater
- extent that may be necessary to Utilize further Modifications or
- combinations.
-
-2.2. Contributor Grant.
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license, subject to third party intellectual property
-claims:
-
-(a) to use, reproduce, modify, display, perform, sublicense and
- distribute the Modifications created by such Contributor (or
- portions thereof) either on an unmodified basis, with other
- Modifications, as Covered Code or as part of a Larger Work; and
-
-(b) under patents now or hereafter owned or controlled by Contributor,
- to Utilize the Contributor Version (or portions thereof), but
- solely to the extent that any such patent is reasonably necessary
- to enable You to Utilize the Contributor Version (or portions
- thereof), and not to any greater extent that may be necessary to
- Utilize further Modifications or combinations.
-
-3. Distribution Obligations.
-
-3.1. Application of License.
-The Modifications which You contribute are governed by the terms of
-this License, including without limitation Section 2.2. The Source
-Code version of Covered Code may be distributed only under the terms
-of this License, and You must include a copy of this License with
-every copy of the Source Code You distribute. You may not offer or
-impose any terms on any Source Code version that alters or restricts
-the applicable version of this License or the recipients' rights
-hereunder. However, You may include an additional document offering
-the additional rights described in Section 3.5.
-
-3.2. Availability of Source Code.
-Any Modification which You contribute must be made available in Source
-Code form under the terms of this License either on the same media as
-an Executable version or via an accepted Electronic Distribution
-Mechanism to anyone to whom you made an Executable version available;
-and if made available via Electronic Distribution Mechanism, must
-remain available for at least twelve (12) months after the date it
-initially became available, or at least six (6) months after a
-subsequent version of that particular Modification has been made
-available to such recipients. You are responsible for ensuring that
-the Source Code version remains available even if the Electronic
-Distribution Mechanism is maintained by a third party.
-
-3.3. Description of Modifications.
-You must cause all Covered Code to which you contribute to contain a
-file documenting the changes You made to create that Covered Code and
-the date of any change. You must include a prominent statement that
-the Modification is derived, directly or indirectly, from Original
-Code provided by the Initial Developer and including the name of the
-Initial Developer in (a) the Source Code, and (b) in any notice in an
-Executable version or related documentation in which You describe the
-origin or ownership of the Covered Code.
-
-3.4. Intellectual Property Matters
-
-(a) Third Party Claims.
- If You have knowledge that a party claims an intellectual property
- right in particular functionality or code (or its utilization
- under this License), you must include a text file with the source
- code distribution titled ``LEGAL'' which describes the claim and
- the party making the claim in sufficient detail that a recipient
- will know whom to contact. If you obtain such knowledge after You
- make Your Modification available as described in Section 3.2, You
- shall promptly modify the LEGAL file in all copies You make
- available thereafter and shall take other steps (such as notifying
- appropriate mailing lists or newsgroups) reasonably calculated to
- inform those who received the Covered Code that new knowledge has
- been obtained.
-
-(b) Contributor APIs.
- If Your Modification is an application programming interface and
- You own or control patents which are reasonably necessary to
- implement that API, you must also include this information in the
- LEGAL file.
-
-3.5. Required Notices.
-You must duplicate the notice in Exhibit A in each file of the Source
-Code, and this License in any documentation for the Source Code, where
-You describe recipients' rights relating to Covered Code. If You
-created one or more Modification(s), You may add your name as a
-Contributor to the notice described in Exhibit A. If it is not
-possible to put such notice in a particular Source Code file due to
-its structure, then you must include such notice in a location (such
-as a relevant directory file) where a user would be likely to look for
-such a notice. You may choose to offer, and to charge a fee for,
-warranty, support, indemnity or liability obligations to one or more
-recipients of Covered Code. However, You may do so only on Your own
-behalf, and not on behalf of the Initial Developer or any
-Contributor. You must make it absolutely clear than any such warranty,
-support, indemnity or liability obligation is offered by You alone,
-and You hereby agree to indemnify the Initial Developer and every
-Contributor for any liability incurred by the Initial Developer or
-such Contributor as a result of warranty, support, indemnity or
-liability terms You offer.
-
-3.6. Distribution of Executable Versions.
-You may distribute Covered Code in Executable form only if the
-requirements of Section 3.1-3.5 have been met for that Covered Code,
-and if You include a notice stating that the Source Code version of
-the Covered Code is available under the terms of this License,
-including a description of how and where You have fulfilled the
-obligations of Section 3.2. The notice must be conspicuously included
-in any notice in an Executable version, related documentation or
-collateral in which You describe recipients' rights relating to the
-Covered Code. You may distribute the Executable version of Covered
-Code under a license of Your choice, which may contain terms different
-from this License, provided that You are in compliance with the terms
-of this License and that the license for the Executable version does
-not attempt to limit or alter the recipient's rights in the Source
-Code version from the rights set forth in this License. If You
-distribute the Executable version under a different license You must
-make it absolutely clear that any terms which differ from this License
-are offered by You alone, not by the Initial Developer or any
-Contributor. You hereby agree to indemnify the Initial Developer and
-every Contributor for any liability incurred by the Initial Developer
-or such Contributor as a result of any such terms You offer.
-
-3.7. Larger Works.
-You may create a Larger Work by combining Covered Code with other code
-not governed by the terms of this License and distribute the Larger
-Work as a single product. In such a case, You must make sure the
-requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Code due to statute
-or regulation then You must: (a) comply with the terms of this License
-to the maximum extent possible; and (b) describe the limitations and
-the code they affect. Such description must be included in the LEGAL
-file described in Section 3.4 and must be included with all
-distributions of the Source Code. Except to the extent prohibited by
-statute or regulation, such description must be sufficiently detailed
-for a recipient of ordinary skill to be able to understand it.
-
-5. Application of this License.
-
-This License applies to code to which the Initial Developer has
-attached the notice in Exhibit A, and to related Covered Code.
-
-6. CONNECTION TO MOZILLA PUBLIC LICENSE
-
-This Erlang License is a derivative work of the Mozilla Public
-License, Version 1.0. It contains terms which differ from the Mozilla
-Public License, Version 1.0.
-
-7. DISCLAIMER OF WARRANTY.
-
-COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN ``AS IS'' BASIS,
-WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
-WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
-DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
-NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF
-THE COVERED CODE IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE
-IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER
-CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR
-CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART
-OF THIS LICENSE. NO USE OF ANY COVERED CODE IS AUTHORIZED HEREUNDER
-EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-This License and the rights granted hereunder will terminate
-automatically if You fail to comply with terms herein and fail to cure
-such breach within 30 days of becoming aware of the breach. All
-sublicenses to the Covered Code which are properly granted shall
-survive any termination of this License. Provisions which, by their
-nature, must remain in effect beyond the termination of this License
-shall survive.
-
-9. DISCLAIMER OF LIABILITY
-Any utilization of Covered Code shall not cause the Initial Developer
-or any Contributor to be liable for any damages (neither direct nor
-indirect).
-
-10. MISCELLANEOUS
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision is held to be unenforceable, such
-provision shall be reformed only to the extent necessary to make it
-enforceable. This License shall be construed by and in accordance with
-the substantive laws of Sweden. Any dispute, controversy or claim
-arising out of or relating to this License, or the breach, termination
-or invalidity thereof, shall be subject to the exclusive jurisdiction
-of Swedish courts, with the Stockholm City Court as the first
-instance.
-
-EXHIBIT A.
-
-``The contents of this file are subject to the Erlang Public License,
-Version 1.1, (the "License"); you may not use this file except in
-compliance with the License. You should have received a copy of the
-Erlang Public License along with this software. If not, it can be
-retrieved via the world wide web at http://www.erlang.org/.
-
-Software distributed under the License is distributed on an "AS IS"
-basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-the License for the specific language governing rights and limitations
-under the License.
-
-The Initial Developer of the Original Code is Ericsson Utvecklings AB.
-Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
-AB. All Rights Reserved.''
-
-
-For the src/ejson/yajl component
-
-Copyright 2010, Lloyd Hilaiel.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- 3. Neither the name of Lloyd Hilaiel nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-For the src/ejson/erl_nif_compat.h file
-
- Copyright (c) 2010-2011 Basho Technologies, Inc.
- With some minor modifications for Apache CouchDB.
-
- This file is provided to you under the Apache License,
- Version 2.0 (the "License"); you may not use this file
- except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied. See the License for the
- specific language governing permissions and limitations
- under the License.
-
-For the src/snappy/google-snappy component
-
- Copyright 2005 and onwards Google Inc.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-For the share/server/coffee-script.js file
-
- Copyright (c) 2011 Jeremy Ashkenas
-
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation
- files (the "Software"), to deal in the Software without
- restriction, including without limitation the rights to use,
- copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following
- conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE.
-
-
-for dev/pbkdf2.py
-
-(The BSD License)
-
-Copyright (c) 2011 by Armin Ronacher, Reed O'Brien
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
- * The names of the contributors may not be used to endorse or
- promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-for src/fauxton/assets/js/libs/bootstrap.js
-for share/www/js/require*
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-for src/fauxton/assets/js/plugins/prettify.js
-for share/www/js/require*
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-for src/fauxton/assets/js/plugins/beautify.js
-for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2007-2013 Einar Lielmanis and contributors.
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation files
-(the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of the Software,
-and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-for src/fauxton/assets/js/plugins/cloudant.pagingcollection.js
-for share/www/js/require*
-
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-for src/fauxton/assets/fonts/fontawesome
-for share/www/fonts/*
-
-SIL OPEN FONT LICENSE
-
-Version 1.1 - 26 February 2007
-
-PREAMBLE
-The goals of the Open Font License (OFL) are to stimulate worldwide
-development of collaborative font projects, to support the font creation
-efforts of academic and linguistic communities, and to provide a free and
-open framework in which fonts may be shared and improved in partnership
-with others.
-
-The OFL allows the licensed fonts to be used, studied, modified and
-redistributed freely as long as they are not sold by themselves. The
-fonts, including any derivative works, can be bundled, embedded,
-redistributed and/or sold with any software provided that any reserved
-names are not used by derivative works. The fonts and derivatives,
-however, cannot be released under any other type of license. The
-requirement for fonts to remain under this license does not apply
-to any document created using the fonts or their derivatives.
-
-DEFINITIONS
-"Font Software" refers to the set of files released by the Copyright
-Holder(s) under this license and clearly marked as such. This may
-include source files, build scripts and documentation.
-
-"Reserved Font Name" refers to any names specified as such after the
-copyright statement(s).
-
-"Original Version" refers to the collection of Font Software components as
-distributed by the Copyright Holder(s).
-
-"Modified Version" refers to any derivative made by adding to, deleting,
-or substituting — in part or in whole — any of the components of the
-Original Version, by changing formats or by porting the Font Software to a
-new environment.
-
-"Author" refers to any designer, engineer, programmer, technical
-writer or other person who contributed to the Font Software.
-
-PERMISSION & CONDITIONS
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of the Font Software, to use, study, copy, merge, embed, modify,
-redistribute, and sell modified and unmodified copies of the Font
-Software, subject to the following conditions:
-
-1) Neither the Font Software nor any of its individual components,
-in Original or Modified Versions, may be sold by itself.
-
-2) Original or Modified Versions of the Font Software may be bundled,
-redistributed and/or sold with any software, provided that each copy
-contains the above copyright notice and this license. These can be
-included either as stand-alone text files, human-readable headers or
-in the appropriate machine-readable metadata fields within text or
-binary files as long as those fields can be easily viewed by the user.
-
-3) No Modified Version of the Font Software may use the Reserved Font
-Name(s) unless explicit written permission is granted by the corresponding
-Copyright Holder. This restriction only applies to the primary font name as
-presented to the users.
-
-4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
-Software shall not be used to promote, endorse or advertise any
-Modified Version, except to acknowledge the contribution(s) of the
-Copyright Holder(s) and the Author(s) or with their explicit written
-permission.
-
-5) The Font Software, modified or unmodified, in part or in whole,
-must be distributed entirely under this license, and must not be
-distributed under any other license. The requirement for fonts to
-remain under this license does not apply to any document created
-using the Font Software.
-
-TERMINATION
-This license becomes null and void if any of the above conditions are
-not met.
-
-DISCLAIMER
-THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
-OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
-DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
-OTHER DEALINGS IN THE FONT SOFTWARE.
-
-
-for share/server/60/esprima.js
-Based on https://github.com/jquery/esprima
-
-BSD License
-
-Copyright JS Foundation and other contributors, https://js.foundation/
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-for share/server/60/escodegen.js
-Based on https://github.com/estools/escodegen
-
-BSD License
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-for src/fauxton/assets/less/bootstrap/font-awesome/*
-for share/www/css/*
-
-The MIT License (MIT)
-
-Copyright (c) 2013 Dave Gandy
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-for src/fauxton/test/nightwatch_tests/custom-commands/waitForAttribute.js:
-
- The MIT License (MIT)
-
- Copyright (c) 2014 Dave Koo
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
-
-react-select for share/www/js/require*
-
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Jed Watson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-jquery for share/www/js/require*
-
-Copyright jQuery Foundation and other contributors, https://jquery.org/
-
-This software consists of voluntary contributions made by many
-individuals. For exact contribution history, see the revision history
-available at https://github.com/jquery/jquery
-
-The following license applies to all parts of this software except as
-documented below:
-
-====
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-====
-
-All files located in the node_modules and external directories are
-externally maintained libraries used by this software which have their
-own licenses; we recommend you read them, as their terms may differ from
-the terms above.
-
-
-Sizzle for jquery
-
-Copyright jQuery Foundation and other contributors, https://jquery.org/
-
-This software consists of voluntary contributions made by many
-individuals. For exact contribution history, see the revision history
-available at https://github.com/jquery/sizzle
-
-The following license applies to all parts of this software except as
-documented below:
-
-====
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-====
-
-All files located in the node_modules and external directories are
-externally maintained libraries used by this software which have their
-own licenses; we recommend you read them, as their terms may differ from
-the terms above.
-
-lodash for share/www/js/require*
-
-Copyright 2012-2015 The Dojo Foundation <http://dojofoundation.org/>
-Based on Underscore.js, copyright 2009-2015 Jeremy Ashkenas,
-DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-backbone for share/www/js/require*
-
-Copyright (c) 2010-2016 Jeremy Ashkenas, DocumentCloud
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-
-d3 for share/www/js/require*
-
-Copyright (c) 2010-2016, Michael Bostock
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-* The name Michael Bostock may not be used to endorse or promote products
- derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-moment for share/www/js/require*
-
-Copyright (c) 2011-2016 Tim Wood, Iskren Chernev, Moment.js contributors
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-
-backbone.layoutmanager for share/www/js/require*
-
-Copyright (c) 2015 Tim Branyen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-react for share/www/js/require*
-
-BSD License
-
-For React software
-
-Copyright (c) 2013-present, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-for share/www/js/require* as part of react
-
-BSD License
-
-For fbjs software
-
-Copyright (c) 2013-2015, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-object-assign for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-react-dom for share/www/js/require*
-
-BSD License
-
-For React software
-
-Copyright (c) 2013-present, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-flux for share/www/js/require*
-
-BSD License
-
-For Flux software
-
-Copyright (c) 2014-2015, Facebook, Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-css-loader for share/www/js/require*
-
- MIT License http://www.opensource.org/licenses/mit-license.php
- Author Tobias Koppers @sokra
-
-
-style-loader for share/www/js/require*
-
- MIT License http://www.opensource.org/licenses/mit-license.php
- Author Tobias Koppers @sokra
-
-
-zeroclipboard for share/www/js/require*
-zeroclipboard for share/www/js/zeroclipboard
-
-The MIT License (MIT)
-Copyright (c) 2009-2014 Jon Rohan, James M. Greene
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-react-bootstrap for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Stephen J. Collings, Matthew Honnibal, Pieter Vanderwerff
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-babel-runtime for share/www/js/require* (from react-bootstrap)
-
-Copyright (c) 2014-2016 Sebastian McKenzie <sebmck@gmail.com>
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-core-js for share/www/js/require* (from react-bootstrap)
-
-Copyright (c) 2015 Denis Pushkarev
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-react-prop-types for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2015 react-bootstrap
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-invariant for share/www/js/require*
-
-BSD-3-Clause
-https://opensource.org/licenses/BSD-3-Clause
-
-
-warning for share/www/js/require*
-
-BSD License
-
-For React software
-
-Copyright (c) 2013-2015, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-classnames for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Jed Watson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-dom-helpers for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Jason Quense
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-react-overlays for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2015 react-bootstrap
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-keycode for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Tim Oxley
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-lodash-compat for share/www/js/require*
-
-Copyright 2012-2016 The Dojo Foundation <http://dojofoundation.org/>
-Based on Underscore.js, copyright 2009-2016 Jeremy Ashkenas,
-DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-uncontrollable for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Jason Quense
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-velocity-animate for share/www/js/require*
-
-The MIT License
-
-Copyright (c) 2014 Julian Shapiro
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-react-addons-css-transition-group for share/www/js/require*
-
-BSD License
-
-For React software
-
-Copyright (c) 2013-present, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-brace for share/www/js/require*
-
-Copyright 2013 Thorsten Lorenz.
-All rights reserved.
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-
-w3c-blob for share/www/js/require*
-
-MIT License
-
-
-velocity-react for share/www/js/require*
-
-
-Copyright (c) 2015 Twitter and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-react-addons-transition-group for share/www/js/require*
-
-BSD License
-
-For React software
-
-Copyright (c) 2013-present, Facebook, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-react-input-autosize for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Jed Watson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-blacklist for share/www/js/require*
-
-
-visualizeRevTree for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2013 Tomasz Kołodziejski
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-pouchdb for share/www/js/require*
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-process for share/www/js/require*
-
-(The MIT License)
-
-Copyright (c) 2013 Roman Shtylman <shtylman@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-js-extend for share/www/js/require*
-
-ISC License
-
-
-debug for share/www/js/require*
-
-(The MIT License)
-
-Copyright (c) 2014 TJ Holowaychuk <tj@vision-media.ca>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-ms for share/www/js/require*
-
-(The MIT License)
-
-Copyright (c) 2014 Guillermo Rauch <rauchg@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-inherits for share/www/js/require*
-
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-
-
-lie for share/www/js/require*
-
-#Copyright (c) 2014 Calvin Metcalf
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.**
-
-
-immediate for share/www/js/require*
-
-Copyright (c) 2012 Barnesandnoble.com, llc, Donavon West, Domenic Denicola, Brian Cavalier
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-pouchdb-collections for share/www/js/require*
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-argsarray for share/www/js/require*
-
-# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-## TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
-
-events for share/www/js/require*
-
-MIT
-
-Copyright Joyent, Inc. and other Node contributors.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to permit
-persons to whom the Software is furnished to do so, subject to the
-following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-scope-eval for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Alex David
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-spark-md for share/www/js/require*
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2015 André Cruz <amdfcruz@gmail.com>
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
-
-vuvuzela for share/www/js/require*
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-es6-promise-pool for share/www/js/require*
-
-Copyright (c) 2015 Tim De Pauw <https://tmdpw.eu/>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-pouchdb-collate for share/www/js/require*
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-
-jsondiffpatch for share/www/js/require*
-
-The MIT License
-
-Copyright (c) 2014 Benjamín Eidelman twitter.com/beneidel
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-
-chalk for share/www/js/require*
-
-MIT © [Sindre Sorhus](http://sindresorhus.com)
-
-
-ansi-styles for share/www/js/require*
-
-MIT © [Sindre Sorhus](http://sindresorhus.com)
-
-
-strip-ansi for share/www/js/require*
-
-MIT © [Sindre Sorhus](http://sindresorhus.com)
-
-
-ansi-regex for share/www/js/require*
-
-MIT © [Sindre Sorhus](http://sindresorhus.com)
-
-
-has-ansi for share/www/js/require*
-
-MIT © [Sindre Sorhus](http://sindresorhus.com)
-
-
-supports-color for share/www/js/require*
-
-MIT © [Sindre Sorhus](http://sindresorhus.com)
-
-
-escape-string-regexp for share/www/js/require*
-
-The MIT License (MIT)
-
-Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-For the src/hyper component:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Game Analytics ApS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-For the src/recon component:
-
-Copyright (c) 2012-2017, Frédéric Trottier-Hébert
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright notice, this
- list of conditions and the following disclaimer in the documentation and/or
- other materials provided with the distribution.
-
- The names of its contributors may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 074e47436..000000000
--- a/Makefile
+++ /dev/null
@@ -1,511 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# *******************************************************
-# WARNING! If you edit this file, also edit Makefile.win!
-# *******************************************************
-
-include version.mk
-
-REBAR?=$(shell echo `pwd`/bin/rebar)
-ERLFMT?=$(shell echo `pwd`/bin/erlfmt)
-
-# Handle the following scenarios:
-# 1. When building from a tarball, use version.mk.
-# 2. When building from a clean release tag (#.#.#), use that tag.
-# 3. When building from a clean RC tag (#.#.#-RC#), use JUST the version
-# number inside the tarball, but use the full name for the name of the
-# tarball itself.
-# 4. When not on a clean tag, use version.mk + git sha + dirty status.
-
-COUCHDB_GIT_SHA=$(git_sha)
-
-IN_RELEASE = $(shell if [ ! -d .git ]; then echo true; fi)
-ifeq ($(IN_RELEASE), true)
-
-# 1. Building from tarball, use version.mk.
-COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)
-
-else
-
-# Gather some additional information.
-# We do it this way so we don't bake shell-isms into Makefile
-# to make it easier to port to Windows. I know, I know. -jst
-# IN_RC contains the -RCx suffix in the name if present
-IN_RC = $(shell git describe --tags --always --first-parent \
- | grep -Eo -- '-RC[0-9]+' 2>/dev/null)
-# ON_TAG matches *ONLY* if we are on a release or RC tag
-ON_TAG = $(shell git describe --tags --always --first-parent \
- | grep -Eo -- '^[0-9]+\.[0-9]\.[0-9]+(-RC[0-9]+)?$$' 2>/dev/null)
-# REL_TAG contains the #.#.# from git describe, which might be used
-REL_TAG = $(shell git describe --tags --always --first-parent \
- | grep -Eo -- '^[0-9]+\.[0-9]\.[0-9]+' 2>/dev/null)
-# DIRTY contains the -dirty suffix if the working tree has uncommitted changes
-DIRTY = $(shell git describe --dirty | grep -Eo -- '-dirty' 2>/dev/null)
-# COUCHDB_GIT_SHA is our current git hash.
-COUCHDB_GIT_SHA=$(shell git rev-parse --short=7 --verify HEAD)
-
-ifeq ($(ON_TAG),)
-# 4. Not on a tag.
-COUCHDB_VERSION_SUFFIX = $(COUCHDB_GIT_SHA)$(DIRTY)
-COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)-$(COUCHDB_VERSION_SUFFIX)
-else
-# 2 and 3. On a tag.
-COUCHDB_VERSION = $(REL_TAG)$(DIRTY)
-endif
-endif
-
-# needed to do text substitutions
-comma:= ,
-empty:=
-space:= $(empty) $(empty)
-
-DESTDIR=
-
-# Rebar options
-apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper,ibrowse
-suites=
-tests=
-
-COMPILE_OPTS=$(shell echo "\
- apps=$(apps) \
- " | sed -e 's/[a-z_]\{1,\}= / /g')
-EUNIT_OPTS=$(shell echo "\
- skip_deps=$(skip_deps) \
- suites=$(suites) \
- tests=$(tests) \
- " | sed -e 's/[a-z]\{1,\}= / /g')
-DIALYZE_OPTS=$(shell echo "\
- apps=$(apps) \
- skip_deps=$(skip_deps) \
- " | sed -e 's/[a-z]\{1,\}= / /g')
-EXUNIT_OPTS=$(subst $(comma),$(space),$(tests))
-
-TEST_OPTS="-c 'startup_jitter=0' -c 'default_security=admin_local'"
-
-################################################################################
-# Main commands
-################################################################################
-
-
-.PHONY: all
-# target: all - Build everything
-all: couch fauxton docs escriptize
-
-
-.PHONY: help
-# target: help - Print this help
-help:
- @egrep "^# target: " Makefile \
- | sed -e 's/^# target: //g' \
- | sort \
- | awk '{printf(" %-20s", $$1); $$1=$$2=""; print "-" $$0}'
-
-
-################################################################################
-# Building
-################################################################################
-
-
-.PHONY: couch
-# target: couch - Build CouchDB core, use ERL_COMPILER_OPTIONS to provide custom compiler's options
-couch: config.erl
- @COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) $(REBAR) compile $(COMPILE_OPTS)
- @cp src/couch/priv/couchjs bin/
-
-
-.PHONY: docs
-# target: docs - Build documentation
-ifeq ($(IN_RELEASE), true)
-docs: share/docs/html
-else
-docs: src/docs/build
-endif
-
-.PHONY: fauxton
-# target: fauxton - Build Fauxton web UI
-fauxton: share/www
-
-
-.PHONY: escriptize
-# target: escriptize - Build CLI tools
-escriptize: couch
- @$(REBAR) -r escriptize apps=weatherreport
- @cp src/weatherreport/weatherreport bin/weatherreport
-
-
-################################################################################
-# Testing
-################################################################################
-
-
-.PHONY: check
-# target: check - Test everything
-check: all python-black
- @$(MAKE) exunit
- @$(MAKE) eunit
- @$(MAKE) mango-test
- @$(MAKE) elixir-suite
- @$(MAKE) weatherreport-test
-
-ifdef apps
-subdirs = $(apps)
-else
-subdirs=$(shell ls src)
-endif
-
-.PHONY: eunit
-# target: eunit - Run EUnit tests, use EUNIT_OPTS to provide custom options
-eunit: export BUILDDIR = $(shell pwd)
-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
-eunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell pwd)/share/server/main.js
-eunit: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-eunit: couch
- @COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) $(REBAR) setup_eunit 2> /dev/null
- @for dir in $(subdirs); do \
- COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) $(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir || exit 1; \
- done
-
-
-.PHONY: exunit
-# target: exunit - Run ExUnit tests
-exunit: export BUILDDIR = $(shell pwd)
-exunit: export MIX_ENV=test
-exunit: export ERL_LIBS = $(shell pwd)/src
-exunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
-exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell pwd)/share/server/main.js
-exunit: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo
- @mix test --trace $(EXUNIT_OPTS)
-
-setup-eunit: export BUILDDIR = $(shell pwd)
-setup-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
-setup-eunit:
- @$(REBAR) setup_eunit 2> /dev/null
-
-just-eunit: export BUILDDIR = $(shell pwd)
-just-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
-just-eunit:
- @$(REBAR) -r eunit $(EUNIT_OPTS)
-
-.PHONY: soak-eunit
-soak-eunit: export BUILDDIR = $(shell pwd)
-soak-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
-soak-eunit: couch
- @$(REBAR) setup_eunit 2> /dev/null
- while [ $$? -eq 0 ] ; do $(REBAR) -r eunit $(EUNIT_OPTS) ; done
-
-erlfmt-check:
- ERLFMT_PATH=$(ERLFMT) python3 dev/format_check.py
-
-erlfmt-format:
- ERLFMT_PATH=$(ERLFMT) python3 dev/format_all.py
-
-.venv/bin/black:
- @python3 -m venv .venv
- @.venv/bin/pip3 install black || touch .venv/bin/black
-
-# Python code formatter - only runs if we're on Python 3.6 or greater
-python-black: .venv/bin/black
- @python3 -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \
- echo "Python formatter not supported on Python < 3.6; check results on a newer platform"
- @python3 -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
- LC_ALL=C.UTF-8 LANG=C.UTF-8 .venv/bin/black --check \
- --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/erlfmt|src/jiffy|src/rebar/pr2relnotes.py|src/fauxton" \
- build-aux/*.py dev/run dev/format_*.py src/mango/test/*.py src/docs/src/conf.py src/docs/ext/*.py .
-
-python-black-update: .venv/bin/black
- @python3 -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \
- echo "Python formatter not supported on Python < 3.6; check results on a newer platform"
- @python3 -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
- LC_ALL=C.UTF-8 LANG=C.UTF-8 .venv/bin/black \
- --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- build-aux/*.py dev/run src/mango/test/*.py src/docs/src/conf.py src/docs/ext/*.py .
-
-.PHONY: elixir
-elixir: export MIX_ENV=integration
-elixir: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-elixir: elixir-init elixir-check-formatted elixir-credo devclean
- @dev/run "$(TEST_OPTS)" -a adm:pass -n 1 \
- --enable-erlang-views \
- --locald-config test/elixir/test/config/test-config.ini \
- --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
-
-.PHONY: elixir-init
-elixir-init: MIX_ENV=test
-elixir-init: config.erl
- @mix local.rebar --force && mix local.hex --force && mix deps.get
-
-.PHONY: elixir-cluster-without-quorum
-elixir-cluster-without-quorum: export MIX_ENV=integration
-elixir-cluster-without-quorum: elixir-init elixir-check-formatted elixir-credo devclean
- @dev/run -n 3 -q -a adm:pass \
- --degrade-cluster 2 \
- --no-eval 'mix test --trace --only without_quorum_test $(EXUNIT_OPTS)'
-
-.PHONY: elixir-cluster-with-quorum
-elixir-cluster-with-quorum: export MIX_ENV=integration
-elixir-cluster-with-quorum: elixir-init elixir-check-formatted elixir-credo devclean
- @dev/run -n 3 -q -a adm:pass \
- --degrade-cluster 1 \
- --no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)'
-
-.PHONY: elixir-suite
-elixir-suite: export MIX_ENV=integration
-elixir-suite: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-elixir-suite: elixir-init elixir-check-formatted elixir-credo devclean
- @dev/run -n 1 -q -a adm:pass \
- --enable-erlang-views \
- --no-join \
- --locald-config test/elixir/test/config/test-config.ini \
- --erlang-config rel/files/eunit.config \
- --no-eval 'mix test --trace --include test/elixir/test/config/suite.elixir --exclude test/elixir/test/config/skip.elixir'
-
-.PHONY: elixir-check-formatted
-elixir-check-formatted: elixir-init
- @mix format --check-formatted
-
-# Credo is a static code analysis tool for Elixir.
-# We use it in our tests
-.PHONY: elixir-credo
-elixir-credo: elixir-init
- @mix credo
-
-.PHONY: build-report
-# target: build-report - Generate and upload a build report
-build-report:
- build-aux/show-test-results.py --suites=10 --tests=10 > test-results.log
- build-aux/logfile-uploader.py
-
-.PHONY: check-qs
-# target: check-qs - Run query server tests (ruby and rspec required!)
-check-qs:
- @QS_LANG=js rspec test/view_server/query_server_spec.rb
-
-
-.PHONY: list-eunit-apps
-# target: list-eunit-apps - List EUnit target apps
-list-eunit-apps:
-	@find ./src/ -type f \( -name '*_test.erl' -o -name '*_tests.erl' \) \
- | cut -d '/' -f 3 \
- | sort -u
-
-
-.PHONY: list-eunit-suites
-# target: list-eunit-suites - List EUnit target test suites
-list-eunit-suites:
-	@find ./src/ -type f \( -name '*_test.erl' -o -name '*_tests.erl' \) -exec basename {} \; \
- | cut -d '.' -f -1 \
- | sort
-
-
-.PHONY: build-test
-# target: build-test - Test build script
-build-test:
- @test/build/test-configure.sh
-
-
-.PHONY: mango-test
-# target: mango-test - Run Mango tests
-mango-test: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-mango-test: devclean all
- @cd src/mango && \
- python3 -m venv .venv && \
- .venv/bin/python3 -m pip install -r requirements.txt
- @cd src/mango && ../../dev/run "$(TEST_OPTS)" -n 1 --admin=testuser:testpass '.venv/bin/python3 -m nose2'
-
-
-.PHONY: weatherreport-test
-# target: weatherreport-test - Run weatherreport against dev cluster
-weatherreport-test: devclean escriptize
- @dev/run -n 1 -a adm:pass --no-eval \
- 'bin/weatherreport --etc dev/lib/node1/etc --level error'
-
-################################################################################
-# Developing
-################################################################################
-
-
-.PHONY: build-plt
-# target: build-plt - Build project-specific PLT
-build-plt:
- @$(REBAR) -r build-plt $(DIALYZE_OPTS)
-
-
-.PHONY: check-plt
-# target: check-plt - Check the PLT for consistency and rebuild it if it is not up-to-date
-check-plt:
- @$(REBAR) -r check-plt $(DIALYZE_OPTS)
-
-
-.PHONY: dialyze
-# target: dialyze - Analyze the code for discrepancies
-dialyze: .rebar
- @$(REBAR) -r dialyze $(DIALYZE_OPTS)
-
-
-.PHONY: introspect
-# target: introspect - Check for commits difference between rebar.config and repository
-introspect:
- @$(REBAR) -r update-deps
- @build-aux/introspect
-
-################################################################################
-# Distributing
-################################################################################
-
-
-.PHONY: dist
-# target: dist - Make release tarball
-dist: all derived
- @./build-aux/couchdb-build-release.sh $(COUCHDB_VERSION)
-
- @cp -r share/www apache-couchdb-$(COUCHDB_VERSION)/share/
- @mkdir -p apache-couchdb-$(COUCHDB_VERSION)/share/docs/html
- @cp -r src/docs/build/html apache-couchdb-$(COUCHDB_VERSION)/share/docs/
-
- @mkdir -p apache-couchdb-$(COUCHDB_VERSION)/share/docs/man
- @cp src/docs/build/man/apachecouchdb.1 apache-couchdb-$(COUCHDB_VERSION)/share/docs/man/
-
- @tar czf apache-couchdb-$(COUCHDB_VERSION)$(IN_RC).tar.gz apache-couchdb-$(COUCHDB_VERSION)
- @echo "Done: apache-couchdb-$(COUCHDB_VERSION)$(IN_RC).tar.gz"
-
-
-.PHONY: release
-# target: release - Create an Erlang release including CouchDB!
--include install.mk
-release: all
- @echo "Installing CouchDB into rel/couchdb/ ..."
- @rm -rf rel/couchdb
- @$(REBAR) generate # make full erlang release
- @cp bin/weatherreport rel/couchdb/bin/weatherreport
-
-ifeq ($(with_fauxton), 1)
- @mkdir -p rel/couchdb/share/
- @cp -R share/www rel/couchdb/share/
-endif
-
-ifeq ($(with_docs), 1)
-ifeq ($(IN_RELEASE), true)
- @mkdir -p rel/couchdb/share/www/docs/
- @mkdir -p rel/couchdb/share/docs/
- @cp -R share/docs/html/* rel/couchdb/share/www/docs/
- @cp share/docs/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1
-else
- @mkdir -p rel/couchdb/share/www/docs/
- @mkdir -p rel/couchdb/share/docs/
- @cp -R src/docs/build/html/ rel/couchdb/share/www/docs
- @cp src/docs/build/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1
-endif
-endif
-
- @echo "... done"
- @echo
- @echo " You can now copy the rel/couchdb directory anywhere on your system."
- @echo " Start CouchDB with ./bin/couchdb from within that directory."
- @echo
-
-.PHONY: install
-# target: install - install CouchDB :)
-install: release
- @echo
- @echo "Notice: There is no 'make install' command for CouchDB 2.x+."
- @echo
- @echo " To install CouchDB into your system, copy the rel/couchdb"
- @echo " to your desired installation location. For example:"
- @echo " cp -r rel/couchdb /usr/local/lib"
- @echo
-
-################################################################################
-# Cleaning
-################################################################################
-
-
-.PHONY: clean
-# target: clean - Remove build artifacts
-clean:
- @$(REBAR) -r clean
- @rm -rf .rebar/
- @rm -f bin/couchjs
- @rm -f bin/weatherreport
- @rm -rf src/*/ebin
- @rm -rf src/*/.rebar
- @rm -rf src/*/priv/*.so
- @rm -rf src/couch/priv/{couchspawnkillable,couchjs}
- @rm -rf share/server/main.js share/server/main-coffee.js
- @rm -rf tmp dev/data dev/lib dev/logs
- @rm -rf src/mango/.venv
- @rm -f src/couch/priv/couchspawnkillable
- @rm -f src/couch/priv/couch_js/config.h
- @rm -f dev/*.beam dev/devnode.* dev/pbkdf2.pyc log/crash.log
- @rm -f dev/erlserver.pem dev/couch_ssl_dist.conf
-
-
-.PHONY: distclean
-# target: distclean - Remove build and release artifacts
-distclean: clean
- @rm -f install.mk
- @rm -f config.erl
- @rm -f rel/couchdb.config
-ifneq ($(IN_RELEASE), true)
-# when we are in a release, don’t delete the
-# copied sources, generated docs, or fauxton
- @rm -rf rel/couchdb
- @rm -rf share/www
- @rm -rf src/docs
-endif
-
-
-.PHONY: devclean
-# target: devclean - Remove dev cluster artifacts
-devclean:
- @rm -rf dev/lib/*/data
- @rm -rf dev/lib/*/etc
-
-################################################################################
-# Misc
-################################################################################
-
-
-.rebar: build-plt
-
-config.erl:
- @echo "Apache CouchDB has not been configured."
- @echo "Try \"./configure -h\" for help."
- @echo
- @false
-
-
-src/docs/build:
-ifeq ($(with_docs), 1)
- @cd src/docs; $(MAKE)
-endif
-
-
-share/www:
-ifeq ($(with_fauxton), 1)
- @echo "Building Fauxton"
- @cd src/fauxton && npm install && ./node_modules/grunt-cli/bin/grunt couchdb
-endif
-
-
-derived:
- @echo "COUCHDB_GIT_SHA: $(COUCHDB_GIT_SHA)"
- @echo "COUCHDB_VERSION: $(COUCHDB_VERSION)"
- @echo "COUCHDB_VERSION_SUFFIX: $(COUCHDB_VERSION_SUFFIX)"
- @echo "DIRTY: $(DIRTY)"
- @echo "IN_RC: $(IN_RC)"
- @echo "IN_RELEASE: $(IN_RELEASE)"
- @echo "ON_TAG: $(ON_TAG)"
- @echo "REL_TAG: $(REL_TAG)"
- @echo "SUB_VSN: $(SUB_VSN)"
diff --git a/Makefile.win b/Makefile.win
deleted file mode 100644
index 5bbfeead9..000000000
--- a/Makefile.win
+++ /dev/null
@@ -1,450 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# ***************************************************
-# WARNING! If you edit this file, also edit Makefile!
-# ***************************************************
-
-include version.mk
-
-SHELL=cmd.exe
-REBAR?=$(CURDIR)\bin\rebar.cmd
-PYTHON=python.exe
-ERLFMT?=$(CURDIR)\bin\erlfmt.cmd
-MAKE=make -f Makefile.win
-# REBAR?=$(shell where rebar.cmd)
-
-# Handle the following scenarios:
-# 1. When building from a tarball, use version.mk.
-# 2. When building from a clean release tag (#.#.#), use that tag.
-# 3. When building from a clean RC tag (#.#.#-RC#), use JUST the version
-# number inside the tarball, but use the full name for the name of the
-# tarball itself.
-# 4. When not on a clean tag, use version.mk + git sha + dirty status.
-
-COUCHDB_GIT_SHA=$(git_sha)
-
-IN_RELEASE = $(shell if not exist .git echo true)
-
-ifeq ($(IN_RELEASE), true)
-
-# 1. Building from tarball, use version.mk.
-COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)
-
-else
-
-# Gather some additional information.
-# We do it this way so we don't bake shell-isms into Makefile
-# to make it easier to port to Windows. I know, I know. -jst
-# IN_RC contains the -RCx suffix in the name if present
-IN_RC = $(shell git describe --tags --always --first-parent \
- | grep -Eo -- '-RC[0-9]+' 2>nul)
-# ON_TAG matches *ONLY* if we are on a release or RC tag
-ON_TAG = $(shell git describe --tags --always --first-parent \
- | grep -Eo -- '^[0-9]+\.[0-9]\.[0-9]+(-RC[0-9]+)?$$' 2>nul)
-# REL_TAG contains the #.#.# from git describe, which might be used
-REL_TAG = $(shell git describe --tags --always --first-parent \
- | grep -Eo -- '^[0-9]+\.[0-9]\.[0-9]+' 2>nul)
-# DIRTY contains the -dirty suffix if the working tree has uncommitted changes
-DIRTY = $(shell git describe --dirty | grep -Eo -- '-dirty' 2>nul)
-# COUCHDB_GIT_SHA is our current git hash.
-COUCHDB_GIT_SHA=$(shell git rev-parse --short=7 --verify HEAD)
-
-ifeq ($(ON_TAG),)
-# 4. Not on a tag.
-COUCHDB_VERSION_SUFFIX = $(COUCHDB_GIT_SHA)$(DIRTY)
-COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)-$(COUCHDB_VERSION_SUFFIX)
-else
-# 2 and 3. On a tag.
-COUCHDB_VERSION = $(REL_TAG)$(DIRTY)
-endif
-endif
-
-# needed to do text substitutions
-comma:= ,
-empty:=
-space:= $(empty) $(empty)
-
-DESTDIR=
-
-# Rebar options
-apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper,ibrowse,local
-suites=
-tests=
-
-# no sed on Windows, hard code since apps\suites\tests are empty
-EUNIT_OPTS=skip_deps=$(skip_deps)
-DIALYZE_OPTS=skip_deps=$(skip_deps)
-
-EXUNIT_OPTS=$(subst $(comma),$(space),$(tests))
-
-TEST_OPTS=-c startup_jitter=0 -c default_security=admin_local
-
-################################################################################
-# Main commands
-################################################################################
-
-
-.PHONY: all
-# target: all - Build everything
-all: couch fauxton docs
-
-
-################################################################################
-# Building
-################################################################################
-
-
-.PHONY: couch
-# target: couch - Build CouchDB core, use ERL_COMPILER_OPTIONS to provide custom compiler's options
-couch: config.erl
- @set COUCHDB_VERSION=$(COUCHDB_VERSION) && set COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) && $(REBAR) compile $(COMPILE_OPTS)
- @copy src\couch\priv\couchjs.exe bin
-
-
-.PHONY: docs
-# target: docs - Build documentation
-ifeq ($(IN_RELEASE), true)
-docs: share\docs\html
-else
-docs: src\docs\build
-endif
-
-.PHONY: fauxton
-# target: fauxton - Build Fauxton web UI
-fauxton: share\www
-
-
-################################################################################
-# Testing
-################################################################################
-
-
-.PHONY: check
-# target: check - Test everything
-check: all python-black
- @$(MAKE) eunit
- @$(MAKE) mango-test
- @$(MAKE) elixir
-
-ifdef apps
-subdirs = $(apps)
-else
-subdirs=$(shell dir /b src)
-endif
-
-.PHONY: eunit
-# target: eunit - Run EUnit tests, use EUNIT_OPTS to provide custom options
-eunit: export BUILDDIR = $(shell echo %cd%)
-eunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config")
-eunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js
-eunit: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-eunit: couch
- @set COUCHDB_VERSION=$(COUCHDB_VERSION) && set COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) && $(REBAR) setup_eunit 2> nul
- @cmd /c "FOR %d IN ($(subdirs)) DO set COUCHDB_VERSION=$(COUCHDB_VERSION) & set COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) & $(REBAR) -r eunit $(EUNIT_OPTS) apps=%d"
-
-.PHONY: exunit
-# target: exunit - Run ExUnit tests
-exunit: export BUILDDIR = $(shell echo %cd%)
-exunit: export MIX_ENV=test
-exunit: export ERL_LIBS = $(shell echo %cd%)\src
-exunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config")
-exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js
-exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo
- @mix test --cover --trace $(EXUNIT_OPTS)
-
-setup-eunit: export BUILDDIR = $(shell pwd)
-setup-eunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config")
-setup-eunit:
- @$(REBAR) setup_eunit 2> nul
-
-just-eunit: export BUILDDIR = $(shell pwd)
-just-eunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config")
-just-eunit:
- @$(REBAR) -r eunit $(EUNIT_OPTS)
-
-erlfmt-check: export ERLFMT_PATH := $(ERLFMT)
-erlfmt-check:
- @$(PYTHON) dev\format_check.py
-
-erlfmt-format: export ERLFMT_PATH := $(ERLFMT)
-erlfmt-format:
- @$(PYTHON) dev\format_all.py
-
-.venv/bin/black:
- @$(PYTHON) -m venv .venv
- @.venv\Scripts\pip3.exe install black || copy /b .venv\Scripts\black.exe +,,
-
-# Python code formatter - only runs if we're on Python 3.6 or greater
-python-black: .venv/bin/black
- @$(PYTHON) -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \
- echo 'Python formatter not supported on Python < 3.6; check results on a newer platform'
- @$(PYTHON) -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
- .venv\Scripts\black.exe --check \
- --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/erlfmt|src/rebar/pr2relnotes.py|src/fauxton" \
- build-aux dev\run dev\format_*.py src\mango\test src\docs\src\conf.py src\docs\ext .
-
-python-black-update: .venv/bin/black
- @$(PYTHON) -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \
- echo 'Python formatter not supported on Python < 3.6; check results on a newer platform'
- @$(PYTHON) -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
- .venv\Scripts\black.exe \
- --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/erlfmt|src/rebar/pr2relnotes.py|src/fauxton" \
- build-aux dev\run dev\format_*.py src\mango\test src\docs\src\conf.py src\docs\ext .
-
-.PHONY: elixir
-elixir: export MIX_ENV=integration
-elixir: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-elixir: elixir-init elixir-check-formatted elixir-credo devclean
- @dev\run $(TEST_OPTS) -a adm:pass -n 1 --enable-erlang-views \
- --locald-config test/elixir/test/config/test-config.ini \
- --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
-
-.PHONY: elixir-init
-elixir-init: MIX_ENV=test
-elixir-init: config.erl
- @mix local.rebar --force && mix local.hex --force && mix deps.get
-
-.PHONY: elixir-cluster-without-quorum
-elixir-cluster-without-quorum: export MIX_ENV=integration
-elixir-cluster-without-quorum: elixir-init elixir-check-formatted elixir-credo devclean
- @dev\run -n 3 -q -a adm:pass \
- --degrade-cluster 2 \
- --no-eval 'mix test --trace --only without_quorum_test $(EXUNIT_OPTS)'
-
-.PHONY: elixir-cluster-with-quorum
-elixir-cluster-with-quorum: export MIX_ENV=integration
-elixir-cluster-with-quorum: elixir-init elixir-check-formatted elixir-credo devclean
- @dev\run -n 3 -q -a adm:pass \
- --degrade-cluster 1 \
- --no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)'
-
-.PHONY: elixir-suite
-elixir-suite: export MIX_ENV=integration
-elixir-suite: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-elixir-suite: elixir-init elixir-check-formatted elixir-credo devclean
- @dev\run -n 1 -q -a adm:pass \
- --enable-erlang-views \
- --no-join \
- --locald-config test/elixir/test/config/test-config.ini \
- --erlang-config rel/files/eunit.config \
- --no-eval 'mix test --trace --include test\elixir\test\config\suite.elixir --exclude test\elixir\test\config\skip.elixir'
-
-.PHONY: elixir-check-formatted
-elixir-check-formatted: elixir-init
- @mix format --check-formatted
-
-# Credo is a static code analysis tool for Elixir.
-# We use it in our tests
-.PHONY: elixir-credo
-elixir-credo: elixir-init
- @mix credo
-
-.PHONY: check-qs
-# target: check-qs - Run query server tests (ruby and rspec required!)
-check-qs:
- @QS_LANG=js rspec test\view_server\query_server_spec.rb
-
-
-.PHONY: mango-test
-mango-test: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-mango-test: devclean all
- @cd src\mango && \
- python.exe -m venv .venv && \
- .venv\Scripts\pip.exe install -r requirements.txt
- @cd src\mango && .venv\Scripts\python.exe ..\..\dev\run -n 1 --admin=testuser:testpass .venv\Scripts\nose2
-
-
-################################################################################
-# Developing
-################################################################################
-
-
-.PHONY: build-plt
-# target: build-plt - Build project-specific PLT
-build-plt:
- @$(REBAR) -r build-plt $(DIALYZE_OPTS)
-
-
-.PHONY: check-plt
-# target: check-plt - Check the PLT for consistency and rebuild it if it is not up-to-date
-check-plt:
- @$(REBAR) -r check-plt $(DIALYZE_OPTS)
-
-
-.PHONY: dialyze
-# target: dialyze - Analyze the code for discrepancies
-dialyze: .rebar
- @$(REBAR) -r dialyze $(DIALYZE_OPTS)
-
-
-.PHONY: introspect
-# target: introspect - Check for commit differences between rebar.config and the repository
-introspect:
- @$(REBAR) -r update-deps
- @escript build-aux\introspect
-
-
-################################################################################
-# Distributing
-################################################################################
-
-
-.PHONY: dist
-# target: dist - Make release tarball
-dist: all derived
- @.\build-aux\couchdb-build-release.sh $(COUCHDB_VERSION)
-
- @copy -r share\www apache-couchdb-$(COUCHDB_VERSION)\share
- @mkdir apache-couchdb-$(COUCHDB_VERSION)\share\docs\html
- @copy -r src\docs\build\html apache-couchdb-$(COUCHDB_VERSION)\share\docs
-
- @mkdir apache-couchdb-$(COUCHDB_VERSION)\share\docs\man
- @copy src\docs\build\man\apachecouchdb.1 apache-couchdb-$(COUCHDB_VERSION)\share\docs\man
-
- @tar czf apache-couchdb-$(COUCHDB_VERSION).tar.gz apache-couchdb-$(COUCHDB_VERSION)
- @echo 'Done: apache-couchdb-$(COUCHDB_VERSION).tar.gz'
-
-
-.PHONY: release
-# target: release - Create an Erlang release including CouchDB!
--include install.mk
-release: all
- @echo 'Installing CouchDB into rel\couchdb\ ...'
- -@rmdir /s/q rel\couchdb
- @$(REBAR) generate
- @copy src\couch\priv\couchjs.exe rel\couchdb\bin
-
-ifeq ($(with_fauxton), 1)
- -@mkdir rel\couchdb\share
- -@xcopy share\www rel\couchdb\share\www /E/I
-endif
-
-ifeq ($(with_docs), 1)
- -@mkdir rel\couchdb\share\www\docs
- -@mkdir rel\couchdb\share\docs
-ifeq ($(IN_RELEASE), true)
- @xcopy share\docs\html rel\couchdb\share\www\docs /E /I
- @copy share\docs\man\apachecouchdb.1 rel\couchdb\share\docs\couchdb.1
-else
- @xcopy src\docs\build\html rel\couchdb\share\www\docs /E /I
- @copy src\docs\build\man\apachecouchdb.1 rel\couchdb\share\docs\couchdb.1
-endif
-endif
-
- @echo ... done
- @echo .
- @echo You can now copy the rel\couchdb directory anywhere on your system.
- @echo Start CouchDB with .\bin\couchdb.cmd from within that directory.
- @echo .
-
-.PHONY: install
-# target: install - Install CouchDB :)
-install: release
- @echo .
- @echo Notice: There is no 'make install' command for CouchDB 2.x+.
- @echo .
- @echo To install CouchDB into your system, copy the rel\couchdb
- @echo to your desired installation location. For example:
- @echo xcopy /E rel\couchdb C:\CouchDB\
- @echo .
-
-################################################################################
-# Cleaning
-################################################################################
-
-
-.PHONY: clean
-# target: clean - Remove build artifacts
-clean:
- @$(REBAR) -r clean
- -@rmdir /s/q .rebar
- -@del /f/q bin\couchjs.exe
- -@rmdir /s/q src\*\ebin
- -@rmdir /s/q src\*\.rebar
- -@del /f/q/s src\*.dll
- -@del /f/q src\couch\priv\*.exe
- -@del /f/q share\server\main.js share\server\main-coffee.js
- -@rmdir /s/q tmp
- -@rmdir /s/q dev\data
- -@rmdir /s/q dev\lib
- -@rmdir /s/q dev\logs
- -@rmdir /s/q src\mango\.venv
- -@del /f/q src\couch\priv\couch_js\config.h
- -@del /f/q dev\boot_node.beam dev\pbkdf2.pyc log\crash.log
-
-
-.PHONY: distclean
-# target: distclean - Remove build and release artifacts
-distclean: clean
- -@del install.mk
- -@del config.erl
- -@del rel\couchdb.config
-ifneq ($(IN_RELEASE), true)
-# when we are in a release, don’t delete the
-# copied sources, generated docs, or fauxton
- -@rmdir /s/q rel\couchdb
- -@rmdir /s/q share\www
- -@rmdir /s/q src\docs
-endif
-
-
-.PHONY: devclean
-# target: devclean - Remove dev cluster artifacts
-devclean:
- -@rmdir /s/q dev\lib\node1\data
- -@rmdir /s/q dev\lib\node2\data
- -@rmdir /s/q dev\lib\node3\data
- -@rmdir /s/q dev\lib\node1\etc
- -@rmdir /s/q dev\lib\node2\etc
- -@rmdir /s/q dev\lib\node3\etc
-
-
-################################################################################
-# Misc
-################################################################################
-
-
-.rebar: build-plt
-
-config.erl:
- @echo Apache CouchDB has not been configured.
- @echo Try "powershell -ExecutionPolicy Bypass .\configure.ps1 -?" for help.
- @echo You probably want "powershell -ExecutionPolicy Bypass .\configure.ps1".
- @echo.
- @false
-
-
-src\docs\build:
- @echo 'Building docs...'
-ifeq ($(with_docs), 1)
- @cd src\docs && make.bat html && make.bat man
-endif
-
-
-share\www:
-ifeq ($(with_fauxton), 1)
- @echo 'Building Fauxton'
- @cd src\fauxton && npm install && .\node_modules\.bin\grunt couchdb
-endif
-
-derived:
- @echo "COUCHDB_GIT_SHA: $(COUCHDB_GIT_SHA)"
- @echo "COUCHDB_VERSION: $(COUCHDB_VERSION)"
- @echo "COUCHDB_VERSION_SUFFIX: $(COUCHDB_VERSION_SUFFIX)"
- @echo "DIRTY: $(DIRTY)"
- @echo "IN_RC: $(IN_RC)"
- @echo "IN_RELEASE: $(IN_RELEASE)"
- @echo "ON_TAG: $(ON_TAG)"
- @echo "REL_TAG: $(REL_TAG)"
- @echo "SUB_VSN: $(SUB_VSN)"
diff --git a/NOTICE b/NOTICE
deleted file mode 100644
index 8fd1befd2..000000000
--- a/NOTICE
+++ /dev/null
@@ -1,199 +0,0 @@
-Apache CouchDB
-Copyright 2009-2021 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-This product also includes the following third-party components:
-
-* jQuery (http://jquery.org/)
-
- Copyright 2012 jQuery Foundation and other contributors
-
- * json2.js (http://www.json.org/)
-
- Public domain
-
- * MochiWeb (http://code.google.com/p/mochiweb/)
-
- Copyright 2007, Mochi Media Coporation
-
- * ibrowse (http://github.com/cmullaparthi/ibrowse/tree/master)
-
- Copyright 2005-2012, Chandrashekhar Mullaparthi
-
- * mimeparse.js (http://code.google.com/p/mimeparse/)
-
- Copyright 2009, Chris Anderson <jchris@apache.org>
-
- * base64.js
-
- Copyright 1999, Masanao Izumo <iz@onicos.co.jp>
-
- * jspec.js (http://visionmedia.github.com/jspec/)
-
- Copyright 2010 TJ Holowaychuk <tj@vision-media.ca>
-
- * yajl (http://lloyd.github.com/yajl/)
-
- Copyright 2010, Lloyd Hilaiel
-
- * snappy (http://code.google.com/p/snappy/)
-
- Copyright 2005, Google Inc.
-
- * snappy-erlang-nif (https://github.com/fdmanana/snappy-erlang-nif)
-
- Copyright 2011, Filipe David Manana <fdmanana@apache.org>
-
- * CoffeeScript (http://coffeescript.org/)
-
- Copyright 2011, Jeremy Ashkenas
-
- * Sphinx (http://sphinx-doc.org/)
-
- Copyright 2011, the Sphinx team
-
- * Sizzle (http://sizzlejs.com/)
-
- Copyright 2010, The Dojo Foundation
-
- * Underscore.js 1.4.2 (http://underscorejs.org)
-
- Copyright 2012, Jeremy Ashkenas
-
- * backbone.js (http://backbonejs.org/)
-
- Copyright 2012, Jeremy Ashkenas, DocumentCloud Inc.
-
- * Bootstrap (http://twitter.github.com/bootstrap/)
-
- Copyright 2012, Twitter, Inc.
-
- * d3.js (http://d3js.org)
-
- Copyright 2012, Michael Bostock
-
- * Lodash (http://lodash.com/)
-
- Copyright 2012, John-David Dalton <http://allyoucanleet.com/>
-
- * nvd3.js (http://nvd3.org/)
-
- Copyright 2012, Novus Partners, Inc.
-
- * backbone.layoutmanager.js (https://github.com/tbranyen/backbone.layoutmanager)
-
- Copyright 2012, Tim Branyen (@tbranyen)
-
- * prettify.js (http://code.google.com/p/google-code-prettify/)
-
- Copyright 2011, Mike Samuel et al
-
- * PouchDB (https://github.com/daleharvey/pouchdb)
-
- Copyright 2012, Dale Harvey et al
-
- * require.js (https://github.com/jrburke/requirejs)
-
- Copyright (c) 2010-2011, The Dojo Foundation
-
- * mocha.js (https://github.com/visionmedia/mocha)
-
- Copyright (c) 2011-2013 TJ Holowaychuk <tj@vision-media.ca>
-
- * chaijs https://github.com/chaijs
-
- Copyright (c) 2011-2013 Jake Luer jake@alogicalparadox.com
-
- * sinon-chai
-
- Copyright © 2012–2013 Domenic Denicola <domenic@domenicdenicola.com>
-
- * spin.js
-
- Copyright (c) 2011 Felix Gnass [fgnass at neteye dot de]
-
- * font-awesome http://fortawesome.github.io/Font-Awesome/
-
- Copyright (c) 2013 Dave Gandy
-
- * sandbox.js https://github.com/KlausTrainer/sandbox.js
-
- (c) 2013 Klaus Trainer
-
- * ace editor https://github.com/ajaxorg/ace
-
- Copyright (c) 2010, Ajax.org B.V.
-
- * src/fauxton/asserts/js/plugins/cloudant.pagingcollection.js
-
- Copyright (c) 2014, Cloudant http://cloudant.com
-
- * velocity.js (https://github.com/julianshapiro/velocity)
-
- Copyright (c) 2014 Julian Shapiro
-
-* is_base_dir function in eunit_plugin.erl (https://github.com/ChicagoBoss/ChicagoBoss/blob/master/skel/priv/rebar/boss_plugin.erl)
-
- Copyright (c) 2009-2011 Evan Miller
-
-* ?assertNotMatch in couch_eunit.hrl (https://github.com/richcarl/eunit/blob/master/include/eunit.hrl#L200-L219)
-
- Copyright (C) 2004-2006 Mickaël Rémond, Richard Carlsson
-
-* src/fauxton/test/nightwatch_tests/custom-commands/waitForAttribute.js
-
- Copyright (c) 2014 Dave Koo
-
-* moment.js
-
- Copyright (c) 2011-2014 Tim Wood, Iskren Chernev, moment.js contributors
-
-* React.js
-
- Copyright (c) 2013-2017, Facebook, Inc.
-
-* Flux.js
-
- Copyright (c) 2014, Facebook, Inc. All rights reserved.
-
-* es5-shim.js
-
- Copyright (C) 2009-2014 Kristopher Michael Kowal and contributors
-
-* CSS.escape (https://github.com/mathiasbynens/CSS.escape/)
-
- Copyright Mathias Bynens
-
-* Papaparse.js
-
- Copyright (c) 2015 Matthew Holt
-
-* react-bootstrap.js
-
- Copyright (c) 2014 Stephen J. Collings, Matthew Honnibal, Pieter Vanderwerff
-
-* velocity-react
-
- Copyright (c) 2015 Twitter, Inc.
-
-* esprima.js (https://github.com/jquery/esprima)
-
- Copyright JS Foundation and other contributors, https://js.foundation/
-
-* escodegen.js (https://github.com/estools/escodegen)
-
- Copyright (C) 2012 Yusuke Suzuki (twitter: @Constellation) and other contributors.
-
-* hyper
-
- Copyright (c) 2014 Game Analytics ApS
-
-* recon
-
- Copyright (c) 2012-2017, Frédéric Trottier-Hébert
-
-* weatherreport_getopt.erl
-
- Copyright (C) 2009 Juan Jose Comellas
diff --git a/README-DEV.rst b/README-DEV.rst
deleted file mode 100644
index 863218de9..000000000
--- a/README-DEV.rst
+++ /dev/null
@@ -1,256 +0,0 @@
-Apache CouchDB DEVELOPERS
-=========================
-
-Before you start here, read `INSTALL.Unix` (or `INSTALL.Windows`) and
-follow the setup instructions including the installation of all the
-listed dependencies for your system.
-
-Only follow these instructions if you are building from a source checkout.
-
-If you're unsure what this means, ignore this document.
-
-Dependencies
-------------
-
-You need the following to run tests:
-
-* `Python 3 <https://www.python.org/>`_
-* `Elixir <https://elixir-lang.org/>`_
-
-You need the following optionally to build documentation:
-
-* `Sphinx <http://sphinx.pocoo.org/>`_
-* `GNU help2man <http://www.gnu.org/software/help2man/>`_
-* `GnuPG <http://www.gnupg.org/>`_
-
-You need the following optionally to build releases:
-
-* `md5sum <http://www.microbrew.org/tools/md5sha1sum/>`_
-* `sha1sum <http://www.microbrew.org/tools/md5sha1sum/>`_
-
-You need the following optionally to build Fauxton:
-
-* `nodejs <http://nodejs.org/>`_
-* `npm <https://www.npmjs.com/>`_
-
-You will need these optional dependencies installed if:
-
-* You are working on the documentation, or
-* You are preparing a distribution archive
-
-However, you do not need them if:
-
-* You are building from a distribution archive, or
-* You don't care about building the documentation
-
-If you intend to build Fauxton, you will also need to install its
-dependencies. After running ``./configure`` to download all of the
-dependent repositories, you can read about required dependencies in
-`src/fauxton/readme.md`. Typically, installing npm and node.js are
-sufficient to enable a Fauxton build.
-
-Here is a list of *optional* dependencies for various operating systems.
-Installation will be easiest when you install them all.
-
-Docker
-~~~~~~
-
-CouchDB maintains a ``Dockerfile`` based on Debian that includes all
-the dependencies noted above in the `.devcontainer <https://github.com/apache/couchdb/tree/main/.devcontainer>`_
-folder.
-
-The ``Dockerfile`` can be used on its own, or together with the
-associated ``devcontainer.json`` file to quickly provision a
-development environment using `GitHub Codespaces <https://github.com/features/codespaces>`_
-or `Visual Studio Code <https://code.visualstudio.com/docs/remote/containers>`_.
-
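-A minimal sketch of using the ``Dockerfile`` on its own looks like this (the
-image tag and mount path are only illustrative, not part of the repo)::
-
-    docker build -t couchdb-dev .devcontainer
-    docker run -it -v "$PWD":/usr/src/couchdb -w /usr/src/couchdb couchdb-dev bash
-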
-Debian-based (inc. Ubuntu) Systems
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-::
-
- sudo apt-get install help2man python-sphinx gnupg nodejs npm \
- python3 python3-venv
-
-Gentoo-based Systems
-~~~~~~~~~~~~~~~~~~~~
-
-::
-
- sudo emerge gnupg coreutils pkgconfig help2man sphinx python
- sudo pip install hypothesis requests nose
-
-Centos 7 and RHEL 7
-~~~~~~~~~~~~~~~~~~~
-
-::
-
- sudo yum install help2man python-sphinx python-docutils \
- python-pygments gnupg nodejs npm
-
-
-Mac OS X
-~~~~~~~~
-
-Install `Homebrew <https://github.com/mxcl/homebrew>`_, if you do not have
-it already.
-
-Unless you want to install the optional dependencies, skip to the next section.
-
-Install everything else we can with Homebrew::
-
- brew install help2man gnupg md5sha1sum node python
-
-If you don't already have pip installed, install it::
-
- sudo easy_install pip
-
-Now, install the required Python packages::
-
- sudo pip install sphinx docutils pygments sphinx_rtd_theme
-
-FreeBSD
-~~~~~~~
-
-::
-
- pkg install help2man gnupg py27-sphinx node
- pip install nose requests hypothesis
-
-Windows
-~~~~~~~
-
-Follow the instructions in `INSTALL.Windows` and build all components from
-source, using the same Visual C++ compiler and runtime.
-
-Configuring
------------
-
-Configure the source by running::
-
- ./configure
-
-If you intend to run the test suites::
-
- ./configure -c
-
-If you don't want to build Fauxton or the documentation, pass the
-``--disable-fauxton`` and/or ``--disable-docs`` arguments to ``configure`` to
-skip building them and avoid any issues with their dependencies.
-
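-For example, a test-capable tree that skips both can be configured with::
-
-    ./configure -c --disable-fauxton --disable-docs
-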
-See ``./configure --help`` for more information.
-
-Developing
-----------
-
-Formatting
-~~~~~~~~~~
-
-The ``erl`` files in ``src`` are formatted using erlfmt_. The checks are run
-for every PR in the CI. To run the checks locally, run ``make erlfmt-check``.
-To format the ``erl`` files in ``src``, run ``make erlfmt-format``.
-To use ``erlfmt`` for specific files only, use the executable ``bin/erlfmt``
-that is installed by ``configure``.
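-
-For instance, to check or rewrite a single file in place (the path is only an
-illustration; see ``bin/erlfmt --help`` for the full option list)::
-
-    bin/erlfmt --check src/chttpd/src/chttpd.erl
-    bin/erlfmt --write src/chttpd/src/chttpd.erl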
-
-.. _erlfmt: https://github.com/WhatsApp/erlfmt
-
-Testing
--------
-
-To run all the tests, run::
-
- make check
-
-You can also run each test suite individually via the ``eunit`` and
-``javascript`` targets::
-
- make eunit
- make javascript
-
-If you need to run specific Erlang tests, you can pass special "options"
-to make targets::
-
- # Run tests only for couch and chttpd apps
- make eunit apps=couch,chttpd
-
- # Run only tests from couch_btree_tests suite
- make eunit apps=couch suites=couch_btree
-
-    # Run only specific tests
- make eunit tests=btree_open_test,reductions_test
-
- # Ignore tests for specified apps
- make eunit skip_deps=couch_log,couch_epi
-
-The ``apps``, ``suites``, ``tests`` and ``skip_deps`` options can be combined
-in any way. They mirror the ``rebar eunit`` arguments. If they are not
-sufficient, you can use the EUNIT_OPTS environment variable to pass
-``rebar eunit`` options directly::
-
- make eunit EUNIT_OPTS="apps=couch,chttpd"
-
-The JavaScript tests accept only the ``suites`` option, used in the same way::
-
- # Run all JavaScript tests
- make javascript
-
- # Run only basic and design_options tests
- make javascript suites="basic design_options"
-
- # Ignore specific test suites via command line
- make javascript ignore_js_suites="all_docs bulk_docs"
-
- # Ignore specific test suites in makefile
- ignore_js_suites=all_docs,bulk_docs
-
-Note that tests on the command line are delimited by whitespace, not by
-commas. You can get a list of all possible test targets with the
-following command::
-
- make list-js-suites
-
-The code analyzer can be run with::
-
- make dialyze
-
-If you need to analyze only specific apps, you can specify them in the familiar
-way::
-
- make dialyze apps=couch,couch_epi
-
-See ``make help`` for more info and useful commands.
-
-Please report any problems to the developer's mailing list.
-
-Releasing
----------
-
-The release procedure is documented here::
-
- https://cwiki.apache.org/confluence/display/COUCHDB/Release+Procedure
-
-Unix-like Systems
-~~~~~~~~~~~~~~~~~
-
-A release tarball can be built by running::
-
- make dist
-
-An Erlang CouchDB release includes the full Erlang Run Time System and
-all dependent applications necessary to run CouchDB, standalone. The
-release created is completely relocatable on the file system, and is
-the recommended way to distribute binaries of CouchDB. A release can be
-built by running::
-
- make release
-
-The release can then be found in the rel/couchdb directory.
-
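-The release can then be started directly from that directory, for example::
-
-    ./rel/couchdb/bin/couchdb
-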
-Microsoft Windows
-~~~~~~~~~~~~~~~~~
-
-The release tarball and Erlang CouchDB release commands work on
-Microsoft Windows the same as they do on Unix-like systems. To create
-a full installer, the separate couchdb-glazier repository is required.
-Full instructions are available in that repository's README file.
-
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..91cd09cd5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+3.x moved to main as of 2022-06-07
+
+See https://lists.apache.org/thread/x4lc6vhthj1vkt2xpd0ox5osh959qsc4
diff --git a/README.rst b/README.rst
deleted file mode 100644
index 4c51de83b..000000000
--- a/README.rst
+++ /dev/null
@@ -1,108 +0,0 @@
-Apache CouchDB README
-=====================
-
-+-----+
-| |1| |
-+-----+
-
-.. |1| image:: https://ci-couchdb.apache.org/job/jenkins-cm1/job/FullPlatformMatrix/job/3.x/badge/icon?subject=3.x
- :target: https://ci-couchdb.apache.org/blue/organizations/jenkins/jenkins-cm1%2FFullPlatformMatrix/activity?branch=3.x
-
-Installation
-------------
-
-For a high-level guide to Unix-like systems, inc. Mac OS X and Ubuntu, see:
-
- INSTALL.Unix
-
-For a high-level guide to Microsoft Windows, see:
-
- INSTALL.Windows
-
-Follow the proper instructions to get CouchDB installed on your system.
-
-If you're having problems, skip to the next section.
-
-Documentation
--------------
-
-We have documentation:
-
- http://docs.couchdb.org/
-
-It includes a changelog:
-
- http://docs.couchdb.org/en/latest/whatsnew/
-
-For troubleshooting or cryptic error messages, see:
-
- http://docs.couchdb.org/en/latest/install/troubleshooting.html
-
-For general help, see:
-
- http://couchdb.apache.org/#mailing-list
-
-We also have an IRC channel:
-
- http://webchat.freenode.net/?channels=couchdb
-
-The mailing lists provide a wealth of support and knowledge for you to tap into.
-Feel free to drop by with your questions or discussion. See the official CouchDB
-website for more information about our community resources.
-
-Verifying your Installation
----------------------------
-
-Run a basic test suite for CouchDB by browsing here:
-
- http://127.0.0.1:5984/_utils/#verifyinstall
-
-Getting started with developing
--------------------------------
-
-For more detail, read the README-DEV.rst file in this directory.
-
-Basically you just have to install the needed dependencies which are
-documented in the install docs and then run ``./configure && make``.
-
-You don't need to run ``make install`` after compiling; just use
-``./dev/run`` to spin up three nodes. You can add haproxy as a caching
-layer in front of this cluster by running ``./dev/run --with-haproxy
---haproxy=/path/to/haproxy``. You will now have a local cluster
-listening on port 5984.
-
-For Fauxton developers, fixing the admin party does not work via the button in
-Fauxton. To fix the admin party you have to run ``./dev/run`` with the
-``--admin`` flag, e.g. ``./dev/run --admin=username:password``. If you want to
-keep the admin party, just omit the flag.
-
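-A typical local development loop therefore looks something like this (the
-credentials are illustrative, and node 1 of the dev cluster usually listens on
-port 15984)::
-
-    ./configure -c && make
-    ./dev/run --admin=adm:pass
-    curl http://adm:pass@127.0.0.1:15984/_up
-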
-Contributing to CouchDB
------------------------
-
-You can learn more about our contributing process here:
-
- https://github.com/apache/couchdb/blob/master/CONTRIBUTING.md
-
-Cryptographic Software Notice
------------------------------
-
-This distribution includes cryptographic software. The country in which you
-currently reside may have restrictions on the import, possession, use, and/or
-re-export to another country, of encryption software. BEFORE using any
-encryption software, please check your country's laws, regulations and policies
-concerning the import, possession, or use, and re-export of encryption software,
-to see if this is permitted. See <http://www.wassenaar.org/> for more
-information.
-
-The U.S. Government Department of Commerce, Bureau of Industry and Security
-(BIS), has classified this software as Export Commodity Control Number (ECCN)
-5D002.C.1, which includes information security software using or performing
-cryptographic functions with asymmetric algorithms. The form and manner of this
-Apache Software Foundation distribution makes it eligible for export under the
-License Exception ENC Technology Software Unrestricted (TSU) exception (see the
-BIS Export Administration Regulations, Section 740.13) for both object code and
-source code.
-
-The following provides more details on the included cryptographic software:
-
-CouchDB includes an HTTP client (ibrowse) with SSL functionality.
diff --git a/bin/erlang-version.escript b/bin/erlang-version.escript
deleted file mode 100644
index 66aae1c41..000000000
--- a/bin/erlang-version.escript
+++ /dev/null
@@ -1,3 +0,0 @@
-
-main(_) ->
- io:format("~s~n", [erlang:system_info(otp_release)]).
diff --git a/build-aux/Jenkinsfile.full b/build-aux/Jenkinsfile.full
deleted file mode 100644
index aeba9e57e..000000000
--- a/build-aux/Jenkinsfile.full
+++ /dev/null
@@ -1,466 +0,0 @@
-#!groovy
-//
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-// Erlang version embedded in binary packages
-ERLANG_VERSION = '23'
-
-// Erlang version used for rebar in release process. CouchDB will not build from
-// the release tarball on Erlang versions older than this
-MINIMUM_ERLANG_VERSION = '20'
-
-// We create parallel build / test / package stages for each OS using the metadata
-// in this map. Adding a new OS should ideally only involve adding a new entry here.
-meta = [
- 'centos7': [
- name: 'CentOS 7',
- spidermonkey_vsn: '1.8.5',
- image: "apache/couchdbci-centos:7-erlang-${ERLANG_VERSION}"
- ],
-
- 'centos8': [
- name: 'CentOS 8',
- spidermonkey_vsn: '60',
- image: "apache/couchdbci-centos:8-erlang-${ERLANG_VERSION}"
- ],
-
- 'bionic': [
- name: 'Ubuntu 18.04',
- spidermonkey_vsn: '1.8.5',
- image: "apache/couchdbci-ubuntu:bionic-erlang-${ERLANG_VERSION}"
- ],
-
- 'focal': [
- name: 'Ubuntu 20.04',
- spidermonkey_vsn: '68',
- image: "apache/couchdbci-ubuntu:focal-erlang-${ERLANG_VERSION}"
- ],
-
- 'buster': [
- name: 'Debian 10',
- spidermonkey_vsn: '60',
- image: "apache/couchdbci-debian:buster-erlang-${ERLANG_VERSION}"
- ],
-
- 'bullseye-arm64': [
- name: 'Debian 11 ARM',
- spidermonkey_vsn: '78',
- image: "apache/couchdbci-debian:bullseye-erlang-${ERLANG_VERSION}",
- node_label: 'arm64v8'
- ],
-
- 'bullseye-ppc64': [
- name: 'Debian 11 POWER',
- spidermonkey_vsn: '78',
- image: "apache/couchdbci-debian:bullseye-erlang-${ERLANG_VERSION}",
- node_label: 'ppc64le'
- ],
-
- 'bullseye': [
- name: 'Debian 11',
- spidermonkey_vsn: '78',
- image: "apache/couchdbci-debian:bullseye-erlang-${ERLANG_VERSION}"
- ],
-
-    // Skip freebsd builds for now as advised by the node owner
- // 'freebsd': [
- // name: 'FreeBSD',
- // spidermonkey_vsn: '1.8.5',
- // gnu_make: 'gmake'
- // ],
-
- 'macos': [
- name: 'macOS',
- spidermonkey_vsn: '60',
- gnu_make: 'make'
- ]
-]
-
-// Credit to https://stackoverflow.com/a/69222555 for this technique.
-// We can use the scripted pipeline syntax to dynamically generate stages,
-// and inject them into a map that we pass to the `parallel` step in a script.
-// While the scripting approach is very flexible, it's not able to use some
-// functionality specific to Declarative Pipelines, like the `agent` and `post`
-// directives, so you'll see alternatives like try-catch-finally used for flow
-// control and the nested `node` and `docker` blocks in the container stage to
-// configure the worker environment.
-
-// Returns a build stage suitable for non-containerized environments (currently
-// macOS and FreeBSD). Coincidentally we do not currently support automated
-// package generation on these platforms. This method is invoked when we create
-// `parallelStagesMap` below.
-def generateNativeStage(platform) {
- return {
- stage(platform) {
- node(platform) {
- timeout(time: 90, unit: "MINUTES") {
- try {
- // deleteDir is OK here because we're not inside of a Docker container!
- deleteDir()
- unstash 'tarball'
- withEnv([
- 'HOME='+pwd(),
- 'PATH+USRLOCAL=/usr/local/bin',
- 'MAKE='+meta[platform].gnu_make
- ]) {
- sh( script: "mkdir -p ${COUCHDB_IO_LOG_DIR} ${platform}/build", label: 'Create build directories' )
- sh( script: "tar -xf apache-couchdb-*.tar.gz -C ${platform}/build --strip-components=1", label: 'Unpack release' )
- dir( "${platform}/build" ) {
- sh "./configure --skip-deps --spidermonkey-version ${meta[platform].spidermonkey_vsn}"
- sh '$MAKE'
- sh '$MAKE eunit'
- sh '$MAKE elixir-suite'
- sh '$MAKE exunit'
- sh '$MAKE mango-test'
- sh '$MAKE weatherreport-test'
- }
- }
- }
- catch (err) {
- sh 'ls -l ${WORKSPACE}'
- withEnv([
- 'HOME='+pwd(),
- 'PATH+USRLOCAL=/usr/local/bin',
- 'MAKE='+meta[platform].gnu_make
- ]) {
- dir( "${platform}/build" ) {
- sh 'ls -l'
- sh '${MAKE} build-report'
- }
- }
- error("Build step failed with error: ${err.getMessage()}")
- }
- finally {
- junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml'
- sh 'killall -9 beam.smp || true'
- sh 'rm -rf ${WORKSPACE}/* ${COUCHDB_IO_LOG_DIR}'
- }
- }
- }
- }
- }
-}
-
-// Returns a build stage suitable for container-based deployments. This method
-// is invoked when we create the `parallelStagesMap` in the pipeline below.
-def generateContainerStage(platform) {
- return {
- // Important: the stage name here must match the parallelStagesMap key for the
- // Jenkins UI to render the pipeline stages correctly. Don't ask why. -APK
- stage(platform) {
- node(meta[platform].get('node_label', 'docker')) {
- docker.withRegistry('https://docker.io/', 'dockerhub_creds') {
- docker.image(meta[platform].image).inside("${DOCKER_ARGS}") {
- timeout(time: 90, unit: "MINUTES") {
- stage("${meta[platform].name} - build & test") {
- try {
- sh( script: "rm -rf ${platform} apache-couchdb-*", label: 'Clean workspace' )
- unstash 'tarball'
- sh( script: "mkdir -p ${COUCHDB_IO_LOG_DIR} ${platform}/build", label: 'Create build directories' )
- sh( script: "tar -xf apache-couchdb-*.tar.gz -C ${platform}/build --strip-components=1", label: 'Unpack release' )
- dir( "${platform}/build" ) {
- sh "./configure --skip-deps --spidermonkey-version ${meta[platform].spidermonkey_vsn}"
- sh 'make'
- sh 'make eunit'
- sh 'make elixir-suite'
- sh 'make exunit'
- sh 'make mango-test'
- sh 'make weatherreport-test'
- }
- }
- catch (err) {
- sh 'ls -l ${WORKSPACE}'
- dir( "${platform}/build" ) {
- sh 'ls -l'
- sh 'make build-report'
- }
- error("Build step failed with error: ${err.getMessage()}")
- }
- finally {
- junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml'
- sh 'rm -rf ${WORKSPACE}/* ${COUCHDB_IO_LOG_DIR}'
- }
- }
-
- stage("${meta[platform].name} - package") {
- try {
- unstash 'tarball'
- sh( script: "mkdir -p ${platform}/couchdb", label: 'Create build directory' )
- sh( script: "tar -xf apache-couchdb-*.tar.gz -C ${platform}/couchdb", label: 'Unpack release' )
- sh( script: "cd ${platform} && git clone https://github.com/apache/couchdb-pkg", label: 'Clone packaging helper repo' )
- dir( "${platform}/couchdb-pkg" ) {
- sh( script: 'make', label: 'Build packages' )
- }
- sh( label: 'Stage package artifacts for archival', script: """
- rm -rf pkgs/${platform}
- mkdir -p pkgs/${platform}
- mv ${platform}/rpmbuild/RPMS/\$(arch)/*rpm pkgs/${platform} || true
- mv ${platform}/couchdb/*.deb pkgs/${platform} || true
- """ )
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true, onlyIfSuccessful: true
- }
- catch (err) {
- sh 'ls -l ${WORKSPACE}'
- error("Build step failed with error: ${err.getMessage()}")
- }
- finally {
- sh 'rm -rf ${WORKSPACE}/*'
- }
- }
- }
- }
- }
- }
- }
- }
-}
-
-// Finally we have the actual Pipeline. It's mostly a Declarative Pipeline,
-// except for the 'Test and Package' stage where we use the `script` step as an
-// "escape hatch" to dynamically generate a set of parallel stages to execute.
-pipeline {
-
- // no top-level agent; agents must be declared for each stage
- agent none
-
- environment {
- COUCHAUTH = credentials('couchdb_vm2_couchdb')
- COUCHDB_IO_LOG_DIR = '/tmp/couchjslogs'
- // Following fix an issue with git <= 2.6.5 where no committer
- // name or email are present for reflog, required for git clone
- GIT_COMMITTER_NAME = 'Jenkins User'
- GIT_COMMITTER_EMAIL = 'couchdb@apache.org'
- // https://github.com/jenkins-infra/jenkins.io/blob/master/Jenkinsfile#64
- // We need the jenkins user mapped inside of the image
- // npm config cache below deals with /home/jenkins not mapping correctly
- // inside the image
- DOCKER_ARGS = '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group'
- }
-
- options {
- buildDiscarder(logRotator(numToKeepStr: '10', artifactNumToKeepStr: '10'))
- preserveStashes(buildCount: 10)
- timeout(time: 3, unit: 'HOURS')
- timestamps()
- }
-
- stages {
- stage('Build Release Tarball') {
- agent {
- docker {
- label 'docker'
- image "apache/couchdbci-debian:erlang-${MINIMUM_ERLANG_VERSION}"
- args "${DOCKER_ARGS}"
- registryUrl 'https://docker.io/'
- registryCredentialsId 'dockerhub_creds'
- }
- }
- environment {
- // TODO find a way to avoid setting this explicitly
- spidermonkey = '60'
- }
- steps {
- timeout(time: 15, unit: "MINUTES") {
- sh (script: 'rm -rf apache-couchdb-*', label: 'Clean workspace of any previous release artifacts' )
- sh "./configure --spidermonkey-version ${spidermonkey}"
- sh 'make erlfmt-check'
- sh 'make elixir-check-formatted'
- sh 'make dist'
- }
- }
- post {
- success {
- stash includes: 'apache-couchdb-*.tar.gz', name: 'tarball'
- archiveArtifacts artifacts: 'apache-couchdb-*.tar.gz', fingerprint: true
- }
- failure {
- sh 'ls -l ${WORKSPACE}'
- }
- cleanup {
- // UGH see https://issues.jenkins-ci.org/browse/JENKINS-41894
- sh 'rm -rf ${WORKSPACE}/*'
- }
- }
- } // stage Build Release Tarball
-
- stage('Test and Package') {
- steps {
- script {
- // Including failFast: true in map fails the build immediately if any parallel step fails
- parallelStagesMap = meta.collectEntries( [failFast: false] ) { key, values ->
- if (values.image) {
- ["${key}": generateContainerStage(key)]
- }
- else {
- ["${key}": generateNativeStage(key)]
- }
- }
- parallel parallelStagesMap
- }
- }
- }
-
- /*
- * Example of how to do a qemu-based run, please leave here
- */
-
-/*
- stage('Debian Buster arm64v8') {
- // the process is convoluted to ensure we have the latest qemu static binaries on the node first
- // before trying to run a foreign docker container type. Alternately ensuring the `update_qemu`
- // container is run on every Jenkins agent *after every restart of the Docker daemon* would work.
- agent {
- any {
- }
- }
- options {
- timeout(time: 120, unit: "MINUTES")
- }
- environment {
- platform = 'aarch64-debian-stretch'
- sm_ver = '60'
- }
- stages {
- stage('Install latest qemu binaries') {
- steps {
- sh( script: 'docker run --rm --privileged multiarch/qemu-user-static --reset -p yes' )
- }
- }
- stage('Pull latest docker image') {
- steps {
- sh "docker pull apache/couchdbci-debian:arm64v8-buster-erlang-${ERLANG_VERSION}"
- }
- }
- stage('Build from tarball & test & packages') {
- steps {
- withDockerContainer(image: "apache/couchdbci-debian:arm64v8-buster-erlang-${ERLANG_VERSION}", args: "${DOCKER_ARGS}") {
- unstash 'tarball'
- withEnv(['MIX_HOME='+pwd(), 'HEX_HOME='+pwd()]) {
- sh( script: build_and_test )
- sh( script: make_packages )
- sh( script: cleanup_and_save )
- }
- }
- }
- post {
- always {
-*/
-// junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml'
-/*
- }
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
- }
- }
- }
- } // stages
- post {
- cleanup {
- sh 'rm -rf ${WORKSPACE}/*'
- }
- } // post
- } // stage
-*/
-
- stage('Publish') {
-
- when {
- expression { return env.BRANCH_NAME ==~ /main|2.*.x|3.*.x|4.*.x|jenkins-.*/ }
- }
-
- agent {
- docker {
- image "apache/couchdbci-debian:erlang-${ERLANG_VERSION}"
- label 'docker'
- args "${DOCKER_ARGS}"
- registryUrl 'https://docker.io/'
- registryCredentialsId 'dockerhub_creds'
- }
- }
- options {
- skipDefaultCheckout()
- timeout(time: 90, unit: "MINUTES")
- }
-
- steps {
- withCredentials([sshUserPrivateKey(credentialsId: 'jenkins-key', keyFileVariable: 'KEY')]) {
- sh 'rm -rf ${WORKSPACE}/*'
- unstash 'tarball'
- unarchive mapping: ['pkgs/' : '.']
-
- sh( label: 'Retrieve & clean current repo-nightly tree', script: '''
- rsync -avz -e "ssh -o StrictHostKeyChecking=no -i $KEY" jenkins@repo-nightly.couchdb.org:/var/www/html/$BRANCH_NAME . || mkdir -p $BRANCH_NAME
- rm -rf $BRANCH_NAME/debian/* $BRANCH_NAME/el6 $BRANCH_NAME/el7/* $BRANCH_NAME/el8/*
- mkdir -p $BRANCH_NAME/debian $BRANCH_NAME/el7 $BRANCH_NAME/el8 $BRANCH_NAME/source
- rsync -avz -e "ssh -o StrictHostKeyChecking=no -i $KEY" jenkins@repo-nightly.couchdb.org:/var/www/html/js .
- ''' )
-
- sh( label: 'Build Debian repo', script: '''
- git clone https://github.com/apache/couchdb-pkg
- cp js/ubuntu-bionic/*.deb pkgs/bionic
- for plat in buster bullseye bionic focal
- do
- reprepro -b couchdb-pkg/repo includedeb $plat pkgs/$plat/*.deb
- done
- ''' )
-
- sh( label: 'Build CentOS repos', script: '''
- cp js/centos-7/*rpm pkgs/centos7
- cp js/centos-8/*rpm pkgs/centos8
- cd pkgs/centos7 && createrepo_c --database .
- cd ../centos8 && createrepo_c --database .
- ''' )
-
- sh( label: 'Build tree to upload', script: '''
- mv couchdb-pkg/repo/pool $BRANCH_NAME/debian
- mv couchdb-pkg/repo/dists $BRANCH_NAME/debian
- mv pkgs/centos7/* $BRANCH_NAME/el7
- mv pkgs/centos8/* $BRANCH_NAME/el8
- mv apache-couchdb-*.tar.gz $BRANCH_NAME/source
- cd $BRANCH_NAME/source
- ls -1tr | head -n -10 | xargs -d '\n' rm -f --
- cd ../..
- ''' )
-
- sh( label: 'Sync tree back to repo-nightly', script: '''
- rsync -avz --delete -e "ssh -o StrictHostKeyChecking=no -i $KEY" $BRANCH_NAME jenkins@repo-nightly.couchdb.org:/var/www/html
- rm -rf $BRANCH_NAME couchdb-pkg *.tar.gz
- ''' )
- } // withCredentials
- } // steps
- } // stage
- } // stages
-
- post {
- success {
- mail to: 'notifications@couchdb.apache.org',
- replyTo: 'notifications@couchdb.apache.org',
- subject: "[Jenkins] SUCCESS: ${currentBuild.fullDisplayName}",
- body: "Yay, we passed. ${env.RUN_DISPLAY_URL}"
- }
- unstable {
- mail to: 'notifications@couchdb.apache.org',
- replyTo: 'notifications@couchdb.apache.org',
- subject: "[Jenkins] SUCCESS: ${currentBuild.fullDisplayName}",
- body: "Eep! Build is unstable... ${env.RUN_DISPLAY_URL}"
- }
- failure {
- mail to: 'notifications@couchdb.apache.org',
- replyTo: 'notifications@couchdb.apache.org',
- subject: "[Jenkins] FAILURE: ${currentBuild.fullDisplayName}",
- body: "Boo, we failed. ${env.RUN_DISPLAY_URL}"
- }
- }
-
-} // pipeline
diff --git a/build-aux/Jenkinsfile.pr b/build-aux/Jenkinsfile.pr
deleted file mode 100644
index 2f91961c5..000000000
--- a/build-aux/Jenkinsfile.pr
+++ /dev/null
@@ -1,203 +0,0 @@
-#!groovy
-//
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-build_and_test = '''
-mkdir -p ${COUCHDB_IO_LOG_DIR} ${ERLANG_VERSION}
-cd ${ERLANG_VERSION}
-rm -rf build
-mkdir build
-cd build
-tar -xf ${WORKSPACE}/apache-couchdb-*.tar.gz
-cd apache-couchdb-*
-./configure
-make check || (make build-report && false)
-'''
-
-pipeline {
-
- // no top-level agent; agents must be declared for each stage
- agent none
-
- environment {
- COUCHAUTH = credentials('couchdb_vm2_couchdb')
- recipient = 'notifications@couchdb.apache.org'
- COUCHDB_IO_LOG_DIR = '/tmp/couchjslogs'
- // Following fix an issue with git <= 2.6.5 where no committer
- // name or email are present for reflog, required for git clone
- GIT_COMMITTER_NAME = 'Jenkins User'
- GIT_COMMITTER_EMAIL = 'couchdb@apache.org'
- // Parameters for the matrix build
- DOCKER_IMAGE_BASE = 'apache/couchdbci-debian:erlang'
- // https://github.com/jenkins-infra/jenkins.io/blob/master/Jenkinsfile#64
- // We need the jenkins user mapped inside of the image
- // npm config cache below deals with /home/jenkins not mapping correctly
- // inside the image
- DOCKER_ARGS = '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group'
-
- // *** BE SURE TO ALSO CHANGE THE ERLANG VERSIONS FARTHER DOWN ***
- // Search for ERLANG_VERSION
- // see https://issues.jenkins.io/browse/JENKINS-61047 for why this cannot
- // be done parametrically
- LOW_ERLANG_VER = '20'
-
- // erlfmt doesn't run with the lowest erlang version so we run it in a
- // separate stage with a higher erlang version.
- ERLFMT_ERLANG_VER = '23'
- }
-
- options {
- buildDiscarder(logRotator(numToKeepStr: '10', artifactNumToKeepStr: '10'))
- // This fails the build immediately if any parallel step fails
- parallelsAlwaysFailFast()
- preserveStashes(buildCount: 10)
- timeout(time: 3, unit: 'HOURS')
- timestamps()
- }
-
- stages {
-
- stage('erlfmt') {
- agent {
- docker {
- image "${DOCKER_IMAGE_BASE}-${ERLFMT_ERLANG_VER}"
- label 'docker'
- args "${DOCKER_ARGS}"
- registryUrl 'https://docker.io/'
- registryCredentialsId 'dockerhub_creds'
- }
- }
- options {
- timeout(time: 15, unit: "MINUTES")
- }
- steps {
- sh '''
- set
- rm -rf apache-couchdb-*
- ./configure --skip-deps
- make erlfmt-check
- '''
- }
- post {
- cleanup {
- // UGH see https://issues.jenkins-ci.org/browse/JENKINS-41894
- sh 'rm -rf ${WORKSPACE}/*'
- }
- }
- } // stage erlfmt
-
-
- stage('Build Release Tarball') {
- agent {
- docker {
- image "${DOCKER_IMAGE_BASE}-${LOW_ERLANG_VER}"
- label 'docker'
- args "${DOCKER_ARGS}"
- registryUrl 'https://docker.io/'
- registryCredentialsId 'dockerhub_creds'
- }
- }
- options {
- timeout(time: 15, unit: "MINUTES")
- }
- steps {
- sh '''
- set
- rm -rf apache-couchdb-*
- ./configure
- make dist
- chmod -R a+w * .
- '''
- }
- post {
- success {
- stash includes: 'apache-couchdb-*.tar.gz', name: 'tarball'
- }
- cleanup {
- // UGH see https://issues.jenkins-ci.org/browse/JENKINS-41894
- sh 'rm -rf ${WORKSPACE}/*'
- }
- }
- } // stage Build Release Tarball
-
- // TODO Rework once Improved Docker Pipeline Engine is released
- // https://issues.jenkins-ci.org/browse/JENKINS-47962
- // https://issues.jenkins-ci.org/browse/JENKINS-48050
-
- stage('Make Check') {
-
- matrix {
- axes {
- axis {
- name 'ERLANG_VERSION'
- values '20', '21', '22', '23', '24'
- }
- axis {
- name 'SM_VSN'
- values '60', '78'
- }
- }
- excludes {
- exclude {
- axis {
- name 'ERLANG_VERSION'
- values '20'
- }
- axis {
- name 'SM_VSN'
- notValues '60'
- }
- }
- exclude {
- axis {
- name 'ERLANG_VERSION'
- values '21', '22', '23', '24'
- }
- axis {
- name 'SM_VSN'
- notValues '78'
- }
- }
- }
-
- stages {
- stage('Build and Test') {
- agent {
- docker {
- image "${DOCKER_IMAGE_BASE}-${ERLANG_VERSION}"
- label 'docker'
- args "${DOCKER_ARGS}"
- }
- }
- options {
- skipDefaultCheckout()
- timeout(time: 90, unit: "MINUTES")
- }
- steps {
- unstash 'tarball'
- sh( script: build_and_test )
- }
- post {
- always {
- junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml'
- }
- cleanup {
- sh 'rm -rf ${WORKSPACE}/* ${COUCHDB_IO_LOG_DIR}'
- }
- }
- } // stage
- } // stages
- } // matrix
- } // stage "Make Check"
- } // stages
-} // pipeline
diff --git a/build-aux/README.md b/build-aux/README.md
deleted file mode 100644
index 72cef94b8..000000000
--- a/build-aux/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-# CouchDB and Jenkins CI
-
-In 2019, the ASF and CloudBees reached an agreement to allow
-the Foundation to use CloudBees Core to have a farm of managed Jenkins
-masters. This allows the ASF to give larger projects their own dedicated
-Jenkins master, which can be custom configured for the project. They can
-readily manage this farm of Jenkins masters centrally, including pushing
-updates to all masters and their plugins. Naturally, this also reduces
-contention, as well as providing increased security for project-level
-credentials. CouchDB is the first project to use this setup, via
-https://ci-couchdb.apache.org/ (aka https://jenkins-cm1.apache.org/)
-
-Only members of the ASF LDAP group `couchdb-pmc` have write access to
-the Jenkins CI server, and all jobs are set to not trust changes to the
-Jenkinsfile from forked repos (see below).
-
-Further, IBM is sponsoring CouchDB with cloud-based worker nodes, for
-the project's exclusive use. Combined with the FreeBSD and OSX nodes the
-project already had, this will provide the necessary compute resources
-to meet the needs of the project for some time to come.
-
-# Jenkins Configuration
-
-All jobs on the new Jenkins master are contained in a CouchDB folder.
-Credentials for the project are placed within this folder; this is a
-unique capability of CloudBees Core at this time.
-
-## Pull Requests
-
-[Blue Ocean link](https://ci-couchdb.apache.org/blue/organizations/jenkins/jenkins-cm1%2FPullRequests/activity/)
-
-To implement build-a-PR functionality in the same way that Travis
-performs builds, Jenkins offers the Multibranch Pipeline job. Here's how
-it's configured for CouchDB through the GUI:
-
-* Job name: PullRequests (jobs shouldn't have spaces in them, because
- the job name is used for workspace path naming.)
-* Display Name: "Pull Requests"
-* Description: "This job builds all GitHub pull requests against
- apache/couchdb."
-* Branch sources: Github
- * Credentials: a GitHub API key from wohali. These credentials are
- stored in the top-level Jenkins CouchDB folder on the server.
- The API token credentials are `user:email` and `repo:status`.
- * URL https://github.com/apache/couchdb
- * Behaviors
- * Discover branches: Exclude branches that are also filed as PRs
- * Discover PRs from origin: Merging the PR with the current target
- branch revision
-    * Discover PRs from forks: Merging the PR with the current target
- branch revision, trust Nobody [2]
- * Advanced clone behaviours:
- * Fetch tags
- * Clear "Shallow clone" [1]
- * Clean before checkout [1]
- * Prune stale remote-tracking branches
- * Property strategy: All branches get the same properties
-* Build Configuration
- * Mode: by Jenkinsfile
- * Script path: `build-aux/Jenkinsfile.pr`
-* Scan Repository Triggers
-  * Periodically if not otherwise run
- * Interval: 1 day (do not set this too high, GitHub has an API token
- throttle that can cause issues!)
-* Orphaned Item Strategy
- * Discard old items
- * Days to keep old items: <blank>
- * Max # of old items to keep: 10
-* Everything else set as defaults.
-
-[1]: https://issues.jenkins-ci.org/browse/JENKINS-44598 explains why we have the build set to Clean Before Checkout every time, and why clones are not shallow.
-
-[2]: https://issues.apache.org/jira/browse/INFRA-17449 explains why "Discover pull requests from forks/Trust" has been set to "Nobody."
-
-## Full Platform Builds on `main` and release branches
-
-[![main branch status](https://ci-couchdb.apache.org/job/jenkins-cm1/job/FullPlatformMatrix/job/main/badge/icon?subject=main)](https://ci-couchdb.apache.org/blue/organizations/jenkins/jenkins-cm1%2FFullPlatformMatrix/activity?branch=main)
-[![3.x branch status](https://ci-couchdb.apache.org/job/jenkins-cm1/job/FullPlatformMatrix/job/3.x/badge/icon?subject=3.x)](https://ci-couchdb.apache.org/blue/organizations/jenkins/jenkins-cm1%2FFullPlatformMatrix/activity?branch=3.x)
-
-Our original Jenkins job (formerly `/Jenkinsfile`) is now
-`build-aux/Jenkinsfile.full`. This builds CouchDB on `main`, all of
-our release branches (`2.*`, `3.*`, etc.) as well as any branch prefixed with
-`jenkins-` for testing on a wide variety of supported operating systems.
-
-Settings are as follows:
-
-* Job name: FullPlatformMatrix (jobs shouldn't have spaces in them, because
- the job name is used for workspace path naming.)
-* Display Name: "Full Platform Builds"
-* Description: "This job builds on our master and release branches,
- and builds packages on all."
-* Branch sources: Github
- * Credentials: a GitHub API key from wohali. These credentials are
- stored in the top-level Jenkins CouchDB folder on the server.
- The API token credentials are `user:email` and `repo:status`.
- * URL https://github.com/apache/couchdb
- * Behaviors
- * Discover branches: All branches
- * Filter by name (with wildcards): Include: master 2.*.x 3.*.x 4.*.x jenkins-*
- * Advanced clone behaviours:
- * Fetch tags
- * Clear "Shallow clone" [1]
- * Clean before checkout [1]
- * Prune stale remote-tracking branches
- * Property strategy: All branches get the same properties
-* Build Configuration
- * Mode: by Jenkinsfile
-  * Script path: `build-aux/Jenkinsfile.full`
-* Scan Repository Triggers
- * none
-* Orphaned Item Strategy
- * Discard old items
- * Days to keep old items: <blank>
- * Max # of old items to keep: 10
-* Everything else set as defaults.
-
-## Other Resources
-
-The [apache/couchdb-ci](https://github.com/apache/couchdb-ci) repo contains the
-dockerfiles that we use to generate the container images used for our
-container-based builds. These images are hosted on Docker Hub in the following
-repos:
-
-* [apache/couchdbci-debian](https://hub.docker.com/r/apache/couchdbci-debian)
-* [apache/couchdbci-ubuntu](https://hub.docker.com/r/apache/couchdbci-ubuntu)
-* [apache/couchdbci-centos](https://hub.docker.com/r/apache/couchdbci-centos)
-
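-For example, one of these images can be pulled locally with (the tag shown is
-the Debian/Erlang 23 image referenced in `Jenkinsfile.full`):
-
-    docker pull apache/couchdbci-debian:bullseye-erlang-23
-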
-The [apache/couchdb-pkg](https://github.com/apache/couchdb-pkg) repo contains
-a set of helper scripts to build binary packages for Debian / CentOS / Ubuntu
-from the contents of a release tarball. The packaging stage of our "Full
-Platform Builds" pipeline clones this repo to produce the package artifacts.
diff --git a/build-aux/couchdb-build-release.sh b/build-aux/couchdb-build-release.sh
deleted file mode 100755
index dfd529d13..000000000
--- a/build-aux/couchdb-build-release.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/sh -e
-
-VERSION=$1
-
-if [ -z "${VERSION}" ]; then
- echo "NO VERSION"
- exit 1
-fi
-
-echo "Building Apache CouchDB ${VERSION}"
-
-REL_DIR=apache-couchdb-${VERSION}
-# make release dir
-rm -rf ${REL_DIR}
-mkdir ${REL_DIR}
-
-CURRENT_BRANCH=`git rev-parse --abbrev-ref HEAD`
-
-# copy sources over
-git archive ${CURRENT_BRANCH} | tar -xC ${REL_DIR}/ -f -
-cd src/
-for repo in *; do
- cd ${repo}
- if [ -d ".git" ]; then
- mkdir -p ../../${REL_DIR}/src/${repo}
- git_ish=`git rev-parse --short HEAD`
- git archive ${git_ish} \
- | tar --exclude '*do_not_compile.erl' -xC ../../${REL_DIR}/src/${repo}/ -f -
- fi
- set +e
- grep -rl '{vsn, git}' ../../${REL_DIR}/src/${repo}/ 2>/dev/null \
- | xargs sed -ie "s/{vsn, git}/{vsn, \"${VERSION}\"}/" 2>/dev/null
- set -e
- cd ..
-done
-
-cd ..
-
-if test -e .git; then
- # save git sha in version.mk
- git_sha=`git rev-parse --short HEAD`
- echo "git_sha=${git_sha}" >> ${REL_DIR}/version.mk
- # create CONTRIBUTORS file
- OS=`uname -s`
-
- sed -e "/^#.*/d" CONTRIBUTORS.in > ${REL_DIR}/CONTRIBUTORS
- CONTRIB_EMAIL_SED_COMMAND="s/^[[:blank:]]{5}[[:digit:]]+[[:blank:]]/ * /"
- git shortlog -se 6c976bd..HEAD \
- | grep -v @apache.org \
- | sed -E -e "${CONTRIB_EMAIL_SED_COMMAND}" >> ${REL_DIR}/CONTRIBUTORS
- echo "" >> ${REL_DIR}/CONTRIBUTORS # simplest portable newline
- echo "For a list of authors see the \`AUTHORS\` file." >> ${REL_DIR}/CONTRIBUTORS
-fi
-
-# copy our rebar
-cp bin/rebar ${REL_DIR}/bin/rebar
diff --git a/build-aux/dist-error b/build-aux/dist-error
deleted file mode 100755
index 73486b5db..000000000
--- a/build-aux/dist-error
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh -e
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# This script is called by the build system and is used to provide an error
-# about missing or empty files. Some files are optional, and will be built when
-# the environment allows. But these files are required for distribution.
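-#
-# Example invocation, as a make recipe might use it (the path is illustrative):
-#
-#     build-aux/dist-error share/docs/html/index.html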
-
-cat << EOF
-ERROR: This file is missing or incomplete:
-
- $1
-
- This file is optional at build and install time,
- but is required when preparing a distribution.
-EOF
-
-exit 1
diff --git a/build-aux/introspect b/build-aux/introspect
deleted file mode 100755
index 9b527455f..000000000
--- a/build-aux/introspect
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env escript
-%% -*- mode: erlang -*-
-
-main(_) ->
- introspect("rebar.config.script").
-
-introspect(File) ->
- Bindings = [{'SCRIPT', File}, {'CONFIG', []}],
- {ok, Config} = file:script(File, Bindings),
- {deps, Deps} = lists:keyfind(deps, 1, Config),
- introspect_deps(Deps).
-
-introspect_deps([]) ->
- ok;
-introspect_deps([Dep | Rest]) ->
- introspect_dep(Dep),
- introspect_deps(Rest).
-
-introspect_dep({App, VsnRegex, {git, Url, From}, _Raw}) ->
- introspect_dep({App, VsnRegex, {git, Url, From}});
-introspect_dep({App, _VsnRegex, {git, _Url, From}}) ->
- io:format(bold("~s~n"), [App]),
- introspect_diff(App, From),
- io:format("~n", []),
- ok.
-
-revision({branch, Branch}) ->
- Branch;
-revision({tag, Tag}) ->
- Tag;
-revision(Rev) ->
- Rev.
-
-introspect_diff(App, From) ->
- introspect_diff(App, revision(From), "origin/master").
-
-introspect_diff(App, From, ToBranch) ->
- {ok, Log} = sh(App, io_lib:format("git log --pretty=oneline ~s..~s", [From, ToBranch])),
- case Log of
- [] ->
- io:format(" up to date on ~s~n", [bold(ToBranch)]);
- _ ->
- io:format(" ~B commits behind ~s~n", [length(Log), bold(ToBranch)]),
- io:format("~s~n~n", [string:join([" " ++ L || L <- Log], "\n")])
- end.
-
-sh(App, Cmd) ->
- Dir = lists:flatten(["src/", atom_to_list(App)]),
- Port = open_port({spawn, lists:flatten(Cmd)},
- [{cd, Dir},
- {line, 16384},
- exit_status,
- stderr_to_stdout,
- use_stdio]),
- read_port(Port).
-
-read_port(Port) ->
- read_port(Port, []).
-
-read_port(Port, Acc) ->
- receive
- {Port, {data, {eol, Line}}} ->
- read_port(Port, [Line | Acc]);
- {Port, {data, {noeol, Line}}} ->
- read_port(Port, [Line | Acc]);
- {Port, {exit_status, 0}} ->
- {ok, lists:reverse(Acc)};
- {Port, {exit_status, Code}} ->
- {error, Code, Acc}
- end.
-
-bold(Text) ->
- "\e[1m" ++ Text ++ "\e[0m".
diff --git a/build-aux/logfile-uploader.py b/build-aux/logfile-uploader.py
deleted file mode 100755
index f33915a83..000000000
--- a/build-aux/logfile-uploader.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/env python3
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import datetime
-import glob
-import json
-import os
-import tarfile
-import time
-
-import requests
-
-COUCH_URL = "https://logs.couchdb.org/ci_errorlogs"
-TARFILE = "couchlog.tar.gz"
-
-
-def _tojson(req):
- """Support requests v0.x as well as 1.x+"""
- if requests.__version__[0] == "0":
- return json.loads(req.content)
- return req.json()
-
-
-def collect_logfiles():
- """Find and tarball all logfiles"""
- tb = tarfile.open(name=TARFILE, mode="w:gz")
- # Test results
- for log in glob.glob("test-results.log"):
- tb.add(log)
- # EUnit
- for log in glob.glob("src/*/.eunit/couch.log"):
- tb.add(log)
- # JS harness
- for log in glob.glob("dev/logs/node1.log"):
- tb.add(log)
- # couchjs OS process IO logs
- for log in glob.glob("/tmp/couchjslogs/*"):
- tb.add(log)
- tb.close()
-
-
-def build_ci_doc():
- """Build a metadata document with relevant detail from CI env"""
- doc = {}
- if "TRAVIS" in os.environ:
- doc["builder"] = "travis"
- doc["build_id"] = os.environ["TRAVIS_JOB_ID"]
- doc["erlang"] = os.environ["TRAVIS_OTP_RELEASE"]
- doc["url"] = (
- "https://travis-ci.org/apache/couchdb/jobs/" + os.environ["TRAVIS_JOB_ID"]
- )
- doc["branch"] = os.environ["TRAVIS_BRANCH"]
- doc["commit"] = os.environ["TRAVIS_COMMIT"]
- doc["repo"] = "https://github.com/" + os.environ["TRAVIS_REPO_SLUG"]
- elif "JENKINS_URL" in os.environ:
- doc["builder"] = "jenk-ins"
- doc["build_id"] = os.environ["BUILD_NUMBER"]
- doc["url"] = os.environ["BUILD_URL"]
- doc["branch"] = os.environ["BRANCH_NAME"]
- doc["repo"] = "https://github.com/apache/couchdb"
- else:
- doc["builder"] = "manual"
- # TODO: shell out to get correct repo, commit, branch info?
- doc["repo"] = "https://github.com/apache/couchdb"
- doc["build_id"] = str(time.time())
-
- # shorten doc id
- repo = doc["repo"].split("/")[-1]
- repo = repo.replace(".git", "")
-
- doc["_id"] = (
- doc["builder"]
- + "-"
- + repo
- + "-"
- + doc["build_id"]
- + "-"
- + datetime.datetime.utcnow().isoformat()
- )
-
- return doc
-
-
-def upload_logs():
- try:
- lp = os.environ["COUCHAUTH"].split(":")
- except KeyError as e:
- print("ERROR: COUCHAUTH credentials unavailable! " "Unable to upload logfiles.")
- exit(1)
-
- creds = (lp[0], lp[1])
- doc = build_ci_doc()
- req = requests.post(
- COUCH_URL,
- data=json.dumps(doc),
- auth=creds,
- headers={"Content-type": "application/json"},
- )
- req.raise_for_status()
- req = _tojson(req)
- with open(TARFILE, "rb") as f:
- # ancient versions of requests break if data is iterable
- fdata = f.read()
- req2 = requests.put(
- COUCH_URL + "/" + doc["_id"] + "/" + TARFILE,
- headers={"Content-type": "application/x-gtar"},
- auth=creds,
- params={"rev": req["rev"]},
- data=fdata,
- )
- req2.raise_for_status()
- return req2
-
-
-def main():
- """Find latest logfile and upload to Couch logfile db."""
- print("Uploading logfiles...")
- collect_logfiles()
- req = upload_logs()
- print(req.url.split("?")[0])
- print(req.content)
- print("Upload complete!")
-
-
-if __name__ == "__main__":
- main()
diff --git a/build-aux/print-committerlist.sh b/build-aux/print-committerlist.sh
deleted file mode 100755
index f6abc4c78..000000000
--- a/build-aux/print-committerlist.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/sh
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-function get_contributors {
- local OS=`uname -s`
- case "$OS" in
- Linux|CYGWIN*) # GNU sed
- local SED_ERE_FLAG=-r
- ;;
- *) # BSD sed
- local SED_ERE_FLAG=-E
- ;;
- esac
-
- local CONTRIB_EMAIL_SED_COMMAND="s/^[[:blank:]]{5}[[:digit:]]+[[:blank:]]/ * /"
- if [ "$1" == "couchdb-main-repo" ]
- then
- git shortlog -se 6c976bd..HEAD \
- | grep -v @apache.org \
- | sed $SED_ERE_FLAG -e "$CONTRIB_EMAIL_SED_COMMAND"
- else
- cd src/$1
- git shortlog -se HEAD \
- | grep -v @apache.org \
- | sed $SED_ERE_FLAG -e "$CONTRIB_EMAIL_SED_COMMAND"
- cd .. && cd ..
- fi
-}
-
-function print_comitter_list {
- # list of external repos that we exclude
- local EXCLUDE=("bear" "folsom" "goldrush" "ibrowse" "jiffy" "lager" "meck" "mochiweb" "snappy")
- local EXCLUDE=$(printf "\|%s" "${EXCLUDE[@]}")
- local EXCLUDE=${EXCLUDE:2}
- local SUBREPOS=$(ls src/ | grep -v "$EXCLUDE")
-
- if test -e .git; then
-
- {
- for i in $SUBREPOS; do
- get_contributors $i
- done;
- get_contributors "couchdb-main-repo"
- } | git check-mailmap --stdin | awk '
- BEGIN {
- }
- {
- $1 = "";
- persons[$0] = $0;
- }
- END {
- for (i in persons) {
- print persons[i];
- }
- }'
- fi
-}
diff --git a/build-aux/show-test-results.py b/build-aux/show-test-results.py
deleted file mode 100755
index edd6ca13f..000000000
--- a/build-aux/show-test-results.py
+++ /dev/null
@@ -1,412 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse
-import glob
-import json
-import os
-import re
-import xml.dom.minidom as md
-
-
-TEST_COLLECTIONS = {
- "EUnit": "src/**/.eunit/*.xml",
- "EXUnit": "_build/integration/lib/couchdbtest/*.xml",
- "Mango": "src/mango/*.xml",
- "JavaScript": "test/javascript/*.xml",
-}
-
-
-def _attrs(elem):
- ret = {}
- for (k, v) in elem.attributes.items():
- ret[k.lower()] = v
- return ret
-
-
-def _text(elem):
- rc = []
- for node in elem.childNodes:
- if node.nodeType == node.TEXT_NODE:
- rc.append(node.data)
- else:
-            rc.append(_text(node))
- return "".join(rc)
-
-
-class TestCase(object):
- def __init__(self, elem):
- self.elem = elem
-
- attrs = _attrs(elem)
-
- self.name = self._name(attrs)
- self.time = float(attrs["time"])
-
- self.failure = False
- self._check_failure(elem, attrs)
-
- self.error = False
- self._check_error(elem, attrs)
-
- self.skipped = False
- self._check_skipped(elem, attrs)
-
- def _check_failure(self, elem, attrs):
- failures = elem.getElementsByTagName("failure")
- if not failures:
- return
-
- self.failure = True
- self.failure_msg = _text(failures[0]).strip()
-
- def _check_error(self, elem, attrs):
- errors = elem.getElementsByTagName("error")
- if not errors:
- return
-
- self.error = True
- self.error_msg = _text(errors[0]).strip()
-
- def _check_skipped(self, elem, attrs):
- skipped = elem.getElementsByTagName("skipped")
- if not skipped:
- return
-
- attrs = _attrs(skipped[0])
- self.skipped = True
- self.skipped_msg = attrs.get("message", attrs.get("type", "<unknown>"))
-
- def _name(self, attrs):
- klass = attrs.get("classname", "")
- if klass.startswith("Elixir."):
- klass = klass[len("Elixir.") :]
- if klass:
- return "%s - %s" % (klass, attrs["name"])
- return attrs["name"]
-
-
-class TestSuite(object):
-    SUITE_NAME_PATTERNS = [re.compile("module '([^']+)'"), re.compile(r"Elixir\.(.+)")]
-
- def __init__(self, elem):
- self.elem = elem
-
- attrs = _attrs(elem)
-
- self.name = self._name(attrs)
-
- self.time = 0.0
- if "time" in attrs:
- self.time = float(attrs["time"])
-
- self.num_tests = int(attrs["tests"])
- self.num_failures = int(attrs["failures"])
- self.num_errors = int(attrs["errors"])
- self.num_skipped = 0
-
- self.tests = []
- self.test_time = 0.0
-
- for t_elem in elem.getElementsByTagName("testcase"):
- self.tests.append(TestCase(t_elem))
- self.test_time += self.tests[-1].time
- if self.tests[-1].skipped:
- self.num_skipped += 1
-
- if self.time == 0.0 and self.test_time > 0.0:
- self.time = self.test_time
-
- def _name(self, attrs):
- raw_name = attrs["name"]
- for p in self.SUITE_NAME_PATTERNS:
- match = p.match(raw_name)
- if match:
- return match.group(1)
- return raw_name
-
-
-class TestCollection(object):
- def __init__(self, name, pattern):
- self.name = name
- self.pattern = pattern
- self.suites = []
- self.bad_files = []
-
- for fname in glob.glob(pattern):
- self._load_file(fname)
-
- def _load_file(self, filename):
- try:
- dom = md.parse(filename)
- except:
- self.bad_files.append(filename)
- return
- for elem in dom.getElementsByTagName("testsuite"):
- self.suites.append(TestSuite(elem))
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Show test result summaries")
- parser.add_argument(
- "--ignore-failures",
- action="store_true",
- default=False,
- help="Don't display test failures",
- )
- parser.add_argument(
- "--ignore-errors",
- action="store_true",
- default=False,
- help="Don't display test errors",
- )
- parser.add_argument(
- "--ignore-skipped",
- action="store_true",
- default=False,
- help="Don't display skipped tests",
- )
- parser.add_argument(
- "--all", type=int, default=0, help="Number of rows to show for all groups"
- )
- parser.add_argument(
- "--collection",
- action="append",
- default=[],
- help="Which collection to display. May be repeated.",
- )
- parser.add_argument(
- "--suites", type=int, default=0, help="Number of suites to show"
- )
- parser.add_argument("--tests", type=int, default=0, help="Number of tests to show")
- parser.add_argument(
- "--sort",
- default="total",
- choices=["test", "fixture", "total"],
- help="Timing column to sort on",
- )
- return parser.parse_args()
-
-
-def display_failures(collections):
- failures = []
- for collection in collections:
- for suite in collection.suites:
- for test in suite.tests:
- if not test.failure:
- continue
- failures.append((test.name, test.failure_msg))
-
- if not len(failures):
- return
- print("Failures")
- print("========")
- print()
- for failure in failures:
- print(failure[0])
- print("-" * len(failure[0]))
- print()
- print(failure[1])
- print()
-
-
-def display_errors(collections):
- errors = []
- for collection in collections:
- for suite in collection.suites:
- for test in suite.tests:
- if not test.error:
- continue
- errors.append((test.name, test.error_msg))
-
- if not len(errors):
- return
- print("Errors")
- print("======")
- print()
- for error in errors:
- print(error[0])
- print("-" * len(error[0]))
- print()
- print(error[1])
- print()
-
-
-def display_skipped(collections):
- skipped = []
- for collection in collections:
- for suite in collection.suites:
- for test in suite.tests:
- if not test.skipped:
- continue
- name = "%s - %s - %s" % (collection.name, suite.name, test.name)
- skipped.append((name, test.skipped_msg))
- if not skipped:
- return
- print("Skipped")
- print("=======")
- print()
- for row in sorted(skipped):
- print(" %s: %s" % row)
- print()
-
-
-def display_table(table):
- for ridx, row in enumerate(table):
- new_row = []
- for col in row:
- if isinstance(col, float):
- new_row.append("%4.1fs" % col)
- elif isinstance(col, int):
- new_row.append("%d" % col)
- else:
- new_row.append(col)
- table[ridx] = new_row
- for row in table:
- fmt = " ".join(["%10s"] * len(row))
- print(fmt % tuple(row))
-
-
-def display_collections(collections, sort):
- rows = []
- for collection in collections:
- total_time = 0.0
- test_time = 0.0
- num_tests = 0
- num_failures = 0
- num_errors = 0
- num_skipped = 0
- for suite in collection.suites:
- total_time += suite.time
- test_time += suite.test_time
- num_tests += suite.num_tests
- num_failures += suite.num_failures
- num_errors += suite.num_errors
- num_skipped += suite.num_skipped
- cols = (
- total_time,
- max(0.0, total_time - test_time),
- test_time,
- num_tests,
- num_failures,
- num_errors,
- num_skipped,
- collection.name + " ",
- )
- rows.append(cols)
-
- scol = 0
- if sort == "fixture":
- scol = 1
- elif sort == "test":
- scol = 2
-
- def skey(row):
- return (-1.0 * row[scol], row[-1])
-
- rows.sort(key=skey)
-
- print("Collections")
- print("===========")
- print()
- headers = ["Total", "Fixture", "Test", "Count", "Failed", "Errors", "Skipped"]
- display_table([headers] + rows)
- print()
-
-
-def display_suites(collections, count, sort):
- rows = []
- for collection in collections:
- for suite in collection.suites:
- cols = [
- suite.time,
- max(0.0, suite.time - suite.test_time),
- suite.test_time,
- suite.num_tests,
- suite.num_failures,
- suite.num_errors,
- suite.num_skipped,
- collection.name + " - " + suite.name,
- ]
- rows.append(cols)
-
- scol = 0
- if sort == "fixture":
- scol = 1
- elif sort == "test":
- scol = 2
-
- def skey(row):
- return (-1.0 * row[scol], row[-1])
-
- rows.sort(key=skey)
-
- rows = rows[:count]
-
- print("Suites")
- print("======")
- print()
- headers = ["Total", "Fixture", "Test", "Count", "Failed", "Errors", "Skipped"]
- display_table([headers] + rows)
- print()
-
-
-def display_tests(collections, count):
- rows = []
- for collection in collections:
- for suite in collection.suites:
- for test in suite.tests:
- if test.failure or test.error or test.skipped:
- continue
- fmt = "%s - %s - %s"
- display = fmt % (collection.name, suite.name, test.name)
- rows.append((test.time, display))
-
- def skey(row):
- return (-1.0 * row[0], row[-1])
-
- rows.sort(key=skey)
- rows = rows[:count]
-
- print("Tests")
- print("=====")
- print()
- display_table(rows)
- print()
-
-
-def main():
- args = parse_args()
-
- if not args.collection:
- args.collection = ["eunit", "exunit", "mango", "javascript"]
-
- collections = []
- for (name, pattern) in TEST_COLLECTIONS.items():
- if name.lower() not in args.collection:
- continue
- collections.append(TestCollection(name, pattern))
-
- if not args.ignore_failures:
- display_failures(collections)
-
- if not args.ignore_errors:
- display_errors(collections)
-
- if not args.ignore_skipped:
- display_skipped(collections)
-
- display_collections(collections, args.sort)
-
- if args.all > 0:
- args.suites = args.all
- args.tests = args.all
-
- if args.suites > 0:
- display_suites(collections, args.suites, args.sort)
-
- if args.tests > 0:
- display_tests(collections, args.tests)
-
-
-if __name__ == "__main__":
- main()
diff --git a/build-aux/sphinx-build b/build-aux/sphinx-build
deleted file mode 100755
index 8ecf43a55..000000000
--- a/build-aux/sphinx-build
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh -e
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# This script is called by the build system and is used to call sphinx-build if
-# it is available, or alternatively, emit a warning and perform a no-op. Any
-# required directories or Makefiles are created and stubbed out as appropriate.
-
-if test -z "`which sphinx-build`"; then
- missing=yes
- cat << EOF
-WARNING: 'sphinx-build' is needed, and is missing on your system.
- You might have modified some files without having the
- proper tools for further handling them.
-EOF
-fi
-
-if test "$2" = "html"; then
- if test "$missing" != "yes"; then
- sphinx-build $*
- else
- mkdir -p html
- fi
-fi
diff --git a/build-aux/sphinx-touch b/build-aux/sphinx-touch
deleted file mode 100755
index ed7217de2..000000000
--- a/build-aux/sphinx-touch
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh -e
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# This script is called by the build system and is used to touch the list of
-# expected output files when sphinx-build is not available. If the files exist,
-# this will satisfy make. If they do not exist, we create empty files.
-
-if test -z "`which sphinx-build`"; then
- for file in $*; do
- mkdir -p `dirname $file`
- touch $file
- done
-fi
\ No newline at end of file
diff --git a/config/config.exs b/config/config.exs
deleted file mode 100644
index 8e52433cc..000000000
--- a/config/config.exs
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is responsible for configuring your application
-# and its dependencies with the aid of the Mix.Config module.
-use Mix.Config
-
-# This configuration is loaded before any dependency and is restricted
-# to this project. If another project depends on this project, this
-# file won't be loaded nor affect the parent project. For this reason,
-# if you want to provide default values for your application for
-# 3rd-party users, it should be done in your "mix.exs" file.
-
-# You can configure your application as:
-#
-# config :couchdbtest, key: :value
-#
-# and access this configuration in your application as:
-#
-# Application.get_env(:couchdbtest, :key)
-#
-# You can also configure a 3rd-party app:
-#
-# config :logger, level: :info
-#
-
-# It is also possible to import configuration files, relative to this
-# directory. For example, you can emulate configuration per environment
-# by uncommenting the line below and defining dev.exs, test.exs and such.
-# Configuration from the imported file will override the ones defined
-# here (which is why it is important to import them last).
-#
-import_config "#{Mix.env}.exs"
\ No newline at end of file
diff --git a/config/dev.exs b/config/dev.exs
deleted file mode 100644
index d2d855e6d..000000000
--- a/config/dev.exs
+++ /dev/null
@@ -1 +0,0 @@
-use Mix.Config
diff --git a/config/integration.exs b/config/integration.exs
deleted file mode 100644
index 796880266..000000000
--- a/config/integration.exs
+++ /dev/null
@@ -1,9 +0,0 @@
-use Mix.Config
-
-config :logger,
- backends: [:console],
- compile_time_purge_level: :debug,
- level: :debug
-
-config :sasl,
- sasl_error_logger: false
diff --git a/config/prod.exs b/config/prod.exs
deleted file mode 100644
index d2d855e6d..000000000
--- a/config/prod.exs
+++ /dev/null
@@ -1 +0,0 @@
-use Mix.Config
diff --git a/config/test.exs b/config/test.exs
deleted file mode 100644
index c5a5ed24a..000000000
--- a/config/test.exs
+++ /dev/null
@@ -1,12 +0,0 @@
-use Mix.Config
-
-config :logger,
- backends: [:console],
- compile_time_purge_level: :debug,
- level: :debug
-
-config :kernel,
- error_logger: false
-
-config :sasl,
- sasl_error_logger: false
diff --git a/configure b/configure
deleted file mode 100755
index d8e592b9e..000000000
--- a/configure
+++ /dev/null
@@ -1,360 +0,0 @@
-#!/bin/sh -e
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# next steps:
-# try running this, figure out what to do with the vars in the generated files
-# in the bottom
-
-# cd into this script’s directory
-rootdir="$(cd "${0%/*}" 2>/dev/null; echo "$PWD")"
-basename=`basename $0`
-
-PACKAGE_AUTHOR_NAME="The Apache Software Foundation"
-
-REBAR3_BRANCH="main"
-
-# TEST=0
-WITH_PROPER="true"
-WITH_FAUXTON=1
-WITH_DOCS=1
-ERLANG_MD5="false"
-SKIP_DEPS=0
-
-COUCHDB_USER="$(whoami 2>/dev/null || echo couchdb)"
-SM_VSN=${SM_VSN:-"1.8.5"}
-ARCH="$(uname -m)"
-ERLANG_VER="$(erl -eval 'io:put_chars(erlang:system_info(otp_release)), halt().' -noshell)"
-
-. ${rootdir}/version.mk
-COUCHDB_VERSION=${vsn_major}.${vsn_minor}.${vsn_patch}
-
-display_help () {
- cat << EOF
-Usage: $basename [OPTION]
-
-The $basename command is responsible for generating the build
-system for Apache CouchDB.
-
-Options:
-
- -h | --help display a short help message and exit
- -u | --user USER set the username to run as (defaults to $COUCHDB_USER)
- --disable-fauxton do not build Fauxton
- --disable-docs do not build any documentation or manpages
- --erlang-md5 use erlang for md5 hash operations
- --dev alias for --disable-docs --disable-fauxton
- --spidermonkey-version VSN specify the version of SpiderMonkey to use (defaults to $SM_VSN)
- --skip-deps do not update erlang dependencies
- --rebar=PATH use rebar by specified path (version >=2.6.0 && <3.0 required)
- --generate-tls-dev-cert generate a cert for TLS distribution (To enable TLS, change the vm.args file.)
- --rebar3=PATH use rebar3 by specified path
- --erlfmt=PATH use erlfmt by specified path
-EOF
-}
-
-# This is just an example to generate a certfile for TLS distribution.
-# This is not an endorsement of specific expiration limits, key sizes, or algorithms.
-generate_tls_dev_cert() {
- if [ ! -e "${rootdir}/dev/erlserver.pem" ]; then
- openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout key.pem -out cert.pem
- cat key.pem cert.pem > dev/erlserver.pem && rm key.pem cert.pem
- fi
-
- if [ ! -e "${rootdir}/dev/couch_ssl_dist.conf" ]; then
- cat > "${rootdir}/dev/couch_ssl_dist.conf" << EOF
-[{server,
- [{certfile, "${rootdir}/dev/erlserver.pem"},
- {secure_renegotiate, true}]},
- {client,
- [{secure_renegotiate, true}]}].
-EOF
- fi
-}
-
-parse_opts() {
- while :; do
- case $1 in
- -h|--help)
- display_help
- exit
- ;;
-
- --without-proper)
- WITH_PROPER="false"
- shift
- continue
- ;;
-
- --disable-fauxton)
- WITH_FAUXTON=0
- shift
- continue
- ;;
-
- --disable-docs)
- WITH_DOCS=0
- shift
- continue
- ;;
-
- --erlang-md5)
- ERLANG_MD5="true"
- shift
- continue
- ;;
-
- --dev)
- WITH_DOCS=0
- WITH_FAUXTON=0
- shift
- continue
- ;;
-
- --skip-deps)
- SKIP_DEPS=1
- shift
- continue
- ;;
-
- --rebar)
- if [ -x "$2" ]; then
- version=`$2 --version 2> /dev/null | grep -o "2\.[6-9]\.[0-9]"`
- if [ $? -ne 0 ]; then
- printf 'Rebar >=2.6.0 and <3.0.0 required' >&2
- exit 1
- fi
- eval REBAR=$2
- shift 2
- continue
- else
- printf 'ERROR: "--rebar" requires valid path to executable.\n' >&2
- exit 1
- fi
- ;;
-
- --rebar3)
- if [ -x "$2" ]; then
- eval REBAR3=$2
- shift 2
- continue
- else
- printf 'ERROR: "--rebar3" requires valid path to executable.\n' >&2
- exit 1
- fi
- ;;
-
- --erlfmt)
- if [ -x "$2" ]; then
- eval ERLFMT=$2
- shift 2
- continue
- else
- printf 'ERROR: "--erlfmt" requires valid path to executable.\n' >&2
- exit 1
- fi
- ;;
-
- --user|-u)
- if [ -n "$2" ]; then
- eval COUCHDB_USER=$2
- shift 2
- continue
- else
- printf 'ERROR: "--user" requires a non-empty argument.\n' >&2
- exit 1
- fi
- ;;
- --user=?*)
- eval COUCHDB_USER=${1#*=}
- ;;
- --user=)
- printf 'ERROR: "--user" requires a non-empty argument.\n' >&2
- exit 1
- ;;
-
- --spidermonkey-version)
- if [ -n "$2" ]; then
- eval SM_VSN=$2
- shift 2
- continue
- else
- printf 'ERROR: "--spidermonkey-version" requires a non-empty argument.\n' >&2
- exit 1
- fi
- ;;
- --spidermonkey-version=?*)
- eval SM_VSN=${1#*=}
- ;;
- --spidermonkey-version=)
- printf 'ERROR: "--spidermonkey-version" requires a non-empty argument.\n' >&2
- exit 1
- ;;
-
- --generate-tls-dev-cert)
- echo "WARNING: To enable TLS distribution, don't forget to customize vm.args file."
- generate_tls_dev_cert
- shift
- continue
- ;;
-
- --) # End of options
- shift
- break
- ;;
- -?*)
- echo "WARNING: Unknown option '$1', ignoring" >&2
- shift
- ;;
- *) # Done
- break
- esac
- shift
- done
-}
-
-parse_opts $@
-
-if [ "${ARCH}" = "aarch64" ] && [ "${SM_VSN}" = "60" ]
-then
- echo "ERROR: SpiderMonkey 60 is known broken on ARM 64 (aarch64). Use another version instead."
- exit 1
-fi
-
-echo "==> configuring couchdb in rel/couchdb.config"
-cat > rel/couchdb.config << EOF
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% The contents of this file are auto-generated by configure
-%
-{package_author_name, "$PACKAGE_AUTHOR_NAME"}.
-{prefix, "."}.
-{data_dir, "./data"}.
-{view_index_dir, "./data"}.
-{state_dir, "./data"}.
-{log_file, "$LOG_FILE"}.
-{fauxton_root, "./share/www"}.
-{user, "$COUCHDB_USER"}.
-{spidermonkey_version, "$SM_VSN"}.
-{node_name, "-name couchdb@127.0.0.1"}.
-{cluster_port, 5984}.
-{backend_port, 5986}.
-{prometheus_port, 17986}.
-EOF
-
-cat > install.mk << EOF
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-# The contents of this file are auto-generated by configure
-#
-package_author_name = $PACKAGE_AUTHOR_NAME
-
-with_fauxton = $WITH_FAUXTON
-with_docs = $WITH_DOCS
-
-user = $COUCHDB_USER
-spidermonkey_version = $SM_VSN
-EOF
-
-cat > $rootdir/config.erl << EOF
-{with_proper, $WITH_PROPER}.
-{erlang_md5, $ERLANG_MD5}.
-{spidermonkey_version, "$SM_VSN"}.
-EOF
-
-install_local_rebar() {
- if [ ! -x "${rootdir}/bin/rebar" ]; then
- if [ ! -d "${rootdir}/src/rebar" ]; then
- # git clone --depth 1 https://github.com/apache/couchdb-rebar.git ${rootdir}/src/rebar
- git clone https://github.com/apache/couchdb-rebar.git ${rootdir}/src/rebar
- fi
- make -C ${rootdir}/src/rebar
- mv ${rootdir}/src/rebar/rebar ${rootdir}/bin/rebar
- make -C ${rootdir}/src/rebar clean
- fi
-}
-
-install_local_rebar3() {
- if [ ! -x "${rootdir}/bin/rebar3" ]; then
- if [ ! -d "${rootdir}/src/rebar3" ]; then
- git clone --depth 1 --branch ${REBAR3_BRANCH} https://github.com/erlang/rebar3.git ${rootdir}/src/rebar3
- fi
- cd src/rebar3
- ./bootstrap
- mv ${rootdir}/src/rebar3/rebar3 ${rootdir}/bin/rebar3
- cd ../..
- fi
-}
-
-install_local_erlfmt() {
- if [ ! -x "${rootdir}/bin/erlfmt" ]; then
- if [ ! -d "${rootdir}/src/erlfmt" ]; then
- git clone --depth 1 https://github.com/WhatsApp/erlfmt.git ${rootdir}/src/erlfmt
- fi
- cd "${rootdir}"/src/erlfmt
- ${REBAR3} as release escriptize
- mv ${rootdir}/src/erlfmt/_build/release/bin/erlfmt ${rootdir}/bin/erlfmt
- ${REBAR3} clean
- cd ../..
- fi
-}
-
-if [ -z "${REBAR}" ]; then
- install_local_rebar
- REBAR=${rootdir}/bin/rebar
-fi
-
-if [ -z "${REBAR3}" ] && [ "${ERLANG_VER}" != "20" ]; then
- install_local_rebar3
- REBAR3=${rootdir}/bin/rebar3
-fi
-
-if [ -z "${ERLFMT}" ] && [ "${ERLANG_VER}" != "20" ]; then
- install_local_erlfmt
- ERLFMT=${rootdir}/bin/erlfmt
-fi
-
-# only update dependencies, when we are not in a release tarball
-if [ -d .git -a $SKIP_DEPS -ne 1 ]; then
- echo "==> updating dependencies"
- ${REBAR} get-deps update-deps
-fi
-
-# External repos frequently become integrated with the primary repo,
-# resulting in obsolete .git directories, and possible confusion.
-# It is usually a good idea to delete these .git directories.
-for path in $(find src -name .git -type d); do
- git ls-files --error-unmatch $(dirname $path) > /dev/null 2>&1 && \
- echo "WARNING unexpected .git directory $path"
-done
-
-echo "You have configured Apache CouchDB, time to relax. Relax."
diff --git a/configure.ps1 b/configure.ps1
deleted file mode 100644
index 54c63776e..000000000
--- a/configure.ps1
+++ /dev/null
@@ -1,250 +0,0 @@
-<#
-.SYNOPSIS
- Configures CouchDB for building.
-.DESCRIPTION
- This command is responsible for generating the build
- system for Apache CouchDB.
-
- -DisableFauxton request build process skip building Fauxton (default false)
- -DisableDocs request build process skip building documentation (default false)
- -SkipDeps do not update Erlang dependencies (default false)
- -CouchDBUser USER set the username to run as (defaults to current user)
- -SpiderMonkeyVersion VSN select the version of SpiderMonkey to use (defaults to 1.8.5)
-
- Installation directories:
- -Prefix PREFIX install architecture-independent files in PREFIX
- [C:\Program Files\Apache\CouchDB]
- -ExecPrefix EPREFIX install architecture-dependent files in EPREFIX
- [same as PREFIX]
-
- Fine tuning of the installation directories:
- -BinDir DIR user executables [EPREFIX\bin]
- -LibexecDir DIR program executables [EPREFIX\libexec]
- -LibDir DIR object code libraries [EPREFIX\lib]
- -SysconfDir DIR read-only single-machine data [PREFIX\etc]
- -DataRootDir DIR read-only arch.-independent data root [PREFIX\share]
- -LocalStateDir DIR modifiable single-machine data [PREFIX\var]
- -RunStateDir DIR modifiable single-machine runstate data [LOCALSTATEDIR\run]
- -DatabaseDir DIR specify the data directory [LOCALSTATEDIR\lib]
- -ViewindexDir DIR specify the view directory [LOCALSTATEDIR\lib]
- -LogDir DIR specify the log directory [LOCALSTATEDIR\log]
- -DataDir DIR read-only architecture-independent data [DATAROOTDIR]
- -ManDir DIR man documentation [DATAROOTDIR\man]
- -DocDir DIR documentation root [DATAROOTDIR\doc\apache-couchdb]
- -HTMLDir DIR html documentation [DOCDIR\html]
-.LINK
- http://couchdb.apache.org/
-#>
-
-#REQUIRES -Version 2.0
-[cmdletbinding()]
-
-Param(
- [switch]$Test = $false,
- [switch]$DisableFauxton = $false, # do not build Fauxton
- [switch]$DisableDocs = $false, # do not build any documentation or manpages
- [switch]$SkipDeps = $false, # do not update erlang dependencies
-
- [ValidateNotNullOrEmpty()]
- [string]$CouchDBUser = [Environment]::UserName, # set the username to run as (defaults to current user)
- [ValidateNotNullOrEmpty()]
- [string]$SpiderMonkeyVersion = "1.8.5", # select the version of SpiderMonkey to use (default 1.8.5)
- [ValidateNotNullOrEmpty()]
- [string]$Prefix = "C:\Program Files\Apache\CouchDB", # install architecture-independent file location (default C:\Program Files\Apache\CouchDB)
- [ValidateNotNullOrEmpty()]
- [string]$ExecPrefix = $Prefix, # install architecture-dependent file location (default C:\Program Files\Apache\CouchDB)
- [ValidateNotNullOrEmpty()]
- [string]$BinDir = "$ExecPrefix\bin", # user executable file location (default $ExecPrefix\bin)
- [ValidateNotNullOrEmpty()]
-    [string]$LibExecDir = "$ExecPrefix\libexec", # program executable file location (default $ExecPrefix\libexec)
- [ValidateNotNullOrEmpty()]
- [string]$LibDir = "$ExecPrefix\lib", # object code libraries (default $ExecPrefix\lib)
- [ValidateNotNullOrEmpty()]
-
- [Alias("EtcDir")]
- [string]$SysConfDir = "$Prefix\etc", # read-only single-machine data (default $Prefix\etc)
- [ValidateNotNullOrEmpty()]
- [string]$DataRootDir = "$Prefix\share", # read-only arch.-independent data root (default $Prefix\share)
-
- [ValidateNotNullOrEmpty()]
- [string]$LocalStateDir = "$Prefix\var", # modifiable single-machine data (default $Prefix\var)
- [ValidateNotNullOrEmpty()]
- [string]$RunStateDir = "$LocalStateDir\run", # modifiable single-machine run state (default $LocalStateDir\run)
- [ValidateNotNullOrEmpty()]
- [string]$DatabaseDir = "$LocalStateDir\lib", # database directory (default $LocalStateDir\lib)
- [ValidateNotNullOrEmpty()]
- [string]$ViewIndexDir = "$LocalStateDir\lib", # database view index directory (default $LocalStateDir\lib)
- [ValidateNotNullOrEmpty()]
- [string]$LogDir = "$LocalStateDir\log", # logging directory (default $LocalStateDir\log)
-
- [ValidateNotNullOrEmpty()]
- [string]$DataDir = "$DataRootDir", # read-only arch.-independent data (default $DataRootDir)
- [ValidateNotNullOrEmpty()]
- [string]$ManDir = "$DataRootDir\man", # man documentation (default $DataRootDir\man)
- [ValidateNotNullOrEmpty()]
-
-    [string]$DocDir = "$DataRootDir\doc\apache-couchdb", # documentation root (default $DataRootDir\doc\apache-couchdb)
- [ValidateNotNullOrEmpty()]
- [string]$HTMLDir = "$DocDir\html" # html documentation (default $DocDir\html)
-)
-
-
-# determine this script’s directory and change to it
-$rootdir = split-path -parent $MyInvocation.MyCommand.Definition
-Push-Location $rootdir
-[Environment]::CurrentDirectory = $PWD
-
-# We use this for testing this script
-# The test script lives in test/build/test-configure.sh
-If ($Test) {
- Write-Output @"
-"$Prefix" "$ExecPrefix" "$BinDir" "$LibExecDir" "$SysConfDir" "$DataRootDir" "$DataDir" "$LocalStateDir" "$RunStateDir" "$DocDir" "$LibDir" "$DatabaseDir" "$ViewIndexDir" "$LogDir" "$ManDir" "$HTMLDir"
-"@
- exit 0
-}
-
-# Translate ./configure variables to CouchDB variables
-$PackageAuthorName="The Apache Software Foundation"
-$InstallDir="$LibDir\couchdb"
-$LogFile="$LogDir\couch.log"
-$BuildFauxton = [int](-not $DisableFauxton)
-$BuildDocs = [int](-not $DisableDocs)
-$Hostname = [System.Net.Dns]::GetHostEntry([string]"localhost").HostName
-
-Write-Verbose "==> configuring couchdb in rel\couchdb.config"
-$CouchDBConfig = @"
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% The contents of this file are auto-generated by configure
-%
-{package_author_name, "$PackageAuthorName"}.
-{prefix, "."}.
-{data_dir, "./data"}.
-{view_index_dir, "./data"}.
-{log_file, ""}.
-{fauxton_root, "./share/www"}.
-{user, "$CouchDBUser"}.
-{spidermonkey_version, "$SpiderMonkeyVersion"}.
-{node_name, "-name couchdb@localhost"}.
-{cluster_port, 5984}.
-{backend_port, 5986}.
-"@
-$CouchDBConfig | Out-File "$rootdir\rel\couchdb.config" -encoding ascii
-
-#TODO: Output MS NMake file format? Stick with GNU Make?
-$InstallMk = @"
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-# The contents of this file are auto-generated by configure
-#
-package_author_name = $PackageAuthorName
-install_dir = $InstallDir
-
-bin_dir = $BinDir
-libexec_dir = $LibExecDir\couchdb
-doc_dir = $DocDir\couchdb
-sysconf_dir = $SysConfDir\couchdb
-data_dir = $DataDir\couchdb
-
-database_dir = $DatabaseDir
-view_index_dir = $ViewIndexDir
-log_file = $LogFile
-
-html_dir = $HTMLDir
-man_dir = $ManDir
-
-with_fauxton = $BuildFauxton
-with_docs = $BuildDocs
-
-user = $CouchDBUser
-spidermonkey_version = $SpiderMonkeyVersion
-"@
-$InstallMk | Out-File "$rootdir\install.mk" -encoding ascii
-
-$ConfigERL = @"
-{spidermonkey_version, "$SpiderMonkeyVersion"}.
-"@
-$ConfigERL | Out-File "$rootdir\config.erl" -encoding ascii
-
-if (((Get-Command "rebar.cmd" -ErrorAction SilentlyContinue) -eq $null) -or
- ((Get-Command "rebar3.cmd" -ErrorAction SilentlyContinue) -eq $null) -or
- ((Get-Command "erlfmt.cmd" -ErrorAction SilentlyContinue) -eq $null)) {
- $env:Path += ";$rootdir\bin"
-}
-
-# check for rebar; if not found, build it and add it to our path
-if ((Get-Command "rebar.cmd" -ErrorAction SilentlyContinue) -eq $null)
-{
- Write-Verbose "==> rebar.cmd not found; bootstrapping..."
- if (-Not (Test-Path "src\rebar"))
- {
- git clone --depth 1 https://github.com/apache/couchdb-rebar.git $rootdir\src\rebar
- }
- cmd /c "cd src\rebar && $rootdir\src\rebar\bootstrap.bat"
- cp $rootdir\src\rebar\rebar $rootdir\bin\rebar
- cp $rootdir\src\rebar\rebar.cmd $rootdir\bin\rebar.cmd
- make -C $rootdir\src\rebar clean
-}
-
-# check for rebar3; if not found, build it and add it to our path
-if ((Get-Command "rebar3.cmd" -ErrorAction SilentlyContinue) -eq $null)
-{
- Write-Verbose "==> rebar3.cmd not found; bootstrapping..."
- if (-Not (Test-Path "src\rebar3"))
- {
- git clone --depth 1 https://github.com/erlang/rebar3.git $rootdir\src\rebar3
- }
- cd src\rebar3
- .\bootstrap.ps1
- cp $rootdir\src\rebar3\rebar3 $rootdir\bin\rebar3
- cp $rootdir\src\rebar3\rebar3.cmd $rootdir\bin\rebar3.cmd
- cp $rootdir\src\rebar3\rebar3.ps1 $rootdir\bin\rebar3.ps1
- make -C $rootdir\src\rebar3 clean
- cd ..\..
-}
-
-# check for erlfmt; if not found, build it and add it to our path
-if ((Get-Command "erlfmt.cmd" -ErrorAction SilentlyContinue) -eq $null)
-{
- Write-Verbose "==> erlfmt.cmd not found; bootstrapping..."
- if (-Not (Test-Path "src\erlfmt"))
- {
- git clone --depth 1 https://github.com/WhatsApp/erlfmt.git $rootdir\src\erlfmt
- }
- cd src\erlfmt
- rebar3 as release escriptize
- cp $rootdir\src\erlfmt\_build\release\bin\erlfmt $rootdir\bin\erlfmt
- cp $rootdir\src\erlfmt\_build\release\bin\erlfmt.cmd $rootdir\bin\erlfmt.cmd
- make -C $rootdir\src\erlfmt clean
- cd ..\..
-}
-
-# only update dependencies, when we are not in a release tarball
-if ( (Test-Path .git -PathType Container) -and (-not $SkipDeps) ) {
- Write-Verbose "==> updating dependencies"
- rebar get-deps update-deps
-}
-
-Pop-Location
-[Environment]::CurrentDirectory = $PWD
-Write-Verbose "You have configured Apache CouchDB, time to relax. Relax."
diff --git a/dev/format_all.py b/dev/format_all.py
deleted file mode 100644
index 1927fb59e..000000000
--- a/dev/format_all.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python3
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-"""Erlang formatter for CouchDB
-Warning: this file needs to run from the CouchDB repo root.
-USAGE: ERLFMT_PATH=<path_to_erlfmt> python3 dev/format_all.py
-"""
-
-import os
-import sys
-import subprocess
-
-from format_lib import get_source_paths, get_erlang_version
-
-if __name__ == "__main__":
- if get_erlang_version() < 21:
-        print("Erlang version is < 21. Skipping formatting")
- sys.exit(0)
-
- for path in get_source_paths():
- subprocess.run(
- [os.environ["ERLFMT_PATH"], "-w", path],
- stdout=subprocess.PIPE,
- )
diff --git a/dev/format_check.py b/dev/format_check.py
deleted file mode 100644
index cbb0126d9..000000000
--- a/dev/format_check.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python3
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-"""Erlang formatter for CouchDB
-Warning: this file needs to run from the CouchDB repo root.
-USAGE: ERLFMT_PATH=<path_to_erlfmt> python3 dev/format_check.py
-"""
-
-import os
-import subprocess
-import sys
-
-from format_lib import get_source_paths, get_erlang_version
-
-
-if __name__ == "__main__":
- if get_erlang_version() < 21:
- print("Erlang version is < 21. Skipping format check")
- sys.exit(0)
-
- exit_code = 0
-
- for path in get_source_paths():
- run_result = subprocess.run(
- [os.environ["ERLFMT_PATH"], "-c", path],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- rc = run_result.returncode
- if rc != 0:
- print("\n %s error for %s" % (rc, path))
- stderr_lines = run_result.stderr.decode("utf-8").split("\n")
- for line in stderr_lines:
- print(" > %s" % line, file=sys.stderr)
- exit_code = 1
-
- sys.exit(exit_code)
diff --git a/dev/format_lib.py b/dev/format_lib.py
deleted file mode 100644
index 3db0057fc..000000000
--- a/dev/format_lib.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python3
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-"""Erlang formatter lib for CouchDB
-Warning: this file is not meant to be executed manually
-"""
-
-import pathlib
-import subprocess
-
-
-def get_erlang_version():
- args = [
- "erl",
- "-eval",
- "io:put_chars(erlang:system_info(otp_release)), halt().",
- "-noshell",
- ]
- res = subprocess.run(args, stdout=subprocess.PIPE, check=True)
- str_version = res.stdout.decode("utf-8").strip().strip('"')
- return int(str_version)
-
-
-# Generate source paths as "directory/*.erl" wildcard patterns
-# which can be directly consumed by erlfmt and processed in parallel
-#
-def get_source_paths():
- curdir = None
- for item in (
- subprocess.run(
- ["git", "ls-files", "--", "*.erl"],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- .stdout.decode("utf-8")
- .split("\n")
- ):
- path = pathlib.Path(item)
- if path.parent != curdir:
- yield str(path.parent.joinpath("*.erl"))
- curdir = path.parent
- if curdir is not None:
- yield str(curdir.joinpath("*.erl"))
diff --git a/dev/make_boot_script b/dev/make_boot_script
deleted file mode 100755
index 549dd9a07..000000000
--- a/dev/make_boot_script
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env escript
-
-main(_) ->
- {ok, Server} = reltool:start_server([
- {config, "../rel/reltool.config"}
- ]),
- {ok, Release} = reltool:get_rel(Server, "couchdb"),
- ok = file:write_file("devnode.rel", io_lib:format("~p.~n", [Release])),
- ok = systools:make_script("devnode", [local]).
diff --git a/dev/monitor_parent.erl b/dev/monitor_parent.erl
deleted file mode 100644
index 0e9e6c5b7..000000000
--- a/dev/monitor_parent.erl
+++ /dev/null
@@ -1,41 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(monitor_parent).
-
--export([start/0]).
-
-start() ->
- {ok, [[PPid]]} = init:get_argument(parent_pid),
- spawn(fun() -> monitor_parent(PPid) end).
-
-monitor_parent(PPid) ->
- timer:sleep(1000),
- case os:type() of
- {unix, _} ->
- case os:cmd("kill -0 " ++ PPid) of
- "" ->
- monitor_parent(PPid);
- _Else ->
- % Assume _Else is a no such process error
- init:stop()
- end;
- {win32, _} ->
- Fmt = "tasklist /fi \"PID eq ~s\" /fo csv /nh",
- Retval = os:cmd(io_lib:format(Fmt, [PPid])),
- case re:run(Retval, "^\"python.exe\",*") of
- {match, _} ->
- monitor_parent(PPid);
- nomatch ->
- init:stop()
- end
- end.
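On Unix, the removed monitor_parent module polls the parent with `kill -0 <pid>`, which sends no signal and only reports whether the process still exists. A rough Python equivalent of that liveness probe, shown purely for illustration:

    import os

    def parent_alive(ppid):
        # Signal 0 performs error checking only; nothing is delivered.
        try:
            os.kill(ppid, 0)
        except ProcessLookupError:
            return False  # no such process: the parent has exited
        except PermissionError:
            return True   # the process exists but belongs to another user
        return True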
diff --git a/dev/pbkdf2.py b/dev/pbkdf2.py
deleted file mode 100644
index 4416f8632..000000000
--- a/dev/pbkdf2.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- pbkdf2
- ~~~~~~
-
- This module implements pbkdf2 for Python. It also has some basic
- tests that ensure that it works. The implementation is straightforward
-    and uses stdlib-only stuff and can easily be copy/pasted into
- your favourite application.
-
-    Use this as a replacement for bcrypt that does not need a C implementation
- of a modified blowfish crypto algo.
-
- Example usage:
-
- >>> pbkdf2_hex('what i want to hash', 'the random salt')
- 'fa7cc8a2b0a932f8e6ea42f9787e9d36e592e0c222ada6a9'
-
- How to use this:
-
- 1. Use a constant time string compare function to compare the stored hash
- with the one you're generating::
-
- def safe_str_cmp(a, b):
- if len(a) != len(b):
- return False
- rv = 0
- for x, y in izip(a, b):
- rv |= ord(x) ^ ord(y)
- return rv == 0
-
-    2.  Use `os.urandom` to generate a proper salt of at least 8 bytes.
- Use a unique salt per hashed password.
-
- 3. Store ``algorithm$salt:costfactor$hash`` in the database so that
- you can upgrade later easily to a different algorithm if you need
- one. For instance ``PBKDF2-256$thesalt:10000$deadbeef...``.
-
-
- :copyright: (c) Copyright 2011 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-from binascii import hexlify
-import hmac
-import hashlib
-import sys
-from struct import Struct
-from operator import xor
-from itertools import starmap
-
-PY3 = sys.version_info[0] == 3
-
-if not PY3:
- from itertools import izip as zip
-
-if PY3:
- text_type = str
-else:
- text_type = unicode
-
-
-_pack_int = Struct(">I").pack
-
-
-def bytes_(s, encoding="utf8", errors="strict"):
- if isinstance(s, text_type):
- return s.encode(encoding, errors)
- return s
-
-
-def hexlify_(s):
- if PY3:
- return str(hexlify(s), encoding="utf8")
- else:
- return s.encode("hex")
-
-
-def range_(*args):
- if PY3:
- return range(*args)
- else:
- return xrange(*args)
-
-
-def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc=None):
- """Like :func:`pbkdf2_bin` but returns a hex encoded string."""
- return hexlify_(pbkdf2_bin(data, salt, iterations, keylen, hashfunc))
-
-
-def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
- """Returns a binary digest for the PBKDF2 hash algorithm of `data`
-    with the given `salt`. It iterates `iterations` times and produces a
-    key of `keylen` bytes. By default, SHA-1 is used as the hash function;
-    a different hashlib `hashfunc` can be provided.
- """
- hashfunc = hashfunc or hashlib.sha1
- mac = hmac.new(bytes_(data), None, hashfunc)
-
- def _pseudorandom(x, mac=mac):
- h = mac.copy()
- h.update(bytes_(x))
- if PY3:
- return [x for x in h.digest()]
- else:
- return map(ord, h.digest())
-
- buf = []
- for block in range_(1, -(-keylen // mac.digest_size) + 1):
- rv = u = _pseudorandom(bytes_(salt) + _pack_int(block))
- for i in range_(iterations - 1):
- if PY3:
- u = _pseudorandom(bytes(u))
- else:
- u = _pseudorandom("".join(map(chr, u)))
- rv = starmap(xor, zip(rv, u))
- buf.extend(rv)
- if PY3:
- return bytes(buf)[:keylen]
- else:
- return "".join(map(chr, buf))[:keylen]
-
-
-def test():
- failed = []
-
- def check(data, salt, iterations, keylen, expected):
- rv = pbkdf2_hex(data, salt, iterations, keylen)
- if rv != expected:
- print("Test failed:")
- print(" Expected: %s" % expected)
- print(" Got: %s" % rv)
- print(" Parameters:")
- print(" data=%s" % data)
- print(" salt=%s" % salt)
- print(" iterations=%d" % iterations)
- failed.append(1)
-
- # From RFC 6070
- check("password", "salt", 1, 20, "0c60c80f961f0e71f3a9b524af6012062fe037a6")
- check("password", "salt", 2, 20, "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957")
- check("password", "salt", 4096, 20, "4b007901b765489abead49d926f721d065a429c1")
- check(
- "passwordPASSWORDpassword",
- "saltSALTsaltSALTsaltSALTsaltSALTsalt",
- 4096,
- 25,
- "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038",
- )
- check("pass\x00word", "sa\x00lt", 4096, 16, "56fa6aa75548099dcc37d7f03425e0c3")
-    # This one is from the RFC but it just takes ages
- ##check('password', 'salt', 16777216, 20,
- ## 'eefe3d61cd4da4e4e9945b3d6ba2158c2634e984')
-
- # From Crypt-PBKDF2
- check(
- "password", "ATHENA.MIT.EDUraeburn", 1, 16, "cdedb5281bb2f801565a1122b2563515"
- )
- check(
- "password",
- "ATHENA.MIT.EDUraeburn",
- 1,
- 32,
- "cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837",
- )
- check(
- "password", "ATHENA.MIT.EDUraeburn", 2, 16, "01dbee7f4a9e243e988b62c73cda935d"
- )
- check(
- "password",
- "ATHENA.MIT.EDUraeburn",
- 2,
- 32,
- "01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86",
- )
- check(
- "password",
- "ATHENA.MIT.EDUraeburn",
- 1200,
- 32,
- "5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13",
- )
- check(
- "X" * 64,
- "pass phrase equals block size",
- 1200,
- 32,
- "139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1",
- )
- check(
- "X" * 65,
- "pass phrase exceeds block size",
- 1200,
- 32,
- "9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a",
- )
-
- raise SystemExit(bool(failed))
-
-
-if __name__ == "__main__":
- test()
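The removed pbkdf2 module predates `hashlib.pbkdf2_hmac`, which has been in the standard library since Python 3.4 and reproduces the same RFC 6070 vectors. A quick sanity check under a modern Python 3, matching the first check("password", "salt", 1, 20, ...) case above:

    import hashlib

    digest = hashlib.pbkdf2_hmac("sha1", b"password", b"salt", 1, dklen=20)
    assert digest.hex() == "0c60c80f961f0e71f3a9b524af6012062fe037a6"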
diff --git a/dev/remsh b/dev/remsh
deleted file mode 100755
index 347a799d0..000000000
--- a/dev/remsh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-if [ -z $NODE ]; then
- if [ -z $1 ]; then
- NODE=1
- else
- NODE=$1
- fi
-fi
-
-if [ -z $HOST ]; then
- HOST="127.0.0.1"
-fi
-
-NAME="remsh$$@$HOST"
-NODE="node$NODE@$HOST"
-erl -name $NAME -remsh $NODE -hidden
diff --git a/dev/remsh-tls b/dev/remsh-tls
deleted file mode 100755
index 089db669f..000000000
--- a/dev/remsh-tls
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-if [ -z $NODE ]; then
- if [ -z $1 ]; then
- NODE=1
- else
- NODE=$1
- fi
-fi
-
-if [ -z $HOST ]; then
- HOST="127.0.0.1"
-fi
-
-NAME="remsh$$@$HOST"
-NODE="node$NODE@$HOST"
-rootdir="$(cd "${0%/*}" 2>/dev/null; echo "$PWD")"
-erl -name $NAME -remsh $NODE -hidden -proto_dist inet_tls -ssl_dist_optfile "${rootdir}/couch_ssl_dist.conf"
diff --git a/dev/run b/dev/run
deleted file mode 100755
index 05ed16abb..000000000
--- a/dev/run
+++ /dev/null
@@ -1,862 +0,0 @@
-#!/usr/bin/env python3
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import atexit
-import base64
-import contextlib
-import functools
-import glob
-import inspect
-import json
-import ntpath
-import optparse
-import os
-import posixpath
-import re
-import socket
-import subprocess as sp
-import sys
-import time
-import uuid
-import traceback
-from configparser import ConfigParser
-
-from pbkdf2 import pbkdf2_hex
-
-COMMON_SALT = uuid.uuid4().hex
-
-try:
- from urllib.request import urlopen
-except ImportError:
- from urllib.request import urlopen
-
-try:
- import http.client as httpclient
-except ImportError:
- import http.client as httpclient
-
-
-def toposixpath(path):
- if os.sep == ntpath.sep:
- return path.replace(ntpath.sep, posixpath.sep)
- else:
- return path
-
-
-def log(msg):
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- def print_(chars):
- if log.verbose:
- sys.stdout.write(chars)
- sys.stdout.flush()
-
- argnames = list(inspect.signature(func).parameters.keys())
- callargs = dict(list(zip(argnames, args)))
- callargs.update(kwargs)
- print_("[ * ] " + msg.format(**callargs) + " ... ")
- try:
- res = func(*args, **kwargs)
- except KeyboardInterrupt:
- print_("ok\n")
- except Exception as err:
- print_("failed: %s\n" % err)
- raise
- else:
- print_("ok\n")
- return res
-
- return wrapper
-
- return decorator
-
-
-log.verbose = True
-
-
-def main():
- ctx = setup()
- startup(ctx)
- if ctx["cmd"]:
- run_command(ctx, ctx["cmd"])
- else:
- join(ctx, cluster_port(ctx, 1), *ctx["admin"])
-
-
-def setup():
- opts, args = setup_argparse()
- ctx = setup_context(opts, args)
- setup_logging(ctx)
- setup_dirs(ctx)
- check_beams(ctx)
- check_boot_script(ctx)
- setup_configs(ctx)
- return ctx
-
-
-def setup_logging(ctx):
- log.verbose = ctx["verbose"]
-
-
-def setup_argparse():
- parser = get_args_parser()
- return parser.parse_args()
-
-
-def get_args_parser():
- parser = optparse.OptionParser(description="Runs CouchDB 2.0 dev cluster")
- parser.add_option(
- "-a",
- "--admin",
- metavar="USER:PASS",
- default=None,
- help="Add an admin account to the development cluster",
- )
- parser.add_option(
- "-n",
- "--nodes",
- metavar="nodes",
- default=3,
- type=int,
- help="Number of development nodes to be spun up",
- )
- parser.add_option(
- "-q",
- "--quiet",
- action="store_false",
- dest="verbose",
- default=True,
- help="Don't print anything to STDOUT",
- )
- parser.add_option(
- "--with-admin-party-please",
- dest="with_admin_party",
- default=False,
- action="store_true",
- help="Runs a dev cluster with admin party mode on",
- )
- parser.add_option(
- "--enable-erlang-views",
- action="store_true",
- help="Enables the Erlang view server",
- )
- parser.add_option(
- "--no-join",
- dest="no_join",
- default=False,
- action="store_true",
- help="Do not join nodes on boot",
- )
- parser.add_option(
- "--with-haproxy",
- dest="with_haproxy",
- default=False,
- action="store_true",
- help="Use HAProxy",
- )
- parser.add_option(
- "--haproxy", dest="haproxy", default="haproxy", help="HAProxy executable path"
- )
- parser.add_option(
- "--haproxy-port", dest="haproxy_port", default="5984", help="HAProxy port"
- )
- parser.add_option(
- "--node-number",
- dest="node_number",
- type=int,
- default=1,
-        help="The starting node number to use when creating the node(s)",
- )
- parser.add_option(
- "-c",
- "--config-overrides",
- action="append",
- default=[],
- help="Optional key=val config overrides. Can be repeated",
- )
- parser.add_option(
- "--erlang-config",
- dest="erlang_config",
- default="rel/files/sys.config",
- help="Specify an alternative Erlang application configuration",
- )
- parser.add_option(
- "--degrade-cluster",
- dest="degrade_cluster",
- type=int,
- default=0,
- help="The number of nodes that should be stopped after cluster config",
- )
- parser.add_option(
- "--no-eval",
- action="store_true",
- default=False,
- help="Do not eval subcommand output",
- )
- parser.add_option(
- "--auto-ports",
- dest="auto_ports",
- default=False,
- action="store_true",
- help="Select available ports for nodes automatically",
- )
- parser.add_option(
- "--extra_args",
- dest="extra_args",
- default=None,
- help="Extra arguments to pass to beam process",
- )
- parser.add_option(
- "-l",
- "--locald-config",
- dest="locald_configs",
- action="append",
- default=[],
- help="Path to config to place in 'local.d'. Can be repeated",
- )
- return parser
-
-
-def setup_context(opts, args):
- fpath = os.path.abspath(__file__)
- return {
- "N": opts.nodes,
- "no_join": opts.no_join,
- "with_admin_party": opts.with_admin_party,
- "enable_erlang_views": opts.enable_erlang_views,
- "admin": opts.admin.split(":", 1) if opts.admin else None,
- "nodes": ["node%d" % (i + opts.node_number) for i in range(opts.nodes)],
- "node_number": opts.node_number,
- "degrade_cluster": opts.degrade_cluster,
- "devdir": os.path.dirname(fpath),
- "rootdir": os.path.dirname(os.path.dirname(fpath)),
- "cmd": " ".join(args),
- "verbose": opts.verbose,
- "with_haproxy": opts.with_haproxy,
- "haproxy": opts.haproxy,
- "haproxy_port": opts.haproxy_port,
- "config_overrides": opts.config_overrides,
- "erlang_config": opts.erlang_config,
- "no_eval": opts.no_eval,
- "extra_args": opts.extra_args,
- "reset_logs": True,
- "procs": [],
- "auto_ports": opts.auto_ports,
- "locald_configs": opts.locald_configs,
- }
-
-
-@log("Setup environment")
-def setup_dirs(ctx):
- ensure_dir_exists(ctx["devdir"], "logs")
-
-
-def ensure_dir_exists(root, *segments):
- path = os.path.join(root, *segments)
- if not os.path.exists(path):
- os.makedirs(path)
- return path
-
-
-@log("Ensure CouchDB is built")
-def check_beams(ctx):
- for fname in glob.glob(os.path.join(ctx["devdir"], "*.erl")):
- sp.check_call(["erlc", "-o", ctx["devdir"] + os.sep, fname])
-
-
-@log("Ensure Erlang boot script exists")
-def check_boot_script(ctx):
- if not os.path.exists(os.path.join(ctx["devdir"], "devnode.boot")):
- env = os.environ.copy()
- env["ERL_LIBS"] = os.path.join(ctx["rootdir"], "src")
- sp.check_call(["escript", "make_boot_script"], env=env, cwd=ctx["devdir"])
-
-
-@log("Prepare configuration files")
-def setup_configs(ctx):
- for idx, node in enumerate(ctx["nodes"]):
- cluster_port, backend_port, prometheus_port = get_ports(
- ctx, idx + ctx["node_number"]
- )
- env = {
- "prefix": toposixpath(ctx["rootdir"]),
- "package_author_name": "The Apache Software Foundation",
- "data_dir": toposixpath(
- ensure_dir_exists(ctx["devdir"], "lib", node, "data")
- ),
- "view_index_dir": toposixpath(
- ensure_dir_exists(ctx["devdir"], "lib", node, "data")
- ),
- "state_dir": toposixpath(
- ensure_dir_exists(ctx["devdir"], "lib", node, "data")
- ),
- "node_name": "-name %s@127.0.0.1" % node,
- "cluster_port": cluster_port,
- "backend_port": backend_port,
- "prometheus_port": prometheus_port,
- "uuid": "fake_uuid_for_dev",
- "_default": "",
- }
- write_config(ctx, node, env)
- write_locald_configs(ctx, node, env)
- generate_haproxy_config(ctx)
-
-
-def write_locald_configs(ctx, node, env):
- for locald_config in ctx["locald_configs"]:
- config_src = os.path.join(ctx["rootdir"], locald_config)
- if os.path.exists(config_src):
- config_filename = os.path.basename(config_src)
- config_tgt = os.path.join(
- ctx["devdir"], "lib", node, "etc", "local.d", config_filename
- )
- with open(config_src) as handle:
- content = handle.read()
- with open(config_tgt, "w") as handle:
- handle.write(content)
-
-
-def generate_haproxy_config(ctx):
- haproxy_config = os.path.join(ctx["devdir"], "lib", "haproxy.cfg")
- template = os.path.join(ctx["rootdir"], "rel", "haproxy.cfg")
-
- with open(template) as handle:
- config = handle.readlines()
-
- out = []
- for line in config:
- match = re.match("(.*?)<<(.*?)>>(.*?)", line, re.S)
- if match:
- prefix, template, suffix = match.groups()
- for node in ctx["nodes"]:
- node_idx = int(node.replace("node", ""))
- text = template.format(
- **{"node_idx": node_idx, "port": cluster_port(ctx, node_idx)}
- )
- out.append(prefix + text + suffix)
- else:
- out.append(line)
-
- with open(haproxy_config, "w") as handle:
- handle.write("\n".join(out))
-
-
-def apply_config_overrides(ctx, content):
- for kv_str in ctx["config_overrides"]:
- key, val = kv_str.split("=")
- key, val = key.strip(), val.strip()
- match = "[;=]{0,2}%s.*" % key
- repl = "%s = %s" % (key, val)
- content = re.sub(match, repl, content)
- return content
-
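Note that the -c overrides above are applied as a plain regular-expression substitution over the generated default.ini, not as a section-aware merge. A minimal sketch of what a single key=val override does, using max_dbs_open (a key that appears commented out in default.ini):

import re

# Same substitution apply_config_overrides() performs for each -c key=val pair;
# it matches the key anywhere in the file, with or without a leading ";".
content = ";max_dbs_open = 500\n"
key, val = "max_dbs_open", "1000"
content = re.sub("[;=]{0,2}%s.*" % key, "%s = %s" % (key, val), content)
print(content)  # -> max_dbs_open = 1000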
-
-def get_ports(ctx, idnode):
- assert idnode
- if idnode <= 5 and not ctx["auto_ports"]:
- return (
- (10000 * idnode) + 5984,
- (10000 * idnode) + 5986,
- (10000 * idnode) + 7986,
- )
- else:
- return tuple(get_available_ports(2))
-
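For reference, the fixed branch above is what gives the dev cluster its familiar port layout; a small sketch of the same arithmetic:

# Port layout produced by the fixed (non --auto-ports) branch of get_ports().
def dev_ports(node_number):
    base = 10000 * node_number
    return base + 5984, base + 5986, base + 7986  # clustered, backend, prometheus

# node1 -> (15984, 15986, 17986), node2 -> (25984, 25986, 27986), node3 -> (35984, 35986, 37986)
print(dev_ports(1))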
-
-def get_available_ports(num):
- ports = []
- while len(ports) < num + 1:
- with contextlib.closing(
- socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- ) as soc:
- soc.bind(("localhost", 0))
- _, port = soc.getsockname()
- if port not in ports:
- ports.append(port)
- return ports
-
-
-def get_node_config(ctx, node_idx):
- node = "node{}".format(node_idx)
- config_dir = os.path.join(ctx["devdir"], "lib", node, "etc")
- config = ConfigParser()
- config.read(
- [os.path.join(config_dir, "default.ini"), os.path.join(config_dir, "local.ini")]
- )
- return config
-
-
-def backend_port(ctx, n):
- return int(get_node_config(ctx, n).get("httpd", "port"))
-
-
-def cluster_port(ctx, n):
- return int(get_node_config(ctx, n).get("chttpd", "port"))
-
-
-def write_config(ctx, node, env):
- etc_src = os.path.join(ctx["rootdir"], "rel", "overlay", "etc")
- etc_tgt = ensure_dir_exists(ctx["devdir"], "lib", node, "etc")
-
- for fname in glob.glob(os.path.join(etc_src, "*")):
- base = os.path.basename(fname)
- tgt = os.path.join(etc_tgt, base)
-
- if os.path.isdir(fname):
- continue
-
- with open(fname) as handle:
- content = handle.read()
-
- for key in env:
- content = re.sub("{{%s}}" % key, str(env[key]), content)
-
- if base == "default.ini":
- content = hack_default_ini(ctx, node, content)
- content = apply_config_overrides(ctx, content)
- elif base == "local.ini":
- content = hack_local_ini(ctx, content)
-
- with open(tgt, "w") as handle:
- handle.write(content)
-
- ensure_dir_exists(etc_tgt, "local.d")
-
-
-def boot_haproxy(ctx):
- if not ctx["with_haproxy"]:
- return
- config = os.path.join(ctx["devdir"], "lib", "haproxy.cfg")
- cmd = [ctx["haproxy"], "-f", config]
- logfname = os.path.join(ctx["devdir"], "logs", "haproxy.log")
- log = open(logfname, "w")
- env = os.environ.copy()
- if "HAPROXY_PORT" not in env:
- env["HAPROXY_PORT"] = ctx["haproxy_port"]
- return sp.Popen(
- " ".join(cmd), shell=True, stdin=sp.PIPE, stdout=log, stderr=sp.STDOUT, env=env
- )
-
-
-def hack_default_ini(ctx, node, contents):
-
- contents = re.sub(
- "^\[httpd\]$",
- "[httpd]\nenable = true",
- contents,
- flags=re.MULTILINE,
- )
-
- if ctx["enable_erlang_views"]:
- contents = re.sub(
- "^\[native_query_servers\]$",
- "[native_query_servers]\nerlang = {couch_native_process, start_link, []}",
- contents,
- flags=re.MULTILINE,
- )
- contents = re.sub("n=3", "n=%s" % ctx["N"], contents)
- return contents
-
-
-def hack_local_ini(ctx, contents):
- # make sure all nodes have the same secret
- secret_line = "secret = %s\n" % COMMON_SALT
- previous_line = "; require_valid_user = false\n"
- contents = contents.replace(previous_line, previous_line + secret_line)
-
- if ctx["with_admin_party"]:
- os.environ["COUCHDB_TEST_ADMIN_PARTY_OVERRIDE"] = "1"
- ctx["admin"] = ("Admin Party!", "You do not need any password.")
- return contents
-
- # handle admin credentials passed from the cli or generate our own
- if ctx["admin"] is None:
- ctx["admin"] = user, pswd = "root", gen_password()
- else:
- user, pswd = ctx["admin"]
-
- return contents + "\n%s = %s" % (user, hashify(pswd))
-
-
-def gen_password():
- # TODO: figure out how to generate something more friendly here
- return base64.b64encode(os.urandom(6)).decode()
-
-
-def hashify(pwd, salt=COMMON_SALT, iterations=10, keylen=20):
- """
- Implements password hashing according to:
- - https://issues.apache.org/jira/browse/COUCHDB-1060
- - https://issues.apache.org/jira/secure/attachment/12492631/0001-Integrate-PBKDF2.patch
-
- This test uses 'candeira:candeira'
-
- >>> hashify(candeira)
- -pbkdf2-99eb34d97cdaa581e6ba7b5386e112c265c5c670,d1d2d4d8909c82c81b6c8184429a0739,10
- """
- derived_key = pbkdf2_hex(pwd, salt, iterations, keylen)
- return "-pbkdf2-%s,%s,%s" % (derived_key, salt, iterations)
-
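The string returned above encodes the derived key, the salt, and the iteration count. A hedged verification sketch, assuming dev/pbkdf2.py implements standard PBKDF2-HMAC-SHA1 (the scheme referenced by COUCHDB-1060); the helper name verify_pbkdf2 is illustrative only:

import hashlib
import hmac

def verify_pbkdf2(password, stored):
    # stored looks like "-pbkdf2-<derived_key_hex>,<salt>,<iterations>"
    assert stored.startswith("-pbkdf2-")
    derived_hex, salt, iterations = stored[len("-pbkdf2-"):].split(",")
    # Assumption: the stored key was derived with PBKDF2-HMAC-SHA1.
    candidate = hashlib.pbkdf2_hmac(
        "sha1",
        password.encode(),
        salt.encode(),
        int(iterations),
        dklen=len(derived_hex) // 2,
    )
    return hmac.compare_digest(candidate.hex(), derived_hex)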
-
-def startup(ctx):
- atexit.register(kill_processes, ctx)
- boot_nodes(ctx)
- ensure_all_nodes_alive(ctx)
- if ctx["no_join"]:
- return
- if ctx["with_admin_party"]:
- cluster_setup_with_admin_party(ctx)
- else:
- cluster_setup(ctx)
- if ctx["degrade_cluster"] > 0:
- degrade_cluster(ctx)
-
-
-def kill_processes(ctx):
- for proc in ctx["procs"]:
- if proc and proc.returncode is None:
- proc.kill()
-
-
-def degrade_cluster(ctx):
- if ctx["with_haproxy"]:
- haproxy_proc = ctx["procs"].pop()
- for i in range(0, ctx["degrade_cluster"]):
- proc = ctx["procs"].pop()
- if proc is not None:
- kill_process(proc)
- if ctx["with_haproxy"]:
- ctx["procs"].append(haproxy_proc)
-
-
-@log("Stoping proc {proc.pid}")
-def kill_process(proc):
- if proc and proc.returncode is None:
- proc.kill()
-
-
-def boot_nodes(ctx):
- for node in ctx["nodes"]:
- ctx["procs"].append(boot_node(ctx, node))
- haproxy_proc = boot_haproxy(ctx)
- if haproxy_proc is not None:
- ctx["procs"].append(haproxy_proc)
-
-
-def ensure_all_nodes_alive(ctx):
- status = dict((num, False) for num in list(range(ctx["N"])))
- for _ in range(10):
- for num in range(ctx["N"]):
- if status[num]:
- continue
- local_port = cluster_port(ctx, num + 1)
- url = "http://127.0.0.1:{0}/".format(local_port)
- try:
- check_node_alive(url)
- except:
- pass
- else:
- status[num] = True
- if all(status.values()):
- return
- time.sleep(1)
- if not all(status.values()):
- print("Failed to start all the nodes." " Check the dev/logs/*.log for errors.")
- sys.exit(1)
-
-
-@log("Check node at {url}")
-def check_node_alive(url):
- error = None
- for _ in range(10):
- try:
- with contextlib.closing(urlopen(url)):
- pass
- except Exception as exc:
- error = exc
- time.sleep(1)
- else:
- error = None
- break
- if error is not None:
- raise error
-
-
-def set_boot_env(ctx):
-
- # fudge fauxton path
- if os.path.exists("src/fauxton/dist/release"):
- fauxton_root = "src/fauxton/dist/release"
- else:
- fauxton_root = "share/www"
-
- os.environ["COUCHDB_FAUXTON_DOCROOT"] = fauxton_root
-
- # fudge default query server paths
- couchjs = os.path.join(ctx["rootdir"], "src", "couch", "priv", "couchjs")
- mainjs = os.path.join(ctx["rootdir"], "share", "server", "main.js")
- coffeejs = os.path.join(ctx["rootdir"], "share", "server", "main-coffee.js")
-
- qs_javascript = toposixpath("%s %s" % (couchjs, mainjs))
- qs_coffeescript = toposixpath("%s %s" % (couchjs, coffeejs))
-
- os.environ["COUCHDB_QUERY_SERVER_JAVASCRIPT"] = qs_javascript
- os.environ["COUCHDB_QUERY_SERVER_COFFEESCRIPT"] = qs_coffescript
-
-
-@log("Start node {node}")
-def boot_node(ctx, node):
- set_boot_env(ctx)
- env = os.environ.copy()
- env["ERL_LIBS"] = os.path.join(ctx["rootdir"], "src")
-
- node_etcdir = os.path.join(ctx["devdir"], "lib", node, "etc")
- reldir = os.path.join(ctx["rootdir"], "rel")
-
- cmd = [
- "erl",
- "-args_file",
- os.path.join(node_etcdir, "vm.args"),
- "-config",
- os.path.join(ctx["rootdir"], ctx["erlang_config"]),
- "-couch_ini",
- os.path.join(node_etcdir, "default.ini"),
- os.path.join(node_etcdir, "local.ini"),
- os.path.join(node_etcdir, "local.d"),
- "-reltool_config",
- os.path.join(reldir, "reltool.config"),
- "-parent_pid",
- str(os.getpid()),
- "-boot",
- os.path.join(ctx["devdir"], "devnode"),
- "-pa",
- ctx["devdir"],
- "-s monitor_parent",
- ]
- if ctx["reset_logs"]:
- mode = "wb"
- else:
- mode = "r+b"
- logfname = os.path.join(ctx["devdir"], "logs", "%s.log" % node)
- log = open(logfname, mode)
- if "extra_args" in ctx and ctx["extra_args"]:
- cmd += ctx["extra_args"].split(" ")
- cmd = [toposixpath(x) for x in cmd]
- return sp.Popen(cmd, stdin=sp.PIPE, stdout=log, stderr=sp.STDOUT, env=env)
-
-
-@log("Running cluster setup")
-def cluster_setup(ctx):
- lead_port = cluster_port(ctx, 1)
- if enable_cluster(ctx["N"], lead_port, *ctx["admin"]):
- for num in range(1, ctx["N"]):
- node_port = cluster_port(ctx, num + 1)
- node_name = ctx["nodes"][num]
- enable_cluster(ctx["N"], node_port, *ctx["admin"])
- add_node(lead_port, node_name, node_port, *ctx["admin"])
- finish_cluster(lead_port, *ctx["admin"])
- return lead_port
-
-
-def enable_cluster(node_count, port, user, pswd):
- conn = httpclient.HTTPConnection("127.0.0.1", port)
- conn.request(
- "POST",
- "/_cluster_setup",
- json.dumps(
- {
- "action": "enable_cluster",
- "bind_address": "0.0.0.0",
- "username": user,
- "password": pswd,
- "node_count": node_count,
- }
- ),
- {
- "Authorization": basic_auth_header(user, pswd),
- "Content-Type": "application/json",
- },
- )
- resp = conn.getresponse()
- if resp.status == 400:
- resp.close()
- return False
- assert resp.status == 201, resp.read()
- resp.close()
- return True
-
-
-def add_node(lead_port, node_name, node_port, user, pswd):
- conn = httpclient.HTTPConnection("127.0.0.1", lead_port)
- conn.request(
- "POST",
- "/_cluster_setup",
- json.dumps(
- {
- "action": "add_node",
- "host": "127.0.0.1",
- "port": node_port,
- "name": node_name,
- "username": user,
- "password": pswd,
- }
- ),
- {
- "Authorization": basic_auth_header(user, pswd),
- "Content-Type": "application/json",
- },
- )
- resp = conn.getresponse()
- assert resp.status in (201, 409), resp.read()
- resp.close()
-
-
-def set_cookie(port, user, pswd):
- conn = httpclient.HTTPConnection("127.0.0.1", port)
- conn.request(
- "POST",
- "/_cluster_setup",
- json.dumps({"action": "receive_cookie", "cookie": generate_cookie()}),
- {
- "Authorization": basic_auth_header(user, pswd),
- "Content-Type": "application/json",
- },
- )
- resp = conn.getresponse()
- assert resp.status == 201, resp.read()
- resp.close()
-
-
-def finish_cluster(port, user, pswd):
- conn = httpclient.HTTPConnection("127.0.0.1", port)
- conn.request(
- "POST",
- "/_cluster_setup",
- json.dumps({"action": "finish_cluster"}),
- {
- "Authorization": basic_auth_header(user, pswd),
- "Content-Type": "application/json",
- },
- )
- resp = conn.getresponse()
- # 400 if the cluster has already been set up
- assert resp.status in (201, 400), resp.read()
- resp.close()
-
-
-def basic_auth_header(user, pswd):
- return "Basic " + base64.b64encode((user + ":" + pswd).encode()).decode()
-
-
-def generate_cookie():
- return base64.b64encode(os.urandom(12)).decode()
-
-
-def cluster_setup_with_admin_party(ctx):
- connect_nodes(ctx)
- host, port = "127.0.0.1", cluster_port(ctx, 1)
- create_system_databases(host, port)
-
-
-def connect_nodes(ctx):
- host, port = "127.0.0.1", backend_port(ctx, 1)
- for node in ctx["nodes"]:
- path = "/_nodes/%s@127.0.0.1" % node
- try_request(
- host,
- port,
- "PUT",
- path,
- (200, 201, 202, 409),
- body="{}",
- error="Failed to join %s into cluster:\n" % node,
- )
-
-
-def try_request(
- host, port, meth, path, success_codes, body=None, retries=10, retry_dt=1, error=""
-):
- while True:
- conn = httpclient.HTTPConnection(host, port)
- conn.request(meth, path, body=body)
- resp = conn.getresponse()
- if resp.status in success_codes:
- return resp.status, resp.read()
- elif retries <= 0:
- assert resp.status in success_codes, "%s%s" % (error, resp.read())
- retries -= 1
- time.sleep(retry_dt)
-
-
-def create_system_databases(host, port):
- for dbname in ["_users", "_replicator", "_global_changes"]:
- conn = httpclient.HTTPConnection(host, port)
- conn.request("HEAD", "/" + dbname)
- resp = conn.getresponse()
- if resp.status == 404:
- try_request(
- host,
- port,
- "PUT",
- "/" + dbname,
- (201, 202, 412),
- error="Failed to create '%s' database:\n" % dbname,
- )
-
-
-@log(
- "Developers cluster is set up at http://127.0.0.1:{lead_port}.\n"
- "Admin username: {user}\n"
- "Password: {password}\n"
- "Time to hack!"
-)
-def join(ctx, lead_port, user, password):
- while True:
- for proc in ctx["procs"]:
- if proc is not None and proc.returncode is not None:
- exit(1)
- time.sleep(2)
-
-
-@log("Exec command {cmd}")
-def run_command(ctx, cmd):
- if ctx["no_eval"]:
- p = sp.Popen(cmd, shell=True)
- p.wait()
- exit(p.returncode)
- else:
- p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sys.stderr)
- while True:
- line = p.stdout.readline()
- if not line:
- break
- eval(line)
- p.wait()
- exit(p.returncode)
-
-
-@log("Restart all nodes")
-def reboot_nodes(ctx):
- ctx["reset_logs"] = False
- kill_processes(ctx)
- boot_nodes(ctx)
- ensure_all_nodes_alive(ctx)
-
-
-if __name__ == "__main__":
- try:
- main()
- except KeyboardInterrupt:
- pass
diff --git a/dev/run.cmd b/dev/run.cmd
deleted file mode 100644
index 14ce2276c..000000000
--- a/dev/run.cmd
+++ /dev/null
@@ -1,15 +0,0 @@
-@ECHO OFF
-
-:: Licensed under the Apache License, Version 2.0 (the "License"); you may not
-:: use this file except in compliance with the License. You may obtain a copy of
-:: the License at
-::
-:: http://www.apache.org/licenses/LICENSE-2.0
-::
-:: Unless required by applicable law or agreed to in writing, software
-:: distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-:: WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-:: License for the specific language governing permissions and limitations under
-:: the License.
-
-python %~dp0\run %*
diff --git a/erlang_ls.config b/erlang_ls.config
deleted file mode 100644
index 94483cfec..000000000
--- a/erlang_ls.config
+++ /dev/null
@@ -1,5 +0,0 @@
-apps_dirs:
- - "src/*"
-include_dirs:
- - "src"
- - "src/*/include"
diff --git a/make.cmd b/make.cmd
deleted file mode 100644
index bf8677898..000000000
--- a/make.cmd
+++ /dev/null
@@ -1,3 +0,0 @@
-@ECHO OFF
-
-make.exe -f Makefile.win %*
diff --git a/mix.exs b/mix.exs
deleted file mode 100644
index 577a20491..000000000
--- a/mix.exs
+++ /dev/null
@@ -1,162 +0,0 @@
-defmodule CoverTool do
- def start(path, options) do
- {dirs, options} = Keyword.pop(options, :dirs, [])
- fun = ExCoveralls.start(path, options)
- Mix.shell().info("Cover compiling modules ...")
- :cover.stop()
- :cover.start()
-
- Enum.each(dirs, fn path ->
- path
- |> Path.expand(__DIR__)
- |> String.to_charlist()
- |> :cover.compile_beam_directory()
- end)
-
- ExCoveralls.ConfServer.start()
- ExCoveralls.ConfServer.set(options)
- ExCoveralls.StatServer.start()
- fun
- end
-end
-
-defmodule Mix.Tasks.Suite do
- @moduledoc """
- Helper task to create the `suite.elixir` file. It is supposed to be used as follows
- ```
- MIX_ENV=integration mix suite > test/elixir/test/config/suite.elixir
- ```
- """
- use Mix.Task
- @shortdoc "Outputs all availabe integration tests"
- def run(_) do
- Path.wildcard(Path.join(Mix.Project.build_path(), "/**/ebin"))
- |> Enum.filter(&File.dir?/1)
- |> Enum.map(&Code.append_path/1)
-
- tests =
- Couch.Test.Suite.list()
- |> Enum.sort()
- |> Couch.Test.Suite.group_by()
-
- IO.puts(Couch.Test.Suite.pretty_print(tests))
- end
-end
-
-defmodule CouchDBTest.Mixfile do
- use Mix.Project
-
- def project do
- [
- app: :couchdbtest,
- version: "0.1.0",
- elixir: "~> 1.5",
- lockfile: Path.expand("mix.lock", __DIR__),
- deps_path: Path.expand("src", __DIR__),
- build_path: Path.expand("_build", __DIR__),
- compilers: [:elixir, :app],
- start_permanent: Mix.env() == :prod,
- build_embedded: Mix.env() == :prod,
- deps: deps(),
- consolidate_protocols: Mix.env() not in [:test, :dev, :integration],
- test_paths: get_test_paths(Mix.env()),
- elixirc_paths: elixirc_paths(Mix.env()),
- test_coverage: [
- tool: CoverTool,
- dirs: get_coverage_paths(),
- type: "html"
- ]
- ]
- end
-
- # Run "mix help compile.app" to learn about applications.
- def application do
- [
- extra_applications: [:logger],
- applications: [:httpotion]
- ]
- end
-
- # Specifies which paths to compile per environment.
- defp elixirc_paths(:test), do: ["test/elixir/lib", "test/elixir/test/support"]
- defp elixirc_paths(:integration), do: ["test/elixir/lib", "test/elixir/test/support"]
- defp elixirc_paths(_), do: ["test/elixir/lib"]
-
- # Run "mix help deps" to learn about dependencies.
- defp deps() do
- [
- {:junit_formatter, "~> 3.0", only: [:dev, :test, :integration]},
- {:httpotion, ">= 3.1.3", only: [:dev, :test, :integration], runtime: false},
- {:excoveralls, "~> 0.12", only: :test},
- {:b64url, path: Path.expand("src/b64url", __DIR__)},
- {:jiffy, path: Path.expand("src/jiffy", __DIR__)},
- {:jwtf, path: Path.expand("src/jwtf", __DIR__)},
- {:ibrowse,
- path: Path.expand("src/ibrowse", __DIR__), override: true, compile: false},
- {:credo, "~> 1.5.6", only: [:dev, :test, :integration], runtime: false}
- ]
- end
-
- def get_test_paths(:test) do
- Path.wildcard("src/*/test/exunit") |> Enum.filter(&File.dir?/1)
- end
-
- def get_test_paths(:integration) do
- integration_tests =
- Path.wildcard("src/*/test/integration") |> Enum.filter(&File.dir?/1)
-
- ["test/elixir/test" | integration_tests]
- end
-
- def get_test_paths(_) do
- []
- end
-
- defp get_deps_paths() do
- deps = [
- "bunt",
- "certifi",
- "credo",
- "excoveralls",
- "hackney",
- "httpotion",
- "ibrowse",
- "idna",
- "jason",
- "jiffy",
- "junit_formatter",
- "metrics",
- "mimerl",
- "parse_trans",
- "ssl_verify_fun",
- "unicode_util_compat",
- "b64url",
- "bear",
- "mochiweb",
- "snappy",
- "rebar",
- "proper",
- "mochiweb",
- "meck",
- "khash",
- "hyper",
- "fauxton",
- "folsom",
- "hqueue"
- ]
-
- deps |> Enum.map(fn app -> "src/#{app}" end)
- end
-
- defp get_coverage_paths() do
- deps =
- get_deps_paths()
- |> Enum.reduce(MapSet.new(), fn x, set ->
- MapSet.put(set, "#{x}/ebin")
- end)
-
- Path.wildcard("src/*/ebin")
- |> Enum.filter(&File.dir?/1)
- |> Enum.filter(fn path -> not MapSet.member?(deps, path) end)
- end
-end
diff --git a/mix.lock b/mix.lock
deleted file mode 100644
index 48e7c719f..000000000
--- a/mix.lock
+++ /dev/null
@@ -1,19 +0,0 @@
-%{
- "bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm", "7af5c7e09fe1d40f76c8e4f9dd2be7cebd83909f31fee7cd0e9eadc567da8353"},
- "certifi": {:hex, :certifi, "2.5.1", "867ce347f7c7d78563450a18a6a28a8090331e77fa02380b4a21962a65d36ee5", [:rebar3], [{:parse_trans, "~>3.3", [hex: :parse_trans, repo: "hexpm", optional: false]}], "hexpm", "805abd97539caf89ec6d4732c91e62ba9da0cda51ac462380bbd28ee697a8c42"},
- "credo": {:hex, :credo, "1.5.6", "e04cc0fdc236fefbb578e0c04bd01a471081616e741d386909e527ac146016c6", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "4b52a3e558bd64e30de62a648518a5ea2b6e3e5d2b164ef5296244753fc7eb17"},
- "excoveralls": {:hex, :excoveralls, "0.12.1", "a553c59f6850d0aff3770e4729515762ba7c8e41eedde03208182a8dc9d0ce07", [:mix], [{:hackney, "~> 1.0", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "5c1f717066a299b1b732249e736c5da96bb4120d1e55dc2e6f442d251e18a812"},
- "file_system": {:hex, :file_system, "0.2.10", "fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", "41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"},
- "hackney": {:hex, :hackney, "1.15.2", "07e33c794f8f8964ee86cebec1a8ed88db5070e52e904b8f12209773c1036085", [:rebar3], [{:certifi, "2.5.1", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "6.0.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "1.0.1", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "1.1.5", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "e0100f8ef7d1124222c11ad362c857d3df7cb5f4204054f9f0f4a728666591fc"},
- "httpotion": {:hex, :httpotion, "3.1.3", "fdaf1e16b9318dcb722de57e75ac368c93d4c6e3c9125f93e960f953a750fb77", [:mix], [{:ibrowse, "== 4.4.0", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm", "e420172ef697a0f1f4dc40f89a319d5a3aad90ec51fa424f08c115f04192ae43"},
- "ibrowse": {:hex, :ibrowse, "4.4.0", "2d923325efe0d2cb09b9c6a047b2835a5eda69d8a47ed6ff8bc03628b764e991", [:rebar3], [], "hexpm"},
- "idna": {:hex, :idna, "6.0.0", "689c46cbcdf3524c44d5f3dde8001f364cd7608a99556d8fbd8239a5798d4c10", [:rebar3], [{:unicode_util_compat, "0.4.1", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "4bdd305eb64e18b0273864920695cb18d7a2021f31a11b9c5fbcd9a253f936e2"},
- "jason": {:hex, :jason, "1.3.0", "fa6b82a934feb176263ad2df0dbd91bf633d4a46ebfdffea0c8ae82953714946", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "53fc1f51255390e0ec7e50f9cb41e751c260d065dcba2bf0d08dc51a4002c2ac"},
- "jiffy": {:hex, :jiffy, "0.15.2", "de266c390111fd4ea28b9302f0bc3d7472468f3b8e0aceabfbefa26d08cd73b7", [:rebar3], [], "hexpm"},
- "junit_formatter": {:hex, :junit_formatter, "3.0.0", "13950d944dbd295da7d8cc4798b8faee808a8bb9b637c88069954eac078ac9da", [:mix], [], "hexpm", "d77b7b9a1601185b18dfe7682b27c46d5d12721f12fdc75180a6fc573b4e64b1"},
- "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"},
- "mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"},
- "parse_trans": {:hex, :parse_trans, "3.3.0", "09765507a3c7590a784615cfd421d101aec25098d50b89d7aa1d66646bc571c1", [:rebar3], [], "hexpm", "17ef63abde837ad30680ea7f857dd9e7ced9476cdd7b0394432af4bfc241b960"},
- "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.5", "6eaf7ad16cb568bb01753dbbd7a95ff8b91c7979482b95f38443fe2c8852a79b", [:make, :mix, :rebar3], [], "hexpm", "13104d7897e38ed7f044c4de953a6c28597d1c952075eb2e328bc6d6f2bfc496"},
- "unicode_util_compat": {:hex, :unicode_util_compat, "0.4.1", "d869e4c68901dd9531385bb0c8c40444ebf624e60b6962d95952775cac5e90cd", [:rebar3], [], "hexpm", "1d1848c40487cdb0b30e8ed975e34e025860c02e419cb615d255849f3427439d"},
-}
diff --git a/rebar.config.script b/rebar.config.script
deleted file mode 100644
index 31855098b..000000000
--- a/rebar.config.script
+++ /dev/null
@@ -1,220 +0,0 @@
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%
-% Blacklist some bad releases.
-%
-{ok, Version} = file:read_file(filename:join(
- [code:root_dir(), "releases", erlang:system_info(otp_release), "OTP_VERSION"]
-)).
-
-% Version may be binary if file has \n at end :(
-% there is no string:trim/1 in Erlang 19 :(
-VerString = case Version of
- V when is_binary(V) -> string:strip(binary_to_list(V), right, $\n);
- _ -> string:strip(Version, right, $\n)
-end.
-VerList = lists:map(fun(X) -> {Int, _} = string:to_integer(X), Int end,
- string:tokens(VerString, ".")).
-
-DisplayMsg = fun(Msg, Args) ->
- Base = iolist_to_binary(io_lib:format(Msg, Args)),
- Lines = binary:split(Base, <<"\n">>, [global]),
- MaxLen = lists:foldl(fun(Line, Acc) ->
- max(Acc, size(Line))
- end, 0, Lines),
- Decoration = iolist_to_binary(["*" || _ <- lists:seq(1, MaxLen)]),
- ReNewLined = [[L, "~n"] || L <- Lines],
- NewLines = ["~n", Decoration, "~n", ReNewLined, Decoration, "~n~n"],
- MsgBin = iolist_to_binary(NewLines),
- io:format(binary_to_list(MsgBin), [])
-end.
-
-ErlangTooOld = fun(Ver) ->
- DisplayMsg(
- "This version of Erlang (~p) is too old for use with Apache CouchDB.~n~n"
- "See https://docs.couchdb.org/en/stable/install/unix.html#dependencies~n"
- "for the latest information on dependencies.",
- [Ver]
- ),
- halt(1)
-end.
-
-NotSupported = fun(Ver) ->
- DisplayMsg(
- "This version of Erlang (~p) is not officially supported by Apache~n"
- "CouchDB. While we do not officially support this version, there~n"
- "are also no known bugs or incompatibilities.~n~n"
- "See https://docs.couchdb.org/en/stable/install/unix.html#dependencies~n"
- "for the latest information on dependencies.",
- [Ver]
- )
-end.
-
-BadErlang = fun(Ver) ->
- DisplayMsg(
- "This version of Erlang (~p) is known to contain bugs that directly~n"
- "affect the correctness of Apache CouchDB.~n~n"
- "You should *NOT* use this version of Erlang.~n~n"
- "See https://docs.couchdb.org/en/stable/install/unix.html#dependencies~n"
- "for the latest information on dependencies.",
- [Ver]
- ),
- case os:getenv("TRAVIS") of
- "true" ->
- io:fwrite("Travis run, ignoring bad release. You have been warned!~n"),
- ok;
- _ ->
- halt(1)
- end
-end.
-
-case VerList of
- [OldVer | _] when OldVer < 19 -> ErlangTooOld(VerString);
-
- [19 | _] -> NotSupported(VerString);
-
- [20 | _] = V20 when V20 < [20, 3, 8, 11] -> BadErlang(VerString);
- [21 | _] = V21 when V21 < [21, 2, 3] -> BadErlang(VerString);
- [22, 0, N | _] when N < 5 -> BadErlang(VerString);
-
- _ -> ok
-end.
-
-% Set the path to the configuration environment generated
-% by `./configure`.
-
-COUCHDB_ROOT = filename:dirname(SCRIPT).
-os:putenv("COUCHDB_ROOT", COUCHDB_ROOT).
-
-ConfigureEnv = filename:join(COUCHDB_ROOT, "config.erl").
-os:putenv("COUCHDB_CONFIG", ConfigureEnv).
-
-CouchConfig = case filelib:is_file(ConfigureEnv) of
- true ->
- {ok, Result} = file:consult(ConfigureEnv),
- Result;
- false ->
- []
-end.
-
-os:putenv("COUCHDB_APPS_CONFIG_DIR", filename:join([COUCHDB_ROOT, "rel/apps"])).
-
-SubDirs = [
- %% must be compiled first as it has a custom behavior
- "src/couch_epi",
- "src/couch_log",
- "src/chttpd",
- "src/couch",
- "src/couch_event",
- "src/mem3",
- "src/couch_index",
- "src/couch_mrview",
- "src/couch_replicator",
- "src/couch_plugins",
- "src/couch_pse_tests",
- "src/couch_stats",
- "src/couch_peruser",
- "src/couch_tests",
- "src/couch_dist",
- "src/custodian",
- "src/ddoc_cache",
- "src/dreyfus",
- "src/fabric",
- "src/global_changes",
- "src/ioq",
- "src/jwtf",
- "src/ken",
- "src/mango",
- "src/rexi",
- "src/setup",
- "src/smoosh",
- "src/weatherreport",
- "src/couch_prometheus",
- "rel"
-].
-
-DepDescs = [
-%% Independent Apps
-{config, "config", {tag, "2.1.9"}},
-{b64url, "b64url", {tag, "1.0.3"}},
-{ets_lru, "ets-lru", {tag, "1.1.0"}},
-{khash, "khash", {tag, "1.1.0"}},
-{snappy, "snappy", {tag, "CouchDB-1.0.7"}},
-
-%% Non-Erlang deps
-{docs, {url, "https://github.com/apache/couchdb-documentation"},
- {tag, "3.2.1-1"}, [raw]},
-{fauxton, {url, "https://github.com/apache/couchdb-fauxton"},
- {tag, "v1.2.8"}, [raw]},
-%% Third party deps
-{folsom, "folsom", {tag, "CouchDB-0.8.4"}},
-{hyper, "hyper", {tag, "CouchDB-2.2.0-7"}},
-{ibrowse, "ibrowse", {tag, "CouchDB-4.4.2-5"}},
-{jiffy, "jiffy", {tag, "1.1.1"}},
-{mochiweb, "mochiweb", {tag, "v3.0.0"}},
-{meck, "meck", {tag, "0.9.2"}},
-{recon, "recon", {tag, "2.5.2"}}
-].
-
-WithProper = lists:keyfind(with_proper, 1, CouchConfig) == {with_proper, true}.
-
-OptionalDeps = case WithProper of
- true ->
- [{proper, {url, "https://github.com/proper-testing/proper"}, {tag, "v1.4"}}];
- false ->
- []
-end.
-
-BaseUrl = "https://github.com/apache/".
-
-MakeDep = fun
- ({AppName, {url, Url}, Version}) ->
- {AppName, ".*", {git, Url, Version}};
- ({AppName, {url, Url}, Version, Options}) ->
- {AppName, ".*", {git, Url, Version}, Options};
- ({AppName, RepoName, Version}) ->
- Url = BaseUrl ++ "couchdb-" ++ RepoName ++ ".git",
- {AppName, ".*", {git, Url, Version}};
- ({AppName, RepoName, Version, Options}) ->
- Url = BaseUrl ++ "couchdb-" ++ RepoName ++ ".git",
- {AppName, ".*", {git, Url, Version}, Options}
-end.
-
-AddConfig = [
- {require_otp_vsn, "20|21|22|23|24"},
- {deps_dir, "src"},
- {deps, lists:map(MakeDep, DepDescs ++ OptionalDeps)},
- {sub_dirs, SubDirs},
- {lib_dirs, ["src"]},
- {erl_opts, [{i, "../"}, {d, 'COUCHDB_ERLANG_VERSION', VerString}]},
- {eunit_opts, [verbose, {report,{eunit_surefire,[{dir,"."}]}}]},
- {plugins, [eunit_plugin]},
- {dialyzer, [
- {plt_location, local},
- {plt_location, COUCHDB_ROOT},
- {plt_extra_apps, [
- asn1, compiler, crypto, inets, kernel, runtime_tools,
- sasl, setup, ssl, stdlib, syntax_tools, xmerl]},
- {warnings, [unmatched_returns, error_handling, race_conditions]}]},
- {post_hooks, [{compile, "escript support/build_js.escript"}]}
-].
-
-lists:foldl(fun({K, V}, CfgAcc) ->
- case lists:keyfind(K, 1, CfgAcc) of
- {K, Existent} when is_list(Existent) andalso is_list(V) ->
- lists:keystore(K, 1, CfgAcc, {K, Existent ++ V});
- false ->
- lists:keystore(K, 1, CfgAcc, {K, V})
- end
-end, CONFIG, AddConfig).
diff --git a/rel/apps/config.config b/rel/apps/config.config
deleted file mode 100644
index 0cbc1c58e..000000000
--- a/rel/apps/config.config
+++ /dev/null
@@ -1,4 +0,0 @@
-{sensitive, #{
- "admins" => all,
- "replicator" => ["password"]
-}}.
diff --git a/rel/apps/couch_epi.config b/rel/apps/couch_epi.config
deleted file mode 100644
index a53721a48..000000000
--- a/rel/apps/couch_epi.config
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{plugins, [
- couch_db_epi,
- chttpd_epi,
- couch_index_epi,
- dreyfus_epi,
- global_changes_epi,
- mango_epi,
- mem3_epi,
- setup_epi
-]}.
diff --git a/rel/boot_dev_cluster.sh b/rel/boot_dev_cluster.sh
deleted file mode 100755
index 1dfeb5568..000000000
--- a/rel/boot_dev_cluster.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# Make log directory
-mkdir -p ./rel/logs/
-
-HAPROXY=`which haproxy`
-
-# Start each node
-./rel/dev1/bin/couchdb > ./rel/logs/couchdb1.log 2>&1 &
-DB1_PID=$!
-
-./rel/dev2/bin/couchdb > ./rel/logs/couchdb2.log 2>&1 &
-DB2_PID=$!
-
-./rel/dev3/bin/couchdb > ./rel/logs/couchdb3.log 2>&1 &
-DB3_PID=$!
-
-$HAPROXY -f rel/haproxy.cfg > ./rel/logs/haproxy.log 2>&1 &
-HP_PID=$!
-
-sleep 2
-
-# Connect the cluster
-curl localhost:15986/nodes/dev2@127.0.0.1 -X PUT -d '{}'
-curl localhost:15986/nodes/dev3@127.0.0.1 -X PUT -d '{}'
-
-trap "kill $DB1_PID $DB2_PID $DB3_PID $HP_PID" SIGINT SIGTERM SIGHUP
-
-wait
diff --git a/rel/files/README b/rel/files/README
deleted file mode 100644
index d22e2f086..000000000
--- a/rel/files/README
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-Ignore these files for now.
-
-This is to pacify newer rebar that insists on having a sys.config and
-a vm.args in releases/$VSN/.
-
-eunit.ini is only for local testing, so it is not copied into the release.
diff --git a/rel/files/couchdb.cmd.in b/rel/files/couchdb.cmd.in
deleted file mode 100644
index 244803bc8..000000000
--- a/rel/files/couchdb.cmd.in
+++ /dev/null
@@ -1,37 +0,0 @@
-@ECHO OFF
-
-:: Licensed under the Apache License, Version 2.0 (the "License"); you may not
-:: use this file except in compliance with the License. You may obtain a copy of
-:: the License at
-::
-:: http://www.apache.org/licenses/LICENSE-2.0
-::
-:: Unless required by applicable law or agreed to in writing, software
-:: distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-:: WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-:: License for the specific language governing permissions and limitations under
-:: the License.
-
-SET COUCHDB_BIN_DIR=%~dp0
-SET ROOTDIR=%COUCHDB_BIN_DIR%\..\
-CD "%ROOTDIR%"
-
-SET /P START_ERL= < releases\start_erl.data
-FOR /F "tokens=1" %%G IN ("%START_ERL%") DO SET ERTS_VSN=%%G
-FOR /F "tokens=2" %%G IN ("%START_ERL%") DO SET APP_VSN=%%G
-
-set BINDIR=%ROOTDIR%/erts-%ERTS_VSN%/bin
-set EMU=beam
-set PROGNAME=%~n0
-set PATH=%COUCHDB_BIN_DIR%;%SystemRoot%\system32;%SystemRoot%;%SystemRoot%\System32\Wbem;%SYSTEMROOT%\System32\WindowsPowerShell\v1.0\
-
-IF NOT DEFINED COUCHDB_QUERY_SERVER_JAVASCRIPT SET COUCHDB_QUERY_SERVER_JAVASCRIPT={{prefix}}/bin/couchjs {{prefix}}/share/server/main.js
-IF NOT DEFINED COUCHDB_QUERY_SERVER_COFFEESCRIPT SET COUCHDB_QUERY_SERVER_COFFEESCRIPT={{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js
-IF NOT DEFINED COUCHDB_FAUXTON_DOCROOT SET COUCHDB_FAUXTON_DOCROOT={{fauxton_root}}
-
-"%BINDIR%\erl" -boot "%ROOTDIR%\releases\%APP_VSN%\couchdb" ^
--args_file "%ROOTDIR%\etc\vm.args" ^
--epmd "%BINDIR%\epmd.exe" ^
--config "%ROOTDIR%\releases\%APP_VSN%\sys.config" %*
-
-:: EXIT /B
diff --git a/rel/files/couchdb.in b/rel/files/couchdb.in
deleted file mode 100755
index 3ebb2e4ee..000000000
--- a/rel/files/couchdb.in
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/sh
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-canonical_readlink ()
- {
- FILE=$(dirname "$1")/$(basename "$1");
- if [ -h "$FILE" ]; then
- cd $(dirname "$1")
- canonical_readlink $(readlink "$FILE");
- else
- cd "${1%/*}" && pwd -P;
- fi
-}
-COUCHDB_BIN_DIR=$(canonical_readlink "$0")
-ERTS_BIN_DIR=$COUCHDB_BIN_DIR/../
-cd "$COUCHDB_BIN_DIR/../"
-
-export ROOTDIR=${ERTS_BIN_DIR%/*}
-
-START_ERL=`cat "$ROOTDIR/releases/start_erl.data"`
-ERTS_VSN=${START_ERL% *}
-APP_VSN=${START_ERL#* }
-
-export BINDIR="$ROOTDIR/erts-$ERTS_VSN/bin"
-export EMU=beam
-export PROGNAME=`echo $0 | sed 's/.*\///'`
-
-export COUCHDB_QUERY_SERVER_JAVASCRIPT="${COUCHDB_QUERY_SERVER_JAVASCRIPT:-{{prefix}}/bin/couchjs {{prefix}}/share/server/main.js}"
-export COUCHDB_QUERY_SERVER_COFFEESCRIPT="${COUCHDB_QUERY_SERVER_COFFEESCRIPT:-{{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js}"
-# Use a separate var to work around rebar's mustache template bug
-DEFAULT_FAUXTON_ROOT={{fauxton_root}}
-export COUCHDB_FAUXTON_DOCROOT="${COUCHDB_FAUXTON_DOCROOT:-${DEFAULT_FAUXTON_ROOT}}"
-
-ARGS_FILE="${COUCHDB_ARGS_FILE:-$ROOTDIR/etc/vm.args}"
-[ -n "${COUCHDB_INI_FILES:-}" ] && INI_ARGS="-couch_ini $COUCHDB_INI_FILES"
-SYSCONFIG_FILE="${COUCHDB_SYSCONFIG_FILE:-$ROOTDIR/releases/$APP_VSN/sys.config}"
-
-exec "$BINDIR/erlexec" -boot "$ROOTDIR/releases/$APP_VSN/couchdb" \
- -args_file "${ARGS_FILE}" \
- ${INI_ARGS:-} \
- -config "${SYSCONFIG_FILE}" "$@"
diff --git a/rel/files/eunit.config b/rel/files/eunit.config
deleted file mode 100644
index 3c7457d3a..000000000
--- a/rel/files/eunit.config
+++ /dev/null
@@ -1,16 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-[
- {kernel, [{error_logger, silent}]},
- {sasl, [{sasl_error_logger, false}]}
-].
diff --git a/rel/files/eunit.ini b/rel/files/eunit.ini
deleted file mode 100644
index 361ea6669..000000000
--- a/rel/files/eunit.ini
+++ /dev/null
@@ -1,38 +0,0 @@
-; Licensed to the Apache Software Foundation (ASF) under one
-; or more contributor license agreements. See the NOTICE file
-; distributed with this work for additional information
-; regarding copyright ownership. The ASF licenses this file
-; to you under the Apache License, Version 2.0 (the
-; "License"); you may not use this file except in compliance
-; with the License. You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing,
-; software distributed under the License is distributed on an
-; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-; KIND, either express or implied. See the License for the
-; specific language governing permissions and limitations
-; under the License.
-
-[couchdb]
-; time to relax!
-uuid = 74696d6520746f2072656c617821
-default_security = everyone
-
-[httpd]
-enable = true
-port = 0
-
-[chttpd]
-port = 0
-
-[log]
-; log to a file to save our terminals from log spam
-writer = file
-file = couch.log
-level = info
-
-[replicator]
-; disable jitter to reduce test run times
-startup_jitter = 0
\ No newline at end of file
diff --git a/rel/files/sys.config b/rel/files/sys.config
deleted file mode 100644
index 97562f561..000000000
--- a/rel/files/sys.config
+++ /dev/null
@@ -1,13 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-[].
diff --git a/rel/files/vm.args b/rel/files/vm.args
deleted file mode 100644
index 82b9fe5aa..000000000
--- a/rel/files/vm.args
+++ /dev/null
@@ -1,11 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
diff --git a/rel/haproxy.cfg b/rel/haproxy.cfg
deleted file mode 100644
index 540075761..000000000
--- a/rel/haproxy.cfg
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-global
- maxconn 512
- spread-checks 5
-
-defaults
- mode http
- log global
- monitor-uri /_haproxy_health_check
- option log-health-checks
- option httplog
- balance roundrobin
- option forwardfor
- option redispatch
- retries 4
- option http-server-close
- timeout client 150000
- timeout server 3600000
- timeout connect 500
-
- stats enable
- stats uri /_haproxy_stats
- # stats auth admin:admin # Uncomment for basic auth
-
-frontend http-in
- # This requires HAProxy 1.5.x
- # bind *:$HAPROXY_PORT
- bind *:5984
- default_backend couchdbs
-
-backend couchdbs
- option httpchk GET /_up
- http-check disable-on-404
- <<server couchdb{node_idx} 127.0.0.1:{port} check inter 5s>>
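The <<...>> line above is a per-node template; dev/run's generate_haproxy_config() expands it once per node with str.format. A small sketch of that expansion for the default three-node dev cluster, assuming the fixed port scheme:

# Expansion performed by generate_haproxy_config() in dev/run for nodes 1..3.
template = "server couchdb{node_idx} 127.0.0.1:{port} check inter 5s"
for node_idx in (1, 2, 3):
    print(template.format(node_idx=node_idx, port=10000 * node_idx + 5984))
# -> server couchdb1 127.0.0.1:15984 check inter 5s
# -> server couchdb2 127.0.0.1:25984 check inter 5s
# -> server couchdb3 127.0.0.1:35984 check inter 5s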
diff --git a/rel/overlay/bin/remsh b/rel/overlay/bin/remsh
deleted file mode 100755
index de37d6cc2..000000000
--- a/rel/overlay/bin/remsh
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/bin/sh
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-canonical_readlink ()
- {
- FILE=$(dirname "$1")/$(basename "$1");
- if [ -h "$FILE" ]; then
- cd $(dirname "$1")
- canonical_readlink $(readlink "$FILE");
- else
- cd "${1%/*}" && pwd -P;
- fi
-}
-COUCHDB_BIN_DIR=$(canonical_readlink "$0")
-ERTS_BIN_DIR=$COUCHDB_BIN_DIR/../
-ROOTDIR=${ERTS_BIN_DIR%/*}
-START_ERL=$(cat "$ROOTDIR/releases/start_erl.data")
-ERTS_VSN=${START_ERL% *}
-APP_VSN=${START_ERL#* }
-BINDIR=$ROOTDIR/erts-$ERTS_VSN/bin
-
-PROGNAME=${0##*/}
-VERBOSE=""
-DEFAULT_NODE="couchdb@127.0.0.1"
-LHOST=127.0.0.1
-
-ARGS_FILE="${COUCHDB_ARGS_FILE:-$ROOTDIR/etc/vm.args}"
-
-# If present, extract the node name from ERL_FLAGS
-# This is used by the CouchDB Dockerfile and Helm chart
-NODE=$(echo "$ERL_FLAGS" | sed 's/^.*name \([^ ][^ ]*\).*$/\1/g')
-if test -f "$ARGS_FILE"; then
-# else attempt to extract from vm.args
- ARGS_FILE_NAME=$(awk '$1=="-name"{print $2}' "$ARGS_FILE")
- NODE="${NODE:-$ARGS_FILE_NAME}"
-fi
-NODE="${NODE:-$DEFAULT_NODE}"
-
-# If present, extract cookie from ERL_FLAGS
-# This is used by the CouchDB Dockerfile and Helm chart
-COOKIE=$(echo "$ERL_FLAGS" | sed 's/^.*setcookie \([^ ][^ ]*\).*$/\1/g')
-if test -f "$ARGS_FILE"; then
-# else attempt to extract from vm.args
- ARGS_FILE_COOKIE=$(awk '$1=="-setcookie"{print $2}' "$ARGS_FILE")
- COOKIE="${COOKIE:-$ARGS_FILE_COOKIE}"
-fi
-
-printHelpAndExit() {
- echo "Usage: ${PROGNAME} [OPTION]... [-- <additional Erlang cli options>]"
- echo " -c cookie specify shared Erlang cookie"
- echo " -l HOST specify remsh's host name (default: 127.0.0.1)"
- echo " -m use output of \`hostname -f\` as remsh's host name"
- echo " -n NAME@HOST specify couchdb's Erlang node name (-name in vm.args)"
- echo " -v verbose; print invocation line"
- echo " -t path/to/conf enable TLS distribution (customize in vm.args)"
- echo " -h this help message"
- exit
-}
-
-while getopts ":hn:c:l:mvt:" optionName; do
- case "$optionName" in
- h)
- printHelpAndExit 0
- ;;
- n)
- NODE=$OPTARG
- ;;
- c)
- COOKIE=$OPTARG
- ;;
- l)
- LHOST=$OPTARG
- ;;
- m)
- LHOST=$(hostname -f)
- ;;
- v)
- VERBOSE=0
- ;;
- t)
- TLSCONF=$OPTARG
- if [ ! -f "$TLSCONF" ]; then
- echo "ERROR: Could't find the file \"$TLSCONF\"." >&2
- exit 1
- fi
- ;;
- \?)
- echo "Invalid option: -$OPTARG" >&2
- printHelpAndExit 0
- ;;
- esac
-done
-
-shift $((OPTIND - 1))
-
-if [ ! -z "$VERBOSE" ]; then
- # cheap but it works
- set -x
-fi
-
-# If present, strip -name or -setcookie from ERL_FLAGS
-# to avoid conflicts with the cli parameters
-ERL_FLAGS_CLEAN=$(echo "$ERL_FLAGS" | sed 's/-setcookie \([^ ][^ ]*\)//g' | sed 's/-name \([^ ][^ ]*\)//g')
-
-if [ -z "${COOKIE}" ]; then
- echo "No Erlang cookie could be found, please specify with -c" >&2
- exit 1
-fi
-
-if [ -z "$TLSCONF" ]; then
- exec env ERL_FLAGS="$ERL_FLAGS_CLEAN" "$BINDIR/erl" -boot "$ROOTDIR/releases/$APP_VSN/start_clean" \
- -name remsh$$@$LHOST -remsh $NODE -hidden -setcookie $COOKIE \
- "$@"
-else
- exec env ERL_FLAGS="$ERL_FLAGS_CLEAN" "$BINDIR/erl" -boot "$ROOTDIR/releases/$APP_VSN/start_clean" \
- -name remsh$$@$LHOST -remsh $NODE -hidden -setcookie $COOKIE \
- -proto_dist inet_tls -ssl_dist_optfile $TLSCONF \
- "$@"
-fi
diff --git a/rel/overlay/etc/default.d/README b/rel/overlay/etc/default.d/README
deleted file mode 100644
index cae343ba4..000000000
--- a/rel/overlay/etc/default.d/README
+++ /dev/null
@@ -1,11 +0,0 @@
-CouchDB default configuration files
-
-Files found under the etc/default.d directory that end with .ini are
-parsed within couchdb(1) at startup.
-
-This directory is intended for distribution-specific overrides of
-CouchDB defaults. Package maintainers should place overrides in
-this directory.
-
-System administrators should place overrides in the etc/local.d directory
-instead.
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
deleted file mode 100644
index 162ccb926..000000000
--- a/rel/overlay/etc/default.ini
+++ /dev/null
@@ -1,749 +0,0 @@
-; Upgrading CouchDB will overwrite this file.
-[vendor]
-name = {{package_author_name}}
-
-[couchdb]
-uuid = {{uuid}}
-database_dir = {{data_dir}}
-view_index_dir = {{view_index_dir}}
-; util_driver_dir =
-; plugin_dir =
-;os_process_timeout = 5000 ; 5 seconds. for view servers.
-
-; Maximum number of .couch files to open at once.
-; The actual limit may be slightly lower depending on how
-; many schedulers you have as the allowance is divided evenly
-; among them.
-;max_dbs_open = 500
-
-; Method used to compress everything that is appended to database and view index files, except
-; for attachments (see the attachments section). Available methods are:
-;
-; none - no compression
-; snappy - use google snappy, a very fast compressor/decompressor
-; deflate_N - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
-; lowest compression ratio) to 9 (slowest, highest compression ratio)
-;file_compression = snappy
-; Higher values may give better read performance due to fewer read operations
-; and/or more OS page cache hits, but they can also increase overall response
-; time for writes when there are many attachment write requests in parallel.
-;attachment_stream_buffer_size = 4096
-; Default security object for databases if not explicitly set
-; everyone - same as couchdb 1.0, everyone can read/write
-; admin_only - only admins can read/write
-; admin_local - sharded dbs on :5984 are read/write for everyone,
-; local dbs on :5986 are read/write for admins only
-;default_security = admin_only
-; btree_chunk_size = 1279
-; maintenance_mode = false
-; stem_interactive_updates = true
-; uri_file =
-; The speed of processing the _changes feed with doc_ids filter can be
-; influenced directly with this setting - increase for faster processing at the
-; expense of more memory usage.
-;changes_doc_ids_optimization_threshold = 100
-; Maximum document ID length. Can be set to an integer or 'infinity'.
-;max_document_id_length = infinity
-;
-; Limit maximum document size. Requests to create / update documents with a body
-; size larger than this will fail with a 413 http error. This limit applies to
-; requests which update a single document as well as individual documents from
-; a _bulk_docs request. The size limit is approximate due to the nature of JSON
-; encoding.
-;max_document_size = 8000000 ; bytes
-;
-; Maximum attachment size.
-; max_attachment_size = 1073741824 ; 1 gibibyte
-;
-; Do not update the least recently used DB cache on reads, only writes
-;update_lru_on_read = false
-;
-; The default storage engine to use when creating databases
-; is set as a key into the [couchdb_engines] section.
-;default_engine = couch
-;
-; Enable this to only "soft-delete" databases when DELETE /{db} requests are
-; made. This will place a .recovery directory in your data directory and
-; move deleted databases/shards there instead. You can then manually delete
-; these files later, as desired.
-;enable_database_recovery = false
-;
-; Set the maximum size allowed for a partition. This helps users avoid
-; inadvertently abusing partitions resulting in hot shards. The default
-; is 10GiB. A value of 0 or less will disable partition size checks.
-;max_partition_size = 10737418240
-;
-; When true, system databases _users and _replicator are created immediately
-; on startup if not present.
-;single_node = false
-
-; Allow edits on the _security object in the user db. By default, it's disabled.
-;users_db_security_editable = false
-
-; Sets the maximum time that the coordinator node will wait for cluster members
-; to request attachment data before returning a response to the client.
-;attachment_writer_timeout = 300000
-
-; Sets the log level for informational compaction related entries.
-;compaction_log_level = info
-
-[purge]
-; Allowed maximum number of documents in one purge request
-;max_document_id_number = 100
-;
-; Allowed maximum number of accumulated revisions in one purge request
-;max_revisions_number = 1000
-;
-; Allowed durations when index is not updated for local purge checkpoint
-; document. Default is 24 hours.
-;index_lag_warn_seconds = 86400
-
-[couchdb_engines]
-; The keys in this section are the filename extension that
-; the specified engine module will use. This is important so
-; that couch_server is able to find an existing database without
-; having to ask every configured engine.
-couch = couch_bt_engine
-
-[process_priority]
-; Selectively disable altering process priorities for modules that request it.
-; * NOTE: couch_server priority has been shown to lead to CouchDB hangs and
-; failures on Erlang releases 21.0 - 21.3.8.12 and 22.0 - 22.2.4. Do not
-; enable when running with those versions.
-;couch_server = false
-
-[cluster]
-;q=2
-;n=3
-; placement = metro-dc-a:2,metro-dc-b:1
-
-; Supply a comma-delimited list of node names that this node should
-; contact in order to join a cluster. If a seedlist is configured the ``_up``
-; endpoint will return a 404 until the node has successfully contacted at
-; least one of the members of the seedlist and replicated an up-to-date copy
-; of the ``_nodes``, ``_dbs``, and ``_users`` system databases.
-; seedlist = couchdb@node1.example.com,couchdb@node2.example.com
-
-[chttpd]
-; These settings affect the main, clustered port (5984 by default).
-port = {{cluster_port}}
-bind_address = 127.0.0.1
-;backlog = 512
-;socket_options = [{sndbuf, 262144}, {nodelay, true}]
-;server_options = [{recbuf, undefined}]
-;require_valid_user = false
-; require_valid_user_except_for_up = false
-; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
-; If the Server header is left out, Mochiweb will add its own.
-;prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
-;
-; Limit maximum number of databases when trying to get detailed information using
-; _dbs_info in a request
-;max_db_number_for_dbs_info_req = 100
-
-; set to true to delay the start of a response until the end has been calculated
-;buffer_response = false
-
-; authentication handlers
-; authentication_handlers = {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
-; uncomment the next line to enable proxy authentication
-; authentication_handlers = {chttpd_auth, proxy_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
-; uncomment the next line to enable JWT authentication
-; authentication_handlers = {chttpd_auth, jwt_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
-
-; prevent non-admins from accessing /_all_dbs
-; admin_only_all_dbs = true
-
-; These options are moved from [httpd]
-;secure_rewrites = true
-;allow_jsonp = false
-
-;enable_cors = false
-;enable_xframe_options = false
-
-; CouchDB can optionally enforce a maximum uri length;
-;max_uri_length = 8000
-
-;changes_timeout = 60000
-;config_whitelist =
-;rewrite_limit = 100
-;x_forwarded_host = X-Forwarded-Host
-;x_forwarded_proto = X-Forwarded-Proto
-;x_forwarded_ssl = X-Forwarded-Ssl
-
-; Maximum allowed http request size. Applies to both clustered and local port.
-;max_http_request_size = 4294967296 ; 4GB
-
-; Set to true to decode + to space in db and doc_id parts.
-; decode_plus_to_space = true
-
-;[jwt_auth]
-; List of claims to validate
-; can be the name of a claim like "exp" or a tuple if the claim requires
-; a parameter
-; required_claims = exp, {iss, "IssuerNameHere"}
-; roles_claim_name = https://example.com/roles
-;
-; [jwt_keys]
-; Configure at least one key here if using the JWT auth handler.
-; If your JWT tokens do not include a "kid" attribute, use "_default"
-; as the config key, otherwise use the kid as the config key.
-; Examples
-; hmac:_default = aGVsbG8=
-; hmac:foo = aGVsbG8=
-; The config values can represent symmetric and asymmetric keys.
-; For symmetric keys, the value is base64 encoded;
-; hmac:_default = aGVsbG8= # base64-encoded form of "hello"
-; For asymmetric keys, the value is the PEM encoding of the public
-; key with newlines replaced with the escape sequence \n.
-; rsa:foo = -----BEGIN PUBLIC KEY-----\nMIIBIjAN...IDAQAB\n-----END PUBLIC KEY-----\n
-; ec:bar = -----BEGIN PUBLIC KEY-----\nMHYwEAYHK...AzztRs\n-----END PUBLIC KEY-----\n
-
-[couch_peruser]
-; If enabled, couch_peruser ensures that a private per-user database
-; exists for each document in _users. These databases are writable only
-; by the corresponding user. Databases are in the following form:
-; userdb-{hex encoded username}
-;enable = false
-; If set to true and a user is deleted, the respective database gets
-; deleted as well.
-;delete_dbs = false
-; Set a default q value for peruser-created databases that is different from
-; cluster / q
-;q = 1
-; Prefix for user databases. If you change this after user dbs have been
-; created, the existing databases won't get deleted when the associated user
-; gets deleted, because of the resulting prefix mismatch.
-;database_prefix = userdb-
-
-[httpd]
-port = {{backend_port}}
-bind_address = 127.0.0.1
-;authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
-
-; Options for the MochiWeb HTTP server.
-;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
-; For more socket options, consult Erlang's module 'inet' man page.
-;socket_options = [{recbuf, undefined}, {sndbuf, 262144}, {nodelay, true}]
-;socket_options = [{sndbuf, 262144}]
-
-; These settings were moved to [chttpd]
-; secure_rewrites, allow_jsonp, enable_cors, enable_xframe_options,
-; max_uri_length, changes_timeout, config_whitelist, rewrite_limit,
-; x_forwarded_host, x_forwarded_proto, x_forwarded_ssl, max_http_request_size
-
-; [httpd_design_handlers]
-; _view =
-
-; [ioq]
-; concurrency = 10
-; ratio = 0.01
-
-[ssl]
-;port = 6984
-
-[chttpd_auth]
-;authentication_db = _users
-
-; These options are moved from [couch_httpd_auth]
-;authentication_redirect = /_utils/session.html
-;require_valid_user = false
-;timeout = 600 ; number of seconds before automatic logout
-;auth_cache_size = 50 ; size is number of cache entries
-;allow_persistent_cookies = true ; set to false to disallow persistent cookies
-;iterations = 10 ; iterations for password hashing
-;min_iterations = 1
-;max_iterations = 1000000000
-;password_scheme = pbkdf2
-; List of Erlang RegExp or tuples of RegExp and an optional error message.
-; Where a new password must match all RegExp.
-; Example: [{".{10,}", "Password min length is 10 characters."}, "\\d+"]
-;password_regexp = []
-;proxy_use_secret = false
-; comma-separated list of public fields, 404 if empty
-;public_fields =
-;secret =
-;users_db_public = false
-;cookie_domain = example.com
-; Set the SameSite cookie property for the auth cookie. If empty, the SameSite property is not set.
-;same_site =
-
-; [chttpd_auth_cache]
-; max_lifetime = 600000
-; max_objects =
-; max_size = 104857600
-
-; [mem3]
-; nodes_db = _nodes
-; shard_cache_size = 25000
-; shards_db = _dbs
-; sync_concurrency = 10
-
-; [fabric]
-; all_docs_concurrency = 10
-; changes_duration =
-; shard_timeout_factor = 2
-; shard_timeout_min_msec = 100
-; uuid_prefix_len = 7
-; request_timeout = 60000
-; all_docs_timeout = 10000
-; attachments_timeout = 60000
-; view_timeout = 3600000
-; partition_view_timeout = 3600000
-
-; [rexi]
-; buffer_count = 2000
-; server_per_node = true
-; stream_limit = 5
-;
-; Use a single message to kill a group of remote workers. This feature is
-; available starting with 3.0. When performing a rolling upgrade from 2.x to
-; 3.x, set this value to false, then after all nodes have been upgraded, delete
-; it so the default value of true takes effect.
-;use_kill_all = true
-
-; [global_changes]
-; max_event_delay = 25
-; max_write_delay = 500
-; update_db = true
-
-; [view_updater]
-; min_writer_items = 100
-; min_writer_size = 16777216
-
-[couch_httpd_auth]
-; WARNING! This only affects the node-local port (5986 by default).
-; You probably want the settings under [chttpd].
-authentication_db = _users
-
-; These settings were moved to [chttpd_auth]
-; authentication_redirect, require_valid_user, timeout,
-; auth_cache_size, allow_persistent_cookies, iterations, min_iterations,
-; max_iterations, password_scheme, password_regexp, proxy_use_secret,
-; public_fields, secret, users_db_public, cookie_domain, same_site
-
-; CSP (Content Security Policy) Support
-[csp]
-;utils_enable = true
-;utils_header_value = default-src 'self'; img-src 'self'; font-src *; script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';
-;attachments_enable = true
-;attachments_header_value = sandbox
-;showlist_enable = true
-;showlist_header_value = sandbox
-
-[cors]
-;credentials = false
-; List of origins separated by a comma, * means accept all
-; Origins must include the scheme: http://example.com
-; You can't set origins: * and credentials = true at the same time.
-;origins = *
-; List of accepted headers separated by a comma
-; headers =
-; List of accepted methods
-; methods =
-
-; Configuration for a vhost
-;[cors:http://example.com]
-; credentials = false
-; List of origins separated by a comma
-; Origins must include the scheme: http://example.com
-; You can't set origins: * and credentials = true at the same time.
-;origins =
-; List of accepted headers separated by a comma
-; headers =
-; List of accepted methods
-; methods =
-
-; Configuration for the design document cache
-;[ddoc_cache]
-; The maximum size of the cache in bytes
-;max_size = 104857600 ; 100MiB
-; The period each cache entry should wait before
-; automatically refreshing in milliseconds
-;refresh_timeout = 67000
-
-[x_frame_options]
-; Setting same_origin will return X-Frame-Options: SAMEORIGIN.
-; If same_origin is set, the hosts setting is ignored.
-; same_origin = true
-; Setting hosts will return X-Frame-Options: ALLOW-FROM https://example.com/
-; List of hosts separated by a comma. * means accept all
-; hosts =
-
-[native_query_servers]
-; erlang query server
-; enable_erlang_query_server = false
-
-; Setting reduce_limit to false disables the check that reduce function output shrinks relative to its input.
-; If you think you're hitting reduce_limit with a "good" reduce function,
-; please let us know on the mailing list so we can fine tune the heuristic.
-[query_server_config]
-; commit_freq = 5
-;reduce_limit = true
-;os_process_limit = 100
-; os_process_idle_limit = 300
-; os_process_soft_limit = 100
-; Timeout for how long a response from a busy view group server can take.
-; "infinity" is also a valid configuration value.
-;group_info_timeout = 5000
-;query_limit = 268435456
-;partition_query_limit = 268435456
-
-[mango]
-; Set to true to disable the "index all fields" text index, which can lead
-; to out of memory issues when users have documents with nested array fields.
-;index_all_disabled = false
-; Default limit value for mango _find queries.
-;default_limit = 25
-; Ratio between documents scanned and results matched that will
-; generate a warning in the _find response. Setting this to 0 disables
-; the warning.
-;index_scan_warning_threshold = 10
-
-[indexers]
-couch_mrview = true
-
-[feature_flags]
-; This enables any database to be created as a partitioned database (except system dbs).
-; Setting this to false will stop the creation of partitioned databases.
-; partitioned||allowed* = true will scope the creation of partitioned databases
-; to databases with the 'allowed' prefix.
-partitioned||* = true
-
-[uuids]
-; Known algorithms:
-; random - 128 bits of random awesome
-; All awesome, all the time.
-; sequential - monotonically increasing ids with random increments
-; First 26 hex characters are random. Last 6 increment in
-; random amounts until an overflow occurs. On overflow, the
-; random prefix is regenerated and the process starts over.
-; utc_random - Time since Jan 1, 1970 UTC with microseconds
-; First 14 characters are the time in hex. Last 18 are random.
-; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
-; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
-;algorithm = sequential
-; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
-; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
-;utc_id_suffix =
-; Maximum number of UUIDs retrievable from /_uuids in a single request
-;max_count = 1000
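-; Illustrative (hypothetical) shapes of generated ids, to make the algorithm
-; descriptions above concrete -- the actual values are random:
-;   sequential: 3fa9e1f4c2b07d5e81a6d4c7b9000a3c, then 3fa9e1f4c2b07d5e81a6d4c7b9001f88
-;               (same 26-character random prefix, last 6 characters increment)
-;   utc_random: a 14-hex-character timestamp prefix followed by 18 random hex characters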
-
-[attachments]
-;compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
-;compressible_types = text/*, application/javascript, application/json, application/xml
-
-[replicator]
-; Random jitter applied on replication job startup (milliseconds)
-;startup_jitter = 5000
-; Number of actively running replications
-;max_jobs = 500
-; Scheduling interval in milliseconds. During each reschedule cycle the
-; scheduler may start and stop up to max_churn replication jobs.
-;interval = 60000
-; Maximum number of replications to start and stop during rescheduling.
-;max_churn = 20
-; More worker processes can give higher network throughput but can also
-; imply more disk and network IO.
-;worker_processes = 4
-; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
-; also reduce the total amount of RAM used.
-;worker_batch_size = 500
-; Maximum number of HTTP connections per replication.
-;http_connections = 20
-; HTTP connection timeout per replication.
-; Even for very fast/reliable networks it might need to be increased if a remote
-; database is too busy.
-;connection_timeout = 30000
-; Request timeout
-;request_timeout = infinity
-; If a request fails, the replicator will retry it up to N times.
-;retries_per_request = 5
-; Use checkpoints
-;use_checkpoints = true
-; Checkpoint interval
-;checkpoint_interval = 30000
-; Some socket options that might boost performance in some scenarios:
-; {nodelay, boolean()}
-; {sndbuf, integer()}
-; {recbuf, integer()}
-; {priority, integer()}
-; See the `inet` Erlang module's man page for the full list of options.
-;socket_options = [{keepalive, true}, {nodelay, false}]
-; Path to a file containing the user's certificate.
-;cert_file = /full/path/to/server_cert.pem
-; Path to file containing user's private PEM encoded key.
-;key_file = /full/path/to/server_key.pem
-; String containing the user's password. Only used if the private keyfile is password protected.
-;password = somepassword
-; Set to true to validate peer certificates.
-;verify_ssl_certificates = false
-; File containing a list of peer trusted certificates (in the PEM format).
-;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
-; Maximum peer certificate depth (must be set even if certificate validation is off).
-;ssl_certificate_max_depth = 3
-; Maximum document ID length for replication.
-;max_document_id_length = infinity
-; How much time to wait before retrying after a missing doc exception. This
-; exception happens if the document was seen in the changes feed, but internal
-; replication hasn't caught up yet, and fetching the document's revisions
-; fails. This is a common scenario when the source is updated while continuous
-; replication is running. The retry period would depend on how quickly internal
-; replication is expected to catch up. In general this is an optimisation to
-; avoid crashing the whole replication job, which would consume more resources
-; and add log noise.
-;missing_doc_retry_msec = 2000
-; Wait this many seconds after startup before attaching changes listeners
-; cluster_start_period = 5
-; Re-check cluster state at least every cluster_quiet_period seconds
-; cluster_quiet_period = 60
-
-; List of replicator client authentication plugins to try. Plugins will be
-; tried in order. The first to initialize successfully will be used for that
-; particular endpoint (source or target). Normally couch_replicator_auth_noop
-; would be used at the end of the list as a "catch-all". It doesn't do anything
-; and effectively implements the previous behavior of using basic auth.
-; There are currently two plugins available:
-; couch_replicator_auth_session - use _session cookie authentication
-; couch_replicator_auth_noop - use basic authentication (previous default)
-; Currently, the new _session cookie authentication is tried first, before
-; falling back to the old basic authentication default:
-;auth_plugins = couch_replicator_auth_session,couch_replicator_auth_noop
-; To restore the old behaviour, use the following value:
-;auth_plugins = couch_replicator_auth_noop
-
-; Force couch_replicator_auth_session plugin to refresh the session
-; periodically if max-age is not present in the cookie. This is mostly to
-; handle the case where anonymous writes are allowed to the database and a VDU
-; function is used to forbid writes based on the authenticated user name. In
-; that case this value should be adjusted based on the expected minimum session
-; expiry timeout on replication endpoints. If session expiry results in a 401
-; or 403 response this setting is not needed.
-;session_refresh_interval_sec = 550
-
-; Usage coefficient decays historic fair share usage every scheduling
-; cycle. The value must be between 0.0 and 1.0. Lower values will
-; ensure historic usage decays quicker and higher values mean it will
-; be remembered longer.
-;usage_coeff = 0.5
-
-; Priority coefficient decays all the job priorities such that they slowly
-; drift towards the front of the run queue. This coefficient defines a maximum
-; time window over which this algorithm would operate. For example, if this
-; value is too small (0.1), after a few cycles quite a few jobs would end up at
-; priority 0, and would render this algorithm useless. The default value of
-; 0.98 is picked such that if a job ran for one scheduler cycle, then didn't
-; get to run for 7 hours, it would still have priority > 0. 7 hours was picked
-; as it was close enough to 8 hours, which is the default maximum error backoff
-; interval.
-;priority_coeff = 0.98
-
-
-[replicator.shares]
-; Fair share configuration section. More shares result in a higher
-; chance that jobs from that db get to run. The default value is 100,
-; minimum is 1 and maximum is 1000. The configuration may be set even
-; if the database does not exist.
-;_replicator = 100
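-; For example, a hypothetical per-database override giving jobs from a
-; "prod/_replicator" database five times the default weight would be:
-;prod/_replicator = 500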
-
-
-[log]
-; Possible log levels:
-; debug
-; info
-; notice
-; warning, warn
-; error, err
-; critical, crit
-; alert
-; emergency, emerg
-; none
-;
-;level = info
-;
-; Set the maximum log message length in bytes that will be
-; passed through the writer
-;
-; max_message_size = 16000
-;
-; Do not log last message received by terminated process
-; strip_last_msg = true
-;
-; List of fields to remove before logging the crash report
-; filter_fields = [pid, registered_name, error_info, messages]
-;
-; There are four different log writers that can be configured
-; to write log messages. The default writes to stderr of the
-; Erlang VM which is useful for debugging/development as well
-; as a lot of container deployments.
-;
-; There's also a file writer that works with logrotate, a
-; rsyslog writer for deployments that need to have logs sent
-; over the network, and a journald writer that's more suitable
-; when using systemd journald.
-;
-;writer = stderr
-; Journald Writer notes:
-;
-; The journald writer doesn't have any options. It still writes
-; the logs to stderr, but without the timestamp prepended, since
-; the journal will add it automatically, and with the log level
-; formatted as per
-; https://www.freedesktop.org/software/systemd/man/sd-daemon.html
-;
-;
-; File Writer Options:
-;
-; The file writer will check every 30s to see if it needs
-; to reopen its file. This is useful for people who configure
-; logrotate to move log files periodically.
-;
-; file = ./couch.log ; Path name to write logs to
-;
-; Write operations will happen either every write_buffer bytes
-; or write_delay milliseconds. These are passed directly to the
-; Erlang file module with the write_delay option documented here:
-;
-; http://erlang.org/doc/man/file.html
-;
-; write_buffer = 0
-; write_delay = 0
-;
-;
-; Syslog Writer Options:
-;
-; The syslog writer options all correspond to their obvious
-; counterparts in rsyslog nomenclature.
-;
-; syslog_host =
-; syslog_port = 514
-; syslog_appid = couchdb
-; syslog_facility = local2
-
-[stats]
-; Stats collection interval in seconds. Default 10 seconds.
-;interval = 10
-
-[smoosh]
-;
-; More documentation on these is in the Automatic Compaction
-; section of the documentation.
-;
-;db_channels = upgrade_dbs,ratio_dbs,slack_dbs
-;view_channels = upgrade_views,ratio_views,slack_views
-;
-;[smoosh.ratio_dbs]
-;priority = ratio
-;min_priority = 2.0
-;
-;[smoosh.ratio_views]
-;priority = ratio
-;min_priority = 2.0
-;
-;[smoosh.slack_dbs]
-;priority = slack
-;min_priority = 536870912
-;
-;[smoosh.slack_views]
-;priority = slack
-;min_priority = 536870912
-;
-; Directory to store the state of smoosh
-state_dir = {{state_dir}}
-
-; Sets the log level for informational compaction related entries.
-;compaction_log_level = debug
-
-[ioq]
-; The maximum number of concurrent in-flight IO requests allowed by the queue
-;concurrency = 10
-
-; The fraction of the time that a background IO request will be selected
-; over an interactive IO request when both queues are non-empty
-;ratio = 0.01
-
-[ioq.bypass]
-; System administrators can choose to submit specific classes of IO directly
-; to the underlying file descriptor or OS process, bypassing the queues
-; altogether. Installing a bypass can yield higher throughput and lower
-; latency, but relinquishes some control over prioritization. The following
-; classes are recognized with the following defaults:
-
-; Messages on their way to an external process (e.g., couchjs) are bypassed
-;os_process = true
-
-; Disk IO fulfilling interactive read requests is bypassed
-;read = true
-
-; Disk IO required to update a database is bypassed
-;write = true
-
-; Disk IO required to update views and other secondary indexes is bypassed
-;view_update = true
-
-; Disk IO issued by the background replication processes that fix any
-; inconsistencies between shard copies is queued
-;shard_sync = false
-
-; Disk IO issued by compaction jobs is queued
-;compaction = false
-
-[dreyfus]
-; The name and location of the Clouseau Java service required to
-; enable Search functionality.
-; name = clouseau@127.0.0.1
-
-; CouchDB will try to re-connect to Clouseau using a bounded
-; exponential backoff with the following number of iterations.
-; retry_limit = 5
-
-; The default number of results returned from a global search query.
-; limit = 25
-
-; The default number of results returned from a search on a partition
-; of a database.
-; limit_partitions = 2000
-
-; The maximum number of results that can be returned from a global
-; search query (or any search query on a database without user-defined
-; partitions). Attempts to set ?limit=N higher than this value will
-; be rejected.
-; max_limit = 200
-
-; The maximum number of results that can be returned when searching
-; a partition of a database. Attempts to set ?limit=N higher than this
-; value will be rejected. If this config setting is not defined,
-; CouchDB will use the value of `max_limit` instead. If neither is
-; defined, the default is 2000 as stated here.
-; max_limit_partitions = 2000
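-; As an illustrative (hypothetical) example, with max_limit = 200 a global
-; search request such as
-;   GET /db/_design/ddoc/_search/idx?q=*:*&limit=500
-; would be rejected, while limit=200 or lower would be accepted.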
-
-[reshard]
-;max_jobs = 48
-;max_history = 20
-;max_retries = 5
-;retry_interval_sec = 10
-;delete_source = true
-;update_shard_map_timeout_sec = 60
-;source_close_timeout_sec = 600
-;require_node_param = false
-;require_range_param = false
-
-; How many times to retry building an individual index
-;index_max_retries = 5
-
-; How many seconds to wait between retries for an individual index
-;index_retry_interval_sec = 10
-
-[prometheus]
-additional_port = false
-bind_address = 127.0.0.1
-port = {{prometheus_port}}
-
-[view_upgrade]
-; When enabled, views with more than one collator version will be submitted
-; for auto-compaction to smoosh's "upgrade_views" channel.
-;compact_on_collator_upgrade = true
-
-; Eagerly commit views which have been upgraded from older header formats. A reason
-; to disable this setting could be if the views need an upgrade but are located on a
-; read-only file system.
-;commit_on_header_upgrade = true
diff --git a/rel/overlay/etc/local.d/README b/rel/overlay/etc/local.d/README
deleted file mode 100644
index 5cc9ed123..000000000
--- a/rel/overlay/etc/local.d/README
+++ /dev/null
@@ -1,8 +0,0 @@
-CouchDB local configuration files
-
-Files found under the etc/local.d directory that end with .ini are parsed
-by couchdb(1) at startup.
-
-This directory is intended for system administrator overrides of CouchDB
-defaults. Package maintainers should place overrides in the
-etc/default.d directory instead.
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
deleted file mode 100644
index 398cf3e2c..000000000
--- a/rel/overlay/etc/local.ini
+++ /dev/null
@@ -1,95 +0,0 @@
-; CouchDB Configuration Settings
-
-; Custom settings should be made in this file. They will override settings
-; in default.ini, but unlike changes made to default.ini, this file won't be
-; overwritten on server upgrade.
-
-[couchdb]
-;max_document_size = 4294967296 ; bytes
-;os_process_timeout = 5000
-
-[couch_peruser]
-; If enabled, couch_peruser ensures that a private per-user database
-; exists for each document in _users. These databases are writable only
-; by the corresponding user. Databases are in the following form:
-; userdb-{hex encoded username}
-;enable = true
-; If set to true and a user is deleted, the respective database gets
-; deleted as well.
-;delete_dbs = true
-; Set a default q value for peruser-created databases that is different from
-; cluster / q
-;q = 1
-
-[chttpd]
-;port = 5984
-;bind_address = 127.0.0.1
-; Options for the MochiWeb HTTP server.
-;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
-; For more socket options, consult Erlang's module 'inet' man page.
-;socket_options = [{sndbuf, 262144}, {nodelay, true}]
-
-[httpd]
-; NOTE that this only configures the "backend" node-local port, not the
-; "frontend" clustered port. You probably don't want to change anything in
-; this section.
-; Uncomment next line to trigger basic-auth popup on unauthorized requests.
-;WWW-Authenticate = Basic realm="administrator"
-
-; Uncomment next line to set the configuration modification whitelist. Only
-; whitelisted values may be changed via the /_config URLs. To allow the admin
-; to change this value over HTTP, remember to include {httpd,config_whitelist}
-; itself. Excluding it from the list would require editing this file to update
-; the whitelist.
-;config_whitelist = [{httpd,config_whitelist}, {log,level}, {etc,etc}]
-
-[chttpd_auth]
-; If you set this to true, you should also uncomment the WWW-Authenticate line
-; above. If you don't configure a WWW-Authenticate header, CouchDB will send
-; Basic realm="server" in order to prevent you from getting logged out.
-; require_valid_user = false
-
-[ssl]
-;enable = true
-;cert_file = /full/path/to/server_cert.pem
-;key_file = /full/path/to/server_key.pem
-;password = somepassword
-; set to true to validate peer certificates
-;verify_ssl_certificates = false
-; Set to true to fail if the client does not send a certificate. Only used if verify_ssl_certificates is true.
-;fail_if_no_peer_cert = false
-; Path to file containing PEM encoded CA certificates (trusted
-; certificates used for verifying a peer certificate). May be omitted if
-; you do not want to verify the peer.
-;cacert_file = /full/path/to/cacertf
-; The verification fun (optional). If not specified, the default
-; verification fun will be used.
-;verify_fun = {Module, VerifyFun}
-; maximum peer certificate depth
-;ssl_certificate_max_depth = 1
-;
-; Reject renegotiations that do not live up to RFC 5746.
-;secure_renegotiate = true
-; The cipher suites that should be supported.
-; Can be specified in erlang format "{ecdhe_ecdsa,aes_128_cbc,sha256}"
-; or in OpenSSL format "ECDHE-ECDSA-AES128-SHA256".
-;ciphers = ["ECDHE-ECDSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA"]
-; The SSL/TLS versions to support
-;tls_versions = [tlsv1, 'tlsv1.1', 'tlsv1.2']
-
-; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All requests to
-; the Virtual Host will be redirected to the path. In the example below all requests
-; to http://example.com/ are redirected to /database.
-; If you run CouchDB on a specific port, include the port number in the vhost:
-; example.com:5984 = /database
-[vhosts]
-;example.com = /database/
-
-; To create an admin account uncomment the '[admins]' section below and add a
-; line in the format 'username = password'. When you next start CouchDB, it
-; will change the password to a hash (so that your passwords don't linger
-; around in plain-text files). You can add more admin accounts with more
-; 'username = password' lines. Don't forget to restart CouchDB after
-; changing this.
-[admins]
-;admin = mysecretpassword
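-; After the next restart the plain-text value is replaced with a hash; the
-; stored line then has roughly this (illustrative) shape:
-;admin = -pbkdf2-<derived key>,<salt>,<iterations>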
diff --git a/rel/overlay/etc/vm.args b/rel/overlay/etc/vm.args
deleted file mode 100644
index 3ade5cbe5..000000000
--- a/rel/overlay/etc/vm.args
+++ /dev/null
@@ -1,97 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# Each node in the system must have a unique name. These are specified through
-# the Erlang -name flag, which takes the form:
-#
-# -name nodename@<FQDN>
-#
-# or
-#
-# -name nodename@<IP-ADDRESS>
-#
-# CouchDB recommends the following values for this flag:
-#
-# 1. If this is a single node, not in a cluster, use:
-# -name couchdb@127.0.0.1
-#
-# 2. If DNS is configured for this host, use the FQDN, such as:
-# -name couchdb@my.host.domain.com
-#
-# 3. If DNS isn't configured for this host, use IP addresses only, such as:
-# -name couchdb@192.168.0.1
-#
-# Do not rely on tricks with /etc/hosts or libresolv to handle anything
-# other than the above 3 approaches correctly. They will not work reliably.
-#
-# Multiple CouchDBs running on the same machine can use couchdb1@, couchdb2@,
-# etc.
-{{node_name}}
-
-# All nodes must share the same magic cookie for distributed Erlang to work.
-# Uncomment the following line and append a securely generated random value.
-# -setcookie
-
-# Which interfaces should the node listen on?
--kernel inet_dist_use_interface {127,0,0,1}
-
-# Tell kernel and SASL not to log anything
--kernel error_logger silent
--sasl sasl_error_logger false
-
-# Use kernel poll functionality if supported by emulator
-+K true
-
-# Start a pool of asynchronous IO threads
-+A 16
-
-# Comment this line out to enable the interactive Erlang shell on startup
-+Bd -noinput
-
-# Force use of the smp scheduler, fixes #1296
--smp enable
-
-# Set maximum SSL session lifetime to reap terminated replication readers
--ssl session_lifetime 300
-
-## TLS Distribution
-## Use TLS for connections between Erlang cluster members.
-## http://erlang.org/doc/apps/ssl/ssl_distribution.html
-##
-## Generate Cert(PEM) File
-## This is just an example command to generate a certfile (PEM).
-## This is not an endorsement of specific expiration limits, key sizes, or algorithms.
-## $ openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout key.pem -out cert.pem
-## $ cat key.pem cert.pem > dev/erlserver.pem && rm key.pem cert.pem
-##
-## Generate a Config File (couch_ssl_dist.conf)
-## [{server,
-## [{certfile, "</path/to/erlserver.pem>"},
-## {secure_renegotiate, true}]},
-## {client,
-## [{secure_renegotiate, true}]}].
-##
-## CouchDB recommends the following values for no_tls flag:
-## 1. Use TCP only, set to true, such as:
-## -couch_dist no_tls true
-## 2. Use TLS only, set to false, such as:
-## -couch_dist no_tls false
-## 3. Specify which nodes should use TCP, such as:
-## -couch_dist no_tls \"*@127.0.0.1\"
-##
-## To ensure search works, make sure to set the 'no_tls' option for the clouseau node.
-## By default that would be "clouseau@127.0.0.1".
-## Don't forget to override the paths to point to your certificate(s) and key(s)!
-##
-#-proto_dist couch
-#-couch_dist no_tls '"clouseau@127.0.0.1"'
-#-ssl_dist_optfile <path/to/couch_ssl_dist.conf>
diff --git a/rel/plugins/eunit_plugin.erl b/rel/plugins/eunit_plugin.erl
deleted file mode 100644
index 8f298db5f..000000000
--- a/rel/plugins/eunit_plugin.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(eunit_plugin).
-
--export([setup_eunit/2]).
-
-setup_eunit(Config, AppFile) ->
- case is_base_dir(Config) of
- false -> ok;
- true -> build_eunit_config(Config, AppFile)
- end.
-
-%% from https://github.com/ChicagoBoss/ChicagoBoss/blob/master/skel/priv/rebar/boss_plugin.erl
-is_base_dir(RebarConf) ->
- filename:absname(rebar_utils:get_cwd()) =:=
- rebar_config:get_xconf(RebarConf, base_dir, undefined).
-
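-%% Populate the rebar template variables (prefix, data_dir, view_index_dir,
-%% state_dir) with paths under the current working directory and render the
-%% "setup_eunit" template. Existing tmp data directories are removed first.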
-build_eunit_config(Config0, AppFile) ->
- Cwd = filename:absname(rebar_utils:get_cwd()),
- DataDir = Cwd ++ "/tmp/data",
- ViewIndexDir = Cwd ++ "/tmp/data",
- StateDir = Cwd ++ "/tmp/data",
- TmpDataDir = Cwd ++ "/tmp/tmp_data",
- cleanup_dirs([DataDir, TmpDataDir]),
- Config1 = rebar_config:set_global(Config0, template, "setup_eunit"),
- Config2 = rebar_config:set_global(Config1, prefix, Cwd),
- Config3 = rebar_config:set_global(Config2, data_dir, DataDir),
- Config4 = rebar_config:set_global(Config3, view_index_dir, ViewIndexDir),
- Config = rebar_config:set_global(Config4, state_dir, StateDir),
- rebar_templater:create(Config, AppFile).
-
-cleanup_dirs(Dirs) ->
- lists:foreach(
- fun(Dir) ->
- case filelib:is_dir(Dir) of
- true -> del_dir(Dir);
- false -> ok
- end
- end,
- Dirs
- ).
-
-del_dir(Dir) ->
- All = filelib:wildcard(Dir ++ "/**"),
- {Dirs, Files} = lists:partition(fun filelib:is_dir/1, All),
- ok = lists:foreach(fun file:delete/1, Files),
- SortedDirs = lists:sort(fun(A, B) -> length(A) > length(B) end, Dirs),
- ok = lists:foreach(fun file:del_dir/1, SortedDirs),
- ok = file:del_dir(Dir).
diff --git a/rel/reltool.config b/rel/reltool.config
deleted file mode 100644
index ab26fb2ed..000000000
--- a/rel/reltool.config
+++ /dev/null
@@ -1,152 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{sys, [
- {lib_dirs, ["../src"]},
- {rel, "couchdb", "3.2.2", [
- %% stdlib
- asn1,
- compiler,
- crypto,
- inets,
- kernel,
- runtime_tools,
- sasl,
- ssl,
- stdlib,
- syntax_tools,
- xmerl,
- %% couchdb
- b64url,
- bear,
- chttpd,
- config,
- couch,
- couch_epi,
- couch_index,
- couch_log,
- couch_mrview,
- couch_plugins,
- couch_replicator,
- couch_stats,
- couch_event,
- couch_peruser,
- couch_dist,
- custodian,
- ddoc_cache,
- dreyfus,
- ets_lru,
- fabric,
- folsom,
- global_changes,
- hyper,
- ibrowse,
- ioq,
- jiffy,
- jwtf,
- ken,
- khash,
- mango,
- mem3,
- mochiweb,
- rexi,
- setup,
- smoosh,
- snappy,
- weatherreport,
- couch_prometheus,
-
- %% extra
- recon
- ]},
- {rel, "start_clean", "", [kernel, stdlib]},
- {boot_rel, "couchdb"},
- {profile, embedded},
- {excl_sys_filters, ["^bin/.*", "^erts.*/bin/(dialyzer|typer)"]},
- {excl_archive_filters, [".*"]},
- {incl_cond, exclude},
-
- %% stdlib
- {app, asn1, [{incl_cond, include}]},
- {app, compiler, [{incl_cond, include}]},
- {app, crypto, [{incl_cond, include}]},
- {app, inets, [{incl_cond, include}]},
- {app, kernel, [{incl_cond, include}]},
- {app, public_key, [{incl_cond, include}]},
- {app, runtime_tools, [{incl_cond, include}]},
- {app, sasl, [{incl_cond, include}]},
- {app, ssl, [{incl_cond, include}]},
- {app, stdlib, [{incl_cond, include}]},
- {app, syntax_tools, [{incl_cond, include}]},
- {app, xmerl, [{incl_cond, include}]},
-
- %% couchdb
- {app, b64url, [{incl_cond, include}]},
- {app, bear, [{incl_cond, include}]},
- {app, chttpd, [{incl_cond, include}]},
- {app, config, [{incl_cond, include}]},
- {app, couch, [{incl_cond, include}]},
- {app, couch_epi, [{incl_cond, include}]},
- {app, couch_index, [{incl_cond, include}]},
- {app, couch_log, [{incl_cond, include}]},
- {app, couch_mrview, [{incl_cond, include}]},
- {app, couch_plugins, [{incl_cond, include}]},
- {app, couch_replicator, [{incl_cond, include}]},
- {app, couch_stats, [{incl_cond, include}]},
- {app, couch_event, [{incl_cond, include}]},
- {app, couch_peruser, [{incl_cond, include}]},
- {app, couch_dist ,[{incl_cond, include}]},
- {app, custodian, [{incl_cond, include}]},
- {app, ddoc_cache, [{incl_cond, include}]},
- {app, dreyfus, [{incl_cond, include}]},
- {app, ets_lru, [{incl_cond, include}]},
- {app, fabric, [{incl_cond, include}]},
- {app, folsom, [{incl_cond, include}]},
- {app, global_changes, [{incl_cond, include}]},
- {app, hyper, [{incl_cond, include}]},
- {app, ibrowse, [{incl_cond, include}]},
- {app, ioq, [{incl_cond, include}]},
- {app, jiffy, [{incl_cond, include}]},
- {app, jwtf, [{incl_cond, include}]},
- {app, ken, [{incl_cond, include}]},
- {app, khash, [{incl_cond, include}]},
- {app, mango, [{incl_cond, include}]},
- {app, mem3, [{incl_cond, include}]},
- {app, mochiweb, [{incl_cond, include}]},
- {app, rexi, [{incl_cond, include}]},
- {app, setup, [{incl_cond, include}]},
- {app, smoosh, [{incl_cond, include}]},
- {app, snappy, [{incl_cond, include}]},
- {app, weatherreport, [{incl_cond, include}]},
- {app, couch_prometheus, [{incl_cond, include}]},
-
- %% extra
- {app, recon, [{incl_cond, include}]}
-]}.
-
-{overlay_vars, "couchdb.config"}.
-{overlay, [
- {copy, "../LICENSE", "LICENSE"},
- {mkdir, "var/log"},
- {copy, "overlay/bin"},
- {copy, "overlay/etc"},
- {copy, "../src/couch/priv/couchjs", "bin/couchjs"},
- {copy, "../share/server/main.js", "share/server/main.js"},
- {copy, "../share/server/main-coffee.js", "share/server/main-coffee.js"},
- {copy, "../src/weatherreport/weatherreport", "bin/weatherreport"},
- {copy, "files/sys.config", "releases/\{\{rel_vsn\}\}/sys.config"},
- {copy, "files/vm.args", "releases/\{\{rel_vsn\}\}/vm.args"},
- {template, "overlay/etc/default.ini", "etc/default.ini"},
- {template, "overlay/etc/vm.args", "etc/vm.args"},
- {template, "files/couchdb.in", "bin/couchdb"},
- {template, "files/couchdb.cmd.in", "bin/couchdb.cmd"}
-]}.
diff --git a/setup_eunit.template b/setup_eunit.template
deleted file mode 100644
index ceef60d12..000000000
--- a/setup_eunit.template
+++ /dev/null
@@ -1,20 +0,0 @@
-{variables, [
- {package_author_name, "The Apache Software Foundation"},
- {cluster_port, 5984},
- {backend_port, 5986},
- {prometheus_port, 17986},
- {node_name, "-name couchdbtest@127.0.0.1"},
-
- {data_dir, "/tmp"},
- {prefix, "/tmp"},
- {view_index_dir, "/tmp"},
- {state_dir, "/tmp"}
-]}.
-{dir, "tmp"}.
-{dir, "tmp/etc"}.
-{dir, "tmp/data"}.
-{dir, "tmp/tmp_data"}.
-{template, "rel/overlay/etc/default.ini", "tmp/etc/default_eunit.ini"}.
-{template, "rel/overlay/etc/local.ini", "tmp/etc/local_eunit.ini"}.
-{template, "rel/files/eunit.ini", "tmp/etc/eunit.ini"}.
-{template, "rel/overlay/etc/vm.args", "tmp/etc/vm.args"}.
diff --git a/share/server/60/escodegen.js b/share/server/60/escodegen.js
deleted file mode 100644
index 747a7322c..000000000
--- a/share/server/60/escodegen.js
+++ /dev/null
@@ -1 +0,0 @@
-(function(b){function a(b,d){if({}.hasOwnProperty.call(a.cache,b))return a.cache[b];var e=a.resolve(b);if(!e)throw new Error('Failed to resolve module '+b);var c={id:b,require:a,filename:b,exports:{},loaded:!1,parent:d,children:[]};d&&d.children.push(c);var f=b.slice(0,b.lastIndexOf('/')+1);return a.cache[b]=c.exports,e.call(c.exports,c,c.exports,f,b),c.loaded=!0,a.cache[b]=c.exports}a.modules={},a.cache={},a.resolve=function(b){return{}.hasOwnProperty.call(a.modules,b)?a.modules[b]:void 0},a.define=function(b,c){a.modules[b]=c};var c=function(a){return a='/',{title:'browser',version:'v6.8.0',browser:!0,env:{},argv:[],nextTick:b.setImmediate||function(a){setTimeout(a,0)},cwd:function(){return a},chdir:function(b){a=b}}}();a.define('/tools/entry-point.js',function(c,d,e,f){!function(){'use strict';b.escodegen=a('/escodegen.js',c),escodegen.browser=!0}()}),a.define('/escodegen.js',function(d,c,e,f){!function(k,e,af,N,_,m,J,n,F,A,Z,ae,S,ad,i,f,W,ac,L,aa,t,Y,B,C,x,a9,a7,w,E,G,V,Q,q,U,P,g,R,a6,X,l,y,K,a5,a4){'use strict';function ap(a){return o.Expression.hasOwnProperty(a.type)}function a3(a){return o.Statement.hasOwnProperty(a.type)}function a2(){return{indent:null,base:null,parse:null,comment:!1,format:{indent:{style:' ',base:0,adjustMultilineComment:!1},newline:'\n',space:' ',json:!1,renumber:!1,hexadecimal:!1,quotes:'single',escapeless:!1,compact:!1,parentheses:!0,semicolons:!0,safeConcatenation:!1,preserveBlankLines:!1},moz:{comprehensionExpressionStartsWithAssignment:!1,starlessGenerator:!1},sourceMap:null,sourceMapRoot:null,sourceMapWithCode:!1,directive:!1,raw:!0,verbatim:null,sourceCode:null}}function H(b,a){var c='';for(a|=0;a>0;a>>>=1,b+=b)a&1&&(c+=b);return c}function am(a){return/[\r\n]/g.test(a)}function r(b){var a=b.length;return a&&m.code.isLineTerminator(b.charCodeAt(a-1))}function a0(c,b){var a;for(a in b)b.hasOwnProperty(a)&&(c[a]=b[a]);return c}function T(b,d){function e(a){return typeof a==='object'&&a instanceof Object&&!(a instanceof RegExp)}var a,c;for(a in d)d.hasOwnProperty(a)&&(c=d[a],e(c)?e(b[a])?T(b[a],c):b[a]=T({},c):b[a]=c);return b}function ao(c){var b,e,a,f,d;if(c!==c)throw new Error('Numeric literal whose value is NaN');if(c<0||c===0&&1/c<0)throw new Error('Numeric literal whose value is negative');if(c===1/0)return A?'null':Z?'1e400':'1e+400';if(b=''+c,!Z||b.length<3)return b;e=b.indexOf('.'),!A&&b.charCodeAt(0)===48&&e===1&&(e=0,b=b.slice(1)),a=b,b=b.replace('e+','e'),f=0,(d=a.indexOf('e'))>0&&(f=+a.slice(d+1),a=a.slice(0,d)),e>=0&&(f-=a.length-e-1,a=+(a.slice(0,e)+a.slice(e+1))+''),d=0;while(a.charCodeAt(a.length+d-1)===48)--d;return d!==0&&(f-=d,a=a.slice(0,d)),f!==0&&(a+='e'+f),(a.length<b.length||ae&&c>1e12&&Math.floor(c)===c&&(a='0x'+c.toString(16)).length<b.length)&&+a===c&&(b=a),b}function a8(a,b){return(a&-2)===8232?(b?'u':'\\u')+(a===8232?'2028':'2029'):a===10||a===13?(b?'':'\\')+(a===10?'n':'r'):String.fromCharCode(a)}function aq(d){var g,a,h,e,i,b,f,c;if(a=d.toString(),d.source){if(g=a.match(/\/([^\/]*)$/),!g)return a;for(h=g[1],a='',f=!1,c=!1,e=0,i=d.source.length;e<i;++e)b=d.source.charCodeAt(e),c?(a+=a8(b,c),c=!1):(f?b===93&&(f=!1):b===47?a+='\\':b===91&&(f=!0),a+=a8(b,c),c=b===92);return'/'+a+'/'+h}return a}function ar(a,c){var b;return a===8?'\\b':a===12?'\\f':a===9?'\\t':(b=a.toString(16).toUpperCase(),A||a>255?'\\u'+'0000'.slice(b.length)+b:a===0&&!m.code.isDecimalDigit(c)?'\\0':a===11?'\\x0B':'\\x'+'00'.slice(b.length)+b)}function 
ai(a){if(a===92)return'\\\\';if(a===10)return'\\n';if(a===13)return'\\r';if(a===8232)return'\\u2028';if(a===8233)return'\\u2029';throw new Error('Incorrectly classified character')}function aj(d){var a,e,c,b;for(b=S==='double'?'"':"'",a=0,e=d.length;a<e;++a){if(c=d.charCodeAt(a),c===39){b='"';break}if(c===34){b="'";break}c===92&&++a}return b+d+b}function ak(d){var b='',c,g,a,h=0,i=0,e,f;for(c=0,g=d.length;c<g;++c){if(a=d.charCodeAt(c),a===39)++h;else if(a===34)++i;else if(a===47&&A)b+='\\';else if(m.code.isLineTerminator(a)||a===92){b+=ai(a);continue}else if(!m.code.isIdentifierPartES5(a)&&(A&&a<32||!(A||ad)&&(a<32||a>126))){b+=ar(a,d.charCodeAt(c+1));continue}b+=String.fromCharCode(a)}if(e=!(S==='double'||S==='auto'&&i<h),f=e?"'":'"',!(e?h:i))return f+b+f;for(d=b,b=f,c=0,g=d.length;c<g;++c)a=d.charCodeAt(c),(a===39&&e||a===34&&!e)&&(b+='\\'),b+=String.fromCharCode(a);return b+f}function a1(d){var a,e,b,c='';for(a=0,e=d.length;a<e;++a)b=d[a],c+=J(b)?a1(b):b;return c}function j(b,a){if(!B)return J(b)?a1(b):b;if(a==null)if(b instanceof N)return b;else a={};return a.loc==null?new N(null,null,B,b,a.name||null):new N(a.loc.start.line,a.loc.start.column,B===!0?a.loc.source||null:B,b,a.name||null)}function v(){return f?f:' '}function h(c,d){var e,g,a,b;return e=j(c).toString(),e.length===0?[d]:(g=j(d).toString(),g.length===0?[c]:(a=e.charCodeAt(e.length-1),b=g.charCodeAt(0),(a===43||a===45)&&a===b||m.code.isIdentifierPartES5(a)&&m.code.isIdentifierPartES5(b)||a===47&&b===105?[c,v(),d]:m.code.isWhiteSpace(a)||m.code.isLineTerminator(a)||m.code.isWhiteSpace(b)||m.code.isLineTerminator(b)?[c,d]:[c,f,d]))}function u(a){return[n,a]}function p(b){var a;a=n,n+=F,b(n),n=a}function as(b){var a;for(a=b.length-1;a>=0;--a)if(m.code.isLineTerminator(b.charCodeAt(a)))break;return b.length-1-a}function ah(k,i){var b,a,e,g,d,c,f,h;for(b=k.split(/\r\n|[\r\n]/),c=Number.MAX_VALUE,a=1,e=b.length;a<e;++a){g=b[a],d=0;while(d<g.length&&m.code.isWhiteSpace(g.charCodeAt(d)))++d;c>d&&(c=d)}for(i!==void 0?(f=n,b[1][c]==='*'&&(i+=' '),n=i):(c&1&&--c,f=n),a=1,e=b.length;a<e;++a)h=j(u(b[a].slice(c))),b[a]=B?h.join(''):h;return n=f,b.join('\n')}function D(a,c){if(a.type==='Line')if(r(a.value))return'//'+a.value;else{var b='//'+a.value;return x||(b+='\n'),b}return t.format.indent.adjustMultilineComment&&/[\n\r]/.test(a.value)?ah('/*'+a.value+'*/',c):'/*'+a.value+'*/'}function $(d,a){var c,g,b,q,p,m,l,i,f,o,h,s,t,e;if(d.leadingComments&&d.leadingComments.length>0){if(q=a,x){for(b=d.leadingComments[0],a=[],i=b.extendedRange,f=b.range,h=C.substring(i[0],f[0]),e=(h.match(/\n/g)||[]).length,e>0?(a.push(H('\n',e)),a.push(u(D(b)))):(a.push(h),a.push(D(b))),o=f,c=1,g=d.leadingComments.length;c<g;c++)b=d.leadingComments[c],f=b.range,s=C.substring(o[1],f[0]),e=(s.match(/\n/g)||[]).length,a.push(H('\n',e)),a.push(u(D(b))),o=f;t=C.substring(f[1],i[1]),e=(t.match(/\n/g)||[]).length,a.push(H('\n',e))}else for(b=d.leadingComments[0],a=[],L&&d.type===k.Program&&d.body.length===0&&a.push('\n'),a.push(D(b)),r(j(a).toString())||a.push('\n'),c=1,g=d.leadingComments.length;c<g;++c)b=d.leadingComments[c],l=[D(b)],r(j(l).toString())||l.push('\n'),a.push(u(l));a.push(u(q))}if(d.trailingComments)if(x)b=d.trailingComments[0],i=b.extendedRange,f=b.range,h=C.substring(i[0],f[0]),e=(h.match(/\n/g)||[]).length,e>0?(a.push(H('\n',e)),a.push(u(D(b)))):(a.push(h),a.push(D(b)));else for(p=!r(j(a).toString()),m=H(' 
',as(j([n,a,F]).toString())),c=0,g=d.trailingComments.length;c<g;++c)b=d.trailingComments[c],p?(c===0?a=[a,F]:a=[a,m],a.push(D(b,m))):a=[a,u(D(b))],c!==g-1&&!r(j(a).toString())&&(a=[a,'\n']);return a}function I(c,d,e){var a,b=0;for(a=c;a<d;a++)C[a]==='\n'&&b++;for(a=1;a<b;a++)e.push(i)}function s(a,b,c){return b<c?['(',a,')']:a}function ab(d){var a,c,b;for(b=d.split(/\r\n|\n/),a=1,c=b.length;a<c;a++)b[a]=i+n+b[a];return b}function an(c,d){var a,b,f;return a=c[t.verbatim],typeof a==='string'?b=s(ab(a),e.Sequence,d):(b=ab(a.content),f=a.precedence!=null?a.precedence:e.Sequence,b=s(b,f,d)),j(b,c)}function o(){}function z(a){return j(a.name,a)}function M(a,b){return a.async?'async'+(b?v():f):''}function O(b){var a=b.generator&&!t.moz.starlessGenerator;return a?'*'+f:''}function ag(b){var a=b.value;return a.async?M(a,!b.computed):O(a)?'*':''}function at(a){var b;if(b=new o,a3(a))return b.generateStatement(a,l);if(ap(a))return b.generateExpression(a,e.Sequence,g);throw new Error('Unknown node type: '+a.type)}function al(k,e){var h=a2(),j,g;return e!=null?(typeof e.indent==='string'&&(h.format.indent.style=e.indent),typeof e.base==='number'&&(h.format.indent.base=e.base),e=T(h,e),F=e.format.indent.style,typeof e.base==='string'?n=e.base:n=H(F,e.format.indent.base)):(e=h,F=e.format.indent.style,n=H(F,e.format.indent.base)),A=e.format.json,Z=e.format.renumber,ae=A?!1:e.format.hexadecimal,S=A?'double':e.format.quotes,ad=e.format.escapeless,i=e.format.newline,f=e.format.space,e.format.compact&&(i=f=F=n=''),W=e.format.parentheses,ac=e.format.semicolons,L=e.format.safeConcatenation,aa=e.directive,Y=A?null:e.parse,B=e.sourceMap,C=e.sourceCode,x=e.format.preserveBlankLines&&C!==null,t=e,B&&(c.browser?N=b.sourceMap.SourceNode:N=a('/node_modules/source-map/lib/source-map.js',d).SourceNode),j=at(k),B?(g=j.toStringWithSourceMap({file:e.file,sourceRoot:e.sourceMapRoot}),e.sourceContent&&g.map.setSourceContent(e.sourceMap,e.sourceContent),e.sourceMapWithCode?g:g.map.toString()):(g={code:j.toString(),map:null},e.sourceMapWithCode?g:g.code)}_=a('/node_modules/estraverse/estraverse.js',d),m=a('/node_modules/esutils/lib/utils.js',d),k=_.Syntax,e={Sequence:0,Yield:1,Await:1,Assignment:1,Conditional:2,ArrowFunction:2,LogicalOR:3,LogicalAND:4,BitwiseOR:5,BitwiseXOR:6,BitwiseAND:7,Equality:8,Relational:9,BitwiseSHIFT:10,Additive:11,Multiplicative:12,Unary:13,Postfix:14,Call:15,New:16,TaggedTemplate:17,Member:18,Primary:19},af={'||':e.LogicalOR,'&&':e.LogicalAND,'|':e.BitwiseOR,'^':e.BitwiseXOR,'&':e.BitwiseAND,'==':e.Equality,'!=':e.Equality,'===':e.Equality,'!==':e.Equality,is:e.Equality,isnt:e.Equality,'<':e.Relational,'>':e.Relational,'<=':e.Relational,'>=':e.Relational,'in':e.Relational,'instanceof':e.Relational,'<<':e.BitwiseSHIFT,'>>':e.BitwiseSHIFT,'>>>':e.BitwiseSHIFT,'+':e.Additive,'-':e.Additive,'*':e.Multiplicative,'%':e.Multiplicative,'/':e.Multiplicative},w=1,E=2,G=4,V=8,Q=16,q=32,U=E|G,P=w|E,g=w|E|G,R=w,a6=G,X=w|G,l=w,y=w|q,K=0,a5=w|Q,a4=w|V,J=Array.isArray,J||(J=function a(b){return Object.prototype.toString.call(b)==='[object Array]'}),o.prototype.maybeBlock=function(a,c){var d,b,e=this;return b=!t.comment||!a.leadingComments,a.type===k.BlockStatement&&b?[f,this.generateStatement(a,c)]:a.type===k.EmptyStatement&&b?';':(p(function(){d=[i,u(e.generateStatement(a,c))]}),d)},o.prototype.maybeBlockSuffix=function(c,a){var b=r(j(a).toString());return c.type===k.BlockStatement&&!(t.comment&&c.leadingComments)&&!b?[a,f]:b?[a,n]:[a,i,n]},o.prototype.generatePattern=function(a,b,c){return 
a.type===k.Identifier?z(a):this.generateExpression(a,b,c)},o.prototype.generateFunctionParams=function(a){var c,d,b,h;if(h=!1,a.type===k.ArrowFunctionExpression&&!a.rest&&(!a.defaults||a.defaults.length===0)&&a.params.length===1&&a.params[0].type===k.Identifier)b=[M(a,!0),z(a.params[0])];else{for(b=a.type===k.ArrowFunctionExpression?[M(a,!1)]:[],b.push('('),a.defaults&&(h=!0),c=0,d=a.params.length;c<d;++c)h&&a.defaults[c]?b.push(this.generateAssignment(a.params[c],a.defaults[c],'=',e.Assignment,g)):b.push(this.generatePattern(a.params[c],e.Assignment,g)),c+1<d&&b.push(','+f);a.rest&&(a.params.length&&b.push(','+f),b.push('...'),b.push(z(a.rest))),b.push(')')}return b},o.prototype.generateFunctionBody=function(b){var a,c;return a=this.generateFunctionParams(b),b.type===k.ArrowFunctionExpression&&(a.push(f),a.push('=>')),b.expression?(a.push(f),c=this.generateExpression(b.body,e.Assignment,g),c.toString().charAt(0)==='{'&&(c=['(',c,')']),a.push(c)):a.push(this.maybeBlock(b.body,a4)),a},o.prototype.generateIterationForStatement=function(d,b,i){var a=['for'+f+'('],c=this;return p(function(){b.left.type===k.VariableDeclaration?p(function(){a.push(b.left.kind+v()),a.push(c.generateStatement(b.left.declarations[0],K))}):a.push(c.generateExpression(b.left,e.Call,g)),a=h(a,d),a=[h(a,c.generateExpression(b.right,e.Sequence,g)),')']}),a.push(this.maybeBlock(b.body,i)),a},o.prototype.generatePropertyKey=function(d,b,c){var a=[];return b&&a.push('['),c.type==='AssignmentPattern'?a.push(this.AssignmentPattern(c,e.Sequence,g)):a.push(this.generateExpression(d,e.Sequence,g)),b&&a.push(']'),a},o.prototype.generateAssignment=function(c,d,g,b,a){return e.Assignment<b&&(a|=w),s([this.generateExpression(c,e.Call,a),f+g+f,this.generateExpression(d,e.Assignment,a)],e.Assignment,b)},o.prototype.semicolon=function(a){return!ac&&a&q?'':';'},o.Statement={BlockStatement:function(a,f){var c,d,b=['{',i],e=this;return p(function(){a.body.length===0&&x&&(c=a.range,c[1]-c[0]>2)&&(d=C.substring(c[0]+1,c[1]-1),d[0]==='\n'&&(b=['{']),b.push(d));var g,h,m,k;for(k=l,f&V&&(k|=Q),g=0,h=a.body.length;g<h;++g)x&&(g===0&&(a.body[0].leadingComments&&(c=a.body[0].leadingComments[0].extendedRange,d=C.substring(c[0],c[1]),d[0]==='\n'&&(b=['{'])),a.body[0].leadingComments||I(a.range[0],a.body[0].range[0],b)),g>0&&!(a.body[g-1].trailingComments||a.body[g].leadingComments)&&I(a.body[g-1].range[1],a.body[g].range[0],b)),g===h-1&&(k|=q),a.body[g].leadingComments&&x?m=e.generateStatement(a.body[g],k):m=u(e.generateStatement(a.body[g],k)),b.push(m),r(j(m).toString())||(x&&g<h-1?a.body[g+1].leadingComments||b.push(i):b.push(i)),x&&g===h-1&&(a.body[g].trailingComments||I(a.body[g].range[1],a.range[1],b))}),b.push(u('}')),b},BreakStatement:function(a,b){return a.label?'break '+a.label.name+this.semicolon(b):'break'+this.semicolon(b)},ContinueStatement:function(a,b){return a.label?'continue '+a.label.name+this.semicolon(b):'continue'+this.semicolon(b)},ClassBody:function(b,d){var a=['{',i],c=this;return p(function(h){var d,f;for(d=0,f=b.body.length;d<f;++d)a.push(h),a.push(c.generateExpression(b.body[d],e.Sequence,g)),d+1<f&&a.push(i)}),r(j(a).toString())||a.push(i),a.push(n),a.push('}'),a},ClassDeclaration:function(b,d){var a,c;return a=['class'],b.id&&(a=h(a,this.generateExpression(b.id,e.Sequence,g))),b.superClass&&(c=h('extends',this.generateExpression(b.superClass,e.Assignment,g)),a=h(a,c)),a.push(f),a.push(this.generateStatement(b.body,y)),a},DirectiveStatement:function(a,b){return 
t.raw&&a.raw?a.raw+this.semicolon(b):aj(a.directive)+this.semicolon(b)},DoWhileStatement:function(b,c){var a=h('do',this.maybeBlock(b.body,l));return a=this.maybeBlockSuffix(b.body,a),h(a,['while'+f+'(',this.generateExpression(b.test,e.Sequence,g),')'+this.semicolon(c)])},CatchClause:function(a,d){var b,c=this;return p(function(){var d;b=['catch'+f+'(',c.generateExpression(a.param,e.Sequence,g),')'],a.guard&&(d=c.generateExpression(a.guard,e.Sequence,g),b.splice(2,0,' if ',d))}),b.push(this.maybeBlock(a.body,l)),b},DebuggerStatement:function(b,a){return'debugger'+this.semicolon(a)},EmptyStatement:function(a,b){return';'},ExportDefaultDeclaration:function(b,c){var a=['export'],d;return d=c&q?y:l,a=h(a,'default'),a3(b.declaration)?a=h(a,this.generateStatement(b.declaration,d)):a=h(a,this.generateExpression(b.declaration,e.Assignment,g)+this.semicolon(c)),a},ExportNamedDeclaration:function(b,c){var a=['export'],d,m=this;return d=c&q?y:l,b.declaration?h(a,this.generateStatement(b.declaration,d)):(b.specifiers&&(b.specifiers.length===0?a=h(a,'{'+f+'}'):b.specifiers[0].type===k.ExportBatchSpecifier?a=h(a,this.generateExpression(b.specifiers[0],e.Sequence,g)):(a=h(a,'{'),p(function(f){var c,d;for(a.push(i),c=0,d=b.specifiers.length;c<d;++c)a.push(f),a.push(m.generateExpression(b.specifiers[c],e.Sequence,g)),c+1<d&&a.push(','+i)}),r(j(a).toString())||a.push(i),a.push(n+'}')),b.source?a=h(a,['from'+f,this.generateExpression(b.source,e.Sequence,g),this.semicolon(c)]):a.push(this.semicolon(c))),a)},ExportAllDeclaration:function(a,b){return['export'+f,'*'+f,'from'+f,this.generateExpression(a.source,e.Sequence,g),this.semicolon(b)]},ExpressionStatement:function(c,d){function f(b){var a;return b.slice(0,5)!=='class'?!1:(a=b.charCodeAt(5),a===123||m.code.isWhiteSpace(a)||m.code.isLineTerminator(a))}function h(b){var a;return b.slice(0,8)!=='function'?!1:(a=b.charCodeAt(8),a===40||m.code.isWhiteSpace(a)||a===42||m.code.isLineTerminator(a))}function i(b){var c,a,d;if(b.slice(0,5)!=='async')return!1;if(!m.code.isWhiteSpace(b.charCodeAt(5)))return!1;for(a=6,d=b.length;a<d;++a)if(!m.code.isWhiteSpace(b.charCodeAt(a)))break;return a===d?!1:b.slice(a,a+8)!=='function'?!1:(c=b.charCodeAt(a+8),c===40||m.code.isWhiteSpace(c)||c===42||m.code.isLineTerminator(c))}var a,b;return a=[this.generateExpression(c.expression,e.Sequence,g)],b=j(a).toString(),b.charCodeAt(0)===123||f(b)||h(b)||i(b)||aa&&d&Q&&c.expression.type===k.Literal&&typeof c.expression.value==='string'?a=['(',a,')'+this.semicolon(d)]:a.push(this.semicolon(d)),a},ImportDeclaration:function(b,d){var a,c,l=this;return b.specifiers.length===0?['import',f,this.generateExpression(b.source,e.Sequence,g),this.semicolon(d)]:(a=['import'],c=0,b.specifiers[c].type===k.ImportDefaultSpecifier&&(a=h(a,[this.generateExpression(b.specifiers[c],e.Sequence,g)]),++c),b.specifiers[c]&&(c!==0&&a.push(','),b.specifiers[c].type===k.ImportNamespaceSpecifier?a=h(a,[f,this.generateExpression(b.specifiers[c],e.Sequence,g)]):(a.push(f+'{'),b.specifiers.length-c===1?(a.push(f),a.push(this.generateExpression(b.specifiers[c],e.Sequence,g)),a.push(f+'}'+f)):(p(function(h){var d,f;for(a.push(i),d=c,f=b.specifiers.length;d<f;++d)a.push(h),a.push(l.generateExpression(b.specifiers[d],e.Sequence,g)),d+1<f&&a.push(','+i)}),r(j(a).toString())||a.push(i),a.push(n+'}'+f)))),a=h(a,['from'+f,this.generateExpression(b.source,e.Sequence,g),this.semicolon(d)]),a)},VariableDeclarator:function(a,c){var b=c&w?g:U;return 
a.init?[this.generateExpression(a.id,e.Assignment,b),f,'=',f,this.generateExpression(a.init,e.Assignment,b)]:this.generatePattern(a.id,e.Assignment,b)},VariableDeclaration:function(c,h){function j(){for(b=c.declarations[0],t.comment&&b.leadingComments?(a.push('\n'),a.push(u(e.generateStatement(b,d)))):(a.push(v()),a.push(e.generateStatement(b,d))),g=1,k=c.declarations.length;g<k;++g)b=c.declarations[g],t.comment&&b.leadingComments?(a.push(','+i),a.push(u(e.generateStatement(b,d)))):(a.push(','+f),a.push(e.generateStatement(b,d)))}var a,g,k,b,d,e=this;return a=[c.kind],d=h&w?l:K,c.declarations.length>1?p(j):j(),a.push(this.semicolon(h)),a},ThrowStatement:function(a,b){return[h('throw',this.generateExpression(a.argument,e.Sequence,g)),this.semicolon(b)]},TryStatement:function(b,f){var a,c,d,e;if(a=['try',this.maybeBlock(b.block,l)],a=this.maybeBlockSuffix(b.block,a),b.handlers)for(c=0,d=b.handlers.length;c<d;++c)a=h(a,this.generateStatement(b.handlers[c],l)),(b.finalizer||c+1!==d)&&(a=this.maybeBlockSuffix(b.handlers[c].body,a));else{for(e=b.guardedHandlers||[],c=0,d=e.length;c<d;++c)a=h(a,this.generateStatement(e[c],l)),(b.finalizer||c+1!==d)&&(a=this.maybeBlockSuffix(e[c].body,a));if(b.handler)if(J(b.handler))for(c=0,d=b.handler.length;c<d;++c)a=h(a,this.generateStatement(b.handler[c],l)),(b.finalizer||c+1!==d)&&(a=this.maybeBlockSuffix(b.handler[c].body,a));else a=h(a,this.generateStatement(b.handler,l)),b.finalizer&&(a=this.maybeBlockSuffix(b.handler.body,a))}return b.finalizer&&(a=h(a,['finally',this.maybeBlock(b.finalizer,l)])),a},SwitchStatement:function(c,n){var a,d,b,h,k,m=this;if(p(function(){a=['switch'+f+'(',m.generateExpression(c.discriminant,e.Sequence,g),')'+f+'{'+i]}),c.cases)for(k=l,b=0,h=c.cases.length;b<h;++b)b===h-1&&(k|=q),d=u(this.generateStatement(c.cases[b],k)),a.push(d),r(j(d).toString())||a.push(i);return a.push(u('}')),a},SwitchCase:function(c,o){var a,f,b,d,n,m=this;return p(function(){for(c.test?a=[h('case',m.generateExpression(c.test,e.Sequence,g)),':']:a=['default:'],b=0,d=c.consequent.length,d&&c.consequent[0].type===k.BlockStatement&&(f=m.maybeBlock(c.consequent[0],l),a.push(f),b=1),b!==d&&!r(j(a).toString())&&a.push(i),n=l;b<d;++b)b===d-1&&o&q&&(n|=q),f=u(m.generateStatement(c.consequent[b],n)),a.push(f),b+1!==d&&!r(j(f).toString())&&a.push(i)}),a},IfStatement:function(b,j){var a,c,d,i=this;return p(function(){a=['if'+f+'(',i.generateExpression(b.test,e.Sequence,g),')']}),d=j&q,c=l,d&&(c|=q),b.alternate?(a.push(this.maybeBlock(b.consequent,l)),a=this.maybeBlockSuffix(b.consequent,a),b.alternate.type===k.IfStatement?a=h(a,['else ',this.generateStatement(b.alternate,c)]):a=h(a,h('else',this.maybeBlock(b.alternate,c)))):a.push(this.maybeBlock(b.consequent,c)),a},ForStatement:function(b,d){var a,c=this;return p(function(){a=['for'+f+'('],b.init?b.init.type===k.VariableDeclaration?a.push(c.generateStatement(b.init,K)):(a.push(c.generateExpression(b.init,e.Sequence,U)),a.push(';')):a.push(';'),b.test?(a.push(f),a.push(c.generateExpression(b.test,e.Sequence,g)),a.push(';')):a.push(';'),b.update?(a.push(f),a.push(c.generateExpression(b.update,e.Sequence,g)),a.push(')')):a.push(')')}),a.push(this.maybeBlock(b.body,d&q?y:l)),a},ForInStatement:function(a,b){return this.generateIterationForStatement('in',a,b&q?y:l)},ForOfStatement:function(a,b){return this.generateIterationForStatement('of',a,b&q?y:l)},LabeledStatement:function(a,b){return[a.label.name+':',this.maybeBlock(a.body,b&q?y:l)]},Program:function(b,g){var 
c,e,a,d,f;for(d=b.body.length,c=[L&&d>0?'\n':''],f=a5,a=0;a<d;++a)!L&&a===d-1&&(f|=q),x&&(a===0&&(b.body[0].leadingComments||I(b.range[0],b.body[a].range[0],c)),a>0&&!(b.body[a-1].trailingComments||b.body[a].leadingComments)&&I(b.body[a-1].range[1],b.body[a].range[0],c)),e=u(this.generateStatement(b.body[a],f)),c.push(e),a+1<d&&!r(j(e).toString())&&(x?b.body[a+1].leadingComments||c.push(i):c.push(i)),x&&a===d-1&&(b.body[a].trailingComments||I(b.body[a].range[1],b.range[1],c));return c},FunctionDeclaration:function(a,b){return[M(a,!0),'function',O(a)||v(),a.id?z(a.id):'',this.generateFunctionBody(a)]},ReturnStatement:function(a,b){return a.argument?[h('return',this.generateExpression(a.argument,e.Sequence,g)),this.semicolon(b)]:['return'+this.semicolon(b)]},WhileStatement:function(b,d){var a,c=this;return p(function(){a=['while'+f+'(',c.generateExpression(b.test,e.Sequence,g),')']}),a.push(this.maybeBlock(b.body,d&q?y:l)),a},WithStatement:function(b,d){var a,c=this;return p(function(){a=['with'+f+'(',c.generateExpression(b.object,e.Sequence,g),')']}),a.push(this.maybeBlock(b.body,d&q?y:l)),a}},a0(o.prototype,o.Statement),o.Expression={SequenceExpression:function(d,g,h){var b,a,c;for(e.Sequence<g&&(h|=w),b=[],a=0,c=d.expressions.length;a<c;++a)b.push(this.generateExpression(d.expressions[a],e.Assignment,h)),a+1<c&&b.push(','+f);return s(b,e.Sequence,g)},AssignmentExpression:function(a,b,c){return this.generateAssignment(a.left,a.right,a.operator,b,c)},ArrowFunctionExpression:function(a,b,c){return s(this.generateFunctionBody(a),e.ArrowFunction,b)},ConditionalExpression:function(b,c,a){return e.Conditional<c&&(a|=w),s([this.generateExpression(b.test,e.LogicalOR,a),f+'?'+f,this.generateExpression(b.consequent,e.Assignment,a),f+':'+f,this.generateExpression(b.alternate,e.Assignment,a)],e.Conditional,c)},LogicalExpression:function(a,b,c){return this.BinaryExpression(a,b,c)},BinaryExpression:function(a,g,e){var c,d,b,f;return d=af[a.operator],d<g&&(e|=w),b=this.generateExpression(a.left,d,e),f=b.toString(),f.charCodeAt(f.length-1)===47&&m.code.isIdentifierPartES5(a.operator.charCodeAt(0))?c=[b,v(),a.operator]:c=h(b,a.operator),b=this.generateExpression(a.right,d+1,e),a.operator==='/'&&b.toString().charAt(0)==='/'||a.operator.slice(-1)==='<'&&b.toString().slice(0,3)==='!--'?(c.push(v()),c.push(b)):c=h(c,b),a.operator==='in'&&!(e&w)?['(',c,')']:s(c,d,g)},CallExpression:function(c,h,i){var a,b,d;for(a=[this.generateExpression(c.callee,e.Call,P)],a.push('('),b=0,d=c['arguments'].length;b<d;++b)a.push(this.generateExpression(c['arguments'][b],e.Assignment,g)),b+1<d&&a.push(','+f);return a.push(')'),i&E?s(a,e.Call,h):['(',a,')']},NewExpression:function(d,l,j){var a,c,b,i,k;if(c=d['arguments'].length,k=j&G&&!W&&c===0?X:R,a=h('new',this.generateExpression(d.callee,e.New,k)),!(j&G)||W||c>0){for(a.push('('),b=0,i=c;b<i;++b)a.push(this.generateExpression(d['arguments'][b],e.Assignment,g)),b+1<i&&a.push(','+f);a.push(')')}return s(a,e.New,l)},MemberExpression:function(c,f,d){var a,b;return a=[this.generateExpression(c.object,e.Call,d&E?P:R)],c.computed?(a.push('['),a.push(this.generateExpression(c.property,e.Sequence,d&E?g:X)),a.push(']')):(c.object.type===k.Literal&&typeof c.object.value==='number'&&(b=j(a).toString(),b.indexOf('.')<0&&!/[eExX]/.test(b)&&m.code.isDecimalDigit(b.charCodeAt(b.length-1))&&!(b.length>=2&&b.charCodeAt(0)===48)&&a.push('.')),a.push('.'),a.push(z(c.property))),s(a,e.Member,f)},MetaProperty:function(b,c,d){var a;return 
a=[],a.push(b.meta),a.push('.'),a.push(b.property),s(a,e.Member,c)},UnaryExpression:function(d,l,n){var a,b,i,k,c;return b=this.generateExpression(d.argument,e.Unary,g),f===''?a=h(d.operator,b):(a=[d.operator],d.operator.length>2?a=h(a,b):(k=j(a).toString(),c=k.charCodeAt(k.length-1),i=b.toString().charCodeAt(0),(c===43||c===45)&&c===i||m.code.isIdentifierPartES5(c)&&m.code.isIdentifierPartES5(i)?(a.push(v()),a.push(b)):a.push(b))),s(a,e.Unary,l)},YieldExpression:function(b,c,d){var a;return b.delegate?a='yield*':a='yield',b.argument&&(a=h(a,this.generateExpression(b.argument,e.Yield,g))),s(a,e.Yield,c)},AwaitExpression:function(a,c,d){var b=h(a.all?'await*':'await',this.generateExpression(a.argument,e.Await,g));return s(b,e.Await,c)},UpdateExpression:function(a,b,c){return a.prefix?s([a.operator,this.generateExpression(a.argument,e.Unary,g)],e.Unary,b):s([this.generateExpression(a.argument,e.Postfix,g),a.operator],e.Postfix,b)},FunctionExpression:function(a,c,d){var b=[M(a,!0),'function'];return a.id?(b.push(O(a)||v()),b.push(z(a.id))):b.push(O(a)||f),b.push(this.generateFunctionBody(a)),b},ArrayPattern:function(a,b,c){return this.ArrayExpression(a,b,c,!0)},ArrayExpression:function(c,k,l,h){var a,b,d=this;return c.elements.length?(b=h?!1:c.elements.length>1,a=['[',b?i:''],p(function(k){var h,j;for(h=0,j=c.elements.length;h<j;++h)c.elements[h]?(a.push(b?k:''),a.push(d.generateExpression(c.elements[h],e.Assignment,g))):(b&&a.push(k),h+1===j&&a.push(',')),h+1<j&&a.push(','+(b?i:f))}),b&&!r(j(a).toString())&&a.push(i),a.push(b?n:''),a.push(']'),a):'[]'},RestElement:function(a,b,c){return'...'+this.generatePattern(a.argument)},ClassExpression:function(b,d,i){var a,c;return a=['class'],b.id&&(a=h(a,this.generateExpression(b.id,e.Sequence,g))),b.superClass&&(c=h('extends',this.generateExpression(b.superClass,e.Assignment,g)),a=h(a,c)),a.push(f),a.push(this.generateStatement(b.body,y)),a},MethodDefinition:function(a,d,e){var b,c;return a['static']?b=['static'+f]:b=[],a.kind==='get'||a.kind==='set'?c=[h(a.kind,this.generatePropertyKey(a.key,a.computed,a.value)),this.generateFunctionBody(a.value)]:c=[ag(a),this.generatePropertyKey(a.key,a.computed,a.value),this.generateFunctionBody(a.value)],h(b,c)},Property:function(a,b,c){return a.kind==='get'||a.kind==='set'?[a.kind,v(),this.generatePropertyKey(a.key,a.computed,a.value),this.generateFunctionBody(a.value)]:a.shorthand?this.generatePropertyKey(a.key,a.computed,a.value):a.method?[ag(a),this.generatePropertyKey(a.key,a.computed,a.value),this.generateFunctionBody(a.value)]:[this.generatePropertyKey(a.key,a.computed,a.value),':'+f,this.generateExpression(a.value,e.Assignment,g)]},ObjectExpression:function(b,k,l){var d,a,c,h=this;return b.properties.length?(d=b.properties.length>1,p(function(){c=h.generateExpression(b.properties[0],e.Sequence,g)}),d||am(j(c).toString())?(p(function(k){var f,j;if(a=['{',i,k,c],d)for(a.push(','+i),f=1,j=b.properties.length;f<j;++f)a.push(k),a.push(h.generateExpression(b.properties[f],e.Sequence,g)),f+1<j&&a.push(','+i)}),r(j(a).toString())||a.push(i),a.push(n),a.push('}'),a):['{',f,c,f,'}']):'{}'},AssignmentPattern:function(a,b,c){return this.generateAssignment(a.left,a.right,'=',b,c)},ObjectPattern:function(c,o,q){var a,d,l,b,h,m=this;if(!c.properties.length)return'{}';if(b=!1,c.properties.length===1)h=c.properties[0],h.value.type!==k.Identifier&&(b=!0);else for(d=0,l=c.properties.length;d<l;++d)if(h=c.properties[d],!h.shorthand){b=!0;break}return a=['{',b?i:''],p(function(j){var 
d,h;for(d=0,h=c.properties.length;d<h;++d)a.push(b?j:''),a.push(m.generateExpression(c.properties[d],e.Sequence,g)),d+1<h&&a.push(','+(b?i:f))}),b&&!r(j(a).toString())&&a.push(i),a.push(b?n:''),a.push('}'),a},ThisExpression:function(a,b,c){return'this'},Super:function(a,b,c){return'super'},Identifier:function(a,b,c){return z(a)},ImportDefaultSpecifier:function(a,b,c){return z(a.id||a.local)},ImportNamespaceSpecifier:function(c,d,e){var a=['*'],b=c.id||c.local;return b&&a.push(f+'as'+v()+z(b)),a},ImportSpecifier:function(d,e,f){var b=d.imported,c=[b.name],a=d.local;return a&&a.name!==b.name&&c.push(v()+'as'+v()+z(a)),c},ExportSpecifier:function(d,e,f){var b=d.local,c=[b.name],a=d.exported;return a&&a.name!==b.name&&c.push(v()+'as'+v()+z(a)),c},Literal:function(a,c,d){var b;if(a.hasOwnProperty('raw')&&Y&&t.raw)try{if(b=Y(a.raw).body[0].expression,b.type===k.Literal&&b.value===a.value)return a.raw}catch(a){}return a.value===null?'null':typeof a.value==='string'?ak(a.value):typeof a.value==='number'?ao(a.value):typeof a.value==='boolean'?a.value?'true':'false':a.regex?'/'+a.regex.pattern+'/'+a.regex.flags:aq(a.value)},GeneratorExpression:function(a,b,c){return this.ComprehensionExpression(a,b,c)},ComprehensionExpression:function(b,l,m){var a,d,i,c,j=this;return a=b.type===k.GeneratorExpression?['(']:['['],t.moz.comprehensionExpressionStartsWithAssignment&&(c=this.generateExpression(b.body,e.Assignment,g),a.push(c)),b.blocks&&p(function(){for(d=0,i=b.blocks.length;d<i;++d)c=j.generateExpression(b.blocks[d],e.Sequence,g),d>0||t.moz.comprehensionExpressionStartsWithAssignment?a=h(a,c):a.push(c)}),b.filter&&(a=h(a,'if'+f),c=this.generateExpression(b.filter,e.Sequence,g),a=h(a,['(',c,')'])),t.moz.comprehensionExpressionStartsWithAssignment||(c=this.generateExpression(b.body,e.Assignment,g),a=h(a,c)),a.push(b.type===k.GeneratorExpression?')':']'),a},ComprehensionBlock:function(b,c,d){var a;return b.left.type===k.VariableDeclaration?a=[b.left.kind,v(),this.generateStatement(b.left.declarations[0],K)]:a=this.generateExpression(b.left,e.Call,g),a=h(a,b.of?'of':'in'),a=h(a,this.generateExpression(b.right,e.Sequence,g)),['for'+f+'(',a,')']},SpreadElement:function(a,b,c){return['...',this.generateExpression(a.argument,e.Assignment,g)]},TaggedTemplateExpression:function(b,d,f){var a=P;f&E||(a=R);var c=[this.generateExpression(b.tag,e.Call,a),this.generateExpression(b.quasi,e.Primary,a6)];return s(c,e.TaggedTemplate,d)},TemplateElement:function(a,b,c){return a.value.raw},TemplateLiteral:function(c,h,i){var a,b,d;for(a=['`'],b=0,d=c.quasis.length;b<d;++b)a.push(this.generateExpression(c.quasis[b],e.Primary,g)),b+1<d&&(a.push('${'+f),a.push(this.generateExpression(c.expressions[b],e.Sequence,g)),a.push(f+'}'));return a.push('`'),a},ModuleSpecifier:function(a,b,c){return this.Literal(a,b,c)}},a0(o.prototype,o.Expression),o.prototype.generateExpression=function(a,c,e){var b,d;return d=a.type||k.Property,t.verbatim&&a.hasOwnProperty(t.verbatim)?an(a,c):(b=this[d](a,c,e),t.comment&&(b=$(a,b)),j(b,a))},o.prototype.generateStatement=function(b,d){var a,c;return 
a=this[b.type](b,d),t.comment&&(a=$(b,a)),c=j(a).toString(),b.type===k.Program&&!L&&i===''&&c.charAt(c.length-1)==='\n'&&(a=B?j(a).replaceRight(/\s+$/,''):c.replace(/\s+$/,'')),j(a,b)},a9={indent:{style:'',base:0},renumber:!0,hexadecimal:!0,quotes:'auto',escapeless:!0,compact:!0,parentheses:!1,semicolons:!1},a7=a2().format,c.version=a('/package.json',d).version,c.generate=al,c.attachComments=_.attachComments,c.Precedence=T({},e),c.browser=!1,c.FORMAT_MINIFY=a9,c.FORMAT_DEFAULTS=a7}()}),a.define('/package.json',function(a,b,c,d){a.exports={name:'escodegen',description:'ECMAScript code generator',homepage:'http://github.com/estools/escodegen',main:'escodegen.js',bin:{esgenerate:'./bin/esgenerate.js',escodegen:'./bin/escodegen.js'},files:['LICENSE.BSD','LICENSE.source-map','README.md','bin','escodegen.js','package.json'],version:'1.8.1',engines:{node:'>=0.12.0'},maintainers:[{name:'Yusuke Suzuki',email:'utatane.tea@gmail.com',web:'http://github.com/Constellation'}],repository:{type:'git',url:'http://github.com/estools/escodegen.git'},dependencies:{estraverse:'^1.9.1',esutils:'^2.0.2',esprima:'^2.7.1',optionator:'^0.8.1'},optionalDependencies:{'source-map':'~0.2.0'},devDependencies:{acorn:'^2.7.0',bluebird:'^2.3.11','bower-registry-client':'^0.2.1',chai:'^1.10.0','commonjs-everywhere':'^0.9.7',gulp:'^3.8.10','gulp-eslint':'^0.2.0','gulp-mocha':'^2.0.0',semver:'^5.1.0'},license:'BSD-2-Clause',scripts:{test:'gulp travis','unit-test':'gulp test',lint:'gulp lint',release:'node tools/release.js','build-min':'./node_modules/.bin/cjsify -ma path: tools/entry-point.js > escodegen.browser.min.js',build:'./node_modules/.bin/cjsify -a path: tools/entry-point.js > escodegen.browser.js'}}}),a.define('/node_modules/source-map/lib/source-map.js',function(b,c,d,e){c.SourceMapGenerator=a('/node_modules/source-map/lib/source-map/source-map-generator.js',b).SourceMapGenerator,c.SourceMapConsumer=a('/node_modules/source-map/lib/source-map/source-map-consumer.js',b).SourceMapConsumer,c.SourceNode=a('/node_modules/source-map/lib/source-map/source-node.js',b).SourceNode}),a.define('/node_modules/source-map/lib/source-map/source-node.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(d,i,e){function a(a,c,d,e,f){this.children=[],this.sourceContents={},this.line=a==null?null:a,this.column=c==null?null:c,this.source=d==null?null:d,this.name=f==null?null:f,this[b]=!0,e!=null&&this.add(e)}var f=d('/node_modules/source-map/lib/source-map/source-map-generator.js',e).SourceMapGenerator,c=d('/node_modules/source-map/lib/source-map/util.js',e),g=/(\r?\n)/,h=10,b='$$$isSourceNode$$$';a.fromStringWithSourceMap=function b(n,m,j){function l(b,d){if(b===null||b.source===undefined)e.add(d);else{var f=j?c.join(j,b.source):b.source;e.add(new a(b.originalLine,b.originalColumn,f,d,b.name))}}var e=new a,d=n.split(g),k=function(){var a=d.shift(),b=d.shift()||'';return a+b},i=1,h=0,f=null;return m.eachMapping(function(a){if(f!==null)if(i<a.generatedLine){var c='';l(f,k()),i++,h=0}else{var b=d[0],c=b.substr(0,a.generatedColumn-h);d[0]=b.substr(a.generatedColumn-h),h=a.generatedColumn,l(f,c),f=a;return}while(i<a.generatedLine)e.add(k()),i++;if(h<a.generatedColumn){var b=d[0];e.add(b.substr(0,a.generatedColumn)),d[0]=b.substr(a.generatedColumn),h=a.generatedColumn}f=a},this),d.length>0&&(f&&l(f,k()),e.add(d.join(''))),m.sources.forEach(function(a){var b=m.sourceContentFor(a);b!=null&&(j!=null&&(a=c.join(j,a)),e.setSourceContent(a,b))}),e},a.prototype.add=function 
a(c){if(Array.isArray(c))c.forEach(function(a){this.add(a)},this);else if(c[b]||typeof c==='string')c&&this.children.push(c);else throw new TypeError('Expected a SourceNode, string, or an array of SourceNodes and strings. Got '+c);return this},a.prototype.prepend=function a(c){if(Array.isArray(c))for(var d=c.length-1;d>=0;d--)this.prepend(c[d]);else if(c[b]||typeof c==='string')this.children.unshift(c);else throw new TypeError('Expected a SourceNode, string, or an array of SourceNodes and strings. Got '+c);return this},a.prototype.walk=function a(e){var c;for(var d=0,f=this.children.length;d<f;d++)c=this.children[d],c[b]?c.walk(e):c!==''&&e(c,{source:this.source,line:this.line,column:this.column,name:this.name})},a.prototype.join=function a(e){var b,c,d=this.children.length;if(d>0){for(b=[],c=0;c<d-1;c++)b.push(this.children[c]),b.push(e);b.push(this.children[c]),this.children=b}return this},a.prototype.replaceRight=function a(d,e){var c=this.children[this.children.length-1];return c[b]?c.replaceRight(d,e):typeof c==='string'?this.children[this.children.length-1]=c.replace(d,e):this.children.push(''.replace(d,e)),this},a.prototype.setSourceContent=function a(b,d){this.sourceContents[c.toSetString(b)]=d},a.prototype.walkSourceContents=function a(g){for(var d=0,e=this.children.length;d<e;d++)this.children[d][b]&&this.children[d].walkSourceContents(g);var f=Object.keys(this.sourceContents);for(var d=0,e=f.length;d<e;d++)g(c.fromSetString(f[d]),this.sourceContents[f[d]])},a.prototype.toString=function a(){var b='';return this.walk(function(a){b+=a}),b},a.prototype.toStringWithSourceMap=function a(k){var b={code:'',line:1,column:0},c=new f(k),d=!1,e=null,g=null,i=null,j=null;return this.walk(function(k,a){b.code+=k,a.source!==null&&a.line!==null&&a.column!==null?((e!==a.source||g!==a.line||i!==a.column||j!==a.name)&&c.addMapping({source:a.source,original:{line:a.line,column:a.column},generated:{line:b.line,column:b.column},name:a.name}),e=a.source,g=a.line,i=a.column,j=a.name,d=!0):d&&(c.addMapping({generated:{line:b.line,column:b.column}}),e=null,d=!1);for(var f=0,l=k.length;f<l;f++)k.charCodeAt(f)===h?(b.line++,b.column=0,f+1===l?(e=null,d=!1):d&&c.addMapping({source:a.source,original:{line:a.line,column:a.column},generated:{line:b.line,column:b.column},name:a.name})):b.column++}),this.walkSourceContents(function(a,b){c.setSourceContent(a,b)}),{code:b.code,map:c}},i.SourceNode=a})}),a.define('/node_modules/source-map/lib/source-map/util.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(o,a,p){function m(b,a,c){if(a in b)return b[a];else if(arguments.length===3)return c;else throw new Error('"'+a+'" is a required argument.')}function b(b){var a=b.match(f);return a?{scheme:a[1],auth:a[2],host:a[3],port:a[4],path:a[5]}:null}function c(a){var b='';return a.scheme&&(b+=a.scheme+':'),b+='//',a.auth&&(b+=a.auth+'@'),a.host&&(b+=a.host),a.port&&(b+=':'+a.port),a.path&&(b+=a.path),b}function g(i){var a=i,d=b(i);if(d){if(!d.path)return i;a=d.path}var j=a.charAt(0)==='/',e=a.split(/\/+/);for(var h,g=0,f=e.length-1;f>=0;f--)h=e[f],h==='.'?e.splice(f,1):h==='..'?g++:g>0&&(h===''?(e.splice(f+1,g),g=0):(e.splice(f,2),g--));return a=e.join('/'),a===''&&(a=j?'/':'.'),d?(d.path=a,c(d)):a}function h(h,d){h===''&&(h='.'),d===''&&(d='.');var f=b(d),a=b(h);if(a&&(h=a.path||'/'),f&&!f.scheme)return a&&(f.scheme=a.scheme),c(f);if(f||d.match(e))return d;if(a&&!a.host&&!a.path)return a.host=d,c(a);var 
i=d.charAt(0)==='/'?d:g(h.replace(/\/+$/,'')+'/'+d);return a?(a.path=i,c(a)):i}function j(a,c){a===''&&(a='.'),a=a.replace(/\/$/,'');var d=b(a);return c.charAt(0)=='/'&&d&&d.path=='/'?c.slice(1):c.indexOf(a+'/')===0?c.substr(a.length+1):c}function k(a){return'$'+a}function l(a){return a.substr(1)}function d(c,d){var a=c||'',b=d||'';return(a>b)-(a<b)}function n(b,c,e){var a;return a=d(b.source,c.source),a?a:(a=b.originalLine-c.originalLine,a?a:(a=b.originalColumn-c.originalColumn,a||e?a:(a=d(b.name,c.name),a?a:(a=b.generatedLine-c.generatedLine,a?a:b.generatedColumn-c.generatedColumn))))}function i(b,c,e){var a;return a=b.generatedLine-c.generatedLine,a?a:(a=b.generatedColumn-c.generatedColumn,a||e?a:(a=d(b.source,c.source),a?a:(a=b.originalLine-c.originalLine,a?a:(a=b.originalColumn-c.originalColumn,a?a:d(b.name,c.name)))))}a.getArg=m;var f=/^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.]*)(?::(\d+))?(\S*)$/,e=/^data:.+\,.+$/;a.urlParse=b,a.urlGenerate=c,a.normalize=g,a.join=h,a.relative=j,a.toSetString=k,a.fromSetString=l,a.compareByOriginalPositions=n,a.compareByGeneratedPositions=i})}),a.define('/node_modules/amdefine/amdefine.js',function(b,f,g,d){'use strict';function e(e,i){'use strict';function q(b){var a,c;for(a=0;b[a];a+=1)if(c=b[a],c==='.')b.splice(a,1),a-=1;else if(c==='..')if(a===1&&(b[2]==='..'||b[0]==='..'))break;else a>0&&(b.splice(a-1,2),a-=2)}function j(b,c){var a;return b&&b.charAt(0)==='.'&&c&&(a=c.split('/'),a=a.slice(0,a.length-1),a=a.concat(b.split('/')),q(a),b=a.join('/')),b}function p(a){return function(b){return j(b,a)}}function o(c){function a(a){b[c]=a}return a.fromText=function(a,b){throw new Error('amdefine does not implement load.fromText')},a}function m(c,h,l){var m,f,a,j;if(c)f=b[c]={},a={id:c,uri:d,exports:f},m=g(i,f,a,c);else{if(k)throw new Error('amdefine with no module ID cannot be called more than once per file.');k=!0,f=e.exports,a=e,m=g(i,f,a,e.id)}h&&(h=h.map(function(a){return m(a)})),typeof l==='function'?j=l.apply(a.exports,h):j=l,j!==undefined&&(a.exports=j,c&&(b[c]=a.exports))}function l(b,a,c){Array.isArray(b)?(c=a,a=b,b=undefined):typeof b!=='string'&&(c=b,b=a=undefined),a&&!Array.isArray(a)&&(c=a,a=undefined),a||(a=['require','exports','module']),b?f[b]=[b,a,c]:m(b,a,c)}var f={},b={},k=!1,n=a('path',e),g,h;return g=function(b,d,a,e){function f(f,g){if(typeof f==='string')return h(b,d,a,f,e);f=f.map(function(c){return h(b,d,a,c,e)}),g&&c.nextTick(function(){g.apply(null,f)})}return f.toUrl=function(b){return b.indexOf('.')===0?j(b,n.dirname(a.filename)):b},f},i=i||function a(){return e.require.apply(e,arguments)},h=function(d,e,i,a,c){var k=a.indexOf('!'),n=a,q,l;if(k===-1)if(a=j(a,c),a==='require')return g(d,e,i,c);else if(a==='exports')return e;else if(a==='module')return i;else if(b.hasOwnProperty(a))return b[a];else if(f[a])return m.apply(null,f[a]),b[a];else if(d)return d(n);else throw new Error('No module with ID: '+a);else return q=a.substring(0,k),a=a.substring(k+1,a.length),l=h(d,e,i,q,c),l.normalize?a=l.normalize(a,p(c)):a=j(a,c),b[a]?b[a]:(l.load(a,g(d,e,i,c),o(a),{}),b[a])},l.require=function(a){return b[a]?b[a]:f[a]?(m.apply(null,f[a]),b[a]):void 0},l.amd={},l}b.exports=e}),a.define('/node_modules/source-map/lib/source-map/source-map-generator.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(e,h,f){function b(b){b||(b={}),this._file=a.getArg(b,'file',null),this._sourceRoot=a.getArg(b,'sourceRoot',null),this._skipValidation=a.getArg(b,'skipValidation',!1),this._sources=new 
d,this._names=new d,this._mappings=new g,this._sourcesContents=null}var c=e('/node_modules/source-map/lib/source-map/base64-vlq.js',f),a=e('/node_modules/source-map/lib/source-map/util.js',f),d=e('/node_modules/source-map/lib/source-map/array-set.js',f).ArraySet,g=e('/node_modules/source-map/lib/source-map/mapping-list.js',f).MappingList;b.prototype._version=3,b.fromSourceMap=function c(d){var e=d.sourceRoot,f=new b({file:d.file,sourceRoot:e});return d.eachMapping(function(b){var c={generated:{line:b.generatedLine,column:b.generatedColumn}};b.source!=null&&(c.source=b.source,e!=null&&(c.source=a.relative(e,c.source)),c.original={line:b.originalLine,column:b.originalColumn},b.name!=null&&(c.name=b.name)),f.addMapping(c)}),d.sources.forEach(function(b){var a=d.sourceContentFor(b);a!=null&&f.setSourceContent(b,a)}),f},b.prototype.addMapping=function b(f){var g=a.getArg(f,'generated'),c=a.getArg(f,'original',null),d=a.getArg(f,'source',null),e=a.getArg(f,'name',null);this._skipValidation||this._validateMapping(g,c,d,e),d!=null&&!this._sources.has(d)&&this._sources.add(d),e!=null&&!this._names.has(e)&&this._names.add(e),this._mappings.add({generatedLine:g.line,generatedColumn:g.column,originalLine:c!=null&&c.line,originalColumn:c!=null&&c.column,source:d,name:e})},b.prototype.setSourceContent=function b(e,d){var c=e;this._sourceRoot!=null&&(c=a.relative(this._sourceRoot,c)),d!=null?(this._sourcesContents||(this._sourcesContents={}),this._sourcesContents[a.toSetString(c)]=d):this._sourcesContents&&(delete this._sourcesContents[a.toSetString(c)],Object.keys(this._sourcesContents).length===0&&(this._sourcesContents=null))},b.prototype.applySourceMap=function b(e,j,g){var f=j;if(j==null){if(e.file==null)throw new Error('SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, or the source map\'s "file" property. 
Both were omitted.');f=e.file}var c=this._sourceRoot;c!=null&&(f=a.relative(c,f));var h=new d,i=new d;this._mappings.unsortedForEach(function(b){if(b.source===f&&b.originalLine!=null){var d=e.originalPositionFor({line:b.originalLine,column:b.originalColumn});d.source!=null&&(b.source=d.source,g!=null&&(b.source=a.join(g,b.source)),c!=null&&(b.source=a.relative(c,b.source)),b.originalLine=d.line,b.originalColumn=d.column,d.name!=null&&(b.name=d.name))}var j=b.source;j!=null&&!h.has(j)&&h.add(j);var k=b.name;k!=null&&!i.has(k)&&i.add(k)},this),this._sources=h,this._names=i,e.sources.forEach(function(b){var d=e.sourceContentFor(b);d!=null&&(g!=null&&(b=a.join(g,b)),c!=null&&(b=a.relative(c,b)),this.setSourceContent(b,d))},this)},b.prototype._validateMapping=function a(b,c,d,e){if(b&&'line'in b&&'column'in b&&b.line>0&&b.column>=0&&!c&&!d&&!e)return;else if(b&&'line'in b&&'column'in b&&c&&'line'in c&&'column'in c&&b.line>0&&b.column>=0&&c.line>0&&c.column>=0&&d)return;else throw new Error('Invalid mapping: '+JSON.stringify({generated:b,source:d,original:c,name:e}))},b.prototype._serializeMappings=function b(){var h=0,g=1,k=0,l=0,m=0,j=0,e='',d,i=this._mappings.toArray();for(var f=0,n=i.length;f<n;f++){if(d=i[f],d.generatedLine!==g){h=0;while(d.generatedLine!==g)e+=';',g++}else if(f>0){if(!a.compareByGeneratedPositions(d,i[f-1]))continue;e+=','}e+=c.encode(d.generatedColumn-h),h=d.generatedColumn,d.source!=null&&(e+=c.encode(this._sources.indexOf(d.source)-j),j=this._sources.indexOf(d.source),e+=c.encode(d.originalLine-1-l),l=d.originalLine-1,e+=c.encode(d.originalColumn-k),k=d.originalColumn,d.name!=null&&(e+=c.encode(this._names.indexOf(d.name)-m),m=this._names.indexOf(d.name)))}return e},b.prototype._generateSourcesContent=function b(d,c){return d.map(function(b){if(!this._sourcesContents)return null;c!=null&&(b=a.relative(c,b));var d=a.toSetString(b);return Object.prototype.hasOwnProperty.call(this._sourcesContents,d)?this._sourcesContents[d]:null},this)},b.prototype.toJSON=function a(){var b={version:this._version,sources:this._sources.toArray(),names:this._names.toArray(),mappings:this._serializeMappings()};return this._file!=null&&(b.file=this._file),this._sourceRoot!=null&&(b.sourceRoot=this._sourceRoot),this._sourcesContents&&(b.sourcesContent=this._generateSourcesContent(b.sources,b.sourceRoot)),b},b.prototype.toString=function a(){return JSON.stringify(this)},h.SourceMapGenerator=b})}),a.define('/node_modules/source-map/lib/source-map/mapping-list.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(c,d,e){function f(a,c){var d=a.generatedLine,e=c.generatedLine,f=a.generatedColumn,g=c.generatedColumn;return e>d||e==d&&g>=f||b.compareByGeneratedPositions(a,c)<=0}function a(){this._array=[],this._sorted=!0,this._last={generatedLine:-1,generatedColumn:0}}var b=c('/node_modules/source-map/lib/source-map/util.js',e);a.prototype.unsortedForEach=function a(b,c){this._array.forEach(b,c)},a.prototype.add=function a(b){f(this._last,b)?(this._last=b,this._array.push(b)):(this._sorted=!1,this._array.push(b))},a.prototype.toArray=function a(){return this._sorted||(this._array.sort(b.compareByGeneratedPositions),this._sorted=!0),this._array},d.MappingList=a})}),a.define('/node_modules/source-map/lib/source-map/array-set.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(c,d,e){function a(){this._array=[],this._set={}}var 
b=c('/node_modules/source-map/lib/source-map/util.js',e);a.fromArray=function b(e,g){var d=new a;for(var c=0,f=e.length;c<f;c++)d.add(e[c],g);return d},a.prototype.add=function a(c,f){var d=this.has(c),e=this._array.length;(!d||f)&&this._array.push(c),d||(this._set[b.toSetString(c)]=e)},a.prototype.has=function a(c){return Object.prototype.hasOwnProperty.call(this._set,b.toSetString(c))},a.prototype.indexOf=function a(c){if(this.has(c))return this._set[b.toSetString(c)];throw new Error('"'+c+'" is not in the set.')},a.prototype.at=function a(b){if(b>=0&&b<this._array.length)return this._array[b];throw new Error('No element indexed by '+b)},a.prototype.toArray=function a(){return this._array.slice()},d.ArraySet=a})}),a.define('/node_modules/source-map/lib/source-map/base64-vlq.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(j,f,h){function i(a){return a<0?(-a<<1)+1:(a<<1)+0}function g(b){var c=(b&1)===1,a=b>>1;return c?-a:a}var c=j('/node_modules/source-map/lib/source-map/base64.js',h),a=5,d=1<<a,e=d-1,b=d;f.encode=function d(j){var g='',h,f=i(j);do h=f&e,f>>>=a,f>0&&(h|=b),g+=c.encode(h);while(f>0);return g},f.decode=function d(i,l){var f=0,m=i.length,j=0,k=0,n,h;do{if(f>=m)throw new Error('Expected more digits in base 64 VLQ value.');h=c.decode(i.charAt(f++)),n=!!(h&b),h&=e,j+=h<<k,k+=a}while(n);l.value=g(j),l.rest=i.slice(f)}})}),a.define('/node_modules/source-map/lib/source-map/base64.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(d,c,e){var a={},b={};'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'.split('').forEach(function(c,d){a[c]=d,b[d]=c}),c.encode=function a(c){if(c in b)return b[c];throw new TypeError('Must be between 0 and 63: '+c)},c.decode=function b(c){if(c in a)return a[c];throw new TypeError('Not a valid base 64 digit: '+c)}})}),a.define('/node_modules/source-map/lib/source-map/source-map-consumer.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(c,e,d){function a(b){var a=b;if(typeof b==='string'&&(a=JSON.parse(b.replace(/^\)\]\}'/,''))),a.sections!=null){var e=c('/node_modules/source-map/lib/source-map/indexed-source-map-consumer.js',d);return new e.IndexedSourceMapConsumer(a)}else{var f=c('/node_modules/source-map/lib/source-map/basic-source-map-consumer.js',d);return new f.BasicSourceMapConsumer(a)}}var b=c('/node_modules/source-map/lib/source-map/util.js',d);a.fromSourceMap=function(b){var a=c('/node_modules/source-map/lib/source-map/basic-source-map-consumer.js',d);return a.BasicSourceMapConsumer.fromSourceMap(b)},a.prototype._version=3,a.prototype.__generatedMappings=null,Object.defineProperty(a.prototype,'_generatedMappings',{get:function(){return this.__generatedMappings||(this.__generatedMappings=[],this.__originalMappings=[],this._parseMappings(this._mappings,this.sourceRoot)),this.__generatedMappings}}),a.prototype.__originalMappings=null,Object.defineProperty(a.prototype,'_originalMappings',{get:function(){return this.__originalMappings||(this.__generatedMappings=[],this.__originalMappings=[],this._parseMappings(this._mappings,this.sourceRoot)),this.__originalMappings}}),a.prototype._nextCharIsMappingSeparator=function a(c){var b=c.charAt(0);return b===';'||b===','},a.prototype._parseMappings=function a(b,c){throw new Error('Subclasses must implement _parseMappings')},a.GENERATED_ORDER=1,a.ORIGINAL_ORDER=2,a.prototype.eachMapping=function c(h,i,j){var 
f=i||null,g=j||a.GENERATED_ORDER,d;switch(g){case a.GENERATED_ORDER:d=this._generatedMappings;break;case a.ORIGINAL_ORDER:d=this._originalMappings;break;default:throw new Error('Unknown order of iteration.')}var e=this.sourceRoot;d.map(function(a){var c=a.source;return c!=null&&e!=null&&(c=b.join(e,c)),{source:c,generatedLine:a.generatedLine,generatedColumn:a.generatedColumn,originalLine:a.originalLine,originalColumn:a.originalColumn,name:a.name}}).forEach(h,f)},a.prototype.allGeneratedPositionsFor=function a(g){var d={source:b.getArg(g,'source'),originalLine:b.getArg(g,'line'),originalColumn:Infinity};this.sourceRoot!=null&&(d.source=b.relative(this.sourceRoot,d.source));var f=[],e=this._findMapping(d,this._originalMappings,'originalLine','originalColumn',b.compareByOriginalPositions);if(e>=0){var c=this._originalMappings[e];while(c&&c.originalLine===d.originalLine)f.push({line:b.getArg(c,'generatedLine',null),column:b.getArg(c,'generatedColumn',null),lastColumn:b.getArg(c,'lastGeneratedColumn',null)}),c=this._originalMappings[--e]}return f.reverse()},e.SourceMapConsumer=a})}),a.define('/node_modules/source-map/lib/source-map/basic-source-map-consumer.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(d,i,e){function b(d){var b=d;typeof d==='string'&&(b=JSON.parse(d.replace(/^\)\]\}'/,'')));var e=a.getArg(b,'version'),c=a.getArg(b,'sources'),g=a.getArg(b,'names',[]),h=a.getArg(b,'sourceRoot',null),i=a.getArg(b,'sourcesContent',null),j=a.getArg(b,'mappings'),k=a.getArg(b,'file',null);if(e!=this._version)throw new Error('Unsupported version: '+e);c=c.map(a.normalize),this._names=f.fromArray(g,!0),this._sources=f.fromArray(c,!0),this.sourceRoot=h,this.sourcesContent=i,this._mappings=j,this.file=k}var a=d('/node_modules/source-map/lib/source-map/util.js',e),h=d('/node_modules/source-map/lib/source-map/binary-search.js',e),f=d('/node_modules/source-map/lib/source-map/array-set.js',e).ArraySet,c=d('/node_modules/source-map/lib/source-map/base64-vlq.js',e),g=d('/node_modules/source-map/lib/source-map/source-map-consumer.js',e).SourceMapConsumer;b.prototype=Object.create(g.prototype),b.prototype.consumer=g,b.fromSourceMap=function c(e){var d=Object.create(b.prototype);return d._names=f.fromArray(e._names.toArray(),!0),d._sources=f.fromArray(e._sources.toArray(),!0),d.sourceRoot=e._sourceRoot,d.sourcesContent=e._generateSourcesContent(d._sources.toArray(),d.sourceRoot),d.file=e._file,d.__generatedMappings=e._mappings.toArray().slice(),d.__originalMappings=e._mappings.toArray().slice().sort(a.compareByOriginalPositions),d},b.prototype._version=3,Object.defineProperty(b.prototype,'sources',{get:function(){return this._sources.toArray().map(function(b){return this.sourceRoot!=null?a.join(this.sourceRoot,b):b},this)}}),b.prototype._parseMappings=function b(m,n){var j=1,g=0,i=0,h=0,k=0,l=0,d=m,e={},f;while(d.length>0)if(d.charAt(0)===';')j++,d=d.slice(1),g=0;else if(d.charAt(0)===',')d=d.slice(1);else{if(f={},f.generatedLine=j,c.decode(d,e),f.generatedColumn=g+e.value,g=f.generatedColumn,d=e.rest,d.length>0&&!this._nextCharIsMappingSeparator(d)){if(c.decode(d,e),f.source=this._sources.at(k+e.value),k+=e.value,d=e.rest,d.length===0||this._nextCharIsMappingSeparator(d))throw new Error('Found a source, but no line and column');if(c.decode(d,e),f.originalLine=i+e.value,i=f.originalLine,f.originalLine+=1,d=e.rest,d.length===0||this._nextCharIsMappingSeparator(d))throw new Error('Found a source and line, but no 
column');c.decode(d,e),f.originalColumn=h+e.value,h=f.originalColumn,d=e.rest,d.length>0&&!this._nextCharIsMappingSeparator(d)&&(c.decode(d,e),f.name=this._names.at(l+e.value),l+=e.value,d=e.rest)}this.__generatedMappings.push(f),typeof f.originalLine==='number'&&this.__originalMappings.push(f)}this.__generatedMappings.sort(a.compareByGeneratedPositions),this.__originalMappings.sort(a.compareByOriginalPositions)},b.prototype._findMapping=function a(b,e,c,d,f){if(b[c]<=0)throw new TypeError('Line must be greater than or equal to 1, got '+b[c]);if(b[d]<0)throw new TypeError('Column must be greater than or equal to 0, got '+b[d]);return h.search(b,e,f)},b.prototype.computeColumnSpans=function a(){for(var b=0;b<this._generatedMappings.length;++b){var c=this._generatedMappings[b];if(b+1<this._generatedMappings.length){var d=this._generatedMappings[b+1];if(c.generatedLine===d.generatedLine){c.lastGeneratedColumn=d.generatedColumn-1;continue}}c.lastGeneratedColumn=Infinity}},b.prototype.originalPositionFor=function b(g){var e={generatedLine:a.getArg(g,'line'),generatedColumn:a.getArg(g,'column')},f=this._findMapping(e,this._generatedMappings,'generatedLine','generatedColumn',a.compareByGeneratedPositions);if(f>=0){var c=this._generatedMappings[f];if(c.generatedLine===e.generatedLine){var d=a.getArg(c,'source',null);return d!=null&&this.sourceRoot!=null&&(d=a.join(this.sourceRoot,d)),{source:d,line:a.getArg(c,'originalLine',null),column:a.getArg(c,'originalColumn',null),name:a.getArg(c,'name',null)}}}return{source:null,line:null,column:null,name:null}},b.prototype.sourceContentFor=function b(c,f){if(!this.sourcesContent)return null;if(this.sourceRoot!=null&&(c=a.relative(this.sourceRoot,c)),this._sources.has(c))return this.sourcesContent[this._sources.indexOf(c)];var d;if(this.sourceRoot!=null&&(d=a.urlParse(this.sourceRoot))){var e=c.replace(/^file:\/\//,'');if(d.scheme=='file'&&this._sources.has(e))return this.sourcesContent[this._sources.indexOf(e)];if((!d.path||d.path=='/')&&this._sources.has('/'+c))return this.sourcesContent[this._sources.indexOf('/'+c)]}if(f)return null;else throw new Error('"'+c+'" is not in the SourceMap.')},b.prototype.generatedPositionFor=function b(e){var c={source:a.getArg(e,'source'),originalLine:a.getArg(e,'line'),originalColumn:a.getArg(e,'column')};this.sourceRoot!=null&&(c.source=a.relative(this.sourceRoot,c.source));var f=this._findMapping(c,this._originalMappings,'originalLine','originalColumn',a.compareByOriginalPositions);if(f>=0){var d=this._originalMappings[f];return{line:a.getArg(d,'generatedLine',null),column:a.getArg(d,'generatedColumn',null),lastColumn:a.getArg(d,'lastGeneratedColumn',null)}}return{line:null,column:null,lastColumn:null}},i.BasicSourceMapConsumer=b})}),a.define('/node_modules/source-map/lib/source-map/binary-search.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(c,b,d){function a(c,d,e,f,g){var b=Math.floor((d-c)/2)+c,h=g(e,f[b],!0);return h===0?b:h>0?d-b>1?a(b,d,e,f,g):b:b-c>1?a(c,b,e,f,g):c<0?-1:c}b.search=function b(d,c,e){return c.length===0?-1:a(-1,c.length,d,c,e)}})}),a.define('/node_modules/source-map/lib/source-map/indexed-source-map-consumer.js',function(c,d,e,f){if(typeof b!=='function')var b=a('/node_modules/amdefine/amdefine.js',c)(c,a);b(function(c,g,d){function b(d){var c=d;typeof d==='string'&&(c=JSON.parse(d.replace(/^\)\]\}'/,'')));var f=a.getArg(c,'version'),g=a.getArg(c,'sections');if(f!=this._version)throw new Error('Unsupported version: '+f);var 
b={line:-1,column:0};this._sections=g.map(function(f){if(f.url)throw new Error('Support for url field in sections not implemented.');var c=a.getArg(f,'offset'),d=a.getArg(c,'line'),g=a.getArg(c,'column');if(d<b.line||d===b.line&&g<b.column)throw new Error('Section offsets must be ordered and non-overlapping.');return b=c,{generatedOffset:{generatedLine:d+1,generatedColumn:g+1},consumer:new e(a.getArg(f,'map'))}})}var a=c('/node_modules/source-map/lib/source-map/util.js',d),f=c('/node_modules/source-map/lib/source-map/binary-search.js',d),e=c('/node_modules/source-map/lib/source-map/source-map-consumer.js',d).SourceMapConsumer,h=c('/node_modules/source-map/lib/source-map/basic-source-map-consumer.js',d).BasicSourceMapConsumer;b.prototype=Object.create(e.prototype),b.prototype.constructor=e,b.prototype._version=3,Object.defineProperty(b.prototype,'sources',{get:function(){var c=[];for(var a=0;a<this._sections.length;a++)for(var b=0;b<this._sections[a].consumer.sources.length;b++)c.push(this._sections[a].consumer.sources[b]);return c}}),b.prototype.originalPositionFor=function b(e){var d={generatedLine:a.getArg(e,'line'),generatedColumn:a.getArg(e,'column')},g=f.search(d,this._sections,function(b,c){var a=b.generatedLine-c.generatedOffset.generatedLine;return a?a:b.generatedColumn-c.generatedOffset.generatedColumn}),c=this._sections[g];return c?c.consumer.originalPositionFor({line:d.generatedLine-(c.generatedOffset.generatedLine-1),column:d.generatedColumn-(c.generatedOffset.generatedLine===d.generatedLine?c.generatedOffset.generatedColumn-1:0)}):{source:null,line:null,column:null,name:null}},b.prototype.sourceContentFor=function a(d,f){for(var b=0;b<this._sections.length;b++){var e=this._sections[b],c=e.consumer.sourceContentFor(d,!0);if(c)return c}if(f)return null;else throw new Error('"'+d+'" is not in the SourceMap.')},b.prototype.generatedPositionFor=function b(f){for(var e=0;e<this._sections.length;e++){var c=this._sections[e];if(c.consumer.sources.indexOf(a.getArg(f,'source'))===-1)continue;var d=c.consumer.generatedPositionFor(f);if(d){var g={line:d.line+(c.generatedOffset.generatedLine-1),column:d.column+(c.generatedOffset.generatedLine===d.line?c.generatedOffset.generatedColumn-1:0)};return g}}return{line:null,column:null}},b.prototype._parseMappings=function b(k,l){this.__generatedMappings=[],this.__originalMappings=[];for(var e=0;e<this._sections.length;e++){var d=this._sections[e],h=d.consumer._generatedMappings;for(var i=0;i<h.length;i++){var c=h[e],f=c.source,j=d.consumer.sourceRoot;f!=null&&j!=null&&(f=a.join(j,f));var g={source:f,generatedLine:c.generatedLine+(d.generatedOffset.generatedLine-1),generatedColumn:c.column+(d.generatedOffset.generatedLine===c.generatedLine)?d.generatedOffset.generatedColumn-1:0,originalLine:c.originalLine,originalColumn:c.originalColumn,name:c.name};this.__generatedMappings.push(g),typeof g.originalLine==='number'&&this.__originalMappings.push(g)}}this.__generatedMappings.sort(a.compareByGeneratedPositions),this.__originalMappings.sort(a.compareByOriginalPositions)},g.IndexedSourceMapConsumer=b})}),a.define('/node_modules/esutils/lib/utils.js',function(b,c,d,e){!function(){'use strict';c.ast=a('/node_modules/esutils/lib/ast.js',b),c.code=a('/node_modules/esutils/lib/code.js',b),c.keyword=a('/node_modules/esutils/lib/keyword.js',b)}()}),a.define('/node_modules/esutils/lib/keyword.js',function(b,c,d,e){!function(c){'use strict';function 
m(a){switch(a){case'implements':case'interface':case'package':case'private':case'protected':case'public':case'static':case'let':return!0;default:return!1}}function f(a,b){return!b&&a==='yield'?!1:d(a,b)}function d(a,b){if(b&&m(a))return!0;switch(a.length){case 2:return a==='if'||a==='in'||a==='do';case 3:return a==='var'||a==='for'||a==='new'||a==='try';case 4:return a==='this'||a==='else'||a==='case'||a==='void'||a==='with'||a==='enum';case 5:return a==='while'||a==='break'||a==='catch'||a==='throw'||a==='const'||a==='yield'||a==='class'||a==='super';case 6:return a==='return'||a==='typeof'||a==='delete'||a==='switch'||a==='export'||a==='import';case 7:return a==='default'||a==='finally'||a==='extends';case 8:return a==='function'||a==='continue'||a==='debugger';case 10:return a==='instanceof';default:return!1}}function g(a,b){return a==='null'||a==='true'||a==='false'||f(a,b)}function e(a,b){return a==='null'||a==='true'||a==='false'||d(a,b)}function j(a){return a==='eval'||a==='arguments'}function h(a){var b,e,d;if(a.length===0)return!1;if(d=a.charCodeAt(0),!c.isIdentifierStartES5(d))return!1;for(b=1,e=a.length;b<e;++b)if(d=a.charCodeAt(b),!c.isIdentifierPartES5(d))return!1;return!0}function l(a,b){return(a-55296)*1024+(b-56320)+65536}function i(d){var a,f,b,e,g;if(d.length===0)return!1;for(g=c.isIdentifierStartES6,a=0,f=d.length;a<f;++a){if(b=d.charCodeAt(a),55296<=b&&b<=56319){if(++a,a>=f)return!1;if(e=d.charCodeAt(a),!(56320<=e&&e<=57343))return!1;b=l(b,e)}if(!g(b))return!1;g=c.isIdentifierPartES6}return!0}function n(a,b){return h(a)&&!g(a,b)}function k(a,b){return i(a)&&!e(a,b)}c=a('/node_modules/esutils/lib/code.js',b),b.exports={isKeywordES5:f,isKeywordES6:d,isReservedWordES5:g,isReservedWordES6:e,isRestrictedWord:j,isIdentifierNameES5:h,isIdentifierNameES6:i,isIdentifierES5:n,isIdentifierES6:k}}()}),a.define('/node_modules/esutils/lib/code.js',function(a,b,c,d){!function(g,f,h,c,d,b){'use strict';function n(a){return 48<=a&&a<=57}function i(a){return 48<=a&&a<=57||97<=a&&a<=102||65<=a&&a<=70}function k(a){return a>=48&&a<=55}function l(a){return a===32||a===9||a===11||a===12||a===160||a>=5760&&h.indexOf(a)>=0}function m(a){return a===10||a===13||a===8232||a===8233}function e(a){if(a<=65535)return String.fromCharCode(a);var b=String.fromCharCode(Math.floor((a-65536)/1024)+55296),c=String.fromCharCode((a-65536)%1024+56320);return b+c}function o(a){return a<128?c[a]:f.NonAsciiIdentifierStart.test(e(a))}function p(a){return a<128?d[a]:f.NonAsciiIdentifierPart.test(e(a))}function q(a){return a<128?c[a]:g.NonAsciiIdentifierStart.test(e(a))}function j(a){return 
a<128?d[a]:g.NonAsciiIdentifierPart.test(e(a))}for(f={NonAsciiIdentifierStart:/[\xAA\xB5\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0-\u08B2\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191E\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303C\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA69D\uA6A0-\uA6EF\uA717-\uA71F\uA722-\uA788\uA78B-\uA78E\uA790-\uA7AD\uA7B0\uA7B1\uA7F7-\uA801\uA803-\uA805\uA807-\uA80A\
uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uA9E0-\uA9E4\uA9E6-\uA9EF\uA9FA-\uA9FE\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA7E-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB5F\uAB64\uAB65\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]/,NonAsciiIdentifierPart:/[\xAA\xB5\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0300-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u0483-\u0487\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05BD\u05BF\u05C1\u05C2\u05C4\u05C5\u05C7\u05D0-\u05EA\u05F0-\u05F2\u0610-\u061A\u0620-\u0669\u066E-\u06D3\u06D5-\u06DC\u06DF-\u06E8\u06EA-\u06FC\u06FF\u0710-\u074A\u074D-\u07B1\u07C0-\u07F5\u07FA\u0800-\u082D\u0840-\u085B\u08A0-\u08B2\u08E4-\u0963\u0966-\u096F\u0971-\u0983\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BC-\u09C4\u09C7\u09C8\u09CB-\u09CE\u09D7\u09DC\u09DD\u09DF-\u09E3\u09E6-\u09F1\u0A01-\u0A03\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A3C\u0A3E-\u0A42\u0A47\u0A48\u0A4B-\u0A4D\u0A51\u0A59-\u0A5C\u0A5E\u0A66-\u0A75\u0A81-\u0A83\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABC-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0AD0\u0AE0-\u0AE3\u0AE6-\u0AEF\u0B01-\u0B03\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3C-\u0B44\u0B47\u0B48\u0B4B-\u0B4D\u0B56\u0B57\u0B5C\u0B5D\u0B5F-\u0B63\u0B66-\u0B6F\u0B71\u0B82\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD0\u0BD7\u0BE6-\u0BEF\u0C00-\u0C03\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55\u0C56\u0C58\u0C59\u0C60-\u0C63\u0C66-\u0C6F\u0C81-\u0C83\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBC-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5\u0CD6\u0CDE\u0CE0-\u0CE3\u0CE6-\u0CEF\u0CF1\u0CF2\u0D01-\u0D03\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D-\u0D44\u0D46-\u0D48\u0D4A-\u0D4E\u0D57\u0D60-\u0D63\u0D66-\u0D6F\u0D7A-\u0D7F\u0D82\u0D83\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0DCA\u0DCF-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DE6-\u0DEF\u0DF2\u0DF3\u0E01-\u0E3A\u0E40-\u0E4E\u0E50-\u0E59\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB9\u0EBB-\u0EBD\u0EC0-\u0EC4\u0EC6\u0EC8-\u0ECD\u0ED0-\u0ED9\u0EDC-\u0EDF\u0F00\u0F18\u0F19\u0F20-\u0F29\u0F35\u0F37\u0F39\u0F3E-\u0F47\u0F49-\u0F6C\u0F71-\u0F84\u0F86-\u0F97\u0F99-\u0FBC\u0FC6\u1000-\u1049\u1050-\u109D\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u135D-\u135F\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u1
76C\u176E-\u1770\u1772\u1773\u1780-\u17D3\u17D7\u17DC\u17DD\u17E0-\u17E9\u180B-\u180D\u1810-\u1819\u1820-\u1877\u1880-\u18AA\u18B0-\u18F5\u1900-\u191E\u1920-\u192B\u1930-\u193B\u1946-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u19D0-\u19D9\u1A00-\u1A1B\u1A20-\u1A5E\u1A60-\u1A7C\u1A7F-\u1A89\u1A90-\u1A99\u1AA7\u1AB0-\u1ABD\u1B00-\u1B4B\u1B50-\u1B59\u1B6B-\u1B73\u1B80-\u1BF3\u1C00-\u1C37\u1C40-\u1C49\u1C4D-\u1C7D\u1CD0-\u1CD2\u1CD4-\u1CF6\u1CF8\u1CF9\u1D00-\u1DF5\u1DFC-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u200C\u200D\u203F\u2040\u2054\u2071\u207F\u2090-\u209C\u20D0-\u20DC\u20E1\u20E5-\u20F0\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D7F-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2DE0-\u2DFF\u2E2F\u3005-\u3007\u3021-\u302F\u3031-\u3035\u3038-\u303C\u3041-\u3096\u3099\u309A\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA62B\uA640-\uA66F\uA674-\uA67D\uA67F-\uA69D\uA69F-\uA6F1\uA717-\uA71F\uA722-\uA788\uA78B-\uA78E\uA790-\uA7AD\uA7B0\uA7B1\uA7F7-\uA827\uA840-\uA873\uA880-\uA8C4\uA8D0-\uA8D9\uA8E0-\uA8F7\uA8FB\uA900-\uA92D\uA930-\uA953\uA960-\uA97C\uA980-\uA9C0\uA9CF-\uA9D9\uA9E0-\uA9FE\uAA00-\uAA36\uAA40-\uAA4D\uAA50-\uAA59\uAA60-\uAA76\uAA7A-\uAAC2\uAADB-\uAADD\uAAE0-\uAAEF\uAAF2-\uAAF6\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB5F\uAB64\uAB65\uABC0-\uABEA\uABEC\uABED\uABF0-\uABF9\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE00-\uFE0F\uFE20-\uFE2D\uFE33\uFE34\uFE4D-\uFE4F\uFE70-\uFE74\uFE76-\uFEFC\uFF10-\uFF19\uFF21-\uFF3A\uFF3F\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]/},g={NonAsciiIdentifierStart:/[\xAA\xB5\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0-\u08B2\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u
0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191E\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2118-\u211D\u2124\u2126\u2128\u212A-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303C\u3041-\u3096\u309B-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA69D\uA6A0-\uA6EF\uA717-\uA71F\uA722-\uA788\uA78B-\uA78E\uA790-\uA7AD\uA7B0\uA7B1\uA7F7-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uA9E0-\uA9E4\uA9E6-\uA9EF\uA9FA-\uA9FE\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA7E-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB5F\uAB64\uAB65\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDD40-\uDD74\uDE80-\uDE9C\uDEA0-\uDED0\uDF00-\uDF1F\uDF30-\uDF4A\uDF50-\uDF75\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF\uDFD1-\uDFD5]|\uD801[\uDC00-\uDC9D\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\uDC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00\uDE10-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\
uDEC7\uDEC9-\uDEE4\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48]|\uD804[\uDC03-\uDC37\uDC83-\uDCAF\uDCD0-\uDCE8\uDD03-\uDD26\uDD50-\uDD72\uDD76\uDD83-\uDDB2\uDDC1-\uDDC4\uDDDA\uDE00-\uDE11\uDE13-\uDE2B\uDEB0-\uDEDE\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3D\uDF5D-\uDF61]|\uD805[\uDC80-\uDCAF\uDCC4\uDCC5\uDCC7\uDD80-\uDDAE\uDE00-\uDE2F\uDE44\uDE80-\uDEAA]|\uD806[\uDCA0-\uDCDF\uDCFF\uDEC0-\uDEF8]|\uD808[\uDC00-\uDF98]|\uD809[\uDC00-\uDC6E]|[\uD80C\uD840-\uD868\uD86A-\uD86C][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDED0-\uDEED\uDF00-\uDF2F\uDF40-\uDF43\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50\uDF93-\uDF9F]|\uD82C[\uDC00\uDC01]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99]|\uD835[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB]|\uD83A[\uDC00-\uDCC4]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE80-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uDC00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D]|\uD87E[\uDC00-\uDE1D]/,NonAsciiIdentifierPart:/[\xAA\xB5\xB7\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0300-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u0483-\u0487\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05BD\u05BF\u05C1\u05C2\u05C4\u05C5\u05C7\u05D0-\u05EA\u05F0-\u05F2\u0610-\u061A\u0620-\u0669\u066E-\u06D3\u06D5-\u06DC\u06DF-\u06E8\u06EA-\u06FC\u06FF\u0710-\u074A\u074D-\u07B1\u07C0-\u07F5\u07FA\u0800-\u082D\u0840-\u085B\u08A0-\u08B2\u08E4-\u0963\u0966-\u096F\u0971-\u0983\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BC-\u09C4\u09C7\u09C8\u09CB-\u09CE\u09D7\u09DC\u09DD\u09DF-\u09E3\u09E6-\u09F1\u0A01-\u0A03\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A3C\u0A3E-\u0A42\u0A47\u0A48\u0A4B-\u0A4D\u0A51\u0A59-\u0A5C\u0A5E\u0A66-\u0A75\u0A81-\u0A83\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABC-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0AD0\u0AE0-\u0AE3\u0AE6-\u0AEF\u0B01-\u0B03\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3C-\u0B44\u0B47\u0B48\u0B4B-\u0B4D\u0B56\u0B57\u0B5C\u0B5D\u0B5F-\u0B63\u0B66-\u0B6F\u0B71\u0B82\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD0\u0BD7\u0BE6-\u0BEF\u0C00-\u0C03\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55\u0C56\u0C58\u0C59\u0C60-\u0C63\u0C66-\u0C6F\u0C81-\u0C83\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBC-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5\u0CD6\u0CDE\u0CE0-\u0CE3\u0CE6-\u0CEF\u0CF1\u0CF2\u0D01-\u0D03\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D-\u0D44\u0D46-\u0D48\u0D4A-\u0D4E\u0D57\u0D60-\u0D63\u0D66-\u0D6F\u0D7A-\u0D7F\u0D82\u0D83\u0D85-\u0D96\u0D9A-\u0DB1\u
0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0DCA\u0DCF-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DE6-\u0DEF\u0DF2\u0DF3\u0E01-\u0E3A\u0E40-\u0E4E\u0E50-\u0E59\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB9\u0EBB-\u0EBD\u0EC0-\u0EC4\u0EC6\u0EC8-\u0ECD\u0ED0-\u0ED9\u0EDC-\u0EDF\u0F00\u0F18\u0F19\u0F20-\u0F29\u0F35\u0F37\u0F39\u0F3E-\u0F47\u0F49-\u0F6C\u0F71-\u0F84\u0F86-\u0F97\u0F99-\u0FBC\u0FC6\u1000-\u1049\u1050-\u109D\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u135D-\u135F\u1369-\u1371\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176C\u176E-\u1770\u1772\u1773\u1780-\u17D3\u17D7\u17DC\u17DD\u17E0-\u17E9\u180B-\u180D\u1810-\u1819\u1820-\u1877\u1880-\u18AA\u18B0-\u18F5\u1900-\u191E\u1920-\u192B\u1930-\u193B\u1946-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u19D0-\u19DA\u1A00-\u1A1B\u1A20-\u1A5E\u1A60-\u1A7C\u1A7F-\u1A89\u1A90-\u1A99\u1AA7\u1AB0-\u1ABD\u1B00-\u1B4B\u1B50-\u1B59\u1B6B-\u1B73\u1B80-\u1BF3\u1C00-\u1C37\u1C40-\u1C49\u1C4D-\u1C7D\u1CD0-\u1CD2\u1CD4-\u1CF6\u1CF8\u1CF9\u1D00-\u1DF5\u1DFC-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u200C\u200D\u203F\u2040\u2054\u2071\u207F\u2090-\u209C\u20D0-\u20DC\u20E1\u20E5-\u20F0\u2102\u2107\u210A-\u2113\u2115\u2118-\u211D\u2124\u2126\u2128\u212A-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D7F-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2DE0-\u2DFF\u3005-\u3007\u3021-\u302F\u3031-\u3035\u3038-\u303C\u3041-\u3096\u3099-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA62B\uA640-\uA66F\uA674-\uA67D\uA67F-\uA69D\uA69F-\uA6F1\uA717-\uA71F\uA722-\uA788\uA78B-\uA78E\uA790-\uA7AD\uA7B0\uA7B1\uA7F7-\uA827\uA840-\uA873\uA880-\uA8C4\uA8D0-\uA8D9\uA8E0-\uA8F7\uA8FB\uA900-\uA92D\uA930-\uA953\uA960-\uA97C\uA980-\uA9C0\uA9CF-\uA9D9\uA9E0-\uA9FE\uAA00-\uAA36\uAA40-\uAA4D\uAA50-\uAA59\uAA60-\uAA76\uAA7A-\uAAC2\uAADB-\uAADD\uAAE0-\uAAEF\uAAF2-\uAAF6\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB5F\uAB64\uAB65\uABC0-\uABEA\uABEC\uABED\uABF0-\uABF9\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE00-\uFE0F\uFE20-\uFE2D\uFE33\uFE34\uFE4D-\uFE4F\uFE70-\uFE74\uFE76-\uFEFC\uFF10-\uFF19\uFF21-\uFF3A\uFF3F\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDD40-\uDD74\uDDFD\uDE80-\uDE9C\uDEA0-\uDED0\uDEE0\uDF00-\uDF1F\uDF30-\uDF4A\uDF50-\uDF7A\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF\uDFD1-\uDFD5]|\uD801[\uDC00-\uDC9D\uDCA0-\uDCA9\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\u
DC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00-\uDE03\uDE05\uDE06\uDE0C-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE38-\uDE3A\uDE3F\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\uDEC7\uDEC9-\uDEE6\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48]|\uD804[\uDC00-\uDC46\uDC66-\uDC6F\uDC7F-\uDCBA\uDCD0-\uDCE8\uDCF0-\uDCF9\uDD00-\uDD34\uDD36-\uDD3F\uDD50-\uDD73\uDD76\uDD80-\uDDC4\uDDD0-\uDDDA\uDE00-\uDE11\uDE13-\uDE37\uDEB0-\uDEEA\uDEF0-\uDEF9\uDF01-\uDF03\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3C-\uDF44\uDF47\uDF48\uDF4B-\uDF4D\uDF57\uDF5D-\uDF63\uDF66-\uDF6C\uDF70-\uDF74]|\uD805[\uDC80-\uDCC5\uDCC7\uDCD0-\uDCD9\uDD80-\uDDB5\uDDB8-\uDDC0\uDE00-\uDE40\uDE44\uDE50-\uDE59\uDE80-\uDEB7\uDEC0-\uDEC9]|\uD806[\uDCA0-\uDCE9\uDCFF\uDEC0-\uDEF8]|\uD808[\uDC00-\uDF98]|\uD809[\uDC00-\uDC6E]|[\uD80C\uD840-\uD868\uD86A-\uD86C][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDE60-\uDE69\uDED0-\uDEED\uDEF0-\uDEF4\uDF00-\uDF36\uDF40-\uDF43\uDF50-\uDF59\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50-\uDF7E\uDF8F-\uDF9F]|\uD82C[\uDC00\uDC01]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99\uDC9D\uDC9E]|\uD834[\uDD65-\uDD69\uDD6D-\uDD72\uDD7B-\uDD82\uDD85-\uDD8B\uDDAA-\uDDAD\uDE42-\uDE44]|\uD835[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB\uDFCE-\uDFFF]|\uD83A[\uDC00-\uDCC4\uDCD0-\uDCD6]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE80-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uDC00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D]|\uD87E[\uDC00-\uDE1D]|\uDB40[\uDD00-\uDDEF]/},h=[5760,6158,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8239,8287,12288,65279],c=new Array(128),b=0;b<128;++b)c[b]=b>=97&&b<=122||b>=65&&b<=90||b===36||b===95;for(d=new Array(128),b=0;b<128;++b)d[b]=b>=97&&b<=122||b>=65&&b<=90||b>=48&&b<=57||b===36||b===95;a.exports={isDecimalDigit:n,isHexDigit:i,isOctalDigit:k,isWhiteSpace:l,isLineTerminator:m,isIdentifierStartES5:o,isIdentifierPartES5:p,isIdentifierStartES6:q,isIdentifierPartES6:j}}()}),a.define('/node_modules/esutils/lib/ast.js',function(a,b,c,d){!function(){'use strict';function d(a){if(a==null)return!1;switch(a.type){case'ArrayExpression':case'AssignmentExpression':case'BinaryExpression':case'CallExpression':case'ConditionalExpression':case'FunctionExpression':case'Identifier':case'Literal':case'LogicalExpression':case'MemberExpression':case'NewExpression':case'ObjectExpression':case'SequenceExpression':case'ThisExpression':case'UnaryExpression':case'UpdateExpression':return!0}return!1}function e(a){if(a==null)return!1;switch(a.type){case'DoWhileStatement':case'ForInStatement':case'ForStatement':case'WhileStatement':return!0}return!1}function 
b(a){if(a==null)return!1;switch(a.type){case'BlockStatement':case'BreakStatement':case'ContinueStatement':case'DebuggerStatement':case'DoWhileStatement':case'EmptyStatement':case'ExpressionStatement':case'ForInStatement':case'ForStatement':case'IfStatement':case'LabeledStatement':case'ReturnStatement':case'SwitchStatement':case'ThrowStatement':case'TryStatement':case'VariableDeclaration':case'WhileStatement':case'WithStatement':return!0}return!1}function f(a){return b(a)||a!=null&&a.type==='FunctionDeclaration'}function c(a){switch(a.type){case'IfStatement':return a.alternate!=null?a.alternate:a.consequent;case'LabeledStatement':case'ForStatement':case'ForInStatement':case'WhileStatement':case'WithStatement':return a.body}return null}function g(b){var a;if(b.type!=='IfStatement')return!1;if(b.alternate==null)return!1;a=b.consequent;do{if(a.type==='IfStatement'&&a.alternate==null)return!0;a=c(a)}while(a);return!1}a.exports={isExpression:d,isStatement:b,isIterationStatement:e,isSourceElement:f,isProblematicIfStatement:g,trailingStatement:c}}()}),a.define('/node_modules/estraverse/estraverse.js',function(b,a,c,d){!function(c,b){'use strict';typeof define==='function'&&define.amd?define(['exports'],b):a!==void 0?b(a):b(c.estraverse={})}(this,function a(d){'use strict';function s(){}function p(d){var c={},a,b;for(a in d)d.hasOwnProperty(a)&&(b=d[a],typeof b==='object'&&b!==null?c[a]=p(b):c[a]=b);return c}function y(b){var c={},a;for(a in b)b.hasOwnProperty(a)&&(c[a]=b[a]);return c}function x(e,f){var b,a,c,d;a=e.length,c=0;while(a)b=a>>>1,d=c+b,f(e[d])?a=b:(c=d+1,a-=b+1);return c}function t(e,f){var b,a,c,d;a=e.length,c=0;while(a)b=a>>>1,d=c+b,f(e[d])?(c=d+1,a-=b+1):a=b;return c}function u(d,e){var b=l(e),c,a,f;for(a=0,f=b.length;a<f;a+=1)c=b[a],d[c]=e[c];return d}function i(a,b){this.parent=a,this.key=b}function e(a,b,c,d){this.node=a,this.path=b,this.wrap=c,this.ref=d}function b(){}function k(a){return a==null?!1:typeof a==='object'&&typeof a.type==='string'}function q(a,b){return(a===m.ObjectExpression||a===m.ObjectPattern)&&'properties'===b}function o(c,d){var a=new b;return a.traverse(c,d)}function w(c,d){var a=new b;return a.replace(c,d)}function z(a,c){var b;return b=x(c,function b(c){return c.range[0]>a.range[0]}),a.extendedRange=[a.range[0],a.range[1]],b!==c.length&&(a.extendedRange[1]=c[b].range[0]),b-=1,b>=0&&(a.extendedRange[0]=c[b].range[1]),a}function v(d,e,h){var a=[],g,f,c,b;if(!d.range)throw new Error('attachComments needs range information');if(!h.length){if(e.length){for(c=0,f=e.length;c<f;c+=1)g=p(e[c]),g.extendedRange=[0,d.range[0]],a.push(g);d.leadingComments=a}return d}for(c=0,f=e.length;c<f;c+=1)a.push(z(p(e[c]),h));return b=0,o(d,{enter:function(c){var d;while(b<a.length){if(d=a[b],d.extendedRange[1]>c.range[0])break;d.extendedRange[1]===c.range[0]?(c.leadingComments||(c.leadingComments=[]),c.leadingComments.push(d),a.splice(b,1)):b+=1}return b===a.length?j.Break:a[b].extendedRange[0]>c.range[1]?j.Skip:void 0}}),b=0,o(d,{leave:function(c){var d;while(b<a.length){if(d=a[b],c.range[1]<d.extendedRange[0])break;c.range[1]===d.extendedRange[0]?(c.trailingComments||(c.trailingComments=[]),c.trailingComments.push(d),a.splice(b,1)):b+=1}return b===a.length?j.Break:a[b].extendedRange[0]>c.range[1]?j.Skip:void 0}}),d}var m,h,j,n,r,l,c,g,f;return h=Array.isArray,h||(h=function a(b){return Object.prototype.toString.call(b)==='[object Array]'}),s(y),s(t),r=Object.create||function(){function a(){}return function(b){return a.prototype=b,new a}}(),l=Object.keys||function(c){var 
a=[],b;for(b in c)a.push(b);return a},m={AssignmentExpression:'AssignmentExpression',ArrayExpression:'ArrayExpression',ArrayPattern:'ArrayPattern',ArrowFunctionExpression:'ArrowFunctionExpression',AwaitExpression:'AwaitExpression',BlockStatement:'BlockStatement',BinaryExpression:'BinaryExpression',BreakStatement:'BreakStatement',CallExpression:'CallExpression',CatchClause:'CatchClause',ClassBody:'ClassBody',ClassDeclaration:'ClassDeclaration',ClassExpression:'ClassExpression',ComprehensionBlock:'ComprehensionBlock',ComprehensionExpression:'ComprehensionExpression',ConditionalExpression:'ConditionalExpression',ContinueStatement:'ContinueStatement',DebuggerStatement:'DebuggerStatement',DirectiveStatement:'DirectiveStatement',DoWhileStatement:'DoWhileStatement',EmptyStatement:'EmptyStatement',ExportBatchSpecifier:'ExportBatchSpecifier',ExportDeclaration:'ExportDeclaration',ExportSpecifier:'ExportSpecifier',ExpressionStatement:'ExpressionStatement',ForStatement:'ForStatement',ForInStatement:'ForInStatement',ForOfStatement:'ForOfStatement',FunctionDeclaration:'FunctionDeclaration',FunctionExpression:'FunctionExpression',GeneratorExpression:'GeneratorExpression',Identifier:'Identifier',IfStatement:'IfStatement',ImportDeclaration:'ImportDeclaration',ImportDefaultSpecifier:'ImportDefaultSpecifier',ImportNamespaceSpecifier:'ImportNamespaceSpecifier',ImportSpecifier:'ImportSpecifier',Literal:'Literal',LabeledStatement:'LabeledStatement',LogicalExpression:'LogicalExpression',MemberExpression:'MemberExpression',MethodDefinition:'MethodDefinition',ModuleSpecifier:'ModuleSpecifier',NewExpression:'NewExpression',ObjectExpression:'ObjectExpression',ObjectPattern:'ObjectPattern',Program:'Program',Property:'Property',ReturnStatement:'ReturnStatement',SequenceExpression:'SequenceExpression',SpreadElement:'SpreadElement',SwitchStatement:'SwitchStatement',SwitchCase:'SwitchCase',TaggedTemplateExpression:'TaggedTemplateExpression',TemplateElement:'TemplateElement',TemplateLiteral:'TemplateLiteral',ThisExpression:'ThisExpression',ThrowStatement:'ThrowStatement',TryStatement:'TryStatement',UnaryExpression:'UnaryExpression',UpdateExpression:'UpdateExpression',VariableDeclaration:'VariableDeclaration',VariableDeclarator:'VariableDeclarator',WhileStatement:'WhileStatement',WithStatement:'WithStatement',YieldExpression:'YieldExpression'},n={AssignmentExpression:['left','right'],ArrayExpression:['elements'],ArrayPattern:['elements'],ArrowFunctionExpression:['params','defaults','rest','body'],AwaitExpression:['argument'],BlockStatement:['body'],BinaryExpression:['left','right'],BreakStatement:['label'],CallExpression:['callee','arguments'],CatchClause:['param','body'],ClassBody:['body'],ClassDeclaration:['id','body','superClass'],ClassExpression:['id','body','superClass'],ComprehensionBlock:['left','right'],ComprehensionExpression:['blocks','filter','body'],ConditionalExpression:['test','consequent','alternate'],ContinueStatement:['label'],DebuggerStatement:[],DirectiveStatement:[],DoWhileStatement:['body','test'],EmptyStatement:[],ExportBatchSpecifier:[],ExportDeclaration:['declaration','specifiers','source'],ExportSpecifier:['id','name'],ExpressionStatement:['expression'],ForStatement:['init','test','update','body'],ForInStatement:['left','right','body'],ForOfStatement:['left','right','body'],FunctionDeclaration:['id','params','defaults','rest','body'],FunctionExpression:['id','params','defaults','rest','body'],GeneratorExpression:['blocks','filter','body'],Identifier:[],IfStatement:['test','consequent','alternate'],I
mportDeclaration:['specifiers','source'],ImportDefaultSpecifier:['id'],ImportNamespaceSpecifier:['id'],ImportSpecifier:['id','name'],Literal:[],LabeledStatement:['label','body'],LogicalExpression:['left','right'],MemberExpression:['object','property'],MethodDefinition:['key','value'],ModuleSpecifier:[],NewExpression:['callee','arguments'],ObjectExpression:['properties'],ObjectPattern:['properties'],Program:['body'],Property:['key','value'],ReturnStatement:['argument'],SequenceExpression:['expressions'],SpreadElement:['argument'],SwitchStatement:['discriminant','cases'],SwitchCase:['test','consequent'],TaggedTemplateExpression:['tag','quasi'],TemplateElement:[],TemplateLiteral:['quasis','expressions'],ThisExpression:[],ThrowStatement:['argument'],TryStatement:['block','handlers','handler','guardedHandlers','finalizer'],UnaryExpression:['argument'],UpdateExpression:['argument'],VariableDeclaration:['declarations'],VariableDeclarator:['id','init'],WhileStatement:['test','body'],WithStatement:['object','body'],YieldExpression:['argument']},c={},g={},f={},j={Break:c,Skip:g,Remove:f},i.prototype.replace=function a(b){this.parent[this.key]=b},i.prototype.remove=function a(){return h(this.parent)?(this.parent.splice(this.key,1),!0):(this.replace(null),!1)},b.prototype.path=function a(){function e(b,a){if(h(a))for(c=0,g=a.length;c<g;++c)b.push(a[c]);else b.push(a)}var b,f,c,g,d,i;if(!this.__current.path)return null;for(d=[],b=2,f=this.__leavelist.length;b<f;++b)i=this.__leavelist[b],e(d,i.path);return e(d,this.__current.path),d},b.prototype.type=function(){var a=this.current();return a.type||this.__current.wrap},b.prototype.parents=function a(){var b,d,c;for(c=[],b=1,d=this.__leavelist.length;b<d;++b)c.push(this.__leavelist[b].node);return c},b.prototype.current=function a(){return this.__current.node},b.prototype.__execute=function a(c,d){var e,b;return b=undefined,e=this.__current,this.__current=d,this.__state=null,c&&(b=c.call(this,d.node,this.__leavelist[this.__leavelist.length-1].node)),this.__current=e,b},b.prototype.notify=function a(b){this.__state=b},b.prototype.skip=function(){this.notify(g)},b.prototype['break']=function(){this.notify(c)},b.prototype.remove=function(){this.notify(f)},b.prototype.__initialize=function(b,a){this.visitor=a,this.root=b,this.__worklist=[],this.__leavelist=[],this.__current=null,this.__state=null,this.__fallback=a.fallback==='iteration',this.__keys=n,a.keys&&(this.__keys=u(r(this.__keys),a.keys))},b.prototype.traverse=function a(v,u){var i,r,b,o,s,m,n,p,f,j,d,t;this.__initialize(v,u),t={},i=this.__worklist,r=this.__leavelist,i.push(new e(v,null,null,null)),r.push(new e(null,null,null,null));while(i.length){if(b=i.pop(),b===t){if(b=r.pop(),m=this.__execute(u.leave,b),this.__state===c||m===c)return;continue}if(b.node){if(m=this.__execute(u.enter,b),this.__state===c||m===c)return;if(i.push(t),r.push(b),this.__state===g||m===g)continue;if(o=b.node,s=b.wrap||o.type,j=this.__keys[s],!j)if(this.__fallback)j=l(o);else throw new Error('Unknown node type '+s+'.');p=j.length;while((p-=1)>=0){if(n=j[p],d=o[n],!d)continue;if(h(d)){f=d.length;while((f-=1)>=0){if(!d[f])continue;if(q(s,j[p]))b=new e(d[f],[n,f],'Property',null);else if(k(d[f]))b=new e(d[f],[n,f],null,null);else continue;i.push(b)}}else k(d)&&i.push(new e(d,n,null,null))}}}},b.prototype.replace=function a(w,x){function z(b){var c,d,a,e;if(b.ref.remove()){d=b.ref.key,e=b.ref.parent,c=n.length;while(c--)if(a=n[c],a.ref&&a.ref.parent===e){if(a.ref.key<d)break;--a.ref.key}}}var 
n,v,p,t,d,b,u,m,o,j,y,s,r;this.__initialize(w,x),y={},n=this.__worklist,v=this.__leavelist,s={root:w},b=new e(w,null,null,new i(s,'root')),n.push(b),v.push(b);while(n.length){if(b=n.pop(),b===y){if(b=v.pop(),d=this.__execute(x.leave,b),d!==undefined&&d!==c&&d!==g&&d!==f&&b.ref.replace(d),(this.__state===f||d===f)&&z(b),this.__state===c||d===c)return s.root;continue}if(d=this.__execute(x.enter,b),d!==undefined&&d!==c&&d!==g&&d!==f&&(b.ref.replace(d),b.node=d),(this.__state===f||d===f)&&(z(b),b.node=null),this.__state===c||d===c)return s.root;if(p=b.node,!p)continue;if(n.push(y),v.push(b),this.__state===g||d===g)continue;if(t=b.wrap||p.type,o=this.__keys[t],!o)if(this.__fallback)o=l(p);else throw new Error('Unknown node type '+t+'.');u=o.length;while((u-=1)>=0){if(r=o[u],j=p[r],!j)continue;if(h(j)){m=j.length;while((m-=1)>=0){if(!j[m])continue;if(q(t,o[u]))b=new e(j[m],[r,m],'Property',new i(j,m));else if(k(j[m]))b=new e(j[m],[r,m],null,new i(j,m));else continue;n.push(b)}}else k(j)&&n.push(new e(j,r,null,new i(p,r)))}}return s.root},d.version='1.8.1-dev',d.Syntax=m,d.traverse=o,d.replace=w,d.attachComments=v,d.VisitorKeys=n,d.VisitorOption=j,d.Controller=b,d.cloneEnvironment=function(){return a({})},d})}),a('/tools/entry-point.js')}.call(this,this)) \ No newline at end of file
diff --git a/share/server/60/esprima.js b/share/server/60/esprima.js
deleted file mode 100644
index ae8b47783..000000000
--- a/share/server/60/esprima.js
+++ /dev/null
@@ -1,6711 +0,0 @@
-(function webpackUniversalModuleDefinition(root, factory) {
-/* istanbul ignore next */
- if(typeof exports === 'object' && typeof module === 'object')
- module.exports = factory();
- else if(typeof define === 'function' && define.amd)
- define([], factory);
-/* istanbul ignore next */
- else if(typeof exports === 'object')
- exports["esprima"] = factory();
- else
- root["esprima"] = factory();
-})(this, function() {
-return /******/ (function(modules) { // webpackBootstrap
-/******/ // The module cache
-/******/ var installedModules = {};
-
-/******/ // The require function
-/******/ function __webpack_require__(moduleId) {
-
-/******/ // Check if module is in cache
-/* istanbul ignore if */
-/******/ if(installedModules[moduleId])
-/******/ return installedModules[moduleId].exports;
-
-/******/ // Create a new module (and put it into the cache)
-/******/ var module = installedModules[moduleId] = {
-/******/ exports: {},
-/******/ id: moduleId,
-/******/ loaded: false
-/******/ };
-
-/******/ // Execute the module function
-/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
-
-/******/ // Flag the module as loaded
-/******/ module.loaded = true;
-
-/******/ // Return the exports of the module
-/******/ return module.exports;
-/******/ }
-
-
-/******/ // expose the modules object (__webpack_modules__)
-/******/ __webpack_require__.m = modules;
-
-/******/ // expose the module cache
-/******/ __webpack_require__.c = installedModules;
-
-/******/ // __webpack_public_path__
-/******/ __webpack_require__.p = "";
-
-/******/ // Load entry module and return exports
-/******/ return __webpack_require__(0);
-/******/ })
-/************************************************************************/
-/******/ ([
-/* 0 */
-/***/ function(module, exports, __webpack_require__) {
-
- "use strict";
- /*
- Copyright JS Foundation and other contributors, https://js.foundation/
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- Object.defineProperty(exports, "__esModule", { value: true });
- var comment_handler_1 = __webpack_require__(1);
- var jsx_parser_1 = __webpack_require__(3);
- var parser_1 = __webpack_require__(8);
- var tokenizer_1 = __webpack_require__(15);
- function parse(code, options, delegate) {
- var commentHandler = null;
- var proxyDelegate = function (node, metadata) {
- if (delegate) {
- delegate(node, metadata);
- }
- if (commentHandler) {
- commentHandler.visit(node, metadata);
- }
- };
- var parserDelegate = (typeof delegate === 'function') ? proxyDelegate : null;
- var collectComment = false;
- if (options) {
- collectComment = (typeof options.comment === 'boolean' && options.comment);
- var attachComment = (typeof options.attachComment === 'boolean' && options.attachComment);
- if (collectComment || attachComment) {
- commentHandler = new comment_handler_1.CommentHandler();
- commentHandler.attach = attachComment;
- options.comment = true;
- parserDelegate = proxyDelegate;
- }
- }
- var isModule = false;
- if (options && typeof options.sourceType === 'string') {
- isModule = (options.sourceType === 'module');
- }
- var parser;
- if (options && typeof options.jsx === 'boolean' && options.jsx) {
- parser = new jsx_parser_1.JSXParser(code, options, parserDelegate);
- }
- else {
- parser = new parser_1.Parser(code, options, parserDelegate);
- }
- var program = isModule ? parser.parseModule() : parser.parseScript();
- var ast = program;
- if (collectComment && commentHandler) {
- ast.comments = commentHandler.comments;
- }
- if (parser.config.tokens) {
- ast.tokens = parser.tokens;
- }
- if (parser.config.tolerant) {
- ast.errors = parser.errorHandler.errors;
- }
- return ast;
- }
- exports.parse = parse;
- function parseModule(code, options, delegate) {
- var parsingOptions = options || {};
- parsingOptions.sourceType = 'module';
- return parse(code, parsingOptions, delegate);
- }
- exports.parseModule = parseModule;
- function parseScript(code, options, delegate) {
- var parsingOptions = options || {};
- parsingOptions.sourceType = 'script';
- return parse(code, parsingOptions, delegate);
- }
- exports.parseScript = parseScript;
- function tokenize(code, options, delegate) {
- var tokenizer = new tokenizer_1.Tokenizer(code, options);
- var tokens;
- tokens = [];
- try {
- while (true) {
- var token = tokenizer.getNextToken();
- if (!token) {
- break;
- }
- if (delegate) {
- token = delegate(token);
- }
- tokens.push(token);
- }
- }
- catch (e) {
- tokenizer.errorHandler.tolerate(e);
- }
- if (tokenizer.errorHandler.tolerant) {
- tokens.errors = tokenizer.errors();
- }
- return tokens;
- }
- exports.tokenize = tokenize;
- var syntax_1 = __webpack_require__(2);
- exports.Syntax = syntax_1.Syntax;
- // Sync with *.json manifests.
- exports.version = '4.0.1';
-
-
-/***/ },
-/* 1 */
-/***/ function(module, exports, __webpack_require__) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- var syntax_1 = __webpack_require__(2);
- var CommentHandler = (function () {
- function CommentHandler() {
- this.attach = false;
- this.comments = [];
- this.stack = [];
- this.leading = [];
- this.trailing = [];
- }
- CommentHandler.prototype.insertInnerComments = function (node, metadata) {
-            // innerComments for properties empty block
- // `function a() {/** comments **\/}`
- if (node.type === syntax_1.Syntax.BlockStatement && node.body.length === 0) {
- var innerComments = [];
- for (var i = this.leading.length - 1; i >= 0; --i) {
- var entry = this.leading[i];
- if (metadata.end.offset >= entry.start) {
- innerComments.unshift(entry.comment);
- this.leading.splice(i, 1);
- this.trailing.splice(i, 1);
- }
- }
- if (innerComments.length) {
- node.innerComments = innerComments;
- }
- }
- };
- CommentHandler.prototype.findTrailingComments = function (metadata) {
- var trailingComments = [];
- if (this.trailing.length > 0) {
- for (var i = this.trailing.length - 1; i >= 0; --i) {
- var entry_1 = this.trailing[i];
- if (entry_1.start >= metadata.end.offset) {
- trailingComments.unshift(entry_1.comment);
- }
- }
- this.trailing.length = 0;
- return trailingComments;
- }
- var entry = this.stack[this.stack.length - 1];
- if (entry && entry.node.trailingComments) {
- var firstComment = entry.node.trailingComments[0];
- if (firstComment && firstComment.range[0] >= metadata.end.offset) {
- trailingComments = entry.node.trailingComments;
- delete entry.node.trailingComments;
- }
- }
- return trailingComments;
- };
- CommentHandler.prototype.findLeadingComments = function (metadata) {
- var leadingComments = [];
- var target;
- while (this.stack.length > 0) {
- var entry = this.stack[this.stack.length - 1];
- if (entry && entry.start >= metadata.start.offset) {
- target = entry.node;
- this.stack.pop();
- }
- else {
- break;
- }
- }
- if (target) {
- var count = target.leadingComments ? target.leadingComments.length : 0;
- for (var i = count - 1; i >= 0; --i) {
- var comment = target.leadingComments[i];
- if (comment.range[1] <= metadata.start.offset) {
- leadingComments.unshift(comment);
- target.leadingComments.splice(i, 1);
- }
- }
- if (target.leadingComments && target.leadingComments.length === 0) {
- delete target.leadingComments;
- }
- return leadingComments;
- }
- for (var i = this.leading.length - 1; i >= 0; --i) {
- var entry = this.leading[i];
- if (entry.start <= metadata.start.offset) {
- leadingComments.unshift(entry.comment);
- this.leading.splice(i, 1);
- }
- }
- return leadingComments;
- };
- CommentHandler.prototype.visitNode = function (node, metadata) {
- if (node.type === syntax_1.Syntax.Program && node.body.length > 0) {
- return;
- }
- this.insertInnerComments(node, metadata);
- var trailingComments = this.findTrailingComments(metadata);
- var leadingComments = this.findLeadingComments(metadata);
- if (leadingComments.length > 0) {
- node.leadingComments = leadingComments;
- }
- if (trailingComments.length > 0) {
- node.trailingComments = trailingComments;
- }
- this.stack.push({
- node: node,
- start: metadata.start.offset
- });
- };
- CommentHandler.prototype.visitComment = function (node, metadata) {
- var type = (node.type[0] === 'L') ? 'Line' : 'Block';
- var comment = {
- type: type,
- value: node.value
- };
- if (node.range) {
- comment.range = node.range;
- }
- if (node.loc) {
- comment.loc = node.loc;
- }
- this.comments.push(comment);
- if (this.attach) {
- var entry = {
- comment: {
- type: type,
- value: node.value,
- range: [metadata.start.offset, metadata.end.offset]
- },
- start: metadata.start.offset
- };
- if (node.loc) {
- entry.comment.loc = node.loc;
- }
- node.type = type;
- this.leading.push(entry);
- this.trailing.push(entry);
- }
- };
- CommentHandler.prototype.visit = function (node, metadata) {
- if (node.type === 'LineComment') {
- this.visitComment(node, metadata);
- }
- else if (node.type === 'BlockComment') {
- this.visitComment(node, metadata);
- }
- else if (this.attach) {
- this.visitNode(node, metadata);
- }
- };
- return CommentHandler;
- }());
- exports.CommentHandler = CommentHandler;
-
-
-/***/ },
-/* 2 */
-/***/ function(module, exports) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.Syntax = {
- AssignmentExpression: 'AssignmentExpression',
- AssignmentPattern: 'AssignmentPattern',
- ArrayExpression: 'ArrayExpression',
- ArrayPattern: 'ArrayPattern',
- ArrowFunctionExpression: 'ArrowFunctionExpression',
- AwaitExpression: 'AwaitExpression',
- BlockStatement: 'BlockStatement',
- BinaryExpression: 'BinaryExpression',
- BreakStatement: 'BreakStatement',
- CallExpression: 'CallExpression',
- CatchClause: 'CatchClause',
- ClassBody: 'ClassBody',
- ClassDeclaration: 'ClassDeclaration',
- ClassExpression: 'ClassExpression',
- ConditionalExpression: 'ConditionalExpression',
- ContinueStatement: 'ContinueStatement',
- DoWhileStatement: 'DoWhileStatement',
- DebuggerStatement: 'DebuggerStatement',
- EmptyStatement: 'EmptyStatement',
- ExportAllDeclaration: 'ExportAllDeclaration',
- ExportDefaultDeclaration: 'ExportDefaultDeclaration',
- ExportNamedDeclaration: 'ExportNamedDeclaration',
- ExportSpecifier: 'ExportSpecifier',
- ExpressionStatement: 'ExpressionStatement',
- ForStatement: 'ForStatement',
- ForOfStatement: 'ForOfStatement',
- ForInStatement: 'ForInStatement',
- FunctionDeclaration: 'FunctionDeclaration',
- FunctionExpression: 'FunctionExpression',
- Identifier: 'Identifier',
- IfStatement: 'IfStatement',
- ImportDeclaration: 'ImportDeclaration',
- ImportDefaultSpecifier: 'ImportDefaultSpecifier',
- ImportNamespaceSpecifier: 'ImportNamespaceSpecifier',
- ImportSpecifier: 'ImportSpecifier',
- Literal: 'Literal',
- LabeledStatement: 'LabeledStatement',
- LogicalExpression: 'LogicalExpression',
- MemberExpression: 'MemberExpression',
- MetaProperty: 'MetaProperty',
- MethodDefinition: 'MethodDefinition',
- NewExpression: 'NewExpression',
- ObjectExpression: 'ObjectExpression',
- ObjectPattern: 'ObjectPattern',
- Program: 'Program',
- Property: 'Property',
- RestElement: 'RestElement',
- ReturnStatement: 'ReturnStatement',
- SequenceExpression: 'SequenceExpression',
- SpreadElement: 'SpreadElement',
- Super: 'Super',
- SwitchCase: 'SwitchCase',
- SwitchStatement: 'SwitchStatement',
- TaggedTemplateExpression: 'TaggedTemplateExpression',
- TemplateElement: 'TemplateElement',
- TemplateLiteral: 'TemplateLiteral',
- ThisExpression: 'ThisExpression',
- ThrowStatement: 'ThrowStatement',
- TryStatement: 'TryStatement',
- UnaryExpression: 'UnaryExpression',
- UpdateExpression: 'UpdateExpression',
- VariableDeclaration: 'VariableDeclaration',
- VariableDeclarator: 'VariableDeclarator',
- WhileStatement: 'WhileStatement',
- WithStatement: 'WithStatement',
- YieldExpression: 'YieldExpression'
- };
-
-
-/***/ },
-/* 3 */
-/***/ function(module, exports, __webpack_require__) {
-
- "use strict";
-/* istanbul ignore next */
- var __extends = (this && this.__extends) || (function () {
- var extendStatics = Object.setPrototypeOf ||
- ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
- function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
- return function (d, b) {
- extendStatics(d, b);
- function __() { this.constructor = d; }
- d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
- };
- })();
- Object.defineProperty(exports, "__esModule", { value: true });
- var character_1 = __webpack_require__(4);
- var JSXNode = __webpack_require__(5);
- var jsx_syntax_1 = __webpack_require__(6);
- var Node = __webpack_require__(7);
- var parser_1 = __webpack_require__(8);
- var token_1 = __webpack_require__(13);
- var xhtml_entities_1 = __webpack_require__(14);
- token_1.TokenName[100 /* Identifier */] = 'JSXIdentifier';
- token_1.TokenName[101 /* Text */] = 'JSXText';
- // Fully qualified element name, e.g. <svg:path> returns "svg:path"
- function getQualifiedElementName(elementName) {
- var qualifiedName;
- switch (elementName.type) {
- case jsx_syntax_1.JSXSyntax.JSXIdentifier:
- var id = elementName;
- qualifiedName = id.name;
- break;
- case jsx_syntax_1.JSXSyntax.JSXNamespacedName:
- var ns = elementName;
- qualifiedName = getQualifiedElementName(ns.namespace) + ':' +
- getQualifiedElementName(ns.name);
- break;
- case jsx_syntax_1.JSXSyntax.JSXMemberExpression:
- var expr = elementName;
- qualifiedName = getQualifiedElementName(expr.object) + '.' +
- getQualifiedElementName(expr.property);
- break;
- /* istanbul ignore next */
- default:
- break;
- }
- return qualifiedName;
- }
- var JSXParser = (function (_super) {
- __extends(JSXParser, _super);
- function JSXParser(code, options, delegate) {
- return _super.call(this, code, options, delegate) || this;
- }
- JSXParser.prototype.parsePrimaryExpression = function () {
- return this.match('<') ? this.parseJSXRoot() : _super.prototype.parsePrimaryExpression.call(this);
- };
- JSXParser.prototype.startJSX = function () {
- // Unwind the scanner before the lookahead token.
- this.scanner.index = this.startMarker.index;
- this.scanner.lineNumber = this.startMarker.line;
- this.scanner.lineStart = this.startMarker.index - this.startMarker.column;
- };
- JSXParser.prototype.finishJSX = function () {
- // Prime the next lookahead.
- this.nextToken();
- };
- JSXParser.prototype.reenterJSX = function () {
- this.startJSX();
- this.expectJSX('}');
- // Pop the closing '}' added from the lookahead.
- if (this.config.tokens) {
- this.tokens.pop();
- }
- };
- JSXParser.prototype.createJSXNode = function () {
- this.collectComments();
- return {
- index: this.scanner.index,
- line: this.scanner.lineNumber,
- column: this.scanner.index - this.scanner.lineStart
- };
- };
- JSXParser.prototype.createJSXChildNode = function () {
- return {
- index: this.scanner.index,
- line: this.scanner.lineNumber,
- column: this.scanner.index - this.scanner.lineStart
- };
- };
- JSXParser.prototype.scanXHTMLEntity = function (quote) {
- var result = '&';
- var valid = true;
- var terminated = false;
- var numeric = false;
- var hex = false;
- while (!this.scanner.eof() && valid && !terminated) {
- var ch = this.scanner.source[this.scanner.index];
- if (ch === quote) {
- break;
- }
- terminated = (ch === ';');
- result += ch;
- ++this.scanner.index;
- if (!terminated) {
- switch (result.length) {
- case 2:
- // e.g. '&#123;'
- numeric = (ch === '#');
- break;
- case 3:
- if (numeric) {
- // e.g. '&#x41;'
- hex = (ch === 'x');
- valid = hex || character_1.Character.isDecimalDigit(ch.charCodeAt(0));
- numeric = numeric && !hex;
- }
- break;
- default:
- valid = valid && !(numeric && !character_1.Character.isDecimalDigit(ch.charCodeAt(0)));
- valid = valid && !(hex && !character_1.Character.isHexDigit(ch.charCodeAt(0)));
- break;
- }
- }
- }
- if (valid && terminated && result.length > 2) {
- // e.g. '&#x41;' becomes just '#x41'
- var str = result.substr(1, result.length - 2);
- if (numeric && str.length > 1) {
- result = String.fromCharCode(parseInt(str.substr(1), 10));
- }
- else if (hex && str.length > 2) {
- result = String.fromCharCode(parseInt('0' + str.substr(1), 16));
- }
- else if (!numeric && !hex && xhtml_entities_1.XHTMLEntities[str]) {
- result = xhtml_entities_1.XHTMLEntities[str];
- }
- }
- return result;
- };
- // Scan the next JSX token. This replaces Scanner#lex when in JSX mode.
- JSXParser.prototype.lexJSX = function () {
- var cp = this.scanner.source.charCodeAt(this.scanner.index);
- // < > / : = { }
- if (cp === 60 || cp === 62 || cp === 47 || cp === 58 || cp === 61 || cp === 123 || cp === 125) {
- var value = this.scanner.source[this.scanner.index++];
- return {
- type: 7 /* Punctuator */,
- value: value,
- lineNumber: this.scanner.lineNumber,
- lineStart: this.scanner.lineStart,
- start: this.scanner.index - 1,
- end: this.scanner.index
- };
- }
- // " '
- if (cp === 34 || cp === 39) {
- var start = this.scanner.index;
- var quote = this.scanner.source[this.scanner.index++];
- var str = '';
- while (!this.scanner.eof()) {
- var ch = this.scanner.source[this.scanner.index++];
- if (ch === quote) {
- break;
- }
- else if (ch === '&') {
- str += this.scanXHTMLEntity(quote);
- }
- else {
- str += ch;
- }
- }
- return {
- type: 8 /* StringLiteral */,
- value: str,
- lineNumber: this.scanner.lineNumber,
- lineStart: this.scanner.lineStart,
- start: start,
- end: this.scanner.index
- };
- }
- // ... or .
- if (cp === 46) {
- var n1 = this.scanner.source.charCodeAt(this.scanner.index + 1);
- var n2 = this.scanner.source.charCodeAt(this.scanner.index + 2);
- var value = (n1 === 46 && n2 === 46) ? '...' : '.';
- var start = this.scanner.index;
- this.scanner.index += value.length;
- return {
- type: 7 /* Punctuator */,
- value: value,
- lineNumber: this.scanner.lineNumber,
- lineStart: this.scanner.lineStart,
- start: start,
- end: this.scanner.index
- };
- }
- // `
- if (cp === 96) {
- // Only placeholder, since it will be rescanned as a real assignment expression.
- return {
- type: 10 /* Template */,
- value: '',
- lineNumber: this.scanner.lineNumber,
- lineStart: this.scanner.lineStart,
- start: this.scanner.index,
- end: this.scanner.index
- };
- }
-            // Identifier can not contain backslash (char code 92).
- if (character_1.Character.isIdentifierStart(cp) && (cp !== 92)) {
- var start = this.scanner.index;
- ++this.scanner.index;
- while (!this.scanner.eof()) {
- var ch = this.scanner.source.charCodeAt(this.scanner.index);
- if (character_1.Character.isIdentifierPart(ch) && (ch !== 92)) {
- ++this.scanner.index;
- }
- else if (ch === 45) {
- // Hyphen (char code 45) can be part of an identifier.
- ++this.scanner.index;
- }
- else {
- break;
- }
- }
- var id = this.scanner.source.slice(start, this.scanner.index);
- return {
- type: 100 /* Identifier */,
- value: id,
- lineNumber: this.scanner.lineNumber,
- lineStart: this.scanner.lineStart,
- start: start,
- end: this.scanner.index
- };
- }
- return this.scanner.lex();
- };
- JSXParser.prototype.nextJSXToken = function () {
- this.collectComments();
- this.startMarker.index = this.scanner.index;
- this.startMarker.line = this.scanner.lineNumber;
- this.startMarker.column = this.scanner.index - this.scanner.lineStart;
- var token = this.lexJSX();
- this.lastMarker.index = this.scanner.index;
- this.lastMarker.line = this.scanner.lineNumber;
- this.lastMarker.column = this.scanner.index - this.scanner.lineStart;
- if (this.config.tokens) {
- this.tokens.push(this.convertToken(token));
- }
- return token;
- };
- JSXParser.prototype.nextJSXText = function () {
- this.startMarker.index = this.scanner.index;
- this.startMarker.line = this.scanner.lineNumber;
- this.startMarker.column = this.scanner.index - this.scanner.lineStart;
- var start = this.scanner.index;
- var text = '';
- while (!this.scanner.eof()) {
- var ch = this.scanner.source[this.scanner.index];
- if (ch === '{' || ch === '<') {
- break;
- }
- ++this.scanner.index;
- text += ch;
- if (character_1.Character.isLineTerminator(ch.charCodeAt(0))) {
- ++this.scanner.lineNumber;
- if (ch === '\r' && this.scanner.source[this.scanner.index] === '\n') {
- ++this.scanner.index;
- }
- this.scanner.lineStart = this.scanner.index;
- }
- }
- this.lastMarker.index = this.scanner.index;
- this.lastMarker.line = this.scanner.lineNumber;
- this.lastMarker.column = this.scanner.index - this.scanner.lineStart;
- var token = {
- type: 101 /* Text */,
- value: text,
- lineNumber: this.scanner.lineNumber,
- lineStart: this.scanner.lineStart,
- start: start,
- end: this.scanner.index
- };
- if ((text.length > 0) && this.config.tokens) {
- this.tokens.push(this.convertToken(token));
- }
- return token;
- };
- JSXParser.prototype.peekJSXToken = function () {
- var state = this.scanner.saveState();
- this.scanner.scanComments();
- var next = this.lexJSX();
- this.scanner.restoreState(state);
- return next;
- };
- // Expect the next JSX token to match the specified punctuator.
- // If not, an exception will be thrown.
- JSXParser.prototype.expectJSX = function (value) {
- var token = this.nextJSXToken();
- if (token.type !== 7 /* Punctuator */ || token.value !== value) {
- this.throwUnexpectedToken(token);
- }
- };
- // Return true if the next JSX token matches the specified punctuator.
- JSXParser.prototype.matchJSX = function (value) {
- var next = this.peekJSXToken();
- return next.type === 7 /* Punctuator */ && next.value === value;
- };
- JSXParser.prototype.parseJSXIdentifier = function () {
- var node = this.createJSXNode();
- var token = this.nextJSXToken();
- if (token.type !== 100 /* Identifier */) {
- this.throwUnexpectedToken(token);
- }
- return this.finalize(node, new JSXNode.JSXIdentifier(token.value));
- };
- JSXParser.prototype.parseJSXElementName = function () {
- var node = this.createJSXNode();
- var elementName = this.parseJSXIdentifier();
- if (this.matchJSX(':')) {
- var namespace = elementName;
- this.expectJSX(':');
- var name_1 = this.parseJSXIdentifier();
- elementName = this.finalize(node, new JSXNode.JSXNamespacedName(namespace, name_1));
- }
- else if (this.matchJSX('.')) {
- while (this.matchJSX('.')) {
- var object = elementName;
- this.expectJSX('.');
- var property = this.parseJSXIdentifier();
- elementName = this.finalize(node, new JSXNode.JSXMemberExpression(object, property));
- }
- }
- return elementName;
- };
- JSXParser.prototype.parseJSXAttributeName = function () {
- var node = this.createJSXNode();
- var attributeName;
- var identifier = this.parseJSXIdentifier();
- if (this.matchJSX(':')) {
- var namespace = identifier;
- this.expectJSX(':');
- var name_2 = this.parseJSXIdentifier();
- attributeName = this.finalize(node, new JSXNode.JSXNamespacedName(namespace, name_2));
- }
- else {
- attributeName = identifier;
- }
- return attributeName;
- };
- JSXParser.prototype.parseJSXStringLiteralAttribute = function () {
- var node = this.createJSXNode();
- var token = this.nextJSXToken();
- if (token.type !== 8 /* StringLiteral */) {
- this.throwUnexpectedToken(token);
- }
- var raw = this.getTokenRaw(token);
- return this.finalize(node, new Node.Literal(token.value, raw));
- };
- JSXParser.prototype.parseJSXExpressionAttribute = function () {
- var node = this.createJSXNode();
- this.expectJSX('{');
- this.finishJSX();
- if (this.match('}')) {
- this.tolerateError('JSX attributes must only be assigned a non-empty expression');
- }
- var expression = this.parseAssignmentExpression();
- this.reenterJSX();
- return this.finalize(node, new JSXNode.JSXExpressionContainer(expression));
- };
- JSXParser.prototype.parseJSXAttributeValue = function () {
- return this.matchJSX('{') ? this.parseJSXExpressionAttribute() :
- this.matchJSX('<') ? this.parseJSXElement() : this.parseJSXStringLiteralAttribute();
- };
- JSXParser.prototype.parseJSXNameValueAttribute = function () {
- var node = this.createJSXNode();
- var name = this.parseJSXAttributeName();
- var value = null;
- if (this.matchJSX('=')) {
- this.expectJSX('=');
- value = this.parseJSXAttributeValue();
- }
- return this.finalize(node, new JSXNode.JSXAttribute(name, value));
- };
- JSXParser.prototype.parseJSXSpreadAttribute = function () {
- var node = this.createJSXNode();
- this.expectJSX('{');
- this.expectJSX('...');
- this.finishJSX();
- var argument = this.parseAssignmentExpression();
- this.reenterJSX();
- return this.finalize(node, new JSXNode.JSXSpreadAttribute(argument));
- };
- JSXParser.prototype.parseJSXAttributes = function () {
- var attributes = [];
- while (!this.matchJSX('/') && !this.matchJSX('>')) {
- var attribute = this.matchJSX('{') ? this.parseJSXSpreadAttribute() :
- this.parseJSXNameValueAttribute();
- attributes.push(attribute);
- }
- return attributes;
- };
- JSXParser.prototype.parseJSXOpeningElement = function () {
- var node = this.createJSXNode();
- this.expectJSX('<');
- var name = this.parseJSXElementName();
- var attributes = this.parseJSXAttributes();
- var selfClosing = this.matchJSX('/');
- if (selfClosing) {
- this.expectJSX('/');
- }
- this.expectJSX('>');
- return this.finalize(node, new JSXNode.JSXOpeningElement(name, selfClosing, attributes));
- };
- JSXParser.prototype.parseJSXBoundaryElement = function () {
- var node = this.createJSXNode();
- this.expectJSX('<');
- if (this.matchJSX('/')) {
- this.expectJSX('/');
- var name_3 = this.parseJSXElementName();
- this.expectJSX('>');
- return this.finalize(node, new JSXNode.JSXClosingElement(name_3));
- }
- var name = this.parseJSXElementName();
- var attributes = this.parseJSXAttributes();
- var selfClosing = this.matchJSX('/');
- if (selfClosing) {
- this.expectJSX('/');
- }
- this.expectJSX('>');
- return this.finalize(node, new JSXNode.JSXOpeningElement(name, selfClosing, attributes));
- };
- JSXParser.prototype.parseJSXEmptyExpression = function () {
- var node = this.createJSXChildNode();
- this.collectComments();
- this.lastMarker.index = this.scanner.index;
- this.lastMarker.line = this.scanner.lineNumber;
- this.lastMarker.column = this.scanner.index - this.scanner.lineStart;
- return this.finalize(node, new JSXNode.JSXEmptyExpression());
- };
- JSXParser.prototype.parseJSXExpressionContainer = function () {
- var node = this.createJSXNode();
- this.expectJSX('{');
- var expression;
- if (this.matchJSX('}')) {
- expression = this.parseJSXEmptyExpression();
- this.expectJSX('}');
- }
- else {
- this.finishJSX();
- expression = this.parseAssignmentExpression();
- this.reenterJSX();
- }
- return this.finalize(node, new JSXNode.JSXExpressionContainer(expression));
- };
- JSXParser.prototype.parseJSXChildren = function () {
- var children = [];
- while (!this.scanner.eof()) {
- var node = this.createJSXChildNode();
- var token = this.nextJSXText();
- if (token.start < token.end) {
- var raw = this.getTokenRaw(token);
- var child = this.finalize(node, new JSXNode.JSXText(token.value, raw));
- children.push(child);
- }
- if (this.scanner.source[this.scanner.index] === '{') {
- var container = this.parseJSXExpressionContainer();
- children.push(container);
- }
- else {
- break;
- }
- }
- return children;
- };
- JSXParser.prototype.parseComplexJSXElement = function (el) {
- var stack = [];
- while (!this.scanner.eof()) {
- el.children = el.children.concat(this.parseJSXChildren());
- var node = this.createJSXChildNode();
- var element = this.parseJSXBoundaryElement();
- if (element.type === jsx_syntax_1.JSXSyntax.JSXOpeningElement) {
- var opening = element;
- if (opening.selfClosing) {
- var child = this.finalize(node, new JSXNode.JSXElement(opening, [], null));
- el.children.push(child);
- }
- else {
- stack.push(el);
- el = { node: node, opening: opening, closing: null, children: [] };
- }
- }
- if (element.type === jsx_syntax_1.JSXSyntax.JSXClosingElement) {
- el.closing = element;
- var open_1 = getQualifiedElementName(el.opening.name);
- var close_1 = getQualifiedElementName(el.closing.name);
- if (open_1 !== close_1) {
- this.tolerateError('Expected corresponding JSX closing tag for %0', open_1);
- }
- if (stack.length > 0) {
- var child = this.finalize(el.node, new JSXNode.JSXElement(el.opening, el.children, el.closing));
- el = stack[stack.length - 1];
- el.children.push(child);
- stack.pop();
- }
- else {
- break;
- }
- }
- }
- return el;
- };
- JSXParser.prototype.parseJSXElement = function () {
- var node = this.createJSXNode();
- var opening = this.parseJSXOpeningElement();
- var children = [];
- var closing = null;
- if (!opening.selfClosing) {
- var el = this.parseComplexJSXElement({ node: node, opening: opening, closing: closing, children: children });
- children = el.children;
- closing = el.closing;
- }
- return this.finalize(node, new JSXNode.JSXElement(opening, children, closing));
- };
- JSXParser.prototype.parseJSXRoot = function () {
- // Pop the opening '<' added from the lookahead.
- if (this.config.tokens) {
- this.tokens.pop();
- }
- this.startJSX();
- var element = this.parseJSXElement();
- this.finishJSX();
- return element;
- };
- JSXParser.prototype.isStartOfExpression = function () {
- return _super.prototype.isStartOfExpression.call(this) || this.match('<');
- };
- return JSXParser;
- }(parser_1.Parser));
- exports.JSXParser = JSXParser;
-
-
-/***/ },
-/* 4 */
-/***/ function(module, exports) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- // See also tools/generate-unicode-regex.js.
- var Regex = {
- // Unicode v8.0.0 NonAsciiIdentifierStart:
- NonAsciiIdentifierStart: /[\xAA\xB5\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0-\u08B4\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0AF9\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D\u0C58-\u0C5A\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D5F-\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F5\u13F8-\u13FD\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191E\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2118-\u211D\u2124\u2126\u2128\u212A-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303C\u3041-\u3096\u309B-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FD5\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA69D\uA6A0-\uA6EF\uA717-\uA71F\uA722-\uA788\uA78B-\uA7AD\uA7B0-\uA7B7\uA7F7-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA8
FD\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uA9E0-\uA9E4\uA9E6-\uA9EF\uA9FA-\uA9FE\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA7E-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB65\uAB70-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDD40-\uDD74\uDE80-\uDE9C\uDEA0-\uDED0\uDF00-\uDF1F\uDF30-\uDF4A\uDF50-\uDF75\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF\uDFD1-\uDFD5]|\uD801[\uDC00-\uDC9D\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\uDC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDCE0-\uDCF2\uDCF4\uDCF5\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00\uDE10-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\uDEC7\uDEC9-\uDEE4\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48\uDC80-\uDCB2\uDCC0-\uDCF2]|\uD804[\uDC03-\uDC37\uDC83-\uDCAF\uDCD0-\uDCE8\uDD03-\uDD26\uDD50-\uDD72\uDD76\uDD83-\uDDB2\uDDC1-\uDDC4\uDDDA\uDDDC\uDE00-\uDE11\uDE13-\uDE2B\uDE80-\uDE86\uDE88\uDE8A-\uDE8D\uDE8F-\uDE9D\uDE9F-\uDEA8\uDEB0-\uDEDE\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3D\uDF50\uDF5D-\uDF61]|\uD805[\uDC80-\uDCAF\uDCC4\uDCC5\uDCC7\uDD80-\uDDAE\uDDD8-\uDDDB\uDE00-\uDE2F\uDE44\uDE80-\uDEAA\uDF00-\uDF19]|\uD806[\uDCA0-\uDCDF\uDCFF\uDEC0-\uDEF8]|\uD808[\uDC00-\uDF99]|\uD809[\uDC00-\uDC6E\uDC80-\uDD43]|[\uD80C\uD840-\uD868\uD86A-\uD86C\uD86F-\uD872][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD811[\uDC00-\uDE46]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDED0-\uDEED\uDF00-\uDF2F\uDF40-\uDF43\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50\uDF93-\uDF9F]|\uD82C[\uDC00\uDC01]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99]|\uD835[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB]|\uD83A[\uDC00-\uDCC4]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE80-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uDC00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D\uDC20-\uDFFF]|\uD873[\uDC00-\uDEA1]|\uD87E[\uDC00-\uDE1D]/,
- // Unicode v8.0.0 NonAsciiIdentifierPart:
- NonAsciiIdentifierPart: /[\xAA\xB5\xB7\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0300-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u0483-\u0487\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05BD\u05BF\u05C1\u05C2\u05C4\u05C5\u05C7\u05D0-\u05EA\u05F0-\u05F2\u0610-\u061A\u0620-\u0669\u066E-\u06D3\u06D5-\u06DC\u06DF-\u06E8\u06EA-\u06FC\u06FF\u0710-\u074A\u074D-\u07B1\u07C0-\u07F5\u07FA\u0800-\u082D\u0840-\u085B\u08A0-\u08B4\u08E3-\u0963\u0966-\u096F\u0971-\u0983\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BC-\u09C4\u09C7\u09C8\u09CB-\u09CE\u09D7\u09DC\u09DD\u09DF-\u09E3\u09E6-\u09F1\u0A01-\u0A03\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A3C\u0A3E-\u0A42\u0A47\u0A48\u0A4B-\u0A4D\u0A51\u0A59-\u0A5C\u0A5E\u0A66-\u0A75\u0A81-\u0A83\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABC-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0AD0\u0AE0-\u0AE3\u0AE6-\u0AEF\u0AF9\u0B01-\u0B03\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3C-\u0B44\u0B47\u0B48\u0B4B-\u0B4D\u0B56\u0B57\u0B5C\u0B5D\u0B5F-\u0B63\u0B66-\u0B6F\u0B71\u0B82\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD0\u0BD7\u0BE6-\u0BEF\u0C00-\u0C03\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55\u0C56\u0C58-\u0C5A\u0C60-\u0C63\u0C66-\u0C6F\u0C81-\u0C83\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBC-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5\u0CD6\u0CDE\u0CE0-\u0CE3\u0CE6-\u0CEF\u0CF1\u0CF2\u0D01-\u0D03\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D-\u0D44\u0D46-\u0D48\u0D4A-\u0D4E\u0D57\u0D5F-\u0D63\u0D66-\u0D6F\u0D7A-\u0D7F\u0D82\u0D83\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0DCA\u0DCF-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DE6-\u0DEF\u0DF2\u0DF3\u0E01-\u0E3A\u0E40-\u0E4E\u0E50-\u0E59\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB9\u0EBB-\u0EBD\u0EC0-\u0EC4\u0EC6\u0EC8-\u0ECD\u0ED0-\u0ED9\u0EDC-\u0EDF\u0F00\u0F18\u0F19\u0F20-\u0F29\u0F35\u0F37\u0F39\u0F3E-\u0F47\u0F49-\u0F6C\u0F71-\u0F84\u0F86-\u0F97\u0F99-\u0FBC\u0FC6\u1000-\u1049\u1050-\u109D\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u135D-\u135F\u1369-\u1371\u1380-\u138F\u13A0-\u13F5\u13F8-\u13FD\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176C\u176E-\u1770\u1772\u1773\u1780-\u17D3\u17D7\u17DC\u17DD\u17E0-\u17E9\u180B-\u180D\u1810-\u1819\u1820-\u1877\u1880-\u18AA\u18B0-\u18F5\u1900-\u191E\u1920-\u192B\u1930-\u193B\u1946-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u19D0-\u19DA\u1A00-\u1A1B\u1A20-\u1A5E\u1A60-\u1A7C\u1A7F-\u1A89\u1A90-\u1A99\u1AA7\u1AB0-\u1ABD\u1B00-\u1B4B\u1B50-\u1B59\u1B6B-\u1B73\u1B80-\u1BF3\u1C00-\u1C37\u1C40-\u1C49\u1C4D-\u1C7D\u1CD0-\u1CD2\u1CD4-\u1CF6\u1CF8\u1CF9\u1D00-\u1DF5\u1DFC-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u200C\u200D\u203F\u2040\u2054\u2071\u207F\u2090-\u209
C\u20D0-\u20DC\u20E1\u20E5-\u20F0\u2102\u2107\u210A-\u2113\u2115\u2118-\u211D\u2124\u2126\u2128\u212A-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D7F-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2DE0-\u2DFF\u3005-\u3007\u3021-\u302F\u3031-\u3035\u3038-\u303C\u3041-\u3096\u3099-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FD5\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA62B\uA640-\uA66F\uA674-\uA67D\uA67F-\uA6F1\uA717-\uA71F\uA722-\uA788\uA78B-\uA7AD\uA7B0-\uA7B7\uA7F7-\uA827\uA840-\uA873\uA880-\uA8C4\uA8D0-\uA8D9\uA8E0-\uA8F7\uA8FB\uA8FD\uA900-\uA92D\uA930-\uA953\uA960-\uA97C\uA980-\uA9C0\uA9CF-\uA9D9\uA9E0-\uA9FE\uAA00-\uAA36\uAA40-\uAA4D\uAA50-\uAA59\uAA60-\uAA76\uAA7A-\uAAC2\uAADB-\uAADD\uAAE0-\uAAEF\uAAF2-\uAAF6\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB65\uAB70-\uABEA\uABEC\uABED\uABF0-\uABF9\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE00-\uFE0F\uFE20-\uFE2F\uFE33\uFE34\uFE4D-\uFE4F\uFE70-\uFE74\uFE76-\uFEFC\uFF10-\uFF19\uFF21-\uFF3A\uFF3F\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDD40-\uDD74\uDDFD\uDE80-\uDE9C\uDEA0-\uDED0\uDEE0\uDF00-\uDF1F\uDF30-\uDF4A\uDF50-\uDF7A\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF\uDFD1-\uDFD5]|\uD801[\uDC00-\uDC9D\uDCA0-\uDCA9\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\uDC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDCE0-\uDCF2\uDCF4\uDCF5\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00-\uDE03\uDE05\uDE06\uDE0C-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE38-\uDE3A\uDE3F\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\uDEC7\uDEC9-\uDEE6\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48\uDC80-\uDCB2\uDCC0-\uDCF2]|\uD804[\uDC00-\uDC46\uDC66-\uDC6F\uDC7F-\uDCBA\uDCD0-\uDCE8\uDCF0-\uDCF9\uDD00-\uDD34\uDD36-\uDD3F\uDD50-\uDD73\uDD76\uDD80-\uDDC4\uDDCA-\uDDCC\uDDD0-\uDDDA\uDDDC\uDE00-\uDE11\uDE13-\uDE37\uDE80-\uDE86\uDE88\uDE8A-\uDE8D\uDE8F-\uDE9D\uDE9F-\uDEA8\uDEB0-\uDEEA\uDEF0-\uDEF9\uDF00-\uDF03\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3C-\uDF44\uDF47\uDF48\uDF4B-\uDF4D\uDF50\uDF57\uDF5D-\uDF63\uDF66-\uDF6C\uDF70-\uDF74]|\uD805[\uDC80-\uDCC5\uDCC7\uDCD0-\uDCD9\uDD80-\uDDB5\uDDB8-\uDDC0\uDDD8-\uDDDD\uDE00-\uDE40\uDE44\uDE50-\uDE59\uDE80-\uDEB7\uDEC0-\uDEC9\uDF00-\uDF19\uDF1D-\uDF2B\uDF30-\uDF39]|\uD806[\uDCA0-\uDCE9\uDCFF\uDEC0-\uDEF8]|\uD808[\uDC00-\uDF99]|\uD809[\uDC00-\uDC6E\uDC80-\uDD43]|[\uD80C\uD840-\uD868\uD86A-\uD86C\uD86F-\uD872][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD811[\uDC00-\uDE46]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDE60-\uDE69\uDED0-\uDEED\uDEF0-\uDEF4\uDF00-\uDF36\uDF40-\uDF43\uDF50-\uDF59\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50-\uDF7E\uDF8F-\uDF9F]|\uD82C[\uDC00\uDC01]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99\uDC9D\uDC9E]|\uD834[\uDD65-\uDD69\uDD6D-\uDD72\uDD7B-\uDD82\uDD85-\uDD8B\uDDAA-\uDDAD\uDE42-\uDE44]|\uD835[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\u
DCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB\uDFCE-\uDFFF]|\uD836[\uDE00-\uDE36\uDE3B-\uDE6C\uDE75\uDE84\uDE9B-\uDE9F\uDEA1-\uDEAF]|\uD83A[\uDC00-\uDCC4\uDCD0-\uDCD6]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE80-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uDC00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D\uDC20-\uDFFF]|\uD873[\uDC00-\uDEA1]|\uD87E[\uDC00-\uDE1D]|\uDB40[\uDD00-\uDDEF]/
- };
- exports.Character = {
- /* tslint:disable:no-bitwise */
- fromCodePoint: function (cp) {
- return (cp < 0x10000) ? String.fromCharCode(cp) :
- String.fromCharCode(0xD800 + ((cp - 0x10000) >> 10)) +
- String.fromCharCode(0xDC00 + ((cp - 0x10000) & 1023));
- },
- // https://tc39.github.io/ecma262/#sec-white-space
- isWhiteSpace: function (cp) {
- return (cp === 0x20) || (cp === 0x09) || (cp === 0x0B) || (cp === 0x0C) || (cp === 0xA0) ||
- (cp >= 0x1680 && [0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x202F, 0x205F, 0x3000, 0xFEFF].indexOf(cp) >= 0);
- },
- // https://tc39.github.io/ecma262/#sec-line-terminators
- isLineTerminator: function (cp) {
- return (cp === 0x0A) || (cp === 0x0D) || (cp === 0x2028) || (cp === 0x2029);
- },
- // https://tc39.github.io/ecma262/#sec-names-and-keywords
- isIdentifierStart: function (cp) {
- return (cp === 0x24) || (cp === 0x5F) ||
- (cp >= 0x41 && cp <= 0x5A) ||
- (cp >= 0x61 && cp <= 0x7A) ||
- (cp === 0x5C) ||
- ((cp >= 0x80) && Regex.NonAsciiIdentifierStart.test(exports.Character.fromCodePoint(cp)));
- },
- isIdentifierPart: function (cp) {
- return (cp === 0x24) || (cp === 0x5F) ||
- (cp >= 0x41 && cp <= 0x5A) ||
- (cp >= 0x61 && cp <= 0x7A) ||
- (cp >= 0x30 && cp <= 0x39) ||
- (cp === 0x5C) ||
- ((cp >= 0x80) && Regex.NonAsciiIdentifierPart.test(exports.Character.fromCodePoint(cp)));
- },
- // https://tc39.github.io/ecma262/#sec-literals-numeric-literals
- isDecimalDigit: function (cp) {
- return (cp >= 0x30 && cp <= 0x39); // 0..9
- },
- isHexDigit: function (cp) {
- return (cp >= 0x30 && cp <= 0x39) ||
- (cp >= 0x41 && cp <= 0x46) ||
- (cp >= 0x61 && cp <= 0x66); // a..f
- },
- isOctalDigit: function (cp) {
- return (cp >= 0x30 && cp <= 0x37); // 0..7
- }
- };
-
-
-/***/ },
-/* 5 */
-/***/ function(module, exports, __webpack_require__) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- var jsx_syntax_1 = __webpack_require__(6);
- /* tslint:disable:max-classes-per-file */
- var JSXClosingElement = (function () {
- function JSXClosingElement(name) {
- this.type = jsx_syntax_1.JSXSyntax.JSXClosingElement;
- this.name = name;
- }
- return JSXClosingElement;
- }());
- exports.JSXClosingElement = JSXClosingElement;
- var JSXElement = (function () {
- function JSXElement(openingElement, children, closingElement) {
- this.type = jsx_syntax_1.JSXSyntax.JSXElement;
- this.openingElement = openingElement;
- this.children = children;
- this.closingElement = closingElement;
- }
- return JSXElement;
- }());
- exports.JSXElement = JSXElement;
- var JSXEmptyExpression = (function () {
- function JSXEmptyExpression() {
- this.type = jsx_syntax_1.JSXSyntax.JSXEmptyExpression;
- }
- return JSXEmptyExpression;
- }());
- exports.JSXEmptyExpression = JSXEmptyExpression;
- var JSXExpressionContainer = (function () {
- function JSXExpressionContainer(expression) {
- this.type = jsx_syntax_1.JSXSyntax.JSXExpressionContainer;
- this.expression = expression;
- }
- return JSXExpressionContainer;
- }());
- exports.JSXExpressionContainer = JSXExpressionContainer;
- var JSXIdentifier = (function () {
- function JSXIdentifier(name) {
- this.type = jsx_syntax_1.JSXSyntax.JSXIdentifier;
- this.name = name;
- }
- return JSXIdentifier;
- }());
- exports.JSXIdentifier = JSXIdentifier;
- var JSXMemberExpression = (function () {
- function JSXMemberExpression(object, property) {
- this.type = jsx_syntax_1.JSXSyntax.JSXMemberExpression;
- this.object = object;
- this.property = property;
- }
- return JSXMemberExpression;
- }());
- exports.JSXMemberExpression = JSXMemberExpression;
- var JSXAttribute = (function () {
- function JSXAttribute(name, value) {
- this.type = jsx_syntax_1.JSXSyntax.JSXAttribute;
- this.name = name;
- this.value = value;
- }
- return JSXAttribute;
- }());
- exports.JSXAttribute = JSXAttribute;
- var JSXNamespacedName = (function () {
- function JSXNamespacedName(namespace, name) {
- this.type = jsx_syntax_1.JSXSyntax.JSXNamespacedName;
- this.namespace = namespace;
- this.name = name;
- }
- return JSXNamespacedName;
- }());
- exports.JSXNamespacedName = JSXNamespacedName;
- var JSXOpeningElement = (function () {
- function JSXOpeningElement(name, selfClosing, attributes) {
- this.type = jsx_syntax_1.JSXSyntax.JSXOpeningElement;
- this.name = name;
- this.selfClosing = selfClosing;
- this.attributes = attributes;
- }
- return JSXOpeningElement;
- }());
- exports.JSXOpeningElement = JSXOpeningElement;
- var JSXSpreadAttribute = (function () {
- function JSXSpreadAttribute(argument) {
- this.type = jsx_syntax_1.JSXSyntax.JSXSpreadAttribute;
- this.argument = argument;
- }
- return JSXSpreadAttribute;
- }());
- exports.JSXSpreadAttribute = JSXSpreadAttribute;
- var JSXText = (function () {
- function JSXText(value, raw) {
- this.type = jsx_syntax_1.JSXSyntax.JSXText;
- this.value = value;
- this.raw = raw;
- }
- return JSXText;
- }());
- exports.JSXText = JSXText;
-
-
-/***/ },
-/* 6 */
-/***/ function(module, exports) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.JSXSyntax = {
- JSXAttribute: 'JSXAttribute',
- JSXClosingElement: 'JSXClosingElement',
- JSXElement: 'JSXElement',
- JSXEmptyExpression: 'JSXEmptyExpression',
- JSXExpressionContainer: 'JSXExpressionContainer',
- JSXIdentifier: 'JSXIdentifier',
- JSXMemberExpression: 'JSXMemberExpression',
- JSXNamespacedName: 'JSXNamespacedName',
- JSXOpeningElement: 'JSXOpeningElement',
- JSXSpreadAttribute: 'JSXSpreadAttribute',
- JSXText: 'JSXText'
- };
-
-
-/***/ },
-/* 7 */
-/***/ function(module, exports, __webpack_require__) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- var syntax_1 = __webpack_require__(2);
- /* tslint:disable:max-classes-per-file */
- var ArrayExpression = (function () {
- function ArrayExpression(elements) {
- this.type = syntax_1.Syntax.ArrayExpression;
- this.elements = elements;
- }
- return ArrayExpression;
- }());
- exports.ArrayExpression = ArrayExpression;
- var ArrayPattern = (function () {
- function ArrayPattern(elements) {
- this.type = syntax_1.Syntax.ArrayPattern;
- this.elements = elements;
- }
- return ArrayPattern;
- }());
- exports.ArrayPattern = ArrayPattern;
- var ArrowFunctionExpression = (function () {
- function ArrowFunctionExpression(params, body, expression) {
- this.type = syntax_1.Syntax.ArrowFunctionExpression;
- this.id = null;
- this.params = params;
- this.body = body;
- this.generator = false;
- this.expression = expression;
- this.async = false;
- }
- return ArrowFunctionExpression;
- }());
- exports.ArrowFunctionExpression = ArrowFunctionExpression;
- var AssignmentExpression = (function () {
- function AssignmentExpression(operator, left, right) {
- this.type = syntax_1.Syntax.AssignmentExpression;
- this.operator = operator;
- this.left = left;
- this.right = right;
- }
- return AssignmentExpression;
- }());
- exports.AssignmentExpression = AssignmentExpression;
- var AssignmentPattern = (function () {
- function AssignmentPattern(left, right) {
- this.type = syntax_1.Syntax.AssignmentPattern;
- this.left = left;
- this.right = right;
- }
- return AssignmentPattern;
- }());
- exports.AssignmentPattern = AssignmentPattern;
- var AsyncArrowFunctionExpression = (function () {
- function AsyncArrowFunctionExpression(params, body, expression) {
- this.type = syntax_1.Syntax.ArrowFunctionExpression;
- this.id = null;
- this.params = params;
- this.body = body;
- this.generator = false;
- this.expression = expression;
- this.async = true;
- }
- return AsyncArrowFunctionExpression;
- }());
- exports.AsyncArrowFunctionExpression = AsyncArrowFunctionExpression;
- var AsyncFunctionDeclaration = (function () {
- function AsyncFunctionDeclaration(id, params, body) {
- this.type = syntax_1.Syntax.FunctionDeclaration;
- this.id = id;
- this.params = params;
- this.body = body;
- this.generator = false;
- this.expression = false;
- this.async = true;
- }
- return AsyncFunctionDeclaration;
- }());
- exports.AsyncFunctionDeclaration = AsyncFunctionDeclaration;
- var AsyncFunctionExpression = (function () {
- function AsyncFunctionExpression(id, params, body) {
- this.type = syntax_1.Syntax.FunctionExpression;
- this.id = id;
- this.params = params;
- this.body = body;
- this.generator = false;
- this.expression = false;
- this.async = true;
- }
- return AsyncFunctionExpression;
- }());
- exports.AsyncFunctionExpression = AsyncFunctionExpression;
- var AwaitExpression = (function () {
- function AwaitExpression(argument) {
- this.type = syntax_1.Syntax.AwaitExpression;
- this.argument = argument;
- }
- return AwaitExpression;
- }());
- exports.AwaitExpression = AwaitExpression;
- var BinaryExpression = (function () {
- function BinaryExpression(operator, left, right) {
- var logical = (operator === '||' || operator === '&&');
- this.type = logical ? syntax_1.Syntax.LogicalExpression : syntax_1.Syntax.BinaryExpression;
- this.operator = operator;
- this.left = left;
- this.right = right;
- }
- return BinaryExpression;
- }());
- exports.BinaryExpression = BinaryExpression;
- var BlockStatement = (function () {
- function BlockStatement(body) {
- this.type = syntax_1.Syntax.BlockStatement;
- this.body = body;
- }
- return BlockStatement;
- }());
- exports.BlockStatement = BlockStatement;
- var BreakStatement = (function () {
- function BreakStatement(label) {
- this.type = syntax_1.Syntax.BreakStatement;
- this.label = label;
- }
- return BreakStatement;
- }());
- exports.BreakStatement = BreakStatement;
- var CallExpression = (function () {
- function CallExpression(callee, args) {
- this.type = syntax_1.Syntax.CallExpression;
- this.callee = callee;
- this.arguments = args;
- }
- return CallExpression;
- }());
- exports.CallExpression = CallExpression;
- var CatchClause = (function () {
- function CatchClause(param, body) {
- this.type = syntax_1.Syntax.CatchClause;
- this.param = param;
- this.body = body;
- }
- return CatchClause;
- }());
- exports.CatchClause = CatchClause;
- var ClassBody = (function () {
- function ClassBody(body) {
- this.type = syntax_1.Syntax.ClassBody;
- this.body = body;
- }
- return ClassBody;
- }());
- exports.ClassBody = ClassBody;
- var ClassDeclaration = (function () {
- function ClassDeclaration(id, superClass, body) {
- this.type = syntax_1.Syntax.ClassDeclaration;
- this.id = id;
- this.superClass = superClass;
- this.body = body;
- }
- return ClassDeclaration;
- }());
- exports.ClassDeclaration = ClassDeclaration;
- var ClassExpression = (function () {
- function ClassExpression(id, superClass, body) {
- this.type = syntax_1.Syntax.ClassExpression;
- this.id = id;
- this.superClass = superClass;
- this.body = body;
- }
- return ClassExpression;
- }());
- exports.ClassExpression = ClassExpression;
- var ComputedMemberExpression = (function () {
- function ComputedMemberExpression(object, property) {
- this.type = syntax_1.Syntax.MemberExpression;
- this.computed = true;
- this.object = object;
- this.property = property;
- }
- return ComputedMemberExpression;
- }());
- exports.ComputedMemberExpression = ComputedMemberExpression;
- var ConditionalExpression = (function () {
- function ConditionalExpression(test, consequent, alternate) {
- this.type = syntax_1.Syntax.ConditionalExpression;
- this.test = test;
- this.consequent = consequent;
- this.alternate = alternate;
- }
- return ConditionalExpression;
- }());
- exports.ConditionalExpression = ConditionalExpression;
- var ContinueStatement = (function () {
- function ContinueStatement(label) {
- this.type = syntax_1.Syntax.ContinueStatement;
- this.label = label;
- }
- return ContinueStatement;
- }());
- exports.ContinueStatement = ContinueStatement;
- var DebuggerStatement = (function () {
- function DebuggerStatement() {
- this.type = syntax_1.Syntax.DebuggerStatement;
- }
- return DebuggerStatement;
- }());
- exports.DebuggerStatement = DebuggerStatement;
- var Directive = (function () {
- function Directive(expression, directive) {
- this.type = syntax_1.Syntax.ExpressionStatement;
- this.expression = expression;
- this.directive = directive;
- }
- return Directive;
- }());
- exports.Directive = Directive;
- var DoWhileStatement = (function () {
- function DoWhileStatement(body, test) {
- this.type = syntax_1.Syntax.DoWhileStatement;
- this.body = body;
- this.test = test;
- }
- return DoWhileStatement;
- }());
- exports.DoWhileStatement = DoWhileStatement;
- var EmptyStatement = (function () {
- function EmptyStatement() {
- this.type = syntax_1.Syntax.EmptyStatement;
- }
- return EmptyStatement;
- }());
- exports.EmptyStatement = EmptyStatement;
- var ExportAllDeclaration = (function () {
- function ExportAllDeclaration(source) {
- this.type = syntax_1.Syntax.ExportAllDeclaration;
- this.source = source;
- }
- return ExportAllDeclaration;
- }());
- exports.ExportAllDeclaration = ExportAllDeclaration;
- var ExportDefaultDeclaration = (function () {
- function ExportDefaultDeclaration(declaration) {
- this.type = syntax_1.Syntax.ExportDefaultDeclaration;
- this.declaration = declaration;
- }
- return ExportDefaultDeclaration;
- }());
- exports.ExportDefaultDeclaration = ExportDefaultDeclaration;
- var ExportNamedDeclaration = (function () {
- function ExportNamedDeclaration(declaration, specifiers, source) {
- this.type = syntax_1.Syntax.ExportNamedDeclaration;
- this.declaration = declaration;
- this.specifiers = specifiers;
- this.source = source;
- }
- return ExportNamedDeclaration;
- }());
- exports.ExportNamedDeclaration = ExportNamedDeclaration;
- var ExportSpecifier = (function () {
- function ExportSpecifier(local, exported) {
- this.type = syntax_1.Syntax.ExportSpecifier;
- this.exported = exported;
- this.local = local;
- }
- return ExportSpecifier;
- }());
- exports.ExportSpecifier = ExportSpecifier;
- var ExpressionStatement = (function () {
- function ExpressionStatement(expression) {
- this.type = syntax_1.Syntax.ExpressionStatement;
- this.expression = expression;
- }
- return ExpressionStatement;
- }());
- exports.ExpressionStatement = ExpressionStatement;
- var ForInStatement = (function () {
- function ForInStatement(left, right, body) {
- this.type = syntax_1.Syntax.ForInStatement;
- this.left = left;
- this.right = right;
- this.body = body;
- this.each = false;
- }
- return ForInStatement;
- }());
- exports.ForInStatement = ForInStatement;
- var ForOfStatement = (function () {
- function ForOfStatement(left, right, body) {
- this.type = syntax_1.Syntax.ForOfStatement;
- this.left = left;
- this.right = right;
- this.body = body;
- }
- return ForOfStatement;
- }());
- exports.ForOfStatement = ForOfStatement;
- var ForStatement = (function () {
- function ForStatement(init, test, update, body) {
- this.type = syntax_1.Syntax.ForStatement;
- this.init = init;
- this.test = test;
- this.update = update;
- this.body = body;
- }
- return ForStatement;
- }());
- exports.ForStatement = ForStatement;
- var FunctionDeclaration = (function () {
- function FunctionDeclaration(id, params, body, generator) {
- this.type = syntax_1.Syntax.FunctionDeclaration;
- this.id = id;
- this.params = params;
- this.body = body;
- this.generator = generator;
- this.expression = false;
- this.async = false;
- }
- return FunctionDeclaration;
- }());
- exports.FunctionDeclaration = FunctionDeclaration;
- var FunctionExpression = (function () {
- function FunctionExpression(id, params, body, generator) {
- this.type = syntax_1.Syntax.FunctionExpression;
- this.id = id;
- this.params = params;
- this.body = body;
- this.generator = generator;
- this.expression = false;
- this.async = false;
- }
- return FunctionExpression;
- }());
- exports.FunctionExpression = FunctionExpression;
- var Identifier = (function () {
- function Identifier(name) {
- this.type = syntax_1.Syntax.Identifier;
- this.name = name;
- }
- return Identifier;
- }());
- exports.Identifier = Identifier;
- var IfStatement = (function () {
- function IfStatement(test, consequent, alternate) {
- this.type = syntax_1.Syntax.IfStatement;
- this.test = test;
- this.consequent = consequent;
- this.alternate = alternate;
- }
- return IfStatement;
- }());
- exports.IfStatement = IfStatement;
- var ImportDeclaration = (function () {
- function ImportDeclaration(specifiers, source) {
- this.type = syntax_1.Syntax.ImportDeclaration;
- this.specifiers = specifiers;
- this.source = source;
- }
- return ImportDeclaration;
- }());
- exports.ImportDeclaration = ImportDeclaration;
- var ImportDefaultSpecifier = (function () {
- function ImportDefaultSpecifier(local) {
- this.type = syntax_1.Syntax.ImportDefaultSpecifier;
- this.local = local;
- }
- return ImportDefaultSpecifier;
- }());
- exports.ImportDefaultSpecifier = ImportDefaultSpecifier;
- var ImportNamespaceSpecifier = (function () {
- function ImportNamespaceSpecifier(local) {
- this.type = syntax_1.Syntax.ImportNamespaceSpecifier;
- this.local = local;
- }
- return ImportNamespaceSpecifier;
- }());
- exports.ImportNamespaceSpecifier = ImportNamespaceSpecifier;
- var ImportSpecifier = (function () {
- function ImportSpecifier(local, imported) {
- this.type = syntax_1.Syntax.ImportSpecifier;
- this.local = local;
- this.imported = imported;
- }
- return ImportSpecifier;
- }());
- exports.ImportSpecifier = ImportSpecifier;
- var LabeledStatement = (function () {
- function LabeledStatement(label, body) {
- this.type = syntax_1.Syntax.LabeledStatement;
- this.label = label;
- this.body = body;
- }
- return LabeledStatement;
- }());
- exports.LabeledStatement = LabeledStatement;
- var Literal = (function () {
- function Literal(value, raw) {
- this.type = syntax_1.Syntax.Literal;
- this.value = value;
- this.raw = raw;
- }
- return Literal;
- }());
- exports.Literal = Literal;
- var MetaProperty = (function () {
- function MetaProperty(meta, property) {
- this.type = syntax_1.Syntax.MetaProperty;
- this.meta = meta;
- this.property = property;
- }
- return MetaProperty;
- }());
- exports.MetaProperty = MetaProperty;
- var MethodDefinition = (function () {
- function MethodDefinition(key, computed, value, kind, isStatic) {
- this.type = syntax_1.Syntax.MethodDefinition;
- this.key = key;
- this.computed = computed;
- this.value = value;
- this.kind = kind;
- this.static = isStatic;
- }
- return MethodDefinition;
- }());
- exports.MethodDefinition = MethodDefinition;
- var Module = (function () {
- function Module(body) {
- this.type = syntax_1.Syntax.Program;
- this.body = body;
- this.sourceType = 'module';
- }
- return Module;
- }());
- exports.Module = Module;
- var NewExpression = (function () {
- function NewExpression(callee, args) {
- this.type = syntax_1.Syntax.NewExpression;
- this.callee = callee;
- this.arguments = args;
- }
- return NewExpression;
- }());
- exports.NewExpression = NewExpression;
- var ObjectExpression = (function () {
- function ObjectExpression(properties) {
- this.type = syntax_1.Syntax.ObjectExpression;
- this.properties = properties;
- }
- return ObjectExpression;
- }());
- exports.ObjectExpression = ObjectExpression;
- var ObjectPattern = (function () {
- function ObjectPattern(properties) {
- this.type = syntax_1.Syntax.ObjectPattern;
- this.properties = properties;
- }
- return ObjectPattern;
- }());
- exports.ObjectPattern = ObjectPattern;
- var Property = (function () {
- function Property(kind, key, computed, value, method, shorthand) {
- this.type = syntax_1.Syntax.Property;
- this.key = key;
- this.computed = computed;
- this.value = value;
- this.kind = kind;
- this.method = method;
- this.shorthand = shorthand;
- }
- return Property;
- }());
- exports.Property = Property;
- var RegexLiteral = (function () {
- function RegexLiteral(value, raw, pattern, flags) {
- this.type = syntax_1.Syntax.Literal;
- this.value = value;
- this.raw = raw;
- this.regex = { pattern: pattern, flags: flags };
- }
- return RegexLiteral;
- }());
- exports.RegexLiteral = RegexLiteral;
- var RestElement = (function () {
- function RestElement(argument) {
- this.type = syntax_1.Syntax.RestElement;
- this.argument = argument;
- }
- return RestElement;
- }());
- exports.RestElement = RestElement;
- var ReturnStatement = (function () {
- function ReturnStatement(argument) {
- this.type = syntax_1.Syntax.ReturnStatement;
- this.argument = argument;
- }
- return ReturnStatement;
- }());
- exports.ReturnStatement = ReturnStatement;
- var Script = (function () {
- function Script(body) {
- this.type = syntax_1.Syntax.Program;
- this.body = body;
- this.sourceType = 'script';
- }
- return Script;
- }());
- exports.Script = Script;
- var SequenceExpression = (function () {
- function SequenceExpression(expressions) {
- this.type = syntax_1.Syntax.SequenceExpression;
- this.expressions = expressions;
- }
- return SequenceExpression;
- }());
- exports.SequenceExpression = SequenceExpression;
- var SpreadElement = (function () {
- function SpreadElement(argument) {
- this.type = syntax_1.Syntax.SpreadElement;
- this.argument = argument;
- }
- return SpreadElement;
- }());
- exports.SpreadElement = SpreadElement;
- var StaticMemberExpression = (function () {
- function StaticMemberExpression(object, property) {
- this.type = syntax_1.Syntax.MemberExpression;
- this.computed = false;
- this.object = object;
- this.property = property;
- }
- return StaticMemberExpression;
- }());
- exports.StaticMemberExpression = StaticMemberExpression;
- var Super = (function () {
- function Super() {
- this.type = syntax_1.Syntax.Super;
- }
- return Super;
- }());
- exports.Super = Super;
- var SwitchCase = (function () {
- function SwitchCase(test, consequent) {
- this.type = syntax_1.Syntax.SwitchCase;
- this.test = test;
- this.consequent = consequent;
- }
- return SwitchCase;
- }());
- exports.SwitchCase = SwitchCase;
- var SwitchStatement = (function () {
- function SwitchStatement(discriminant, cases) {
- this.type = syntax_1.Syntax.SwitchStatement;
- this.discriminant = discriminant;
- this.cases = cases;
- }
- return SwitchStatement;
- }());
- exports.SwitchStatement = SwitchStatement;
- var TaggedTemplateExpression = (function () {
- function TaggedTemplateExpression(tag, quasi) {
- this.type = syntax_1.Syntax.TaggedTemplateExpression;
- this.tag = tag;
- this.quasi = quasi;
- }
- return TaggedTemplateExpression;
- }());
- exports.TaggedTemplateExpression = TaggedTemplateExpression;
- var TemplateElement = (function () {
- function TemplateElement(value, tail) {
- this.type = syntax_1.Syntax.TemplateElement;
- this.value = value;
- this.tail = tail;
- }
- return TemplateElement;
- }());
- exports.TemplateElement = TemplateElement;
- var TemplateLiteral = (function () {
- function TemplateLiteral(quasis, expressions) {
- this.type = syntax_1.Syntax.TemplateLiteral;
- this.quasis = quasis;
- this.expressions = expressions;
- }
- return TemplateLiteral;
- }());
- exports.TemplateLiteral = TemplateLiteral;
- var ThisExpression = (function () {
- function ThisExpression() {
- this.type = syntax_1.Syntax.ThisExpression;
- }
- return ThisExpression;
- }());
- exports.ThisExpression = ThisExpression;
- var ThrowStatement = (function () {
- function ThrowStatement(argument) {
- this.type = syntax_1.Syntax.ThrowStatement;
- this.argument = argument;
- }
- return ThrowStatement;
- }());
- exports.ThrowStatement = ThrowStatement;
- var TryStatement = (function () {
- function TryStatement(block, handler, finalizer) {
- this.type = syntax_1.Syntax.TryStatement;
- this.block = block;
- this.handler = handler;
- this.finalizer = finalizer;
- }
- return TryStatement;
- }());
- exports.TryStatement = TryStatement;
- var UnaryExpression = (function () {
- function UnaryExpression(operator, argument) {
- this.type = syntax_1.Syntax.UnaryExpression;
- this.operator = operator;
- this.argument = argument;
- this.prefix = true;
- }
- return UnaryExpression;
- }());
- exports.UnaryExpression = UnaryExpression;
- var UpdateExpression = (function () {
- function UpdateExpression(operator, argument, prefix) {
- this.type = syntax_1.Syntax.UpdateExpression;
- this.operator = operator;
- this.argument = argument;
- this.prefix = prefix;
- }
- return UpdateExpression;
- }());
- exports.UpdateExpression = UpdateExpression;
- var VariableDeclaration = (function () {
- function VariableDeclaration(declarations, kind) {
- this.type = syntax_1.Syntax.VariableDeclaration;
- this.declarations = declarations;
- this.kind = kind;
- }
- return VariableDeclaration;
- }());
- exports.VariableDeclaration = VariableDeclaration;
- var VariableDeclarator = (function () {
- function VariableDeclarator(id, init) {
- this.type = syntax_1.Syntax.VariableDeclarator;
- this.id = id;
- this.init = init;
- }
- return VariableDeclarator;
- }());
- exports.VariableDeclarator = VariableDeclarator;
- var WhileStatement = (function () {
- function WhileStatement(test, body) {
- this.type = syntax_1.Syntax.WhileStatement;
- this.test = test;
- this.body = body;
- }
- return WhileStatement;
- }());
- exports.WhileStatement = WhileStatement;
- var WithStatement = (function () {
- function WithStatement(object, body) {
- this.type = syntax_1.Syntax.WithStatement;
- this.object = object;
- this.body = body;
- }
- return WithStatement;
- }());
- exports.WithStatement = WithStatement;
- var YieldExpression = (function () {
- function YieldExpression(argument, delegate) {
- this.type = syntax_1.Syntax.YieldExpression;
- this.argument = argument;
- this.delegate = delegate;
- }
- return YieldExpression;
- }());
- exports.YieldExpression = YieldExpression;
-
-
-/***/ },
-/* 8 */
-/***/ function(module, exports, __webpack_require__) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- var assert_1 = __webpack_require__(9);
- var error_handler_1 = __webpack_require__(10);
- var messages_1 = __webpack_require__(11);
- var Node = __webpack_require__(7);
- var scanner_1 = __webpack_require__(12);
- var syntax_1 = __webpack_require__(2);
- var token_1 = __webpack_require__(13);
- var ArrowParameterPlaceHolder = 'ArrowParameterPlaceHolder';
- var Parser = (function () {
- function Parser(code, options, delegate) {
- if (options === void 0) { options = {}; }
- this.config = {
- range: (typeof options.range === 'boolean') && options.range,
- loc: (typeof options.loc === 'boolean') && options.loc,
- source: null,
- tokens: (typeof options.tokens === 'boolean') && options.tokens,
- comment: (typeof options.comment === 'boolean') && options.comment,
- tolerant: (typeof options.tolerant === 'boolean') && options.tolerant
- };
- if (this.config.loc && options.source && options.source !== null) {
- this.config.source = String(options.source);
- }
- this.delegate = delegate;
- this.errorHandler = new error_handler_1.ErrorHandler();
- this.errorHandler.tolerant = this.config.tolerant;
- this.scanner = new scanner_1.Scanner(code, this.errorHandler);
- this.scanner.trackComment = this.config.comment;
- this.operatorPrecedence = {
- ')': 0,
- ';': 0,
- ',': 0,
- '=': 0,
- ']': 0,
- '||': 1,
- '&&': 2,
- '|': 3,
- '^': 4,
- '&': 5,
- '==': 6,
- '!=': 6,
- '===': 6,
- '!==': 6,
- '<': 7,
- '>': 7,
- '<=': 7,
- '>=': 7,
- '<<': 8,
- '>>': 8,
- '>>>': 8,
- '+': 9,
- '-': 9,
- '*': 11,
- '/': 11,
- '%': 11
- };
- this.lookahead = {
- type: 2 /* EOF */,
- value: '',
- lineNumber: this.scanner.lineNumber,
- lineStart: 0,
- start: 0,
- end: 0
- };
- this.hasLineTerminator = false;
- this.context = {
- isModule: false,
- await: false,
- allowIn: true,
- allowStrictDirective: true,
- allowYield: true,
- firstCoverInitializedNameError: null,
- isAssignmentTarget: false,
- isBindingElement: false,
- inFunctionBody: false,
- inIteration: false,
- inSwitch: false,
- labelSet: {},
- strict: false
- };
- this.tokens = [];
- this.startMarker = {
- index: 0,
- line: this.scanner.lineNumber,
- column: 0
- };
- this.lastMarker = {
- index: 0,
- line: this.scanner.lineNumber,
- column: 0
- };
- this.nextToken();
- this.lastMarker = {
- index: this.scanner.index,
- line: this.scanner.lineNumber,
- column: this.scanner.index - this.scanner.lineStart
- };
- }
- Parser.prototype.throwError = function (messageFormat) {
- var values = [];
- for (var _i = 1; _i < arguments.length; _i++) {
- values[_i - 1] = arguments[_i];
- }
- var args = Array.prototype.slice.call(arguments, 1);
- var msg = messageFormat.replace(/%(\d)/g, function (whole, idx) {
- assert_1.assert(idx < args.length, 'Message reference must be in range');
- return args[idx];
- });
- var index = this.lastMarker.index;
- var line = this.lastMarker.line;
- var column = this.lastMarker.column + 1;
- throw this.errorHandler.createError(index, line, column, msg);
- };
- Parser.prototype.tolerateError = function (messageFormat) {
- var values = [];
- for (var _i = 1; _i < arguments.length; _i++) {
- values[_i - 1] = arguments[_i];
- }
- var args = Array.prototype.slice.call(arguments, 1);
- var msg = messageFormat.replace(/%(\d)/g, function (whole, idx) {
- assert_1.assert(idx < args.length, 'Message reference must be in range');
- return args[idx];
- });
- var index = this.lastMarker.index;
- var line = this.scanner.lineNumber;
- var column = this.lastMarker.column + 1;
- this.errorHandler.tolerateError(index, line, column, msg);
- };
-        // Create an error describing the unexpected token.
- Parser.prototype.unexpectedTokenError = function (token, message) {
- var msg = message || messages_1.Messages.UnexpectedToken;
- var value;
- if (token) {
- if (!message) {
- msg = (token.type === 2 /* EOF */) ? messages_1.Messages.UnexpectedEOS :
- (token.type === 3 /* Identifier */) ? messages_1.Messages.UnexpectedIdentifier :
- (token.type === 6 /* NumericLiteral */) ? messages_1.Messages.UnexpectedNumber :
- (token.type === 8 /* StringLiteral */) ? messages_1.Messages.UnexpectedString :
- (token.type === 10 /* Template */) ? messages_1.Messages.UnexpectedTemplate :
- messages_1.Messages.UnexpectedToken;
- if (token.type === 4 /* Keyword */) {
- if (this.scanner.isFutureReservedWord(token.value)) {
- msg = messages_1.Messages.UnexpectedReserved;
- }
- else if (this.context.strict && this.scanner.isStrictModeReservedWord(token.value)) {
- msg = messages_1.Messages.StrictReservedWord;
- }
- }
- }
- value = token.value;
- }
- else {
- value = 'ILLEGAL';
- }
- msg = msg.replace('%0', value);
- if (token && typeof token.lineNumber === 'number') {
- var index = token.start;
- var line = token.lineNumber;
- var lastMarkerLineStart = this.lastMarker.index - this.lastMarker.column;
- var column = token.start - lastMarkerLineStart + 1;
- return this.errorHandler.createError(index, line, column, msg);
- }
- else {
- var index = this.lastMarker.index;
- var line = this.lastMarker.line;
- var column = this.lastMarker.column + 1;
- return this.errorHandler.createError(index, line, column, msg);
- }
- };
- Parser.prototype.throwUnexpectedToken = function (token, message) {
- throw this.unexpectedTokenError(token, message);
- };
- Parser.prototype.tolerateUnexpectedToken = function (token, message) {
- this.errorHandler.tolerate(this.unexpectedTokenError(token, message));
- };
- Parser.prototype.collectComments = function () {
- if (!this.config.comment) {
- this.scanner.scanComments();
- }
- else {
- var comments = this.scanner.scanComments();
- if (comments.length > 0 && this.delegate) {
- for (var i = 0; i < comments.length; ++i) {
- var e = comments[i];
- var node = void 0;
- node = {
- type: e.multiLine ? 'BlockComment' : 'LineComment',
- value: this.scanner.source.slice(e.slice[0], e.slice[1])
- };
- if (this.config.range) {
- node.range = e.range;
- }
- if (this.config.loc) {
- node.loc = e.loc;
- }
- var metadata = {
- start: {
- line: e.loc.start.line,
- column: e.loc.start.column,
- offset: e.range[0]
- },
- end: {
- line: e.loc.end.line,
- column: e.loc.end.column,
- offset: e.range[1]
- }
- };
- this.delegate(node, metadata);
- }
- }
- }
- };
- // From internal representation to an external structure
- Parser.prototype.getTokenRaw = function (token) {
- return this.scanner.source.slice(token.start, token.end);
- };
- Parser.prototype.convertToken = function (token) {
- var t = {
- type: token_1.TokenName[token.type],
- value: this.getTokenRaw(token)
- };
- if (this.config.range) {
- t.range = [token.start, token.end];
- }
- if (this.config.loc) {
- t.loc = {
- start: {
- line: this.startMarker.line,
- column: this.startMarker.column
- },
- end: {
- line: this.scanner.lineNumber,
- column: this.scanner.index - this.scanner.lineStart
- }
- };
- }
- if (token.type === 9 /* RegularExpression */) {
- var pattern = token.pattern;
- var flags = token.flags;
- t.regex = { pattern: pattern, flags: flags };
- }
- return t;
- };
- Parser.prototype.nextToken = function () {
- var token = this.lookahead;
- this.lastMarker.index = this.scanner.index;
- this.lastMarker.line = this.scanner.lineNumber;
- this.lastMarker.column = this.scanner.index - this.scanner.lineStart;
- this.collectComments();
- if (this.scanner.index !== this.startMarker.index) {
- this.startMarker.index = this.scanner.index;
- this.startMarker.line = this.scanner.lineNumber;
- this.startMarker.column = this.scanner.index - this.scanner.lineStart;
- }
- var next = this.scanner.lex();
- this.hasLineTerminator = (token.lineNumber !== next.lineNumber);
- if (next && this.context.strict && next.type === 3 /* Identifier */) {
- if (this.scanner.isStrictModeReservedWord(next.value)) {
- next.type = 4 /* Keyword */;
- }
- }
- this.lookahead = next;
- if (this.config.tokens && next.type !== 2 /* EOF */) {
- this.tokens.push(this.convertToken(next));
- }
- return token;
- };
- Parser.prototype.nextRegexToken = function () {
- this.collectComments();
- var token = this.scanner.scanRegExp();
- if (this.config.tokens) {
- // Pop the previous token, '/' or '/='
- // This is added from the lookahead token.
- this.tokens.pop();
- this.tokens.push(this.convertToken(token));
- }
- // Prime the next lookahead.
- this.lookahead = token;
- this.nextToken();
- return token;
- };
- Parser.prototype.createNode = function () {
- return {
- index: this.startMarker.index,
- line: this.startMarker.line,
- column: this.startMarker.column
- };
- };
- Parser.prototype.startNode = function (token, lastLineStart) {
- if (lastLineStart === void 0) { lastLineStart = 0; }
- var column = token.start - token.lineStart;
- var line = token.lineNumber;
- if (column < 0) {
- column += lastLineStart;
- line--;
- }
- return {
- index: token.start,
- line: line,
- column: column
- };
- };
- Parser.prototype.finalize = function (marker, node) {
- if (this.config.range) {
- node.range = [marker.index, this.lastMarker.index];
- }
- if (this.config.loc) {
- node.loc = {
- start: {
- line: marker.line,
- column: marker.column,
- },
- end: {
- line: this.lastMarker.line,
- column: this.lastMarker.column
- }
- };
- if (this.config.source) {
- node.loc.source = this.config.source;
- }
- }
- if (this.delegate) {
- var metadata = {
- start: {
- line: marker.line,
- column: marker.column,
- offset: marker.index
- },
- end: {
- line: this.lastMarker.line,
- column: this.lastMarker.column,
- offset: this.lastMarker.index
- }
- };
- this.delegate(node, metadata);
- }
- return node;
- };
- // Expect the next token to match the specified punctuator.
- // If not, an exception will be thrown.
- Parser.prototype.expect = function (value) {
- var token = this.nextToken();
- if (token.type !== 7 /* Punctuator */ || token.value !== value) {
- this.throwUnexpectedToken(token);
- }
- };
-        // Quietly expect a comma when in tolerant mode; otherwise delegate to expect().
- Parser.prototype.expectCommaSeparator = function () {
- if (this.config.tolerant) {
- var token = this.lookahead;
- if (token.type === 7 /* Punctuator */ && token.value === ',') {
- this.nextToken();
- }
- else if (token.type === 7 /* Punctuator */ && token.value === ';') {
- this.nextToken();
- this.tolerateUnexpectedToken(token);
- }
- else {
- this.tolerateUnexpectedToken(token, messages_1.Messages.UnexpectedToken);
- }
- }
- else {
- this.expect(',');
- }
- };
- // Expect the next token to match the specified keyword.
- // If not, an exception will be thrown.
- Parser.prototype.expectKeyword = function (keyword) {
- var token = this.nextToken();
- if (token.type !== 4 /* Keyword */ || token.value !== keyword) {
- this.throwUnexpectedToken(token);
- }
- };
- // Return true if the next token matches the specified punctuator.
- Parser.prototype.match = function (value) {
- return this.lookahead.type === 7 /* Punctuator */ && this.lookahead.value === value;
- };
- // Return true if the next token matches the specified keyword
- Parser.prototype.matchKeyword = function (keyword) {
- return this.lookahead.type === 4 /* Keyword */ && this.lookahead.value === keyword;
- };
- // Return true if the next token matches the specified contextual keyword
- // (where an identifier is sometimes a keyword depending on the context)
- Parser.prototype.matchContextualKeyword = function (keyword) {
- return this.lookahead.type === 3 /* Identifier */ && this.lookahead.value === keyword;
- };
- // Return true if the next token is an assignment operator
- Parser.prototype.matchAssign = function () {
- if (this.lookahead.type !== 7 /* Punctuator */) {
- return false;
- }
- var op = this.lookahead.value;
- return op === '=' ||
- op === '*=' ||
- op === '**=' ||
- op === '/=' ||
- op === '%=' ||
- op === '+=' ||
- op === '-=' ||
- op === '<<=' ||
- op === '>>=' ||
- op === '>>>=' ||
- op === '&=' ||
- op === '^=' ||
- op === '|=';
- };
- // Cover grammar support.
- //
-        // When an assignment expression position starts with a left parenthesis, the determination of the type
- // of the syntax is to be deferred arbitrarily long until the end of the parentheses pair (plus a lookahead)
- // or the first comma. This situation also defers the determination of all the expressions nested in the pair.
- //
-        // There are three productions that can be parsed in a parentheses pair that need to be determined
- // after the outermost pair is closed. They are:
- //
- // 1. AssignmentExpression
- // 2. BindingElements
- // 3. AssignmentTargets
- //
- // In order to avoid exponential backtracking, we use two flags to denote if the production can be
- // binding element or assignment target.
- //
- // The three productions have the relationship:
- //
- // BindingElements ⊆ AssignmentTargets ⊆ AssignmentExpression
- //
-        // with a single exception that CoverInitializedName, when used directly in an Expression, generates
- // an early error. Therefore, we need the third state, firstCoverInitializedNameError, to track the
-        // first usage of CoverInitializedName and report it when we reach the end of the parentheses pair.
- //
-        // The isolateCoverGrammar function runs the given parser function with a new cover grammar context, and it does not
-        // affect the current flags. This means the production the parser parses is only used as an expression. Therefore
- // the CoverInitializedName check is conducted.
- //
-        // The inheritCoverGrammar function runs the given parse function with a new cover grammar context, and it propagates
- // the flags outside of the parser. This means the production the parser parses is used as a part of a potential
- // pattern. The CoverInitializedName check is deferred.
- Parser.prototype.isolateCoverGrammar = function (parseFunction) {
- var previousIsBindingElement = this.context.isBindingElement;
- var previousIsAssignmentTarget = this.context.isAssignmentTarget;
- var previousFirstCoverInitializedNameError = this.context.firstCoverInitializedNameError;
- this.context.isBindingElement = true;
- this.context.isAssignmentTarget = true;
- this.context.firstCoverInitializedNameError = null;
- var result = parseFunction.call(this);
- if (this.context.firstCoverInitializedNameError !== null) {
- this.throwUnexpectedToken(this.context.firstCoverInitializedNameError);
- }
- this.context.isBindingElement = previousIsBindingElement;
- this.context.isAssignmentTarget = previousIsAssignmentTarget;
- this.context.firstCoverInitializedNameError = previousFirstCoverInitializedNameError;
- return result;
- };
- Parser.prototype.inheritCoverGrammar = function (parseFunction) {
- var previousIsBindingElement = this.context.isBindingElement;
- var previousIsAssignmentTarget = this.context.isAssignmentTarget;
- var previousFirstCoverInitializedNameError = this.context.firstCoverInitializedNameError;
- this.context.isBindingElement = true;
- this.context.isAssignmentTarget = true;
- this.context.firstCoverInitializedNameError = null;
- var result = parseFunction.call(this);
- this.context.isBindingElement = this.context.isBindingElement && previousIsBindingElement;
- this.context.isAssignmentTarget = this.context.isAssignmentTarget && previousIsAssignmentTarget;
- this.context.firstCoverInitializedNameError = previousFirstCoverInitializedNameError || this.context.firstCoverInitializedNameError;
- return result;
- };
- Parser.prototype.consumeSemicolon = function () {
- if (this.match(';')) {
- this.nextToken();
- }
- else if (!this.hasLineTerminator) {
- if (this.lookahead.type !== 2 /* EOF */ && !this.match('}')) {
- this.throwUnexpectedToken(this.lookahead);
- }
- this.lastMarker.index = this.startMarker.index;
- this.lastMarker.line = this.startMarker.line;
- this.lastMarker.column = this.startMarker.column;
- }
- };
- // https://tc39.github.io/ecma262/#sec-primary-expression
- Parser.prototype.parsePrimaryExpression = function () {
- var node = this.createNode();
- var expr;
- var token, raw;
- switch (this.lookahead.type) {
- case 3 /* Identifier */:
- if ((this.context.isModule || this.context.await) && this.lookahead.value === 'await') {
- this.tolerateUnexpectedToken(this.lookahead);
- }
- expr = this.matchAsyncFunction() ? this.parseFunctionExpression() : this.finalize(node, new Node.Identifier(this.nextToken().value));
- break;
- case 6 /* NumericLiteral */:
- case 8 /* StringLiteral */:
- if (this.context.strict && this.lookahead.octal) {
- this.tolerateUnexpectedToken(this.lookahead, messages_1.Messages.StrictOctalLiteral);
- }
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- token = this.nextToken();
- raw = this.getTokenRaw(token);
- expr = this.finalize(node, new Node.Literal(token.value, raw));
- break;
- case 1 /* BooleanLiteral */:
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- token = this.nextToken();
- raw = this.getTokenRaw(token);
- expr = this.finalize(node, new Node.Literal(token.value === 'true', raw));
- break;
- case 5 /* NullLiteral */:
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- token = this.nextToken();
- raw = this.getTokenRaw(token);
- expr = this.finalize(node, new Node.Literal(null, raw));
- break;
- case 10 /* Template */:
- expr = this.parseTemplateLiteral();
- break;
- case 7 /* Punctuator */:
- switch (this.lookahead.value) {
- case '(':
- this.context.isBindingElement = false;
- expr = this.inheritCoverGrammar(this.parseGroupExpression);
- break;
- case '[':
- expr = this.inheritCoverGrammar(this.parseArrayInitializer);
- break;
- case '{':
- expr = this.inheritCoverGrammar(this.parseObjectInitializer);
- break;
- case '/':
- case '/=':
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- this.scanner.index = this.startMarker.index;
- token = this.nextRegexToken();
- raw = this.getTokenRaw(token);
- expr = this.finalize(node, new Node.RegexLiteral(token.regex, raw, token.pattern, token.flags));
- break;
- default:
- expr = this.throwUnexpectedToken(this.nextToken());
- }
- break;
- case 4 /* Keyword */:
- if (!this.context.strict && this.context.allowYield && this.matchKeyword('yield')) {
- expr = this.parseIdentifierName();
- }
- else if (!this.context.strict && this.matchKeyword('let')) {
- expr = this.finalize(node, new Node.Identifier(this.nextToken().value));
- }
- else {
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- if (this.matchKeyword('function')) {
- expr = this.parseFunctionExpression();
- }
- else if (this.matchKeyword('this')) {
- this.nextToken();
- expr = this.finalize(node, new Node.ThisExpression());
- }
- else if (this.matchKeyword('class')) {
- expr = this.parseClassExpression();
- }
- else {
- expr = this.throwUnexpectedToken(this.nextToken());
- }
- }
- break;
- default:
- expr = this.throwUnexpectedToken(this.nextToken());
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-array-initializer
- Parser.prototype.parseSpreadElement = function () {
- var node = this.createNode();
- this.expect('...');
- var arg = this.inheritCoverGrammar(this.parseAssignmentExpression);
- return this.finalize(node, new Node.SpreadElement(arg));
- };
- Parser.prototype.parseArrayInitializer = function () {
- var node = this.createNode();
- var elements = [];
- this.expect('[');
- while (!this.match(']')) {
- if (this.match(',')) {
- this.nextToken();
- elements.push(null);
- }
- else if (this.match('...')) {
- var element = this.parseSpreadElement();
- if (!this.match(']')) {
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- this.expect(',');
- }
- elements.push(element);
- }
- else {
- elements.push(this.inheritCoverGrammar(this.parseAssignmentExpression));
- if (!this.match(']')) {
- this.expect(',');
- }
- }
- }
- this.expect(']');
- return this.finalize(node, new Node.ArrayExpression(elements));
- };
- // https://tc39.github.io/ecma262/#sec-object-initializer
- Parser.prototype.parsePropertyMethod = function (params) {
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- var previousStrict = this.context.strict;
- var previousAllowStrictDirective = this.context.allowStrictDirective;
- this.context.allowStrictDirective = params.simple;
- var body = this.isolateCoverGrammar(this.parseFunctionSourceElements);
- if (this.context.strict && params.firstRestricted) {
- this.tolerateUnexpectedToken(params.firstRestricted, params.message);
- }
- if (this.context.strict && params.stricted) {
- this.tolerateUnexpectedToken(params.stricted, params.message);
- }
- this.context.strict = previousStrict;
- this.context.allowStrictDirective = previousAllowStrictDirective;
- return body;
- };
- Parser.prototype.parsePropertyMethodFunction = function () {
- var isGenerator = false;
- var node = this.createNode();
- var previousAllowYield = this.context.allowYield;
- this.context.allowYield = true;
- var params = this.parseFormalParameters();
- var method = this.parsePropertyMethod(params);
- this.context.allowYield = previousAllowYield;
- return this.finalize(node, new Node.FunctionExpression(null, params.params, method, isGenerator));
- };
- Parser.prototype.parsePropertyMethodAsyncFunction = function () {
- var node = this.createNode();
- var previousAllowYield = this.context.allowYield;
- var previousAwait = this.context.await;
- this.context.allowYield = false;
- this.context.await = true;
- var params = this.parseFormalParameters();
- var method = this.parsePropertyMethod(params);
- this.context.allowYield = previousAllowYield;
- this.context.await = previousAwait;
- return this.finalize(node, new Node.AsyncFunctionExpression(null, params.params, method));
- };
- Parser.prototype.parseObjectPropertyKey = function () {
- var node = this.createNode();
- var token = this.nextToken();
- var key;
- switch (token.type) {
- case 8 /* StringLiteral */:
- case 6 /* NumericLiteral */:
- if (this.context.strict && token.octal) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.StrictOctalLiteral);
- }
- var raw = this.getTokenRaw(token);
- key = this.finalize(node, new Node.Literal(token.value, raw));
- break;
- case 3 /* Identifier */:
- case 1 /* BooleanLiteral */:
- case 5 /* NullLiteral */:
- case 4 /* Keyword */:
- key = this.finalize(node, new Node.Identifier(token.value));
- break;
- case 7 /* Punctuator */:
- if (token.value === '[') {
- key = this.isolateCoverGrammar(this.parseAssignmentExpression);
- this.expect(']');
- }
- else {
- key = this.throwUnexpectedToken(token);
- }
- break;
- default:
- key = this.throwUnexpectedToken(token);
- }
- return key;
- };
- Parser.prototype.isPropertyKey = function (key, value) {
- return (key.type === syntax_1.Syntax.Identifier && key.name === value) ||
- (key.type === syntax_1.Syntax.Literal && key.value === value);
- };
- Parser.prototype.parseObjectProperty = function (hasProto) {
- var node = this.createNode();
- var token = this.lookahead;
- var kind;
- var key = null;
- var value = null;
- var computed = false;
- var method = false;
- var shorthand = false;
- var isAsync = false;
- if (token.type === 3 /* Identifier */) {
- var id = token.value;
- this.nextToken();
- computed = this.match('[');
- isAsync = !this.hasLineTerminator && (id === 'async') &&
- !this.match(':') && !this.match('(') && !this.match('*') && !this.match(',');
- key = isAsync ? this.parseObjectPropertyKey() : this.finalize(node, new Node.Identifier(id));
- }
- else if (this.match('*')) {
- this.nextToken();
- }
- else {
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- }
- var lookaheadPropertyKey = this.qualifiedPropertyName(this.lookahead);
- if (token.type === 3 /* Identifier */ && !isAsync && token.value === 'get' && lookaheadPropertyKey) {
- kind = 'get';
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- this.context.allowYield = false;
- value = this.parseGetterMethod();
- }
- else if (token.type === 3 /* Identifier */ && !isAsync && token.value === 'set' && lookaheadPropertyKey) {
- kind = 'set';
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- value = this.parseSetterMethod();
- }
- else if (token.type === 7 /* Punctuator */ && token.value === '*' && lookaheadPropertyKey) {
- kind = 'init';
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- value = this.parseGeneratorMethod();
- method = true;
- }
- else {
- if (!key) {
- this.throwUnexpectedToken(this.lookahead);
- }
- kind = 'init';
- if (this.match(':') && !isAsync) {
- if (!computed && this.isPropertyKey(key, '__proto__')) {
- if (hasProto.value) {
- this.tolerateError(messages_1.Messages.DuplicateProtoProperty);
- }
- hasProto.value = true;
- }
- this.nextToken();
- value = this.inheritCoverGrammar(this.parseAssignmentExpression);
- }
- else if (this.match('(')) {
- value = isAsync ? this.parsePropertyMethodAsyncFunction() : this.parsePropertyMethodFunction();
- method = true;
- }
- else if (token.type === 3 /* Identifier */) {
- var id = this.finalize(node, new Node.Identifier(token.value));
- if (this.match('=')) {
- this.context.firstCoverInitializedNameError = this.lookahead;
- this.nextToken();
- shorthand = true;
- var init = this.isolateCoverGrammar(this.parseAssignmentExpression);
- value = this.finalize(node, new Node.AssignmentPattern(id, init));
- }
- else {
- shorthand = true;
- value = id;
- }
- }
- else {
- this.throwUnexpectedToken(this.nextToken());
- }
- }
- return this.finalize(node, new Node.Property(kind, key, computed, value, method, shorthand));
- };
- Parser.prototype.parseObjectInitializer = function () {
- var node = this.createNode();
- this.expect('{');
- var properties = [];
- var hasProto = { value: false };
- while (!this.match('}')) {
- properties.push(this.parseObjectProperty(hasProto));
- if (!this.match('}')) {
- this.expectCommaSeparator();
- }
- }
- this.expect('}');
- return this.finalize(node, new Node.ObjectExpression(properties));
- };
- // https://tc39.github.io/ecma262/#sec-template-literals
- Parser.prototype.parseTemplateHead = function () {
- assert_1.assert(this.lookahead.head, 'Template literal must start with a template head');
- var node = this.createNode();
- var token = this.nextToken();
- var raw = token.value;
- var cooked = token.cooked;
- return this.finalize(node, new Node.TemplateElement({ raw: raw, cooked: cooked }, token.tail));
- };
- Parser.prototype.parseTemplateElement = function () {
- if (this.lookahead.type !== 10 /* Template */) {
- this.throwUnexpectedToken();
- }
- var node = this.createNode();
- var token = this.nextToken();
- var raw = token.value;
- var cooked = token.cooked;
- return this.finalize(node, new Node.TemplateElement({ raw: raw, cooked: cooked }, token.tail));
- };
- Parser.prototype.parseTemplateLiteral = function () {
- var node = this.createNode();
- var expressions = [];
- var quasis = [];
- var quasi = this.parseTemplateHead();
- quasis.push(quasi);
- while (!quasi.tail) {
- expressions.push(this.parseExpression());
- quasi = this.parseTemplateElement();
- quasis.push(quasi);
- }
- return this.finalize(node, new Node.TemplateLiteral(quasis, expressions));
- };
- // https://tc39.github.io/ecma262/#sec-grouping-operator
- Parser.prototype.reinterpretExpressionAsPattern = function (expr) {
- switch (expr.type) {
- case syntax_1.Syntax.Identifier:
- case syntax_1.Syntax.MemberExpression:
- case syntax_1.Syntax.RestElement:
- case syntax_1.Syntax.AssignmentPattern:
- break;
- case syntax_1.Syntax.SpreadElement:
- expr.type = syntax_1.Syntax.RestElement;
- this.reinterpretExpressionAsPattern(expr.argument);
- break;
- case syntax_1.Syntax.ArrayExpression:
- expr.type = syntax_1.Syntax.ArrayPattern;
- for (var i = 0; i < expr.elements.length; i++) {
- if (expr.elements[i] !== null) {
- this.reinterpretExpressionAsPattern(expr.elements[i]);
- }
- }
- break;
- case syntax_1.Syntax.ObjectExpression:
- expr.type = syntax_1.Syntax.ObjectPattern;
- for (var i = 0; i < expr.properties.length; i++) {
- this.reinterpretExpressionAsPattern(expr.properties[i].value);
- }
- break;
- case syntax_1.Syntax.AssignmentExpression:
- expr.type = syntax_1.Syntax.AssignmentPattern;
- delete expr.operator;
- this.reinterpretExpressionAsPattern(expr.left);
- break;
- default:
-                 // Allow other node types for tolerant parsing.
- break;
- }
- };
- Parser.prototype.parseGroupExpression = function () {
- var expr;
- this.expect('(');
- if (this.match(')')) {
- this.nextToken();
- if (!this.match('=>')) {
- this.expect('=>');
- }
- expr = {
- type: ArrowParameterPlaceHolder,
- params: [],
- async: false
- };
- }
- else {
- var startToken = this.lookahead;
- var params = [];
- if (this.match('...')) {
- expr = this.parseRestElement(params);
- this.expect(')');
- if (!this.match('=>')) {
- this.expect('=>');
- }
- expr = {
- type: ArrowParameterPlaceHolder,
- params: [expr],
- async: false
- };
- }
- else {
- var arrow = false;
- this.context.isBindingElement = true;
- expr = this.inheritCoverGrammar(this.parseAssignmentExpression);
- if (this.match(',')) {
- var expressions = [];
- this.context.isAssignmentTarget = false;
- expressions.push(expr);
- while (this.lookahead.type !== 2 /* EOF */) {
- if (!this.match(',')) {
- break;
- }
- this.nextToken();
- if (this.match(')')) {
- this.nextToken();
- for (var i = 0; i < expressions.length; i++) {
- this.reinterpretExpressionAsPattern(expressions[i]);
- }
- arrow = true;
- expr = {
- type: ArrowParameterPlaceHolder,
- params: expressions,
- async: false
- };
- }
- else if (this.match('...')) {
- if (!this.context.isBindingElement) {
- this.throwUnexpectedToken(this.lookahead);
- }
- expressions.push(this.parseRestElement(params));
- this.expect(')');
- if (!this.match('=>')) {
- this.expect('=>');
- }
- this.context.isBindingElement = false;
- for (var i = 0; i < expressions.length; i++) {
- this.reinterpretExpressionAsPattern(expressions[i]);
- }
- arrow = true;
- expr = {
- type: ArrowParameterPlaceHolder,
- params: expressions,
- async: false
- };
- }
- else {
- expressions.push(this.inheritCoverGrammar(this.parseAssignmentExpression));
- }
- if (arrow) {
- break;
- }
- }
- if (!arrow) {
- expr = this.finalize(this.startNode(startToken), new Node.SequenceExpression(expressions));
- }
- }
- if (!arrow) {
- this.expect(')');
- if (this.match('=>')) {
- if (expr.type === syntax_1.Syntax.Identifier && expr.name === 'yield') {
- arrow = true;
- expr = {
- type: ArrowParameterPlaceHolder,
- params: [expr],
- async: false
- };
- }
- if (!arrow) {
- if (!this.context.isBindingElement) {
- this.throwUnexpectedToken(this.lookahead);
- }
- if (expr.type === syntax_1.Syntax.SequenceExpression) {
- for (var i = 0; i < expr.expressions.length; i++) {
- this.reinterpretExpressionAsPattern(expr.expressions[i]);
- }
- }
- else {
- this.reinterpretExpressionAsPattern(expr);
- }
- var parameters = (expr.type === syntax_1.Syntax.SequenceExpression ? expr.expressions : [expr]);
- expr = {
- type: ArrowParameterPlaceHolder,
- params: parameters,
- async: false
- };
- }
- }
- this.context.isBindingElement = false;
- }
- }
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-left-hand-side-expressions
- Parser.prototype.parseArguments = function () {
- this.expect('(');
- var args = [];
- if (!this.match(')')) {
- while (true) {
- var expr = this.match('...') ? this.parseSpreadElement() :
- this.isolateCoverGrammar(this.parseAssignmentExpression);
- args.push(expr);
- if (this.match(')')) {
- break;
- }
- this.expectCommaSeparator();
- if (this.match(')')) {
- break;
- }
- }
- }
- this.expect(')');
- return args;
- };
- Parser.prototype.isIdentifierName = function (token) {
- return token.type === 3 /* Identifier */ ||
- token.type === 4 /* Keyword */ ||
- token.type === 1 /* BooleanLiteral */ ||
- token.type === 5 /* NullLiteral */;
- };
- Parser.prototype.parseIdentifierName = function () {
- var node = this.createNode();
- var token = this.nextToken();
- if (!this.isIdentifierName(token)) {
- this.throwUnexpectedToken(token);
- }
- return this.finalize(node, new Node.Identifier(token.value));
- };
- Parser.prototype.parseNewExpression = function () {
- var node = this.createNode();
- var id = this.parseIdentifierName();
- assert_1.assert(id.name === 'new', 'New expression must start with `new`');
- var expr;
- if (this.match('.')) {
- this.nextToken();
- if (this.lookahead.type === 3 /* Identifier */ && this.context.inFunctionBody && this.lookahead.value === 'target') {
- var property = this.parseIdentifierName();
- expr = new Node.MetaProperty(id, property);
- }
- else {
- this.throwUnexpectedToken(this.lookahead);
- }
- }
- else {
- var callee = this.isolateCoverGrammar(this.parseLeftHandSideExpression);
- var args = this.match('(') ? this.parseArguments() : [];
- expr = new Node.NewExpression(callee, args);
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- }
- return this.finalize(node, expr);
- };
- Parser.prototype.parseAsyncArgument = function () {
- var arg = this.parseAssignmentExpression();
- this.context.firstCoverInitializedNameError = null;
- return arg;
- };
- Parser.prototype.parseAsyncArguments = function () {
- this.expect('(');
- var args = [];
- if (!this.match(')')) {
- while (true) {
- var expr = this.match('...') ? this.parseSpreadElement() :
- this.isolateCoverGrammar(this.parseAsyncArgument);
- args.push(expr);
- if (this.match(')')) {
- break;
- }
- this.expectCommaSeparator();
- if (this.match(')')) {
- break;
- }
- }
- }
- this.expect(')');
- return args;
- };
- Parser.prototype.parseLeftHandSideExpressionAllowCall = function () {
- var startToken = this.lookahead;
- var maybeAsync = this.matchContextualKeyword('async');
- var previousAllowIn = this.context.allowIn;
- this.context.allowIn = true;
- var expr;
- if (this.matchKeyword('super') && this.context.inFunctionBody) {
- expr = this.createNode();
- this.nextToken();
- expr = this.finalize(expr, new Node.Super());
- if (!this.match('(') && !this.match('.') && !this.match('[')) {
- this.throwUnexpectedToken(this.lookahead);
- }
- }
- else {
- expr = this.inheritCoverGrammar(this.matchKeyword('new') ? this.parseNewExpression : this.parsePrimaryExpression);
- }
- while (true) {
- if (this.match('.')) {
- this.context.isBindingElement = false;
- this.context.isAssignmentTarget = true;
- this.expect('.');
- var property = this.parseIdentifierName();
- expr = this.finalize(this.startNode(startToken), new Node.StaticMemberExpression(expr, property));
- }
- else if (this.match('(')) {
- var asyncArrow = maybeAsync && (startToken.lineNumber === this.lookahead.lineNumber);
- this.context.isBindingElement = false;
- this.context.isAssignmentTarget = false;
- var args = asyncArrow ? this.parseAsyncArguments() : this.parseArguments();
- expr = this.finalize(this.startNode(startToken), new Node.CallExpression(expr, args));
- if (asyncArrow && this.match('=>')) {
- for (var i = 0; i < args.length; ++i) {
- this.reinterpretExpressionAsPattern(args[i]);
- }
- expr = {
- type: ArrowParameterPlaceHolder,
- params: args,
- async: true
- };
- }
- }
- else if (this.match('[')) {
- this.context.isBindingElement = false;
- this.context.isAssignmentTarget = true;
- this.expect('[');
- var property = this.isolateCoverGrammar(this.parseExpression);
- this.expect(']');
- expr = this.finalize(this.startNode(startToken), new Node.ComputedMemberExpression(expr, property));
- }
- else if (this.lookahead.type === 10 /* Template */ && this.lookahead.head) {
- var quasi = this.parseTemplateLiteral();
- expr = this.finalize(this.startNode(startToken), new Node.TaggedTemplateExpression(expr, quasi));
- }
- else {
- break;
- }
- }
- this.context.allowIn = previousAllowIn;
- return expr;
- };
- Parser.prototype.parseSuper = function () {
- var node = this.createNode();
- this.expectKeyword('super');
- if (!this.match('[') && !this.match('.')) {
- this.throwUnexpectedToken(this.lookahead);
- }
- return this.finalize(node, new Node.Super());
- };
- Parser.prototype.parseLeftHandSideExpression = function () {
- assert_1.assert(this.context.allowIn, 'callee of new expression always allow in keyword.');
- var node = this.startNode(this.lookahead);
- var expr = (this.matchKeyword('super') && this.context.inFunctionBody) ? this.parseSuper() :
- this.inheritCoverGrammar(this.matchKeyword('new') ? this.parseNewExpression : this.parsePrimaryExpression);
- while (true) {
- if (this.match('[')) {
- this.context.isBindingElement = false;
- this.context.isAssignmentTarget = true;
- this.expect('[');
- var property = this.isolateCoverGrammar(this.parseExpression);
- this.expect(']');
- expr = this.finalize(node, new Node.ComputedMemberExpression(expr, property));
- }
- else if (this.match('.')) {
- this.context.isBindingElement = false;
- this.context.isAssignmentTarget = true;
- this.expect('.');
- var property = this.parseIdentifierName();
- expr = this.finalize(node, new Node.StaticMemberExpression(expr, property));
- }
- else if (this.lookahead.type === 10 /* Template */ && this.lookahead.head) {
- var quasi = this.parseTemplateLiteral();
- expr = this.finalize(node, new Node.TaggedTemplateExpression(expr, quasi));
- }
- else {
- break;
- }
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-update-expressions
- Parser.prototype.parseUpdateExpression = function () {
- var expr;
- var startToken = this.lookahead;
- if (this.match('++') || this.match('--')) {
- var node = this.startNode(startToken);
- var token = this.nextToken();
- expr = this.inheritCoverGrammar(this.parseUnaryExpression);
- if (this.context.strict && expr.type === syntax_1.Syntax.Identifier && this.scanner.isRestrictedWord(expr.name)) {
- this.tolerateError(messages_1.Messages.StrictLHSPrefix);
- }
- if (!this.context.isAssignmentTarget) {
- this.tolerateError(messages_1.Messages.InvalidLHSInAssignment);
- }
- var prefix = true;
- expr = this.finalize(node, new Node.UpdateExpression(token.value, expr, prefix));
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- }
- else {
- expr = this.inheritCoverGrammar(this.parseLeftHandSideExpressionAllowCall);
- if (!this.hasLineTerminator && this.lookahead.type === 7 /* Punctuator */) {
- if (this.match('++') || this.match('--')) {
- if (this.context.strict && expr.type === syntax_1.Syntax.Identifier && this.scanner.isRestrictedWord(expr.name)) {
- this.tolerateError(messages_1.Messages.StrictLHSPostfix);
- }
- if (!this.context.isAssignmentTarget) {
- this.tolerateError(messages_1.Messages.InvalidLHSInAssignment);
- }
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- var operator = this.nextToken().value;
- var prefix = false;
- expr = this.finalize(this.startNode(startToken), new Node.UpdateExpression(operator, expr, prefix));
- }
- }
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-unary-operators
- Parser.prototype.parseAwaitExpression = function () {
- var node = this.createNode();
- this.nextToken();
- var argument = this.parseUnaryExpression();
- return this.finalize(node, new Node.AwaitExpression(argument));
- };
- Parser.prototype.parseUnaryExpression = function () {
- var expr;
- if (this.match('+') || this.match('-') || this.match('~') || this.match('!') ||
- this.matchKeyword('delete') || this.matchKeyword('void') || this.matchKeyword('typeof')) {
- var node = this.startNode(this.lookahead);
- var token = this.nextToken();
- expr = this.inheritCoverGrammar(this.parseUnaryExpression);
- expr = this.finalize(node, new Node.UnaryExpression(token.value, expr));
- if (this.context.strict && expr.operator === 'delete' && expr.argument.type === syntax_1.Syntax.Identifier) {
- this.tolerateError(messages_1.Messages.StrictDelete);
- }
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- }
- else if (this.context.await && this.matchContextualKeyword('await')) {
- expr = this.parseAwaitExpression();
- }
- else {
- expr = this.parseUpdateExpression();
- }
- return expr;
- };
- Parser.prototype.parseExponentiationExpression = function () {
- var startToken = this.lookahead;
- var expr = this.inheritCoverGrammar(this.parseUnaryExpression);
- if (expr.type !== syntax_1.Syntax.UnaryExpression && this.match('**')) {
- this.nextToken();
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- var left = expr;
- var right = this.isolateCoverGrammar(this.parseExponentiationExpression);
- expr = this.finalize(this.startNode(startToken), new Node.BinaryExpression('**', left, right));
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-exp-operator
- // https://tc39.github.io/ecma262/#sec-multiplicative-operators
- // https://tc39.github.io/ecma262/#sec-additive-operators
- // https://tc39.github.io/ecma262/#sec-bitwise-shift-operators
- // https://tc39.github.io/ecma262/#sec-relational-operators
- // https://tc39.github.io/ecma262/#sec-equality-operators
- // https://tc39.github.io/ecma262/#sec-binary-bitwise-operators
- // https://tc39.github.io/ecma262/#sec-binary-logical-operators
- Parser.prototype.binaryPrecedence = function (token) {
- var op = token.value;
- var precedence;
- if (token.type === 7 /* Punctuator */) {
- precedence = this.operatorPrecedence[op] || 0;
- }
- else if (token.type === 4 /* Keyword */) {
- precedence = (op === 'instanceof' || (this.context.allowIn && op === 'in')) ? 7 : 0;
- }
- else {
- precedence = 0;
- }
- return precedence;
- };
- Parser.prototype.parseBinaryExpression = function () {
- var startToken = this.lookahead;
- var expr = this.inheritCoverGrammar(this.parseExponentiationExpression);
- var token = this.lookahead;
- var prec = this.binaryPrecedence(token);
- if (prec > 0) {
- this.nextToken();
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- var markers = [startToken, this.lookahead];
- var left = expr;
- var right = this.isolateCoverGrammar(this.parseExponentiationExpression);
- var stack = [left, token.value, right];
- var precedences = [prec];
- while (true) {
- prec = this.binaryPrecedence(this.lookahead);
- if (prec <= 0) {
- break;
- }
-                     // Reduce: make a binary expression from the three topmost stack entries.
- while ((stack.length > 2) && (prec <= precedences[precedences.length - 1])) {
- right = stack.pop();
- var operator = stack.pop();
- precedences.pop();
- left = stack.pop();
- markers.pop();
- var node = this.startNode(markers[markers.length - 1]);
- stack.push(this.finalize(node, new Node.BinaryExpression(operator, left, right)));
- }
- // Shift.
- stack.push(this.nextToken().value);
- precedences.push(prec);
- markers.push(this.lookahead);
- stack.push(this.isolateCoverGrammar(this.parseExponentiationExpression));
- }
-                 // Final reduce to clean up the stack.
- var i = stack.length - 1;
- expr = stack[i];
- var lastMarker = markers.pop();
- while (i > 1) {
- var marker = markers.pop();
- var lastLineStart = lastMarker && lastMarker.lineStart;
- var node = this.startNode(marker, lastLineStart);
- var operator = stack[i - 1];
- expr = this.finalize(node, new Node.BinaryExpression(operator, stack[i - 2], expr));
- i -= 2;
- lastMarker = marker;
- }
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-conditional-operator
- Parser.prototype.parseConditionalExpression = function () {
- var startToken = this.lookahead;
- var expr = this.inheritCoverGrammar(this.parseBinaryExpression);
- if (this.match('?')) {
- this.nextToken();
- var previousAllowIn = this.context.allowIn;
- this.context.allowIn = true;
- var consequent = this.isolateCoverGrammar(this.parseAssignmentExpression);
- this.context.allowIn = previousAllowIn;
- this.expect(':');
- var alternate = this.isolateCoverGrammar(this.parseAssignmentExpression);
- expr = this.finalize(this.startNode(startToken), new Node.ConditionalExpression(expr, consequent, alternate));
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-assignment-operators
- Parser.prototype.checkPatternParam = function (options, param) {
- switch (param.type) {
- case syntax_1.Syntax.Identifier:
- this.validateParam(options, param, param.name);
- break;
- case syntax_1.Syntax.RestElement:
- this.checkPatternParam(options, param.argument);
- break;
- case syntax_1.Syntax.AssignmentPattern:
- this.checkPatternParam(options, param.left);
- break;
- case syntax_1.Syntax.ArrayPattern:
- for (var i = 0; i < param.elements.length; i++) {
- if (param.elements[i] !== null) {
- this.checkPatternParam(options, param.elements[i]);
- }
- }
- break;
- case syntax_1.Syntax.ObjectPattern:
- for (var i = 0; i < param.properties.length; i++) {
- this.checkPatternParam(options, param.properties[i].value);
- }
- break;
- default:
- break;
- }
- options.simple = options.simple && (param instanceof Node.Identifier);
- };
- Parser.prototype.reinterpretAsCoverFormalsList = function (expr) {
- var params = [expr];
- var options;
- var asyncArrow = false;
- switch (expr.type) {
- case syntax_1.Syntax.Identifier:
- break;
- case ArrowParameterPlaceHolder:
- params = expr.params;
- asyncArrow = expr.async;
- break;
- default:
- return null;
- }
- options = {
- simple: true,
- paramSet: {}
- };
- for (var i = 0; i < params.length; ++i) {
- var param = params[i];
- if (param.type === syntax_1.Syntax.AssignmentPattern) {
- if (param.right.type === syntax_1.Syntax.YieldExpression) {
- if (param.right.argument) {
- this.throwUnexpectedToken(this.lookahead);
- }
- param.right.type = syntax_1.Syntax.Identifier;
- param.right.name = 'yield';
- delete param.right.argument;
- delete param.right.delegate;
- }
- }
- else if (asyncArrow && param.type === syntax_1.Syntax.Identifier && param.name === 'await') {
- this.throwUnexpectedToken(this.lookahead);
- }
- this.checkPatternParam(options, param);
- params[i] = param;
- }
- if (this.context.strict || !this.context.allowYield) {
- for (var i = 0; i < params.length; ++i) {
- var param = params[i];
- if (param.type === syntax_1.Syntax.YieldExpression) {
- this.throwUnexpectedToken(this.lookahead);
- }
- }
- }
- if (options.message === messages_1.Messages.StrictParamDupe) {
- var token = this.context.strict ? options.stricted : options.firstRestricted;
- this.throwUnexpectedToken(token, options.message);
- }
- return {
- simple: options.simple,
- params: params,
- stricted: options.stricted,
- firstRestricted: options.firstRestricted,
- message: options.message
- };
- };
- Parser.prototype.parseAssignmentExpression = function () {
- var expr;
- if (!this.context.allowYield && this.matchKeyword('yield')) {
- expr = this.parseYieldExpression();
- }
- else {
- var startToken = this.lookahead;
- var token = startToken;
- expr = this.parseConditionalExpression();
- if (token.type === 3 /* Identifier */ && (token.lineNumber === this.lookahead.lineNumber) && token.value === 'async') {
- if (this.lookahead.type === 3 /* Identifier */ || this.matchKeyword('yield')) {
- var arg = this.parsePrimaryExpression();
- this.reinterpretExpressionAsPattern(arg);
- expr = {
- type: ArrowParameterPlaceHolder,
- params: [arg],
- async: true
- };
- }
- }
- if (expr.type === ArrowParameterPlaceHolder || this.match('=>')) {
- // https://tc39.github.io/ecma262/#sec-arrow-function-definitions
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- var isAsync = expr.async;
- var list = this.reinterpretAsCoverFormalsList(expr);
- if (list) {
- if (this.hasLineTerminator) {
- this.tolerateUnexpectedToken(this.lookahead);
- }
- this.context.firstCoverInitializedNameError = null;
- var previousStrict = this.context.strict;
- var previousAllowStrictDirective = this.context.allowStrictDirective;
- this.context.allowStrictDirective = list.simple;
- var previousAllowYield = this.context.allowYield;
- var previousAwait = this.context.await;
- this.context.allowYield = true;
- this.context.await = isAsync;
- var node = this.startNode(startToken);
- this.expect('=>');
- var body = void 0;
- if (this.match('{')) {
- var previousAllowIn = this.context.allowIn;
- this.context.allowIn = true;
- body = this.parseFunctionSourceElements();
- this.context.allowIn = previousAllowIn;
- }
- else {
- body = this.isolateCoverGrammar(this.parseAssignmentExpression);
- }
- var expression = body.type !== syntax_1.Syntax.BlockStatement;
- if (this.context.strict && list.firstRestricted) {
- this.throwUnexpectedToken(list.firstRestricted, list.message);
- }
- if (this.context.strict && list.stricted) {
- this.tolerateUnexpectedToken(list.stricted, list.message);
- }
- expr = isAsync ? this.finalize(node, new Node.AsyncArrowFunctionExpression(list.params, body, expression)) :
- this.finalize(node, new Node.ArrowFunctionExpression(list.params, body, expression));
- this.context.strict = previousStrict;
- this.context.allowStrictDirective = previousAllowStrictDirective;
- this.context.allowYield = previousAllowYield;
- this.context.await = previousAwait;
- }
- }
- else {
- if (this.matchAssign()) {
- if (!this.context.isAssignmentTarget) {
- this.tolerateError(messages_1.Messages.InvalidLHSInAssignment);
- }
- if (this.context.strict && expr.type === syntax_1.Syntax.Identifier) {
- var id = expr;
- if (this.scanner.isRestrictedWord(id.name)) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.StrictLHSAssignment);
- }
- if (this.scanner.isStrictModeReservedWord(id.name)) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.StrictReservedWord);
- }
- }
- if (!this.match('=')) {
- this.context.isAssignmentTarget = false;
- this.context.isBindingElement = false;
- }
- else {
- this.reinterpretExpressionAsPattern(expr);
- }
- token = this.nextToken();
- var operator = token.value;
- var right = this.isolateCoverGrammar(this.parseAssignmentExpression);
- expr = this.finalize(this.startNode(startToken), new Node.AssignmentExpression(operator, expr, right));
- this.context.firstCoverInitializedNameError = null;
- }
- }
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-comma-operator
- Parser.prototype.parseExpression = function () {
- var startToken = this.lookahead;
- var expr = this.isolateCoverGrammar(this.parseAssignmentExpression);
- if (this.match(',')) {
- var expressions = [];
- expressions.push(expr);
- while (this.lookahead.type !== 2 /* EOF */) {
- if (!this.match(',')) {
- break;
- }
- this.nextToken();
- expressions.push(this.isolateCoverGrammar(this.parseAssignmentExpression));
- }
- expr = this.finalize(this.startNode(startToken), new Node.SequenceExpression(expressions));
- }
- return expr;
- };
- // https://tc39.github.io/ecma262/#sec-block
- Parser.prototype.parseStatementListItem = function () {
- var statement;
- this.context.isAssignmentTarget = true;
- this.context.isBindingElement = true;
- if (this.lookahead.type === 4 /* Keyword */) {
- switch (this.lookahead.value) {
- case 'export':
- if (!this.context.isModule) {
- this.tolerateUnexpectedToken(this.lookahead, messages_1.Messages.IllegalExportDeclaration);
- }
- statement = this.parseExportDeclaration();
- break;
- case 'import':
- if (!this.context.isModule) {
- this.tolerateUnexpectedToken(this.lookahead, messages_1.Messages.IllegalImportDeclaration);
- }
- statement = this.parseImportDeclaration();
- break;
- case 'const':
- statement = this.parseLexicalDeclaration({ inFor: false });
- break;
- case 'function':
-                         // Apache CouchDB modification: pass true to tolerate
-                         // missing function identifiers.
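-                         // (Presumably this is so view/filter functions, which CouchDB
-                         // stores as anonymous function expressions such as
-                         // `function (doc) { emit(doc._id, null); }`, still parse when
-                         // evaluated at statement level, where a nameless declaration
-                         // would otherwise be a syntax error.)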
- statement = this.parseFunctionDeclaration(true);
- break;
- case 'class':
- statement = this.parseClassDeclaration();
- break;
- case 'let':
- statement = this.isLexicalDeclaration() ? this.parseLexicalDeclaration({ inFor: false }) : this.parseStatement();
- break;
- default:
- statement = this.parseStatement();
- break;
- }
- }
- else {
- statement = this.parseStatement();
- }
- return statement;
- };
- Parser.prototype.parseBlock = function () {
- var node = this.createNode();
- this.expect('{');
- var block = [];
- while (true) {
- if (this.match('}')) {
- break;
- }
- block.push(this.parseStatementListItem());
- }
- this.expect('}');
- return this.finalize(node, new Node.BlockStatement(block));
- };
- // https://tc39.github.io/ecma262/#sec-let-and-const-declarations
- Parser.prototype.parseLexicalBinding = function (kind, options) {
- var node = this.createNode();
- var params = [];
- var id = this.parsePattern(params, kind);
- if (this.context.strict && id.type === syntax_1.Syntax.Identifier) {
- if (this.scanner.isRestrictedWord(id.name)) {
- this.tolerateError(messages_1.Messages.StrictVarName);
- }
- }
- var init = null;
- if (kind === 'const') {
- if (!this.matchKeyword('in') && !this.matchContextualKeyword('of')) {
- if (this.match('=')) {
- this.nextToken();
- init = this.isolateCoverGrammar(this.parseAssignmentExpression);
- }
- else {
- this.throwError(messages_1.Messages.DeclarationMissingInitializer, 'const');
- }
- }
- }
- else if ((!options.inFor && id.type !== syntax_1.Syntax.Identifier) || this.match('=')) {
- this.expect('=');
- init = this.isolateCoverGrammar(this.parseAssignmentExpression);
- }
- return this.finalize(node, new Node.VariableDeclarator(id, init));
- };
- Parser.prototype.parseBindingList = function (kind, options) {
- var list = [this.parseLexicalBinding(kind, options)];
- while (this.match(',')) {
- this.nextToken();
- list.push(this.parseLexicalBinding(kind, options));
- }
- return list;
- };
- Parser.prototype.isLexicalDeclaration = function () {
- var state = this.scanner.saveState();
- this.scanner.scanComments();
- var next = this.scanner.lex();
- this.scanner.restoreState(state);
- return (next.type === 3 /* Identifier */) ||
- (next.type === 7 /* Punctuator */ && next.value === '[') ||
- (next.type === 7 /* Punctuator */ && next.value === '{') ||
- (next.type === 4 /* Keyword */ && next.value === 'let') ||
- (next.type === 4 /* Keyword */ && next.value === 'yield');
- };
- Parser.prototype.parseLexicalDeclaration = function (options) {
- var node = this.createNode();
- var kind = this.nextToken().value;
- assert_1.assert(kind === 'let' || kind === 'const', 'Lexical declaration must be either let or const');
- var declarations = this.parseBindingList(kind, options);
- this.consumeSemicolon();
- return this.finalize(node, new Node.VariableDeclaration(declarations, kind));
- };
- // https://tc39.github.io/ecma262/#sec-destructuring-binding-patterns
- Parser.prototype.parseBindingRestElement = function (params, kind) {
- var node = this.createNode();
- this.expect('...');
- var arg = this.parsePattern(params, kind);
- return this.finalize(node, new Node.RestElement(arg));
- };
- Parser.prototype.parseArrayPattern = function (params, kind) {
- var node = this.createNode();
- this.expect('[');
- var elements = [];
- while (!this.match(']')) {
- if (this.match(',')) {
- this.nextToken();
- elements.push(null);
- }
- else {
- if (this.match('...')) {
- elements.push(this.parseBindingRestElement(params, kind));
- break;
- }
- else {
- elements.push(this.parsePatternWithDefault(params, kind));
- }
- if (!this.match(']')) {
- this.expect(',');
- }
- }
- }
- this.expect(']');
- return this.finalize(node, new Node.ArrayPattern(elements));
- };
- Parser.prototype.parsePropertyPattern = function (params, kind) {
- var node = this.createNode();
- var computed = false;
- var shorthand = false;
- var method = false;
- var key;
- var value;
- if (this.lookahead.type === 3 /* Identifier */) {
- var keyToken = this.lookahead;
- key = this.parseVariableIdentifier();
- var init = this.finalize(node, new Node.Identifier(keyToken.value));
- if (this.match('=')) {
- params.push(keyToken);
- shorthand = true;
- this.nextToken();
- var expr = this.parseAssignmentExpression();
- value = this.finalize(this.startNode(keyToken), new Node.AssignmentPattern(init, expr));
- }
- else if (!this.match(':')) {
- params.push(keyToken);
- shorthand = true;
- value = init;
- }
- else {
- this.expect(':');
- value = this.parsePatternWithDefault(params, kind);
- }
- }
- else {
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- this.expect(':');
- value = this.parsePatternWithDefault(params, kind);
- }
- return this.finalize(node, new Node.Property('init', key, computed, value, method, shorthand));
- };
- Parser.prototype.parseObjectPattern = function (params, kind) {
- var node = this.createNode();
- var properties = [];
- this.expect('{');
- while (!this.match('}')) {
- properties.push(this.parsePropertyPattern(params, kind));
- if (!this.match('}')) {
- this.expect(',');
- }
- }
- this.expect('}');
- return this.finalize(node, new Node.ObjectPattern(properties));
- };
- Parser.prototype.parsePattern = function (params, kind) {
- var pattern;
- if (this.match('[')) {
- pattern = this.parseArrayPattern(params, kind);
- }
- else if (this.match('{')) {
- pattern = this.parseObjectPattern(params, kind);
- }
- else {
- if (this.matchKeyword('let') && (kind === 'const' || kind === 'let')) {
- this.tolerateUnexpectedToken(this.lookahead, messages_1.Messages.LetInLexicalBinding);
- }
- params.push(this.lookahead);
- pattern = this.parseVariableIdentifier(kind);
- }
- return pattern;
- };
- Parser.prototype.parsePatternWithDefault = function (params, kind) {
- var startToken = this.lookahead;
- var pattern = this.parsePattern(params, kind);
- if (this.match('=')) {
- this.nextToken();
- var previousAllowYield = this.context.allowYield;
- this.context.allowYield = true;
- var right = this.isolateCoverGrammar(this.parseAssignmentExpression);
- this.context.allowYield = previousAllowYield;
- pattern = this.finalize(this.startNode(startToken), new Node.AssignmentPattern(pattern, right));
- }
- return pattern;
- };
- // https://tc39.github.io/ecma262/#sec-variable-statement
- Parser.prototype.parseVariableIdentifier = function (kind) {
- var node = this.createNode();
- var token = this.nextToken();
- if (token.type === 4 /* Keyword */ && token.value === 'yield') {
- if (this.context.strict) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.StrictReservedWord);
- }
- else if (!this.context.allowYield) {
- this.throwUnexpectedToken(token);
- }
- }
- else if (token.type !== 3 /* Identifier */) {
- if (this.context.strict && token.type === 4 /* Keyword */ && this.scanner.isStrictModeReservedWord(token.value)) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.StrictReservedWord);
- }
- else {
- if (this.context.strict || token.value !== 'let' || kind !== 'var') {
- this.throwUnexpectedToken(token);
- }
- }
- }
- else if ((this.context.isModule || this.context.await) && token.type === 3 /* Identifier */ && token.value === 'await') {
- this.tolerateUnexpectedToken(token);
- }
- return this.finalize(node, new Node.Identifier(token.value));
- };
- Parser.prototype.parseVariableDeclaration = function (options) {
- var node = this.createNode();
- var params = [];
- var id = this.parsePattern(params, 'var');
- if (this.context.strict && id.type === syntax_1.Syntax.Identifier) {
- if (this.scanner.isRestrictedWord(id.name)) {
- this.tolerateError(messages_1.Messages.StrictVarName);
- }
- }
- var init = null;
- if (this.match('=')) {
- this.nextToken();
- init = this.isolateCoverGrammar(this.parseAssignmentExpression);
- }
- else if (id.type !== syntax_1.Syntax.Identifier && !options.inFor) {
- this.expect('=');
- }
- return this.finalize(node, new Node.VariableDeclarator(id, init));
- };
- Parser.prototype.parseVariableDeclarationList = function (options) {
- var opt = { inFor: options.inFor };
- var list = [];
- list.push(this.parseVariableDeclaration(opt));
- while (this.match(',')) {
- this.nextToken();
- list.push(this.parseVariableDeclaration(opt));
- }
- return list;
- };
- Parser.prototype.parseVariableStatement = function () {
- var node = this.createNode();
- this.expectKeyword('var');
- var declarations = this.parseVariableDeclarationList({ inFor: false });
- this.consumeSemicolon();
- return this.finalize(node, new Node.VariableDeclaration(declarations, 'var'));
- };
- // https://tc39.github.io/ecma262/#sec-empty-statement
- Parser.prototype.parseEmptyStatement = function () {
- var node = this.createNode();
- this.expect(';');
- return this.finalize(node, new Node.EmptyStatement());
- };
- // https://tc39.github.io/ecma262/#sec-expression-statement
- Parser.prototype.parseExpressionStatement = function () {
- var node = this.createNode();
- var expr = this.parseExpression();
- this.consumeSemicolon();
- return this.finalize(node, new Node.ExpressionStatement(expr));
- };
- // https://tc39.github.io/ecma262/#sec-if-statement
- Parser.prototype.parseIfClause = function () {
- if (this.context.strict && this.matchKeyword('function')) {
- this.tolerateError(messages_1.Messages.StrictFunction);
- }
- return this.parseStatement();
- };
- Parser.prototype.parseIfStatement = function () {
- var node = this.createNode();
- var consequent;
- var alternate = null;
- this.expectKeyword('if');
- this.expect('(');
- var test = this.parseExpression();
- if (!this.match(')') && this.config.tolerant) {
- this.tolerateUnexpectedToken(this.nextToken());
- consequent = this.finalize(this.createNode(), new Node.EmptyStatement());
- }
- else {
- this.expect(')');
- consequent = this.parseIfClause();
- if (this.matchKeyword('else')) {
- this.nextToken();
- alternate = this.parseIfClause();
- }
- }
- return this.finalize(node, new Node.IfStatement(test, consequent, alternate));
- };
- // https://tc39.github.io/ecma262/#sec-do-while-statement
- Parser.prototype.parseDoWhileStatement = function () {
- var node = this.createNode();
- this.expectKeyword('do');
- var previousInIteration = this.context.inIteration;
- this.context.inIteration = true;
- var body = this.parseStatement();
- this.context.inIteration = previousInIteration;
- this.expectKeyword('while');
- this.expect('(');
- var test = this.parseExpression();
- if (!this.match(')') && this.config.tolerant) {
- this.tolerateUnexpectedToken(this.nextToken());
- }
- else {
- this.expect(')');
- if (this.match(';')) {
- this.nextToken();
- }
- }
- return this.finalize(node, new Node.DoWhileStatement(body, test));
- };
- // https://tc39.github.io/ecma262/#sec-while-statement
- Parser.prototype.parseWhileStatement = function () {
- var node = this.createNode();
- var body;
- this.expectKeyword('while');
- this.expect('(');
- var test = this.parseExpression();
- if (!this.match(')') && this.config.tolerant) {
- this.tolerateUnexpectedToken(this.nextToken());
- body = this.finalize(this.createNode(), new Node.EmptyStatement());
- }
- else {
- this.expect(')');
- var previousInIteration = this.context.inIteration;
- this.context.inIteration = true;
- body = this.parseStatement();
- this.context.inIteration = previousInIteration;
- }
- return this.finalize(node, new Node.WhileStatement(test, body));
- };
- // https://tc39.github.io/ecma262/#sec-for-statement
- // https://tc39.github.io/ecma262/#sec-for-in-and-for-of-statements
- Parser.prototype.parseForStatement = function () {
- var init = null;
- var test = null;
- var update = null;
- var forIn = true;
- var left, right;
- var node = this.createNode();
- this.expectKeyword('for');
- this.expect('(');
- if (this.match(';')) {
- this.nextToken();
- }
- else {
- if (this.matchKeyword('var')) {
- init = this.createNode();
- this.nextToken();
- var previousAllowIn = this.context.allowIn;
- this.context.allowIn = false;
- var declarations = this.parseVariableDeclarationList({ inFor: true });
- this.context.allowIn = previousAllowIn;
- if (declarations.length === 1 && this.matchKeyword('in')) {
- var decl = declarations[0];
- if (decl.init && (decl.id.type === syntax_1.Syntax.ArrayPattern || decl.id.type === syntax_1.Syntax.ObjectPattern || this.context.strict)) {
- this.tolerateError(messages_1.Messages.ForInOfLoopInitializer, 'for-in');
- }
- init = this.finalize(init, new Node.VariableDeclaration(declarations, 'var'));
- this.nextToken();
- left = init;
- right = this.parseExpression();
- init = null;
- }
- else if (declarations.length === 1 && declarations[0].init === null && this.matchContextualKeyword('of')) {
- init = this.finalize(init, new Node.VariableDeclaration(declarations, 'var'));
- this.nextToken();
- left = init;
- right = this.parseAssignmentExpression();
- init = null;
- forIn = false;
- }
- else {
- init = this.finalize(init, new Node.VariableDeclaration(declarations, 'var'));
- this.expect(';');
- }
- }
- else if (this.matchKeyword('const') || this.matchKeyword('let')) {
- init = this.createNode();
- var kind = this.nextToken().value;
- if (!this.context.strict && this.lookahead.value === 'in') {
- init = this.finalize(init, new Node.Identifier(kind));
- this.nextToken();
- left = init;
- right = this.parseExpression();
- init = null;
- }
- else {
- var previousAllowIn = this.context.allowIn;
- this.context.allowIn = false;
- var declarations = this.parseBindingList(kind, { inFor: true });
- this.context.allowIn = previousAllowIn;
- if (declarations.length === 1 && declarations[0].init === null && this.matchKeyword('in')) {
- init = this.finalize(init, new Node.VariableDeclaration(declarations, kind));
- this.nextToken();
- left = init;
- right = this.parseExpression();
- init = null;
- }
- else if (declarations.length === 1 && declarations[0].init === null && this.matchContextualKeyword('of')) {
- init = this.finalize(init, new Node.VariableDeclaration(declarations, kind));
- this.nextToken();
- left = init;
- right = this.parseAssignmentExpression();
- init = null;
- forIn = false;
- }
- else {
- this.consumeSemicolon();
- init = this.finalize(init, new Node.VariableDeclaration(declarations, kind));
- }
- }
- }
- else {
- var initStartToken = this.lookahead;
- var previousAllowIn = this.context.allowIn;
- this.context.allowIn = false;
- init = this.inheritCoverGrammar(this.parseAssignmentExpression);
- this.context.allowIn = previousAllowIn;
- if (this.matchKeyword('in')) {
- if (!this.context.isAssignmentTarget || init.type === syntax_1.Syntax.AssignmentExpression) {
- this.tolerateError(messages_1.Messages.InvalidLHSInForIn);
- }
- this.nextToken();
- this.reinterpretExpressionAsPattern(init);
- left = init;
- right = this.parseExpression();
- init = null;
- }
- else if (this.matchContextualKeyword('of')) {
- if (!this.context.isAssignmentTarget || init.type === syntax_1.Syntax.AssignmentExpression) {
- this.tolerateError(messages_1.Messages.InvalidLHSInForLoop);
- }
- this.nextToken();
- this.reinterpretExpressionAsPattern(init);
- left = init;
- right = this.parseAssignmentExpression();
- init = null;
- forIn = false;
- }
- else {
- if (this.match(',')) {
- var initSeq = [init];
- while (this.match(',')) {
- this.nextToken();
- initSeq.push(this.isolateCoverGrammar(this.parseAssignmentExpression));
- }
- init = this.finalize(this.startNode(initStartToken), new Node.SequenceExpression(initSeq));
- }
- this.expect(';');
- }
- }
- }
- if (typeof left === 'undefined') {
- if (!this.match(';')) {
- test = this.parseExpression();
- }
- this.expect(';');
- if (!this.match(')')) {
- update = this.parseExpression();
- }
- }
- var body;
- if (!this.match(')') && this.config.tolerant) {
- this.tolerateUnexpectedToken(this.nextToken());
- body = this.finalize(this.createNode(), new Node.EmptyStatement());
- }
- else {
- this.expect(')');
- var previousInIteration = this.context.inIteration;
- this.context.inIteration = true;
- body = this.isolateCoverGrammar(this.parseStatement);
- this.context.inIteration = previousInIteration;
- }
- return (typeof left === 'undefined') ?
- this.finalize(node, new Node.ForStatement(init, test, update, body)) :
- forIn ? this.finalize(node, new Node.ForInStatement(left, right, body)) :
- this.finalize(node, new Node.ForOfStatement(left, right, body));
- };
- // https://tc39.github.io/ecma262/#sec-continue-statement
- Parser.prototype.parseContinueStatement = function () {
- var node = this.createNode();
- this.expectKeyword('continue');
- var label = null;
- if (this.lookahead.type === 3 /* Identifier */ && !this.hasLineTerminator) {
- var id = this.parseVariableIdentifier();
- label = id;
- var key = '$' + id.name;
- if (!Object.prototype.hasOwnProperty.call(this.context.labelSet, key)) {
- this.throwError(messages_1.Messages.UnknownLabel, id.name);
- }
- }
- this.consumeSemicolon();
- if (label === null && !this.context.inIteration) {
- this.throwError(messages_1.Messages.IllegalContinue);
- }
- return this.finalize(node, new Node.ContinueStatement(label));
- };
- // https://tc39.github.io/ecma262/#sec-break-statement
- Parser.prototype.parseBreakStatement = function () {
- var node = this.createNode();
- this.expectKeyword('break');
- var label = null;
- if (this.lookahead.type === 3 /* Identifier */ && !this.hasLineTerminator) {
- var id = this.parseVariableIdentifier();
- var key = '$' + id.name;
- if (!Object.prototype.hasOwnProperty.call(this.context.labelSet, key)) {
- this.throwError(messages_1.Messages.UnknownLabel, id.name);
- }
- label = id;
- }
- this.consumeSemicolon();
- if (label === null && !this.context.inIteration && !this.context.inSwitch) {
- this.throwError(messages_1.Messages.IllegalBreak);
- }
- return this.finalize(node, new Node.BreakStatement(label));
- };
- // https://tc39.github.io/ecma262/#sec-return-statement
- Parser.prototype.parseReturnStatement = function () {
- if (!this.context.inFunctionBody) {
- this.tolerateError(messages_1.Messages.IllegalReturn);
- }
- var node = this.createNode();
- this.expectKeyword('return');
- var hasArgument = (!this.match(';') && !this.match('}') &&
- !this.hasLineTerminator && this.lookahead.type !== 2 /* EOF */) ||
- this.lookahead.type === 8 /* StringLiteral */ ||
- this.lookahead.type === 10 /* Template */;
- var argument = hasArgument ? this.parseExpression() : null;
- this.consumeSemicolon();
- return this.finalize(node, new Node.ReturnStatement(argument));
- };
- // https://tc39.github.io/ecma262/#sec-with-statement
- Parser.prototype.parseWithStatement = function () {
- if (this.context.strict) {
- this.tolerateError(messages_1.Messages.StrictModeWith);
- }
- var node = this.createNode();
- var body;
- this.expectKeyword('with');
- this.expect('(');
- var object = this.parseExpression();
- if (!this.match(')') && this.config.tolerant) {
- this.tolerateUnexpectedToken(this.nextToken());
- body = this.finalize(this.createNode(), new Node.EmptyStatement());
- }
- else {
- this.expect(')');
- body = this.parseStatement();
- }
- return this.finalize(node, new Node.WithStatement(object, body));
- };
- // https://tc39.github.io/ecma262/#sec-switch-statement
- Parser.prototype.parseSwitchCase = function () {
- var node = this.createNode();
- var test;
- if (this.matchKeyword('default')) {
- this.nextToken();
- test = null;
- }
- else {
- this.expectKeyword('case');
- test = this.parseExpression();
- }
- this.expect(':');
- var consequent = [];
- while (true) {
- if (this.match('}') || this.matchKeyword('default') || this.matchKeyword('case')) {
- break;
- }
- consequent.push(this.parseStatementListItem());
- }
- return this.finalize(node, new Node.SwitchCase(test, consequent));
- };
- Parser.prototype.parseSwitchStatement = function () {
- var node = this.createNode();
- this.expectKeyword('switch');
- this.expect('(');
- var discriminant = this.parseExpression();
- this.expect(')');
- var previousInSwitch = this.context.inSwitch;
- this.context.inSwitch = true;
- var cases = [];
- var defaultFound = false;
- this.expect('{');
- while (true) {
- if (this.match('}')) {
- break;
- }
- var clause = this.parseSwitchCase();
- if (clause.test === null) {
- if (defaultFound) {
- this.throwError(messages_1.Messages.MultipleDefaultsInSwitch);
- }
- defaultFound = true;
- }
- cases.push(clause);
- }
- this.expect('}');
- this.context.inSwitch = previousInSwitch;
- return this.finalize(node, new Node.SwitchStatement(discriminant, cases));
- };
- // https://tc39.github.io/ecma262/#sec-labelled-statements
- Parser.prototype.parseLabelledStatement = function () {
- var node = this.createNode();
- var expr = this.parseExpression();
- var statement;
- if ((expr.type === syntax_1.Syntax.Identifier) && this.match(':')) {
- this.nextToken();
- var id = expr;
- var key = '$' + id.name;
- if (Object.prototype.hasOwnProperty.call(this.context.labelSet, key)) {
- this.throwError(messages_1.Messages.Redeclaration, 'Label', id.name);
- }
- this.context.labelSet[key] = true;
- var body = void 0;
- if (this.matchKeyword('class')) {
- this.tolerateUnexpectedToken(this.lookahead);
- body = this.parseClassDeclaration();
- }
- else if (this.matchKeyword('function')) {
- var token = this.lookahead;
- var declaration = this.parseFunctionDeclaration();
- if (this.context.strict) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.StrictFunction);
- }
- else if (declaration.generator) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.GeneratorInLegacyContext);
- }
- body = declaration;
- }
- else {
- body = this.parseStatement();
- }
- delete this.context.labelSet[key];
- statement = new Node.LabeledStatement(id, body);
- }
- else {
- this.consumeSemicolon();
- statement = new Node.ExpressionStatement(expr);
- }
- return this.finalize(node, statement);
- };
- // https://tc39.github.io/ecma262/#sec-throw-statement
- Parser.prototype.parseThrowStatement = function () {
- var node = this.createNode();
- this.expectKeyword('throw');
- if (this.hasLineTerminator) {
- this.throwError(messages_1.Messages.NewlineAfterThrow);
- }
- var argument = this.parseExpression();
- this.consumeSemicolon();
- return this.finalize(node, new Node.ThrowStatement(argument));
- };
- // https://tc39.github.io/ecma262/#sec-try-statement
- Parser.prototype.parseCatchClause = function () {
- var node = this.createNode();
- this.expectKeyword('catch');
- this.expect('(');
- if (this.match(')')) {
- this.throwUnexpectedToken(this.lookahead);
- }
- var params = [];
- var param = this.parsePattern(params);
- var paramMap = {};
- for (var i = 0; i < params.length; i++) {
- var key = '$' + params[i].value;
- if (Object.prototype.hasOwnProperty.call(paramMap, key)) {
- this.tolerateError(messages_1.Messages.DuplicateBinding, params[i].value);
- }
- paramMap[key] = true;
- }
- if (this.context.strict && param.type === syntax_1.Syntax.Identifier) {
- if (this.scanner.isRestrictedWord(param.name)) {
- this.tolerateError(messages_1.Messages.StrictCatchVariable);
- }
- }
- this.expect(')');
- var body = this.parseBlock();
- return this.finalize(node, new Node.CatchClause(param, body));
- };
- Parser.prototype.parseFinallyClause = function () {
- this.expectKeyword('finally');
- return this.parseBlock();
- };
- Parser.prototype.parseTryStatement = function () {
- var node = this.createNode();
- this.expectKeyword('try');
- var block = this.parseBlock();
- var handler = this.matchKeyword('catch') ? this.parseCatchClause() : null;
- var finalizer = this.matchKeyword('finally') ? this.parseFinallyClause() : null;
- if (!handler && !finalizer) {
- this.throwError(messages_1.Messages.NoCatchOrFinally);
- }
- return this.finalize(node, new Node.TryStatement(block, handler, finalizer));
- };
- // https://tc39.github.io/ecma262/#sec-debugger-statement
- Parser.prototype.parseDebuggerStatement = function () {
- var node = this.createNode();
- this.expectKeyword('debugger');
- this.consumeSemicolon();
- return this.finalize(node, new Node.DebuggerStatement());
- };
- // https://tc39.github.io/ecma262/#sec-ecmascript-language-statements-and-declarations
- Parser.prototype.parseStatement = function () {
- var statement;
- switch (this.lookahead.type) {
- case 1 /* BooleanLiteral */:
- case 5 /* NullLiteral */:
- case 6 /* NumericLiteral */:
- case 8 /* StringLiteral */:
- case 10 /* Template */:
- case 9 /* RegularExpression */:
- statement = this.parseExpressionStatement();
- break;
- case 7 /* Punctuator */:
- var value = this.lookahead.value;
- if (value === '{') {
- statement = this.parseBlock();
- }
- else if (value === '(') {
- statement = this.parseExpressionStatement();
- }
- else if (value === ';') {
- statement = this.parseEmptyStatement();
- }
- else {
- statement = this.parseExpressionStatement();
- }
- break;
- case 3 /* Identifier */:
- statement = this.matchAsyncFunction() ? this.parseFunctionDeclaration() : this.parseLabelledStatement();
- break;
- case 4 /* Keyword */:
- switch (this.lookahead.value) {
- case 'break':
- statement = this.parseBreakStatement();
- break;
- case 'continue':
- statement = this.parseContinueStatement();
- break;
- case 'debugger':
- statement = this.parseDebuggerStatement();
- break;
- case 'do':
- statement = this.parseDoWhileStatement();
- break;
- case 'for':
- statement = this.parseForStatement();
- break;
- case 'function':
- statement = this.parseFunctionDeclaration();
- break;
- case 'if':
- statement = this.parseIfStatement();
- break;
- case 'return':
- statement = this.parseReturnStatement();
- break;
- case 'switch':
- statement = this.parseSwitchStatement();
- break;
- case 'throw':
- statement = this.parseThrowStatement();
- break;
- case 'try':
- statement = this.parseTryStatement();
- break;
- case 'var':
- statement = this.parseVariableStatement();
- break;
- case 'while':
- statement = this.parseWhileStatement();
- break;
- case 'with':
- statement = this.parseWithStatement();
- break;
- default:
- statement = this.parseExpressionStatement();
- break;
- }
- break;
- default:
- statement = this.throwUnexpectedToken(this.lookahead);
- }
- return statement;
- };
- // https://tc39.github.io/ecma262/#sec-function-definitions
- Parser.prototype.parseFunctionSourceElements = function () {
- var node = this.createNode();
- this.expect('{');
- var body = this.parseDirectivePrologues();
- var previousLabelSet = this.context.labelSet;
- var previousInIteration = this.context.inIteration;
- var previousInSwitch = this.context.inSwitch;
- var previousInFunctionBody = this.context.inFunctionBody;
- this.context.labelSet = {};
- this.context.inIteration = false;
- this.context.inSwitch = false;
- this.context.inFunctionBody = true;
- while (this.lookahead.type !== 2 /* EOF */) {
- if (this.match('}')) {
- break;
- }
- body.push(this.parseStatementListItem());
- }
- this.expect('}');
- this.context.labelSet = previousLabelSet;
- this.context.inIteration = previousInIteration;
- this.context.inSwitch = previousInSwitch;
- this.context.inFunctionBody = previousInFunctionBody;
- return this.finalize(node, new Node.BlockStatement(body));
- };
- Parser.prototype.validateParam = function (options, param, name) {
- var key = '$' + name;
- if (this.context.strict) {
- if (this.scanner.isRestrictedWord(name)) {
- options.stricted = param;
- options.message = messages_1.Messages.StrictParamName;
- }
- if (Object.prototype.hasOwnProperty.call(options.paramSet, key)) {
- options.stricted = param;
- options.message = messages_1.Messages.StrictParamDupe;
- }
- }
- else if (!options.firstRestricted) {
- if (this.scanner.isRestrictedWord(name)) {
- options.firstRestricted = param;
- options.message = messages_1.Messages.StrictParamName;
- }
- else if (this.scanner.isStrictModeReservedWord(name)) {
- options.firstRestricted = param;
- options.message = messages_1.Messages.StrictReservedWord;
- }
- else if (Object.prototype.hasOwnProperty.call(options.paramSet, key)) {
- options.stricted = param;
- options.message = messages_1.Messages.StrictParamDupe;
- }
- }
- /* istanbul ignore next */
- if (typeof Object.defineProperty === 'function') {
- Object.defineProperty(options.paramSet, key, { value: true, enumerable: true, writable: true, configurable: true });
- }
- else {
- options.paramSet[key] = true;
- }
- };
- Parser.prototype.parseRestElement = function (params) {
- var node = this.createNode();
- this.expect('...');
- var arg = this.parsePattern(params);
- if (this.match('=')) {
- this.throwError(messages_1.Messages.DefaultRestParameter);
- }
- if (!this.match(')')) {
- this.throwError(messages_1.Messages.ParameterAfterRestParameter);
- }
- return this.finalize(node, new Node.RestElement(arg));
- };
- Parser.prototype.parseFormalParameter = function (options) {
- var params = [];
- var param = this.match('...') ? this.parseRestElement(params) : this.parsePatternWithDefault(params);
- for (var i = 0; i < params.length; i++) {
- this.validateParam(options, params[i], params[i].value);
- }
- options.simple = options.simple && (param instanceof Node.Identifier);
- options.params.push(param);
- };
- Parser.prototype.parseFormalParameters = function (firstRestricted) {
- var options;
- options = {
- simple: true,
- params: [],
- firstRestricted: firstRestricted
- };
- this.expect('(');
- if (!this.match(')')) {
- options.paramSet = {};
- while (this.lookahead.type !== 2 /* EOF */) {
- this.parseFormalParameter(options);
- if (this.match(')')) {
- break;
- }
- this.expect(',');
- if (this.match(')')) {
- break;
- }
- }
- }
- this.expect(')');
- return {
- simple: options.simple,
- params: options.params,
- stricted: options.stricted,
- firstRestricted: options.firstRestricted,
- message: options.message
- };
- };
- Parser.prototype.matchAsyncFunction = function () {
- var match = this.matchContextualKeyword('async');
- if (match) {
- var state = this.scanner.saveState();
- this.scanner.scanComments();
- var next = this.scanner.lex();
- this.scanner.restoreState(state);
- match = (state.lineNumber === next.lineNumber) && (next.type === 4 /* Keyword */) && (next.value === 'function');
- }
- return match;
- };
- Parser.prototype.parseFunctionDeclaration = function (identifierIsOptional) {
- var node = this.createNode();
- var isAsync = this.matchContextualKeyword('async');
- if (isAsync) {
- this.nextToken();
- }
- this.expectKeyword('function');
- var isGenerator = isAsync ? false : this.match('*');
- if (isGenerator) {
- this.nextToken();
- }
- var message;
- var id = null;
- var firstRestricted = null;
- if (!identifierIsOptional || !this.match('(')) {
- var token = this.lookahead;
- id = this.parseVariableIdentifier();
- if (this.context.strict) {
- if (this.scanner.isRestrictedWord(token.value)) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.StrictFunctionName);
- }
- }
- else {
- if (this.scanner.isRestrictedWord(token.value)) {
- firstRestricted = token;
- message = messages_1.Messages.StrictFunctionName;
- }
- else if (this.scanner.isStrictModeReservedWord(token.value)) {
- firstRestricted = token;
- message = messages_1.Messages.StrictReservedWord;
- }
- }
- }
- var previousAllowAwait = this.context.await;
- var previousAllowYield = this.context.allowYield;
- this.context.await = isAsync;
- this.context.allowYield = !isGenerator;
- var formalParameters = this.parseFormalParameters(firstRestricted);
- var params = formalParameters.params;
- var stricted = formalParameters.stricted;
- firstRestricted = formalParameters.firstRestricted;
- if (formalParameters.message) {
- message = formalParameters.message;
- }
- var previousStrict = this.context.strict;
- var previousAllowStrictDirective = this.context.allowStrictDirective;
- this.context.allowStrictDirective = formalParameters.simple;
- var body = this.parseFunctionSourceElements();
- if (this.context.strict && firstRestricted) {
- this.throwUnexpectedToken(firstRestricted, message);
- }
- if (this.context.strict && stricted) {
- this.tolerateUnexpectedToken(stricted, message);
- }
- this.context.strict = previousStrict;
- this.context.allowStrictDirective = previousAllowStrictDirective;
- this.context.await = previousAllowAwait;
- this.context.allowYield = previousAllowYield;
- return isAsync ? this.finalize(node, new Node.AsyncFunctionDeclaration(id, params, body)) :
- this.finalize(node, new Node.FunctionDeclaration(id, params, body, isGenerator));
- };
- Parser.prototype.parseFunctionExpression = function () {
- var node = this.createNode();
- var isAsync = this.matchContextualKeyword('async');
- if (isAsync) {
- this.nextToken();
- }
- this.expectKeyword('function');
- var isGenerator = isAsync ? false : this.match('*');
- if (isGenerator) {
- this.nextToken();
- }
- var message;
- var id = null;
- var firstRestricted;
- var previousAllowAwait = this.context.await;
- var previousAllowYield = this.context.allowYield;
- this.context.await = isAsync;
- this.context.allowYield = !isGenerator;
- if (!this.match('(')) {
- var token = this.lookahead;
- id = (!this.context.strict && !isGenerator && this.matchKeyword('yield')) ? this.parseIdentifierName() : this.parseVariableIdentifier();
- if (this.context.strict) {
- if (this.scanner.isRestrictedWord(token.value)) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.StrictFunctionName);
- }
- }
- else {
- if (this.scanner.isRestrictedWord(token.value)) {
- firstRestricted = token;
- message = messages_1.Messages.StrictFunctionName;
- }
- else if (this.scanner.isStrictModeReservedWord(token.value)) {
- firstRestricted = token;
- message = messages_1.Messages.StrictReservedWord;
- }
- }
- }
- var formalParameters = this.parseFormalParameters(firstRestricted);
- var params = formalParameters.params;
- var stricted = formalParameters.stricted;
- firstRestricted = formalParameters.firstRestricted;
- if (formalParameters.message) {
- message = formalParameters.message;
- }
- var previousStrict = this.context.strict;
- var previousAllowStrictDirective = this.context.allowStrictDirective;
- this.context.allowStrictDirective = formalParameters.simple;
- var body = this.parseFunctionSourceElements();
- if (this.context.strict && firstRestricted) {
- this.throwUnexpectedToken(firstRestricted, message);
- }
- if (this.context.strict && stricted) {
- this.tolerateUnexpectedToken(stricted, message);
- }
- this.context.strict = previousStrict;
- this.context.allowStrictDirective = previousAllowStrictDirective;
- this.context.await = previousAllowAwait;
- this.context.allowYield = previousAllowYield;
- return isAsync ? this.finalize(node, new Node.AsyncFunctionExpression(id, params, body)) :
- this.finalize(node, new Node.FunctionExpression(id, params, body, isGenerator));
- };
- // https://tc39.github.io/ecma262/#sec-directive-prologues-and-the-use-strict-directive
- Parser.prototype.parseDirective = function () {
- var token = this.lookahead;
- var node = this.createNode();
- var expr = this.parseExpression();
- var directive = (expr.type === syntax_1.Syntax.Literal) ? this.getTokenRaw(token).slice(1, -1) : null;
- this.consumeSemicolon();
- return this.finalize(node, directive ? new Node.Directive(expr, directive) : new Node.ExpressionStatement(expr));
- };
- Parser.prototype.parseDirectivePrologues = function () {
- var firstRestricted = null;
- var body = [];
- while (true) {
- var token = this.lookahead;
- if (token.type !== 8 /* StringLiteral */) {
- break;
- }
- var statement = this.parseDirective();
- body.push(statement);
- var directive = statement.directive;
- if (typeof directive !== 'string') {
- break;
- }
- if (directive === 'use strict') {
- this.context.strict = true;
- if (firstRestricted) {
- this.tolerateUnexpectedToken(firstRestricted, messages_1.Messages.StrictOctalLiteral);
- }
- if (!this.context.allowStrictDirective) {
- this.tolerateUnexpectedToken(token, messages_1.Messages.IllegalLanguageModeDirective);
- }
- }
- else {
- if (!firstRestricted && token.octal) {
- firstRestricted = token;
- }
- }
- }
- return body;
- };
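// [Editorial note, not part of the original esprima.js] A directive prologue is the
// run of leading string-literal statements in a program or function body, e.g.:
//   function f(a, b) { 'use strict'; }    // enables strict mode for f
//   function g(a = 1) { 'use strict'; }   // reported as IllegalLanguageModeDirective (non-simple parameter list)
//   function h() { 'use\u0020strict'; }   // escaped form: not recognized as a directive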
- // https://tc39.github.io/ecma262/#sec-method-definitions
- Parser.prototype.qualifiedPropertyName = function (token) {
- switch (token.type) {
- case 3 /* Identifier */:
- case 8 /* StringLiteral */:
- case 1 /* BooleanLiteral */:
- case 5 /* NullLiteral */:
- case 6 /* NumericLiteral */:
- case 4 /* Keyword */:
- return true;
- case 7 /* Punctuator */:
- return token.value === '[';
- default:
- break;
- }
- return false;
- };
- Parser.prototype.parseGetterMethod = function () {
- var node = this.createNode();
- var isGenerator = false;
- var previousAllowYield = this.context.allowYield;
- this.context.allowYield = !isGenerator;
- var formalParameters = this.parseFormalParameters();
- if (formalParameters.params.length > 0) {
- this.tolerateError(messages_1.Messages.BadGetterArity);
- }
- var method = this.parsePropertyMethod(formalParameters);
- this.context.allowYield = previousAllowYield;
- return this.finalize(node, new Node.FunctionExpression(null, formalParameters.params, method, isGenerator));
- };
- Parser.prototype.parseSetterMethod = function () {
- var node = this.createNode();
- var isGenerator = false;
- var previousAllowYield = this.context.allowYield;
- this.context.allowYield = !isGenerator;
- var formalParameters = this.parseFormalParameters();
- if (formalParameters.params.length !== 1) {
- this.tolerateError(messages_1.Messages.BadSetterArity);
- }
- else if (formalParameters.params[0] instanceof Node.RestElement) {
- this.tolerateError(messages_1.Messages.BadSetterRestParameter);
- }
- var method = this.parsePropertyMethod(formalParameters);
- this.context.allowYield = previousAllowYield;
- return this.finalize(node, new Node.FunctionExpression(null, formalParameters.params, method, isGenerator));
- };
- Parser.prototype.parseGeneratorMethod = function () {
- var node = this.createNode();
- var isGenerator = true;
- var previousAllowYield = this.context.allowYield;
- this.context.allowYield = true;
- var params = this.parseFormalParameters();
- this.context.allowYield = false;
- var method = this.parsePropertyMethod(params);
- this.context.allowYield = previousAllowYield;
- return this.finalize(node, new Node.FunctionExpression(null, params.params, method, isGenerator));
- };
- // https://tc39.github.io/ecma262/#sec-generator-function-definitions
- Parser.prototype.isStartOfExpression = function () {
- var start = true;
- var value = this.lookahead.value;
- switch (this.lookahead.type) {
- case 7 /* Punctuator */:
- start = (value === '[') || (value === '(') || (value === '{') ||
- (value === '+') || (value === '-') ||
- (value === '!') || (value === '~') ||
- (value === '++') || (value === '--') ||
- (value === '/') || (value === '/='); // regular expression literal
- break;
- case 4 /* Keyword */:
- start = (value === 'class') || (value === 'delete') ||
- (value === 'function') || (value === 'let') || (value === 'new') ||
- (value === 'super') || (value === 'this') || (value === 'typeof') ||
- (value === 'void') || (value === 'yield');
- break;
- default:
- break;
- }
- return start;
- };
- Parser.prototype.parseYieldExpression = function () {
- var node = this.createNode();
- this.expectKeyword('yield');
- var argument = null;
- var delegate = false;
- if (!this.hasLineTerminator) {
- var previousAllowYield = this.context.allowYield;
- this.context.allowYield = false;
- delegate = this.match('*');
- if (delegate) {
- this.nextToken();
- argument = this.parseAssignmentExpression();
- }
- else if (this.isStartOfExpression()) {
- argument = this.parseAssignmentExpression();
- }
- this.context.allowYield = previousAllowYield;
- }
- return this.finalize(node, new Node.YieldExpression(argument, delegate));
- };
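// [Editorial note, not part of the original esprima.js] Examples of the yield forms
// handled above, inside a generator body:
//   function* g() { yield; yield 1; yield* other(); }
//   // 'yield*' delegates to another iterable; a line break immediately after
//   // 'yield' means the argument is omitted (the !hasLineTerminator check above).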
- // https://tc39.github.io/ecma262/#sec-class-definitions
- Parser.prototype.parseClassElement = function (hasConstructor) {
- var token = this.lookahead;
- var node = this.createNode();
- var kind = '';
- var key = null;
- var value = null;
- var computed = false;
- var method = false;
- var isStatic = false;
- var isAsync = false;
- if (this.match('*')) {
- this.nextToken();
- }
- else {
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- var id = key;
- if (id.name === 'static' && (this.qualifiedPropertyName(this.lookahead) || this.match('*'))) {
- token = this.lookahead;
- isStatic = true;
- computed = this.match('[');
- if (this.match('*')) {
- this.nextToken();
- }
- else {
- key = this.parseObjectPropertyKey();
- }
- }
- if ((token.type === 3 /* Identifier */) && !this.hasLineTerminator && (token.value === 'async')) {
- var punctuator = this.lookahead.value;
- if (punctuator !== ':' && punctuator !== '(' && punctuator !== '*') {
- isAsync = true;
- token = this.lookahead;
- key = this.parseObjectPropertyKey();
- if (token.type === 3 /* Identifier */ && token.value === 'constructor') {
- this.tolerateUnexpectedToken(token, messages_1.Messages.ConstructorIsAsync);
- }
- }
- }
- }
- var lookaheadPropertyKey = this.qualifiedPropertyName(this.lookahead);
- if (token.type === 3 /* Identifier */) {
- if (token.value === 'get' && lookaheadPropertyKey) {
- kind = 'get';
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- this.context.allowYield = false;
- value = this.parseGetterMethod();
- }
- else if (token.value === 'set' && lookaheadPropertyKey) {
- kind = 'set';
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- value = this.parseSetterMethod();
- }
- }
- else if (token.type === 7 /* Punctuator */ && token.value === '*' && lookaheadPropertyKey) {
- kind = 'init';
- computed = this.match('[');
- key = this.parseObjectPropertyKey();
- value = this.parseGeneratorMethod();
- method = true;
- }
- if (!kind && key && this.match('(')) {
- kind = 'init';
- value = isAsync ? this.parsePropertyMethodAsyncFunction() : this.parsePropertyMethodFunction();
- method = true;
- }
- if (!kind) {
- this.throwUnexpectedToken(this.lookahead);
- }
- if (kind === 'init') {
- kind = 'method';
- }
- if (!computed) {
- if (isStatic && this.isPropertyKey(key, 'prototype')) {
- this.throwUnexpectedToken(token, messages_1.Messages.StaticPrototype);
- }
- if (!isStatic && this.isPropertyKey(key, 'constructor')) {
- if (kind !== 'method' || !method || (value && value.generator)) {
- this.throwUnexpectedToken(token, messages_1.Messages.ConstructorSpecialMethod);
- }
- if (hasConstructor.value) {
- this.throwUnexpectedToken(token, messages_1.Messages.DuplicateConstructor);
- }
- else {
- hasConstructor.value = true;
- }
- kind = 'constructor';
- }
- }
- return this.finalize(node, new Node.MethodDefinition(key, computed, value, kind, isStatic));
- };
- Parser.prototype.parseClassElementList = function () {
- var body = [];
- var hasConstructor = { value: false };
- this.expect('{');
- while (!this.match('}')) {
- if (this.match(';')) {
- this.nextToken();
- }
- else {
- body.push(this.parseClassElement(hasConstructor));
- }
- }
- this.expect('}');
- return body;
- };
- Parser.prototype.parseClassBody = function () {
- var node = this.createNode();
- var elementList = this.parseClassElementList();
- return this.finalize(node, new Node.ClassBody(elementList));
- };
- Parser.prototype.parseClassDeclaration = function (identifierIsOptional) {
- var node = this.createNode();
- var previousStrict = this.context.strict;
- this.context.strict = true;
- this.expectKeyword('class');
- var id = (identifierIsOptional && (this.lookahead.type !== 3 /* Identifier */)) ? null : this.parseVariableIdentifier();
- var superClass = null;
- if (this.matchKeyword('extends')) {
- this.nextToken();
- superClass = this.isolateCoverGrammar(this.parseLeftHandSideExpressionAllowCall);
- }
- var classBody = this.parseClassBody();
- this.context.strict = previousStrict;
- return this.finalize(node, new Node.ClassDeclaration(id, superClass, classBody));
- };
- Parser.prototype.parseClassExpression = function () {
- var node = this.createNode();
- var previousStrict = this.context.strict;
- this.context.strict = true;
- this.expectKeyword('class');
- var id = (this.lookahead.type === 3 /* Identifier */) ? this.parseVariableIdentifier() : null;
- var superClass = null;
- if (this.matchKeyword('extends')) {
- this.nextToken();
- superClass = this.isolateCoverGrammar(this.parseLeftHandSideExpressionAllowCall);
- }
- var classBody = this.parseClassBody();
- this.context.strict = previousStrict;
- return this.finalize(node, new Node.ClassExpression(id, superClass, classBody));
- };
- // https://tc39.github.io/ecma262/#sec-scripts
- // https://tc39.github.io/ecma262/#sec-modules
- Parser.prototype.parseModule = function () {
- this.context.strict = true;
- this.context.isModule = true;
- this.scanner.isModule = true;
- var node = this.createNode();
- var body = this.parseDirectivePrologues();
- while (this.lookahead.type !== 2 /* EOF */) {
- body.push(this.parseStatementListItem());
- }
- return this.finalize(node, new Node.Module(body));
- };
- Parser.prototype.parseScript = function () {
- var node = this.createNode();
- var body = this.parseDirectivePrologues();
- while (this.lookahead.type !== 2 /* EOF */) {
- body.push(this.parseStatementListItem());
- }
- return this.finalize(node, new Node.Script(body));
- };
- // https://tc39.github.io/ecma262/#sec-imports
- Parser.prototype.parseModuleSpecifier = function () {
- var node = this.createNode();
- if (this.lookahead.type !== 8 /* StringLiteral */) {
- this.throwError(messages_1.Messages.InvalidModuleSpecifier);
- }
- var token = this.nextToken();
- var raw = this.getTokenRaw(token);
- return this.finalize(node, new Node.Literal(token.value, raw));
- };
- // import {<foo as bar>} ...;
- Parser.prototype.parseImportSpecifier = function () {
- var node = this.createNode();
- var imported;
- var local;
- if (this.lookahead.type === 3 /* Identifier */) {
- imported = this.parseVariableIdentifier();
- local = imported;
- if (this.matchContextualKeyword('as')) {
- this.nextToken();
- local = this.parseVariableIdentifier();
- }
- }
- else {
- imported = this.parseIdentifierName();
- local = imported;
- if (this.matchContextualKeyword('as')) {
- this.nextToken();
- local = this.parseVariableIdentifier();
- }
- else {
- this.throwUnexpectedToken(this.nextToken());
- }
- }
- return this.finalize(node, new Node.ImportSpecifier(local, imported));
- };
- // {foo, bar as bas}
- Parser.prototype.parseNamedImports = function () {
- this.expect('{');
- var specifiers = [];
- while (!this.match('}')) {
- specifiers.push(this.parseImportSpecifier());
- if (!this.match('}')) {
- this.expect(',');
- }
- }
- this.expect('}');
- return specifiers;
- };
- // import <foo> ...;
- Parser.prototype.parseImportDefaultSpecifier = function () {
- var node = this.createNode();
- var local = this.parseIdentifierName();
- return this.finalize(node, new Node.ImportDefaultSpecifier(local));
- };
- // import <* as foo> ...;
- Parser.prototype.parseImportNamespaceSpecifier = function () {
- var node = this.createNode();
- this.expect('*');
- if (!this.matchContextualKeyword('as')) {
- this.throwError(messages_1.Messages.NoAsAfterImportNamespace);
- }
- this.nextToken();
- var local = this.parseIdentifierName();
- return this.finalize(node, new Node.ImportNamespaceSpecifier(local));
- };
- Parser.prototype.parseImportDeclaration = function () {
- if (this.context.inFunctionBody) {
- this.throwError(messages_1.Messages.IllegalImportDeclaration);
- }
- var node = this.createNode();
- this.expectKeyword('import');
- var src;
- var specifiers = [];
- if (this.lookahead.type === 8 /* StringLiteral */) {
- // import 'foo';
- src = this.parseModuleSpecifier();
- }
- else {
- if (this.match('{')) {
- // import {bar}
- specifiers = specifiers.concat(this.parseNamedImports());
- }
- else if (this.match('*')) {
- // import * as foo
- specifiers.push(this.parseImportNamespaceSpecifier());
- }
- else if (this.isIdentifierName(this.lookahead) && !this.matchKeyword('default')) {
- // import foo
- specifiers.push(this.parseImportDefaultSpecifier());
- if (this.match(',')) {
- this.nextToken();
- if (this.match('*')) {
- // import foo, * as foo
- specifiers.push(this.parseImportNamespaceSpecifier());
- }
- else if (this.match('{')) {
- // import foo, {bar}
- specifiers = specifiers.concat(this.parseNamedImports());
- }
- else {
- this.throwUnexpectedToken(this.lookahead);
- }
- }
- }
- else {
- this.throwUnexpectedToken(this.nextToken());
- }
- if (!this.matchContextualKeyword('from')) {
- var message = this.lookahead.value ? messages_1.Messages.UnexpectedToken : messages_1.Messages.MissingFromClause;
- this.throwError(message, this.lookahead.value);
- }
- this.nextToken();
- src = this.parseModuleSpecifier();
- }
- this.consumeSemicolon();
- return this.finalize(node, new Node.ImportDeclaration(specifiers, src));
- };
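// [Editorial note, not part of the original esprima.js] Import forms recognized by
// parseImportDeclaration above:
//   import 'foo';
//   import foo from 'foo';
//   import * as ns from 'foo';
//   import { bar, baz as qux } from 'foo';
//   import foo, { bar } from 'foo';
//   import foo, * as ns from 'foo';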
- // https://tc39.github.io/ecma262/#sec-exports
- Parser.prototype.parseExportSpecifier = function () {
- var node = this.createNode();
- var local = this.parseIdentifierName();
- var exported = local;
- if (this.matchContextualKeyword('as')) {
- this.nextToken();
- exported = this.parseIdentifierName();
- }
- return this.finalize(node, new Node.ExportSpecifier(local, exported));
- };
- Parser.prototype.parseExportDeclaration = function () {
- if (this.context.inFunctionBody) {
- this.throwError(messages_1.Messages.IllegalExportDeclaration);
- }
- var node = this.createNode();
- this.expectKeyword('export');
- var exportDeclaration;
- if (this.matchKeyword('default')) {
- // export default ...
- this.nextToken();
- if (this.matchKeyword('function')) {
- // export default function foo () {}
- // export default function () {}
- var declaration = this.parseFunctionDeclaration(true);
- exportDeclaration = this.finalize(node, new Node.ExportDefaultDeclaration(declaration));
- }
- else if (this.matchKeyword('class')) {
- // export default class foo {}
- var declaration = this.parseClassDeclaration(true);
- exportDeclaration = this.finalize(node, new Node.ExportDefaultDeclaration(declaration));
- }
- else if (this.matchContextualKeyword('async')) {
- // export default async function f () {}
- // export default async function () {}
- // export default async x => x
- var declaration = this.matchAsyncFunction() ? this.parseFunctionDeclaration(true) : this.parseAssignmentExpression();
- exportDeclaration = this.finalize(node, new Node.ExportDefaultDeclaration(declaration));
- }
- else {
- if (this.matchContextualKeyword('from')) {
- this.throwError(messages_1.Messages.UnexpectedToken, this.lookahead.value);
- }
- // export default {};
- // export default [];
- // export default (1 + 2);
- var declaration = this.match('{') ? this.parseObjectInitializer() :
- this.match('[') ? this.parseArrayInitializer() : this.parseAssignmentExpression();
- this.consumeSemicolon();
- exportDeclaration = this.finalize(node, new Node.ExportDefaultDeclaration(declaration));
- }
- }
- else if (this.match('*')) {
- // export * from 'foo';
- this.nextToken();
- if (!this.matchContextualKeyword('from')) {
- var message = this.lookahead.value ? messages_1.Messages.UnexpectedToken : messages_1.Messages.MissingFromClause;
- this.throwError(message, this.lookahead.value);
- }
- this.nextToken();
- var src = this.parseModuleSpecifier();
- this.consumeSemicolon();
- exportDeclaration = this.finalize(node, new Node.ExportAllDeclaration(src));
- }
- else if (this.lookahead.type === 4 /* Keyword */) {
- // export var f = 1;
- var declaration = void 0;
- switch (this.lookahead.value) {
- case 'let':
- case 'const':
- declaration = this.parseLexicalDeclaration({ inFor: false });
- break;
- case 'var':
- case 'class':
- case 'function':
- declaration = this.parseStatementListItem();
- break;
- default:
- this.throwUnexpectedToken(this.lookahead);
- }
- exportDeclaration = this.finalize(node, new Node.ExportNamedDeclaration(declaration, [], null));
- }
- else if (this.matchAsyncFunction()) {
- var declaration = this.parseFunctionDeclaration();
- exportDeclaration = this.finalize(node, new Node.ExportNamedDeclaration(declaration, [], null));
- }
- else {
- var specifiers = [];
- var source = null;
- var isExportFromIdentifier = false;
- this.expect('{');
- while (!this.match('}')) {
- isExportFromIdentifier = isExportFromIdentifier || this.matchKeyword('default');
- specifiers.push(this.parseExportSpecifier());
- if (!this.match('}')) {
- this.expect(',');
- }
- }
- this.expect('}');
- if (this.matchContextualKeyword('from')) {
- // export {default} from 'foo';
- // export {foo} from 'foo';
- this.nextToken();
- source = this.parseModuleSpecifier();
- this.consumeSemicolon();
- }
- else if (isExportFromIdentifier) {
- // export {default}; // missing fromClause
- var message = this.lookahead.value ? messages_1.Messages.UnexpectedToken : messages_1.Messages.MissingFromClause;
- this.throwError(message, this.lookahead.value);
- }
- else {
- // export {foo};
- this.consumeSemicolon();
- }
- exportDeclaration = this.finalize(node, new Node.ExportNamedDeclaration(null, specifiers, source));
- }
- return exportDeclaration;
- };
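// [Editorial note, not part of the original esprima.js] Export forms recognized by
// parseExportDeclaration above:
//   export default function () {}     // likewise class, async function, or an expression
//   export * from 'foo';
//   export var x = 1;                  // likewise let, const, class, function
//   export { foo, bar as baz };
//   export { default } from 'foo';     // 'default' in braces requires a from-clause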
- return Parser;
- }());
- exports.Parser = Parser;
-
-
-/***/ },
-/* 9 */
-/***/ function(module, exports) {
-
- "use strict";
- // Ensure the condition is true, otherwise throw an error.
- // This exists only to provide better contract semantics, i.e. another safety net
- // to catch logic errors. The condition should hold in the normal case.
- // Do NOT use this to enforce a certain condition on any user input.
- Object.defineProperty(exports, "__esModule", { value: true });
- function assert(condition, message) {
- /* istanbul ignore if */
- if (!condition) {
- throw new Error('ASSERT: ' + message);
- }
- }
- exports.assert = assert;
-
-
-/***/ },
-/* 10 */
-/***/ function(module, exports) {
-
- "use strict";
- /* tslint:disable:max-classes-per-file */
- Object.defineProperty(exports, "__esModule", { value: true });
- var ErrorHandler = (function () {
- function ErrorHandler() {
- this.errors = [];
- this.tolerant = false;
- }
- ErrorHandler.prototype.recordError = function (error) {
- this.errors.push(error);
- };
- ErrorHandler.prototype.tolerate = function (error) {
- if (this.tolerant) {
- this.recordError(error);
- }
- else {
- throw error;
- }
- };
- ErrorHandler.prototype.constructError = function (msg, column) {
- var error = new Error(msg);
- try {
- throw error;
- }
- catch (base) {
- /* istanbul ignore else */
- if (Object.create && Object.defineProperty) {
- error = Object.create(base);
- Object.defineProperty(error, 'column', { value: column });
- }
- }
- /* istanbul ignore next */
- return error;
- };
- ErrorHandler.prototype.createError = function (index, line, col, description) {
- var msg = 'Line ' + line + ': ' + description;
- var error = this.constructError(msg, col);
- error.index = index;
- error.lineNumber = line;
- error.description = description;
- return error;
- };
- ErrorHandler.prototype.throwError = function (index, line, col, description) {
- throw this.createError(index, line, col, description);
- };
- ErrorHandler.prototype.tolerateError = function (index, line, col, description) {
- var error = this.createError(index, line, col, description);
- if (this.tolerant) {
- this.recordError(error);
- }
- else {
- throw error;
- }
- };
- return ErrorHandler;
- }());
- exports.ErrorHandler = ErrorHandler;
-
-
-/***/ },
-/* 11 */
-/***/ function(module, exports) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- // Error messages should be identical to V8.
- exports.Messages = {
- BadGetterArity: 'Getter must not have any formal parameters',
- BadSetterArity: 'Setter must have exactly one formal parameter',
- BadSetterRestParameter: 'Setter function argument must not be a rest parameter',
- ConstructorIsAsync: 'Class constructor may not be an async method',
- ConstructorSpecialMethod: 'Class constructor may not be an accessor',
- DeclarationMissingInitializer: 'Missing initializer in %0 declaration',
- DefaultRestParameter: 'Unexpected token =',
- DuplicateBinding: 'Duplicate binding %0',
- DuplicateConstructor: 'A class may only have one constructor',
- DuplicateProtoProperty: 'Duplicate __proto__ fields are not allowed in object literals',
- ForInOfLoopInitializer: '%0 loop variable declaration may not have an initializer',
- GeneratorInLegacyContext: 'Generator declarations are not allowed in legacy contexts',
- IllegalBreak: 'Illegal break statement',
- IllegalContinue: 'Illegal continue statement',
- IllegalExportDeclaration: 'Unexpected token',
- IllegalImportDeclaration: 'Unexpected token',
- IllegalLanguageModeDirective: 'Illegal \'use strict\' directive in function with non-simple parameter list',
- IllegalReturn: 'Illegal return statement',
- InvalidEscapedReservedWord: 'Keyword must not contain escaped characters',
- InvalidHexEscapeSequence: 'Invalid hexadecimal escape sequence',
- InvalidLHSInAssignment: 'Invalid left-hand side in assignment',
- InvalidLHSInForIn: 'Invalid left-hand side in for-in',
- InvalidLHSInForLoop: 'Invalid left-hand side in for-loop',
- InvalidModuleSpecifier: 'Unexpected token',
- InvalidRegExp: 'Invalid regular expression',
- LetInLexicalBinding: 'let is disallowed as a lexically bound name',
- MissingFromClause: 'Unexpected token',
- MultipleDefaultsInSwitch: 'More than one default clause in switch statement',
- NewlineAfterThrow: 'Illegal newline after throw',
- NoAsAfterImportNamespace: 'Unexpected token',
- NoCatchOrFinally: 'Missing catch or finally after try',
- ParameterAfterRestParameter: 'Rest parameter must be last formal parameter',
- Redeclaration: '%0 \'%1\' has already been declared',
- StaticPrototype: 'Classes may not have static property named prototype',
- StrictCatchVariable: 'Catch variable may not be eval or arguments in strict mode',
- StrictDelete: 'Delete of an unqualified identifier in strict mode.',
- StrictFunction: 'In strict mode code, functions can only be declared at top level or inside a block',
- StrictFunctionName: 'Function name may not be eval or arguments in strict mode',
- StrictLHSAssignment: 'Assignment to eval or arguments is not allowed in strict mode',
- StrictLHSPostfix: 'Postfix increment/decrement may not have eval or arguments operand in strict mode',
- StrictLHSPrefix: 'Prefix increment/decrement may not have eval or arguments operand in strict mode',
- StrictModeWith: 'Strict mode code may not include a with statement',
- StrictOctalLiteral: 'Octal literals are not allowed in strict mode.',
- StrictParamDupe: 'Strict mode function may not have duplicate parameter names',
- StrictParamName: 'Parameter name eval or arguments is not allowed in strict mode',
- StrictReservedWord: 'Use of future reserved word in strict mode',
- StrictVarName: 'Variable name may not be eval or arguments in strict mode',
- TemplateOctalLiteral: 'Octal literals are not allowed in template strings.',
- UnexpectedEOS: 'Unexpected end of input',
- UnexpectedIdentifier: 'Unexpected identifier',
- UnexpectedNumber: 'Unexpected number',
- UnexpectedReserved: 'Unexpected reserved word',
- UnexpectedString: 'Unexpected string',
- UnexpectedTemplate: 'Unexpected quasi %0',
- UnexpectedToken: 'Unexpected token %0',
- UnexpectedTokenIllegal: 'Unexpected token ILLEGAL',
- UnknownLabel: 'Undefined label \'%0\'',
- UnterminatedRegExp: 'Invalid regular expression: missing /'
- };
-
-
-/***/ },
-/* 12 */
-/***/ function(module, exports, __webpack_require__) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- var assert_1 = __webpack_require__(9);
- var character_1 = __webpack_require__(4);
- var messages_1 = __webpack_require__(11);
- function hexValue(ch) {
- return '0123456789abcdef'.indexOf(ch.toLowerCase());
- }
- function octalValue(ch) {
- return '01234567'.indexOf(ch);
- }
- var Scanner = (function () {
- function Scanner(code, handler) {
- this.source = code;
- this.errorHandler = handler;
- this.trackComment = false;
- this.isModule = false;
- this.length = code.length;
- this.index = 0;
- this.lineNumber = (code.length > 0) ? 1 : 0;
- this.lineStart = 0;
- this.curlyStack = [];
- }
- Scanner.prototype.saveState = function () {
- return {
- index: this.index,
- lineNumber: this.lineNumber,
- lineStart: this.lineStart
- };
- };
- Scanner.prototype.restoreState = function (state) {
- this.index = state.index;
- this.lineNumber = state.lineNumber;
- this.lineStart = state.lineStart;
- };
- Scanner.prototype.eof = function () {
- return this.index >= this.length;
- };
- Scanner.prototype.throwUnexpectedToken = function (message) {
- if (message === void 0) { message = messages_1.Messages.UnexpectedTokenIllegal; }
- return this.errorHandler.throwError(this.index, this.lineNumber, this.index - this.lineStart + 1, message);
- };
- Scanner.prototype.tolerateUnexpectedToken = function (message) {
- if (message === void 0) { message = messages_1.Messages.UnexpectedTokenIllegal; }
- this.errorHandler.tolerateError(this.index, this.lineNumber, this.index - this.lineStart + 1, message);
- };
- // https://tc39.github.io/ecma262/#sec-comments
- Scanner.prototype.skipSingleLineComment = function (offset) {
- var comments = [];
- var start, loc;
- if (this.trackComment) {
- comments = [];
- start = this.index - offset;
- loc = {
- start: {
- line: this.lineNumber,
- column: this.index - this.lineStart - offset
- },
- end: {}
- };
- }
- while (!this.eof()) {
- var ch = this.source.charCodeAt(this.index);
- ++this.index;
- if (character_1.Character.isLineTerminator(ch)) {
- if (this.trackComment) {
- loc.end = {
- line: this.lineNumber,
- column: this.index - this.lineStart - 1
- };
- var entry = {
- multiLine: false,
- slice: [start + offset, this.index - 1],
- range: [start, this.index - 1],
- loc: loc
- };
- comments.push(entry);
- }
- if (ch === 13 && this.source.charCodeAt(this.index) === 10) {
- ++this.index;
- }
- ++this.lineNumber;
- this.lineStart = this.index;
- return comments;
- }
- }
- if (this.trackComment) {
- loc.end = {
- line: this.lineNumber,
- column: this.index - this.lineStart
- };
- var entry = {
- multiLine: false,
- slice: [start + offset, this.index],
- range: [start, this.index],
- loc: loc
- };
- comments.push(entry);
- }
- return comments;
- };
- Scanner.prototype.skipMultiLineComment = function () {
- var comments = [];
- var start, loc;
- if (this.trackComment) {
- comments = [];
- start = this.index - 2;
- loc = {
- start: {
- line: this.lineNumber,
- column: this.index - this.lineStart - 2
- },
- end: {}
- };
- }
- while (!this.eof()) {
- var ch = this.source.charCodeAt(this.index);
- if (character_1.Character.isLineTerminator(ch)) {
- if (ch === 0x0D && this.source.charCodeAt(this.index + 1) === 0x0A) {
- ++this.index;
- }
- ++this.lineNumber;
- ++this.index;
- this.lineStart = this.index;
- }
- else if (ch === 0x2A) {
- // Block comment ends with '*/'.
- if (this.source.charCodeAt(this.index + 1) === 0x2F) {
- this.index += 2;
- if (this.trackComment) {
- loc.end = {
- line: this.lineNumber,
- column: this.index - this.lineStart
- };
- var entry = {
- multiLine: true,
- slice: [start + 2, this.index - 2],
- range: [start, this.index],
- loc: loc
- };
- comments.push(entry);
- }
- return comments;
- }
- ++this.index;
- }
- else {
- ++this.index;
- }
- }
- // Ran off the end of the file - the whole thing is a comment
- if (this.trackComment) {
- loc.end = {
- line: this.lineNumber,
- column: this.index - this.lineStart
- };
- var entry = {
- multiLine: true,
- slice: [start + 2, this.index],
- range: [start, this.index],
- loc: loc
- };
- comments.push(entry);
- }
- this.tolerateUnexpectedToken();
- return comments;
- };
- Scanner.prototype.scanComments = function () {
- var comments;
- if (this.trackComment) {
- comments = [];
- }
- var start = (this.index === 0);
- while (!this.eof()) {
- var ch = this.source.charCodeAt(this.index);
- if (character_1.Character.isWhiteSpace(ch)) {
- ++this.index;
- }
- else if (character_1.Character.isLineTerminator(ch)) {
- ++this.index;
- if (ch === 0x0D && this.source.charCodeAt(this.index) === 0x0A) {
- ++this.index;
- }
- ++this.lineNumber;
- this.lineStart = this.index;
- start = true;
- }
- else if (ch === 0x2F) {
- ch = this.source.charCodeAt(this.index + 1);
- if (ch === 0x2F) {
- this.index += 2;
- var comment = this.skipSingleLineComment(2);
- if (this.trackComment) {
- comments = comments.concat(comment);
- }
- start = true;
- }
- else if (ch === 0x2A) {
- this.index += 2;
- var comment = this.skipMultiLineComment();
- if (this.trackComment) {
- comments = comments.concat(comment);
- }
- }
- else {
- break;
- }
- }
- else if (start && ch === 0x2D) {
- // U+003E is '>'
- if ((this.source.charCodeAt(this.index + 1) === 0x2D) && (this.source.charCodeAt(this.index + 2) === 0x3E)) {
- // '-->' is a single-line comment
- this.index += 3;
- var comment = this.skipSingleLineComment(3);
- if (this.trackComment) {
- comments = comments.concat(comment);
- }
- }
- else {
- break;
- }
- }
- else if (ch === 0x3C && !this.isModule) {
- if (this.source.slice(this.index + 1, this.index + 4) === '!--') {
- this.index += 4; // `<!--`
- var comment = this.skipSingleLineComment(4);
- if (this.trackComment) {
- comments = comments.concat(comment);
- }
- }
- else {
- break;
- }
- }
- else {
- break;
- }
- }
- return comments;
- };
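// [Editorial note, not part of the original esprima.js] Besides // and /* */ comments,
// scanComments above accepts Annex B HTML-like comments in non-module code:
//   <!-- everything after this marker up to the end of the line is a comment
//   --> also a single-line comment, but only when it starts a line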
- // https://tc39.github.io/ecma262/#sec-future-reserved-words
- Scanner.prototype.isFutureReservedWord = function (id) {
- switch (id) {
- case 'enum':
- case 'export':
- case 'import':
- case 'super':
- return true;
- default:
- return false;
- }
- };
- Scanner.prototype.isStrictModeReservedWord = function (id) {
- switch (id) {
- case 'implements':
- case 'interface':
- case 'package':
- case 'private':
- case 'protected':
- case 'public':
- case 'static':
- case 'yield':
- case 'let':
- return true;
- default:
- return false;
- }
- };
- Scanner.prototype.isRestrictedWord = function (id) {
- return id === 'eval' || id === 'arguments';
- };
- // https://tc39.github.io/ecma262/#sec-keywords
- Scanner.prototype.isKeyword = function (id) {
- switch (id.length) {
- case 2:
- return (id === 'if') || (id === 'in') || (id === 'do');
- case 3:
- return (id === 'var') || (id === 'for') || (id === 'new') ||
- (id === 'try') || (id === 'let');
- case 4:
- return (id === 'this') || (id === 'else') || (id === 'case') ||
- (id === 'void') || (id === 'with') || (id === 'enum');
- case 5:
- return (id === 'while') || (id === 'break') || (id === 'catch') ||
- (id === 'throw') || (id === 'const') || (id === 'yield') ||
- (id === 'class') || (id === 'super');
- case 6:
- return (id === 'return') || (id === 'typeof') || (id === 'delete') ||
- (id === 'switch') || (id === 'export') || (id === 'import');
- case 7:
- return (id === 'default') || (id === 'finally') || (id === 'extends');
- case 8:
- return (id === 'function') || (id === 'continue') || (id === 'debugger');
- case 10:
- return (id === 'instanceof');
- default:
- return false;
- }
- };
- Scanner.prototype.codePointAt = function (i) {
- var cp = this.source.charCodeAt(i);
- if (cp >= 0xD800 && cp <= 0xDBFF) {
- var second = this.source.charCodeAt(i + 1);
- if (second >= 0xDC00 && second <= 0xDFFF) {
- var first = cp;
- cp = (first - 0xD800) * 0x400 + second - 0xDC00 + 0x10000;
- }
- }
- return cp;
- };
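// [Editorial note, not part of the original esprima.js] Surrogate-pair combination as
// performed above, e.g. for U+10437 ('𐐷'):
//   first = 0xD801, second = 0xDC37
//   (0xD801 - 0xD800) * 0x400 + (0xDC37 - 0xDC00) + 0x10000 === 0x10437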
- Scanner.prototype.scanHexEscape = function (prefix) {
- var len = (prefix === 'u') ? 4 : 2;
- var code = 0;
- for (var i = 0; i < len; ++i) {
- if (!this.eof() && character_1.Character.isHexDigit(this.source.charCodeAt(this.index))) {
- code = code * 16 + hexValue(this.source[this.index++]);
- }
- else {
- return null;
- }
- }
- return String.fromCharCode(code);
- };
- Scanner.prototype.scanUnicodeCodePointEscape = function () {
- var ch = this.source[this.index];
- var code = 0;
- // At least, one hex digit is required.
- if (ch === '}') {
- this.throwUnexpectedToken();
- }
- while (!this.eof()) {
- ch = this.source[this.index++];
- if (!character_1.Character.isHexDigit(ch.charCodeAt(0))) {
- break;
- }
- code = code * 16 + hexValue(ch);
- }
- if (code > 0x10FFFF || ch !== '}') {
- this.throwUnexpectedToken();
- }
- return character_1.Character.fromCodePoint(code);
- };
- Scanner.prototype.getIdentifier = function () {
- var start = this.index++;
- while (!this.eof()) {
- var ch = this.source.charCodeAt(this.index);
- if (ch === 0x5C) {
- // Backslash (U+005C) marks a Unicode escape sequence.
- this.index = start;
- return this.getComplexIdentifier();
- }
- else if (ch >= 0xD800 && ch < 0xDFFF) {
- // Need to handle surrogate pairs.
- this.index = start;
- return this.getComplexIdentifier();
- }
- if (character_1.Character.isIdentifierPart(ch)) {
- ++this.index;
- }
- else {
- break;
- }
- }
- return this.source.slice(start, this.index);
- };
- Scanner.prototype.getComplexIdentifier = function () {
- var cp = this.codePointAt(this.index);
- var id = character_1.Character.fromCodePoint(cp);
- this.index += id.length;
- // '\u' (U+005C, U+0075) denotes an escaped character.
- var ch;
- if (cp === 0x5C) {
- if (this.source.charCodeAt(this.index) !== 0x75) {
- this.throwUnexpectedToken();
- }
- ++this.index;
- if (this.source[this.index] === '{') {
- ++this.index;
- ch = this.scanUnicodeCodePointEscape();
- }
- else {
- ch = this.scanHexEscape('u');
- if (ch === null || ch === '\\' || !character_1.Character.isIdentifierStart(ch.charCodeAt(0))) {
- this.throwUnexpectedToken();
- }
- }
- id = ch;
- }
- while (!this.eof()) {
- cp = this.codePointAt(this.index);
- if (!character_1.Character.isIdentifierPart(cp)) {
- break;
- }
- ch = character_1.Character.fromCodePoint(cp);
- id += ch;
- this.index += ch.length;
- // '\u' (U+005C, U+0075) denotes an escaped character.
- if (cp === 0x5C) {
- id = id.substr(0, id.length - 1);
- if (this.source.charCodeAt(this.index) !== 0x75) {
- this.throwUnexpectedToken();
- }
- ++this.index;
- if (this.source[this.index] === '{') {
- ++this.index;
- ch = this.scanUnicodeCodePointEscape();
- }
- else {
- ch = this.scanHexEscape('u');
- if (ch === null || ch === '\\' || !character_1.Character.isIdentifierPart(ch.charCodeAt(0))) {
- this.throwUnexpectedToken();
- }
- }
- id += ch;
- }
- }
- return id;
- };
- Scanner.prototype.octalToDecimal = function (ch) {
- // \0 on its own is not an octal escape sequence
- var octal = (ch !== '0');
- var code = octalValue(ch);
- if (!this.eof() && character_1.Character.isOctalDigit(this.source.charCodeAt(this.index))) {
- octal = true;
- code = code * 8 + octalValue(this.source[this.index++]);
- // A third octal digit is only allowed when the escape
- // starts with 0, 1, 2 or 3
- if ('0123'.indexOf(ch) >= 0 && !this.eof() && character_1.Character.isOctalDigit(this.source.charCodeAt(this.index))) {
- code = code * 8 + octalValue(this.source[this.index++]);
- }
- }
- return {
- code: code,
- octal: octal
- };
- };
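// [Editorial note, not part of the original esprima.js] Legacy octal escapes in
// non-strict string literals, as decoded by octalToDecimal above:
//   '\12'  === '\n'        // two octal digits
//   '\012' === '\n'        // a third digit is read only for a leading 0-3
//   '\412' === '!' + '2'   // leading 4-7: at most two digits (\41 is U+0021 '!')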
- // https://tc39.github.io/ecma262/#sec-names-and-keywords
- Scanner.prototype.scanIdentifier = function () {
- var type;
- var start = this.index;
- // Backslash (U+005C) starts an escaped character.
- var id = (this.source.charCodeAt(start) === 0x5C) ? this.getComplexIdentifier() : this.getIdentifier();
- // There is no keyword or literal with only one character.
- // Thus, it must be an identifier.
- if (id.length === 1) {
- type = 3 /* Identifier */;
- }
- else if (this.isKeyword(id)) {
- type = 4 /* Keyword */;
- }
- else if (id === 'null') {
- type = 5 /* NullLiteral */;
- }
- else if (id === 'true' || id === 'false') {
- type = 1 /* BooleanLiteral */;
- }
- else {
- type = 3 /* Identifier */;
- }
- if (type !== 3 /* Identifier */ && (start + id.length !== this.index)) {
- var restore = this.index;
- this.index = start;
- this.tolerateUnexpectedToken(messages_1.Messages.InvalidEscapedReservedWord);
- this.index = restore;
- }
- return {
- type: type,
- value: id,
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- // https://tc39.github.io/ecma262/#sec-punctuators
- Scanner.prototype.scanPunctuator = function () {
- var start = this.index;
- // Check for most common single-character punctuators.
- var str = this.source[this.index];
- switch (str) {
- case '(':
- case '{':
- if (str === '{') {
- this.curlyStack.push('{');
- }
- ++this.index;
- break;
- case '.':
- ++this.index;
- if (this.source[this.index] === '.' && this.source[this.index + 1] === '.') {
- // Spread operator: ...
- this.index += 2;
- str = '...';
- }
- break;
- case '}':
- ++this.index;
- this.curlyStack.pop();
- break;
- case ')':
- case ';':
- case ',':
- case '[':
- case ']':
- case ':':
- case '?':
- case '~':
- ++this.index;
- break;
- default:
- // 4-character punctuator.
- str = this.source.substr(this.index, 4);
- if (str === '>>>=') {
- this.index += 4;
- }
- else {
- // 3-character punctuators.
- str = str.substr(0, 3);
- if (str === '===' || str === '!==' || str === '>>>' ||
- str === '<<=' || str === '>>=' || str === '**=') {
- this.index += 3;
- }
- else {
- // 2-character punctuators.
- str = str.substr(0, 2);
- if (str === '&&' || str === '||' || str === '==' || str === '!=' ||
- str === '+=' || str === '-=' || str === '*=' || str === '/=' ||
- str === '++' || str === '--' || str === '<<' || str === '>>' ||
- str === '&=' || str === '|=' || str === '^=' || str === '%=' ||
- str === '<=' || str === '>=' || str === '=>' || str === '**') {
- this.index += 2;
- }
- else {
- // 1-character punctuators.
- str = this.source[this.index];
- if ('<>=!+-*%&|^/'.indexOf(str) >= 0) {
- ++this.index;
- }
- }
- }
- }
- }
- if (this.index === start) {
- this.throwUnexpectedToken();
- }
- return {
- type: 7 /* Punctuator */,
- value: str,
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- // https://tc39.github.io/ecma262/#sec-literals-numeric-literals
- Scanner.prototype.scanHexLiteral = function (start) {
- var num = '';
- while (!this.eof()) {
- if (!character_1.Character.isHexDigit(this.source.charCodeAt(this.index))) {
- break;
- }
- num += this.source[this.index++];
- }
- if (num.length === 0) {
- this.throwUnexpectedToken();
- }
- if (character_1.Character.isIdentifierStart(this.source.charCodeAt(this.index))) {
- this.throwUnexpectedToken();
- }
- return {
- type: 6 /* NumericLiteral */,
- value: parseInt('0x' + num, 16),
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- Scanner.prototype.scanBinaryLiteral = function (start) {
- var num = '';
- var ch;
- while (!this.eof()) {
- ch = this.source[this.index];
- if (ch !== '0' && ch !== '1') {
- break;
- }
- num += this.source[this.index++];
- }
- if (num.length === 0) {
- // only 0b or 0B
- this.throwUnexpectedToken();
- }
- if (!this.eof()) {
- ch = this.source.charCodeAt(this.index);
- /* istanbul ignore else */
- if (character_1.Character.isIdentifierStart(ch) || character_1.Character.isDecimalDigit(ch)) {
- this.throwUnexpectedToken();
- }
- }
- return {
- type: 6 /* NumericLiteral */,
- value: parseInt(num, 2),
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- Scanner.prototype.scanOctalLiteral = function (prefix, start) {
- var num = '';
- var octal = false;
- if (character_1.Character.isOctalDigit(prefix.charCodeAt(0))) {
- octal = true;
- num = '0' + this.source[this.index++];
- }
- else {
- ++this.index;
- }
- while (!this.eof()) {
- if (!character_1.Character.isOctalDigit(this.source.charCodeAt(this.index))) {
- break;
- }
- num += this.source[this.index++];
- }
- if (!octal && num.length === 0) {
- // only 0o or 0O
- this.throwUnexpectedToken();
- }
- if (character_1.Character.isIdentifierStart(this.source.charCodeAt(this.index)) || character_1.Character.isDecimalDigit(this.source.charCodeAt(this.index))) {
- this.throwUnexpectedToken();
- }
- return {
- type: 6 /* NumericLiteral */,
- value: parseInt(num, 8),
- octal: octal,
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- Scanner.prototype.isImplicitOctalLiteral = function () {
- // Implicit octal, unless there is a non-octal digit.
- // (Annex B.1.1 on Numeric Literals)
- for (var i = this.index + 1; i < this.length; ++i) {
- var ch = this.source[i];
- if (ch === '8' || ch === '9') {
- return false;
- }
- if (!character_1.Character.isOctalDigit(ch.charCodeAt(0))) {
- return true;
- }
- }
- return true;
- };
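// [Editorial note, not part of the original esprima.js] Annex B numeric literals
// decided by the check above (non-strict code only):
//   0777   // all digits octal: implicit octal literal, value 511
//   0789   // contains 8 or 9: parsed as the decimal literal 789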
- Scanner.prototype.scanNumericLiteral = function () {
- var start = this.index;
- var ch = this.source[start];
- assert_1.assert(character_1.Character.isDecimalDigit(ch.charCodeAt(0)) || (ch === '.'), 'Numeric literal must start with a decimal digit or a decimal point');
- var num = '';
- if (ch !== '.') {
- num = this.source[this.index++];
- ch = this.source[this.index];
- // Hex number starts with '0x'.
- // Octal number starts with '0'.
- // Octal number in ES6 starts with '0o'.
- // Binary number in ES6 starts with '0b'.
- if (num === '0') {
- if (ch === 'x' || ch === 'X') {
- ++this.index;
- return this.scanHexLiteral(start);
- }
- if (ch === 'b' || ch === 'B') {
- ++this.index;
- return this.scanBinaryLiteral(start);
- }
- if (ch === 'o' || ch === 'O') {
- return this.scanOctalLiteral(ch, start);
- }
- if (ch && character_1.Character.isOctalDigit(ch.charCodeAt(0))) {
- if (this.isImplicitOctalLiteral()) {
- return this.scanOctalLiteral(ch, start);
- }
- }
- }
- while (character_1.Character.isDecimalDigit(this.source.charCodeAt(this.index))) {
- num += this.source[this.index++];
- }
- ch = this.source[this.index];
- }
- if (ch === '.') {
- num += this.source[this.index++];
- while (character_1.Character.isDecimalDigit(this.source.charCodeAt(this.index))) {
- num += this.source[this.index++];
- }
- ch = this.source[this.index];
- }
- if (ch === 'e' || ch === 'E') {
- num += this.source[this.index++];
- ch = this.source[this.index];
- if (ch === '+' || ch === '-') {
- num += this.source[this.index++];
- }
- if (character_1.Character.isDecimalDigit(this.source.charCodeAt(this.index))) {
- while (character_1.Character.isDecimalDigit(this.source.charCodeAt(this.index))) {
- num += this.source[this.index++];
- }
- }
- else {
- this.throwUnexpectedToken();
- }
- }
- if (character_1.Character.isIdentifierStart(this.source.charCodeAt(this.index))) {
- this.throwUnexpectedToken();
- }
- return {
- type: 6 /* NumericLiteral */,
- value: parseFloat(num),
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- // https://tc39.github.io/ecma262/#sec-literals-string-literals
- Scanner.prototype.scanStringLiteral = function () {
- var start = this.index;
- var quote = this.source[start];
- assert_1.assert((quote === '\'' || quote === '"'), 'String literal must start with a quote');
- ++this.index;
- var octal = false;
- var str = '';
- while (!this.eof()) {
- var ch = this.source[this.index++];
- if (ch === quote) {
- quote = '';
- break;
- }
- else if (ch === '\\') {
- ch = this.source[this.index++];
- if (!ch || !character_1.Character.isLineTerminator(ch.charCodeAt(0))) {
- switch (ch) {
- case 'u':
- if (this.source[this.index] === '{') {
- ++this.index;
- str += this.scanUnicodeCodePointEscape();
- }
- else {
- var unescaped_1 = this.scanHexEscape(ch);
- if (unescaped_1 === null) {
- this.throwUnexpectedToken();
- }
- str += unescaped_1;
- }
- break;
- case 'x':
- var unescaped = this.scanHexEscape(ch);
- if (unescaped === null) {
- this.throwUnexpectedToken(messages_1.Messages.InvalidHexEscapeSequence);
- }
- str += unescaped;
- break;
- case 'n':
- str += '\n';
- break;
- case 'r':
- str += '\r';
- break;
- case 't':
- str += '\t';
- break;
- case 'b':
- str += '\b';
- break;
- case 'f':
- str += '\f';
- break;
- case 'v':
- str += '\x0B';
- break;
- case '8':
- case '9':
- str += ch;
- this.tolerateUnexpectedToken();
- break;
- default:
- if (ch && character_1.Character.isOctalDigit(ch.charCodeAt(0))) {
- var octToDec = this.octalToDecimal(ch);
- octal = octToDec.octal || octal;
- str += String.fromCharCode(octToDec.code);
- }
- else {
- str += ch;
- }
- break;
- }
- }
- else {
- ++this.lineNumber;
- if (ch === '\r' && this.source[this.index] === '\n') {
- ++this.index;
- }
- this.lineStart = this.index;
- }
- }
- else if (character_1.Character.isLineTerminator(ch.charCodeAt(0))) {
- break;
- }
- else {
- str += ch;
- }
- }
- if (quote !== '') {
- this.index = start;
- this.throwUnexpectedToken();
- }
- return {
- type: 8 /* StringLiteral */,
- value: str,
- octal: octal,
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- // https://tc39.github.io/ecma262/#sec-template-literal-lexical-components
- Scanner.prototype.scanTemplate = function () {
- var cooked = '';
- var terminated = false;
- var start = this.index;
- var head = (this.source[start] === '`');
- var tail = false;
- var rawOffset = 2;
- ++this.index;
- while (!this.eof()) {
- var ch = this.source[this.index++];
- if (ch === '`') {
- rawOffset = 1;
- tail = true;
- terminated = true;
- break;
- }
- else if (ch === '$') {
- if (this.source[this.index] === '{') {
- this.curlyStack.push('${');
- ++this.index;
- terminated = true;
- break;
- }
- cooked += ch;
- }
- else if (ch === '\\') {
- ch = this.source[this.index++];
- if (!character_1.Character.isLineTerminator(ch.charCodeAt(0))) {
- switch (ch) {
- case 'n':
- cooked += '\n';
- break;
- case 'r':
- cooked += '\r';
- break;
- case 't':
- cooked += '\t';
- break;
- case 'u':
- if (this.source[this.index] === '{') {
- ++this.index;
- cooked += this.scanUnicodeCodePointEscape();
- }
- else {
- var restore = this.index;
- var unescaped_2 = this.scanHexEscape(ch);
- if (unescaped_2 !== null) {
- cooked += unescaped_2;
- }
- else {
- this.index = restore;
- cooked += ch;
- }
- }
- break;
- case 'x':
- var unescaped = this.scanHexEscape(ch);
- if (unescaped === null) {
- this.throwUnexpectedToken(messages_1.Messages.InvalidHexEscapeSequence);
- }
- cooked += unescaped;
- break;
- case 'b':
- cooked += '\b';
- break;
- case 'f':
- cooked += '\f';
- break;
- case 'v':
- cooked += '\v';
- break;
- default:
- if (ch === '0') {
- if (character_1.Character.isDecimalDigit(this.source.charCodeAt(this.index))) {
- // Illegal: \01 \02 and so on
- this.throwUnexpectedToken(messages_1.Messages.TemplateOctalLiteral);
- }
- cooked += '\0';
- }
- else if (character_1.Character.isOctalDigit(ch.charCodeAt(0))) {
- // Illegal: \1 \2
- this.throwUnexpectedToken(messages_1.Messages.TemplateOctalLiteral);
- }
- else {
- cooked += ch;
- }
- break;
- }
- }
- else {
- ++this.lineNumber;
- if (ch === '\r' && this.source[this.index] === '\n') {
- ++this.index;
- }
- this.lineStart = this.index;
- }
- }
- else if (character_1.Character.isLineTerminator(ch.charCodeAt(0))) {
- ++this.lineNumber;
- if (ch === '\r' && this.source[this.index] === '\n') {
- ++this.index;
- }
- this.lineStart = this.index;
- cooked += '\n';
- }
- else {
- cooked += ch;
- }
- }
- if (!terminated) {
- this.throwUnexpectedToken();
- }
- if (!head) {
- this.curlyStack.pop();
- }
- return {
- type: 10 /* Template */,
- value: this.source.slice(start + 1, this.index - rawOffset),
- cooked: cooked,
- head: head,
- tail: tail,
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- // https://tc39.github.io/ecma262/#sec-literals-regular-expression-literals
- Scanner.prototype.testRegExp = function (pattern, flags) {
- // The BMP character to use as a replacement for astral symbols when
- // translating an ES6 "u"-flagged pattern to an ES5-compatible
- // approximation.
- // Note: replacing with '\uFFFF' enables false positives in unlikely
- // scenarios. For example, `[\u{1044f}-\u{10440}]` is an invalid
- // pattern that would not be detected by this substitution.
- var astralSubstitute = '\uFFFF';
- var tmp = pattern;
- var self = this;
- if (flags.indexOf('u') >= 0) {
- tmp = tmp
- .replace(/\\u\{([0-9a-fA-F]+)\}|\\u([a-fA-F0-9]{4})/g, function ($0, $1, $2) {
- var codePoint = parseInt($1 || $2, 16);
- if (codePoint > 0x10FFFF) {
- self.throwUnexpectedToken(messages_1.Messages.InvalidRegExp);
- }
- if (codePoint <= 0xFFFF) {
- return String.fromCharCode(codePoint);
- }
- return astralSubstitute;
- })
- .replace(/[\uD800-\uDBFF][\uDC00-\uDFFF]/g, astralSubstitute);
- }
- // First, detect invalid regular expressions.
- try {
- RegExp(tmp);
- }
- catch (e) {
- this.throwUnexpectedToken(messages_1.Messages.InvalidRegExp);
- }
- // Return a regular expression object for this pattern-flag pair, or
- // `null` in case the current environment doesn't support the flags it
- // uses.
- try {
- return new RegExp(pattern, flags);
- }
- catch (exception) {
- /* istanbul ignore next */
- return null;
- }
- };
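// [Editorial note, not part of the original esprima.js] For a 'u'-flagged pattern the
// replacements above build an ES5-checkable approximation, e.g.:
//   /\u{1F600}/u    -> '\u{1F600}' in the pattern becomes the single character '\uFFFF'
//   /\uD83D\uDE00/u -> the resulting surrogate pair is likewise collapsed to '\uFFFF'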
- Scanner.prototype.scanRegExpBody = function () {
- var ch = this.source[this.index];
- assert_1.assert(ch === '/', 'Regular expression literal must start with a slash');
- var str = this.source[this.index++];
- var classMarker = false;
- var terminated = false;
- while (!this.eof()) {
- ch = this.source[this.index++];
- str += ch;
- if (ch === '\\') {
- ch = this.source[this.index++];
- // https://tc39.github.io/ecma262/#sec-literals-regular-expression-literals
- if (character_1.Character.isLineTerminator(ch.charCodeAt(0))) {
- this.throwUnexpectedToken(messages_1.Messages.UnterminatedRegExp);
- }
- str += ch;
- }
- else if (character_1.Character.isLineTerminator(ch.charCodeAt(0))) {
- this.throwUnexpectedToken(messages_1.Messages.UnterminatedRegExp);
- }
- else if (classMarker) {
- if (ch === ']') {
- classMarker = false;
- }
- }
- else {
- if (ch === '/') {
- terminated = true;
- break;
- }
- else if (ch === '[') {
- classMarker = true;
- }
- }
- }
- if (!terminated) {
- this.throwUnexpectedToken(messages_1.Messages.UnterminatedRegExp);
- }
- // Exclude leading and trailing slash.
- return str.substr(1, str.length - 2);
- };
- Scanner.prototype.scanRegExpFlags = function () {
- var str = '';
- var flags = '';
- while (!this.eof()) {
- var ch = this.source[this.index];
- if (!character_1.Character.isIdentifierPart(ch.charCodeAt(0))) {
- break;
- }
- ++this.index;
- if (ch === '\\' && !this.eof()) {
- ch = this.source[this.index];
- if (ch === 'u') {
- ++this.index;
- var restore = this.index;
- var char = this.scanHexEscape('u');
- if (char !== null) {
- flags += char;
- for (str += '\\u'; restore < this.index; ++restore) {
- str += this.source[restore];
- }
- }
- else {
- this.index = restore;
- flags += 'u';
- str += '\\u';
- }
- this.tolerateUnexpectedToken();
- }
- else {
- str += '\\';
- this.tolerateUnexpectedToken();
- }
- }
- else {
- flags += ch;
- str += ch;
- }
- }
- return flags;
- };
- Scanner.prototype.scanRegExp = function () {
- var start = this.index;
- var pattern = this.scanRegExpBody();
- var flags = this.scanRegExpFlags();
- var value = this.testRegExp(pattern, flags);
- return {
- type: 9 /* RegularExpression */,
- value: '',
- pattern: pattern,
- flags: flags,
- regex: value,
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: start,
- end: this.index
- };
- };
- Scanner.prototype.lex = function () {
- if (this.eof()) {
- return {
- type: 2 /* EOF */,
- value: '',
- lineNumber: this.lineNumber,
- lineStart: this.lineStart,
- start: this.index,
- end: this.index
- };
- }
- var cp = this.source.charCodeAt(this.index);
- if (character_1.Character.isIdentifierStart(cp)) {
- return this.scanIdentifier();
- }
- // Very common: ( and ) and ;
- if (cp === 0x28 || cp === 0x29 || cp === 0x3B) {
- return this.scanPunctuator();
- }
- // String literal starts with single quote (U+0027) or double quote (U+0022).
- if (cp === 0x27 || cp === 0x22) {
- return this.scanStringLiteral();
- }
- // Dot (.) U+002E can also start a floating-point number, hence the need
- // to check the next character.
- if (cp === 0x2E) {
- if (character_1.Character.isDecimalDigit(this.source.charCodeAt(this.index + 1))) {
- return this.scanNumericLiteral();
- }
- return this.scanPunctuator();
- }
- if (character_1.Character.isDecimalDigit(cp)) {
- return this.scanNumericLiteral();
- }
- // Template literals start with ` (U+0060) for template head
- // or } (U+007D) for template middle or template tail.
- if (cp === 0x60 || (cp === 0x7D && this.curlyStack[this.curlyStack.length - 1] === '${')) {
- return this.scanTemplate();
- }
- // Possible identifier start in a surrogate pair.
- if (cp >= 0xD800 && cp < 0xDFFF) {
- if (character_1.Character.isIdentifierStart(this.codePointAt(this.index))) {
- return this.scanIdentifier();
- }
- }
- return this.scanPunctuator();
- };
- return Scanner;
- }());
- exports.Scanner = Scanner;
-
-
-/***/ },
-/* 13 */
-/***/ function(module, exports) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.TokenName = {};
- exports.TokenName[1 /* BooleanLiteral */] = 'Boolean';
- exports.TokenName[2 /* EOF */] = '<end>';
- exports.TokenName[3 /* Identifier */] = 'Identifier';
- exports.TokenName[4 /* Keyword */] = 'Keyword';
- exports.TokenName[5 /* NullLiteral */] = 'Null';
- exports.TokenName[6 /* NumericLiteral */] = 'Numeric';
- exports.TokenName[7 /* Punctuator */] = 'Punctuator';
- exports.TokenName[8 /* StringLiteral */] = 'String';
- exports.TokenName[9 /* RegularExpression */] = 'RegularExpression';
- exports.TokenName[10 /* Template */] = 'Template';
-
-
-/***/ },
-/* 14 */
-/***/ function(module, exports) {
-
- "use strict";
- // Generated by generate-xhtml-entities.js. DO NOT MODIFY!
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.XHTMLEntities = {
- quot: '\u0022',
- amp: '\u0026',
- apos: '\u0027',
- gt: '\u003E',
- nbsp: '\u00A0',
- iexcl: '\u00A1',
- cent: '\u00A2',
- pound: '\u00A3',
- curren: '\u00A4',
- yen: '\u00A5',
- brvbar: '\u00A6',
- sect: '\u00A7',
- uml: '\u00A8',
- copy: '\u00A9',
- ordf: '\u00AA',
- laquo: '\u00AB',
- not: '\u00AC',
- shy: '\u00AD',
- reg: '\u00AE',
- macr: '\u00AF',
- deg: '\u00B0',
- plusmn: '\u00B1',
- sup2: '\u00B2',
- sup3: '\u00B3',
- acute: '\u00B4',
- micro: '\u00B5',
- para: '\u00B6',
- middot: '\u00B7',
- cedil: '\u00B8',
- sup1: '\u00B9',
- ordm: '\u00BA',
- raquo: '\u00BB',
- frac14: '\u00BC',
- frac12: '\u00BD',
- frac34: '\u00BE',
- iquest: '\u00BF',
- Agrave: '\u00C0',
- Aacute: '\u00C1',
- Acirc: '\u00C2',
- Atilde: '\u00C3',
- Auml: '\u00C4',
- Aring: '\u00C5',
- AElig: '\u00C6',
- Ccedil: '\u00C7',
- Egrave: '\u00C8',
- Eacute: '\u00C9',
- Ecirc: '\u00CA',
- Euml: '\u00CB',
- Igrave: '\u00CC',
- Iacute: '\u00CD',
- Icirc: '\u00CE',
- Iuml: '\u00CF',
- ETH: '\u00D0',
- Ntilde: '\u00D1',
- Ograve: '\u00D2',
- Oacute: '\u00D3',
- Ocirc: '\u00D4',
- Otilde: '\u00D5',
- Ouml: '\u00D6',
- times: '\u00D7',
- Oslash: '\u00D8',
- Ugrave: '\u00D9',
- Uacute: '\u00DA',
- Ucirc: '\u00DB',
- Uuml: '\u00DC',
- Yacute: '\u00DD',
- THORN: '\u00DE',
- szlig: '\u00DF',
- agrave: '\u00E0',
- aacute: '\u00E1',
- acirc: '\u00E2',
- atilde: '\u00E3',
- auml: '\u00E4',
- aring: '\u00E5',
- aelig: '\u00E6',
- ccedil: '\u00E7',
- egrave: '\u00E8',
- eacute: '\u00E9',
- ecirc: '\u00EA',
- euml: '\u00EB',
- igrave: '\u00EC',
- iacute: '\u00ED',
- icirc: '\u00EE',
- iuml: '\u00EF',
- eth: '\u00F0',
- ntilde: '\u00F1',
- ograve: '\u00F2',
- oacute: '\u00F3',
- ocirc: '\u00F4',
- otilde: '\u00F5',
- ouml: '\u00F6',
- divide: '\u00F7',
- oslash: '\u00F8',
- ugrave: '\u00F9',
- uacute: '\u00FA',
- ucirc: '\u00FB',
- uuml: '\u00FC',
- yacute: '\u00FD',
- thorn: '\u00FE',
- yuml: '\u00FF',
- OElig: '\u0152',
- oelig: '\u0153',
- Scaron: '\u0160',
- scaron: '\u0161',
- Yuml: '\u0178',
- fnof: '\u0192',
- circ: '\u02C6',
- tilde: '\u02DC',
- Alpha: '\u0391',
- Beta: '\u0392',
- Gamma: '\u0393',
- Delta: '\u0394',
- Epsilon: '\u0395',
- Zeta: '\u0396',
- Eta: '\u0397',
- Theta: '\u0398',
- Iota: '\u0399',
- Kappa: '\u039A',
- Lambda: '\u039B',
- Mu: '\u039C',
- Nu: '\u039D',
- Xi: '\u039E',
- Omicron: '\u039F',
- Pi: '\u03A0',
- Rho: '\u03A1',
- Sigma: '\u03A3',
- Tau: '\u03A4',
- Upsilon: '\u03A5',
- Phi: '\u03A6',
- Chi: '\u03A7',
- Psi: '\u03A8',
- Omega: '\u03A9',
- alpha: '\u03B1',
- beta: '\u03B2',
- gamma: '\u03B3',
- delta: '\u03B4',
- epsilon: '\u03B5',
- zeta: '\u03B6',
- eta: '\u03B7',
- theta: '\u03B8',
- iota: '\u03B9',
- kappa: '\u03BA',
- lambda: '\u03BB',
- mu: '\u03BC',
- nu: '\u03BD',
- xi: '\u03BE',
- omicron: '\u03BF',
- pi: '\u03C0',
- rho: '\u03C1',
- sigmaf: '\u03C2',
- sigma: '\u03C3',
- tau: '\u03C4',
- upsilon: '\u03C5',
- phi: '\u03C6',
- chi: '\u03C7',
- psi: '\u03C8',
- omega: '\u03C9',
- thetasym: '\u03D1',
- upsih: '\u03D2',
- piv: '\u03D6',
- ensp: '\u2002',
- emsp: '\u2003',
- thinsp: '\u2009',
- zwnj: '\u200C',
- zwj: '\u200D',
- lrm: '\u200E',
- rlm: '\u200F',
- ndash: '\u2013',
- mdash: '\u2014',
- lsquo: '\u2018',
- rsquo: '\u2019',
- sbquo: '\u201A',
- ldquo: '\u201C',
- rdquo: '\u201D',
- bdquo: '\u201E',
- dagger: '\u2020',
- Dagger: '\u2021',
- bull: '\u2022',
- hellip: '\u2026',
- permil: '\u2030',
- prime: '\u2032',
- Prime: '\u2033',
- lsaquo: '\u2039',
- rsaquo: '\u203A',
- oline: '\u203E',
- frasl: '\u2044',
- euro: '\u20AC',
- image: '\u2111',
- weierp: '\u2118',
- real: '\u211C',
- trade: '\u2122',
- alefsym: '\u2135',
- larr: '\u2190',
- uarr: '\u2191',
- rarr: '\u2192',
- darr: '\u2193',
- harr: '\u2194',
- crarr: '\u21B5',
- lArr: '\u21D0',
- uArr: '\u21D1',
- rArr: '\u21D2',
- dArr: '\u21D3',
- hArr: '\u21D4',
- forall: '\u2200',
- part: '\u2202',
- exist: '\u2203',
- empty: '\u2205',
- nabla: '\u2207',
- isin: '\u2208',
- notin: '\u2209',
- ni: '\u220B',
- prod: '\u220F',
- sum: '\u2211',
- minus: '\u2212',
- lowast: '\u2217',
- radic: '\u221A',
- prop: '\u221D',
- infin: '\u221E',
- ang: '\u2220',
- and: '\u2227',
- or: '\u2228',
- cap: '\u2229',
- cup: '\u222A',
- int: '\u222B',
- there4: '\u2234',
- sim: '\u223C',
- cong: '\u2245',
- asymp: '\u2248',
- ne: '\u2260',
- equiv: '\u2261',
- le: '\u2264',
- ge: '\u2265',
- sub: '\u2282',
- sup: '\u2283',
- nsub: '\u2284',
- sube: '\u2286',
- supe: '\u2287',
- oplus: '\u2295',
- otimes: '\u2297',
- perp: '\u22A5',
- sdot: '\u22C5',
- lceil: '\u2308',
- rceil: '\u2309',
- lfloor: '\u230A',
- rfloor: '\u230B',
- loz: '\u25CA',
- spades: '\u2660',
- clubs: '\u2663',
- hearts: '\u2665',
- diams: '\u2666',
- lang: '\u27E8',
- rang: '\u27E9'
- };
-
-
-/***/ },
-/* 15 */
-/***/ function(module, exports, __webpack_require__) {
-
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- var error_handler_1 = __webpack_require__(10);
- var scanner_1 = __webpack_require__(12);
- var token_1 = __webpack_require__(13);
- var Reader = (function () {
- function Reader() {
- this.values = [];
- this.curly = this.paren = -1;
- }
- // A function following one of those tokens is an expression.
- Reader.prototype.beforeFunctionExpression = function (t) {
- return ['(', '{', '[', 'in', 'typeof', 'instanceof', 'new',
- 'return', 'case', 'delete', 'throw', 'void',
- // assignment operators
- '=', '+=', '-=', '*=', '**=', '/=', '%=', '<<=', '>>=', '>>>=',
- '&=', '|=', '^=', ',',
- // binary/unary operators
- '+', '-', '*', '**', '/', '%', '++', '--', '<<', '>>', '>>>', '&',
- '|', '^', '!', '~', '&&', '||', '?', ':', '===', '==', '>=',
- '<=', '<', '>', '!=', '!=='].indexOf(t) >= 0;
- };
- // Determine if forward slash (/) is an operator or part of a regular expression
- // https://github.com/mozilla/sweet.js/wiki/design
- Reader.prototype.isRegexStart = function () {
- var previous = this.values[this.values.length - 1];
- var regex = (previous !== null);
- switch (previous) {
- case 'this':
- case ']':
- regex = false;
- break;
- case ')':
- var keyword = this.values[this.paren - 1];
- regex = (keyword === 'if' || keyword === 'while' || keyword === 'for' || keyword === 'with');
- break;
- case '}':
- // Dividing a function by anything makes little sense,
- // but we have to check for that.
- regex = false;
- if (this.values[this.curly - 3] === 'function') {
- // Anonymous function, e.g. function(){} /42
- var check = this.values[this.curly - 4];
- regex = check ? !this.beforeFunctionExpression(check) : false;
- }
- else if (this.values[this.curly - 4] === 'function') {
- // Named function, e.g. function f(){} /42/
- var check = this.values[this.curly - 5];
- regex = check ? !this.beforeFunctionExpression(check) : true;
- }
- break;
- default:
- break;
- }
- return regex;
- };
- Reader.prototype.push = function (token) {
- if (token.type === 7 /* Punctuator */ || token.type === 4 /* Keyword */) {
- if (token.value === '{') {
- this.curly = this.values.length;
- }
- else if (token.value === '(') {
- this.paren = this.values.length;
- }
- this.values.push(token.value);
- }
- else {
- this.values.push(null);
- }
- };
- return Reader;
- }());
- var Tokenizer = (function () {
- function Tokenizer(code, config) {
- this.errorHandler = new error_handler_1.ErrorHandler();
- this.errorHandler.tolerant = config ? (typeof config.tolerant === 'boolean' && config.tolerant) : false;
- this.scanner = new scanner_1.Scanner(code, this.errorHandler);
- this.scanner.trackComment = config ? (typeof config.comment === 'boolean' && config.comment) : false;
- this.trackRange = config ? (typeof config.range === 'boolean' && config.range) : false;
- this.trackLoc = config ? (typeof config.loc === 'boolean' && config.loc) : false;
- this.buffer = [];
- this.reader = new Reader();
- }
- Tokenizer.prototype.errors = function () {
- return this.errorHandler.errors;
- };
- Tokenizer.prototype.getNextToken = function () {
- if (this.buffer.length === 0) {
- var comments = this.scanner.scanComments();
- if (this.scanner.trackComment) {
- for (var i = 0; i < comments.length; ++i) {
- var e = comments[i];
- var value = this.scanner.source.slice(e.slice[0], e.slice[1]);
- var comment = {
- type: e.multiLine ? 'BlockComment' : 'LineComment',
- value: value
- };
- if (this.trackRange) {
- comment.range = e.range;
- }
- if (this.trackLoc) {
- comment.loc = e.loc;
- }
- this.buffer.push(comment);
- }
- }
- if (!this.scanner.eof()) {
- var loc = void 0;
- if (this.trackLoc) {
- loc = {
- start: {
- line: this.scanner.lineNumber,
- column: this.scanner.index - this.scanner.lineStart
- },
- end: {}
- };
- }
- var startRegex = (this.scanner.source[this.scanner.index] === '/') && this.reader.isRegexStart();
- var token = startRegex ? this.scanner.scanRegExp() : this.scanner.lex();
- this.reader.push(token);
- var entry = {
- type: token_1.TokenName[token.type],
- value: this.scanner.source.slice(token.start, token.end)
- };
- if (this.trackRange) {
- entry.range = [token.start, token.end];
- }
- if (this.trackLoc) {
- loc.end = {
- line: this.scanner.lineNumber,
- column: this.scanner.index - this.scanner.lineStart
- };
- entry.loc = loc;
- }
- if (token.type === 9 /* RegularExpression */) {
- var pattern = token.pattern;
- var flags = token.flags;
- entry.regex = { pattern: pattern, flags: flags };
- }
- this.buffer.push(entry);
- }
- }
- return this.buffer.shift();
- };
- return Tokenizer;
- }());
- exports.Tokenizer = Tokenizer;
-
-
-/***/ }
-/******/ ])
-});
-;
\ No newline at end of file
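The Reader and Tokenizer removed above implement esprima's lookback heuristic for deciding whether a forward slash starts a regular-expression literal or is the division operator: the Reader remembers the values of previously scanned punctuators/keywords (and the positions of the last '{' and '('), and isRegexStart consults them before the Scanner is asked to scan a regex. A minimal sketch of the observable behaviour, assuming the public esprima.tokenize API that this bundle exposes (token shapes abbreviated):

    // '/' after an identifier is scanned as a Punctuator (division) ...
    esprima.tokenize('a / b');
    //   -> Identifier 'a', Punctuator '/', Identifier 'b'
    // ... while after '=' the same character starts a RegularExpression token.
    esprima.tokenize('x = /b/g');
    //   -> Identifier 'x', Punctuator '=', RegularExpression '/b/g'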
diff --git a/share/server/60/rewrite_fun.js b/share/server/60/rewrite_fun.js
deleted file mode 100644
index 1b27a9d14..000000000
--- a/share/server/60/rewrite_fun.js
+++ /dev/null
@@ -1,56 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-//
-// Based on the normalizeFunction which can be
-// found here:
-//
-// https://github.com/dmunch/couch-chakra/blob/master/js/normalizeFunction.js
-
-function rewriteFunInt(fun) {
- const ast = esprima.parse(fun);
- let idx = ast.body.length - 1;
- let decl = {};
-
- // Search for the first FunctionDeclaration beginning from the end
- do {
- decl = ast.body[idx--];
- } while (idx >= 0 && decl.type !== "FunctionDeclaration");
- idx++;
-
- // If we have a function declaration without an Id, wrap it
- // in an ExpressionStatement and change it into
- // a FunctionExpression
- if (decl.type == "FunctionDeclaration" && decl.id == null) {
- decl.type = "FunctionExpression";
- ast.body[idx] = {
- type: "ExpressionStatement",
- expression: decl
- };
- }
-
- // Generate source from the rewritten AST
- return escodegen.generate(ast);
-}
-
-
-function rewriteFun(funJSON) {
- const fun = JSON.parse(funJSON);
- return JSON.stringify(rewriteFunInt(fun));
-}
-
-function rewriteFuns(funsJSON) {
- let funs = JSON.parse(funsJSON);
- const results = Array.from(funs, (fun) => {
- return rewriteFunInt(fun);
- });
- return JSON.stringify(results);
-}
\ No newline at end of file
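rewrite_fun.js, removed above, normalizes user-supplied function source before the query server evaluates it: the source is parsed with esprima, and if the trailing FunctionDeclaration has no identifier it is rewrapped as a FunctionExpression inside an ExpressionStatement and regenerated with escodegen. A rough sketch of the intended round trip, assuming the esprima/escodegen globals the file relies on and a hypothetical map-function body:

    // Hypothetical input, as the query server would receive it: a JSON string.
    var funJSON = JSON.stringify("function(doc) { emit(doc._id, 1); }");
    // rewriteFun parses the JSON, rewrites the AST and re-serializes the source.
    // The anonymous declaration comes back wrapped as an expression statement,
    // roughly: "(function (doc) {\n    emit(doc._id, 1);\n});"
    var normalized = rewriteFun(funJSON);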
diff --git a/share/server/coffee-script.js b/share/server/coffee-script.js
deleted file mode 100644
index 06671c21f..000000000
--- a/share/server/coffee-script.js
+++ /dev/null
@@ -1,12 +0,0 @@
-/**
- * CoffeeScript Compiler v1.10.0
- * http://coffeescript.org
- *
- * Copyright 2011, Jeremy Ashkenas
- * Released under the MIT License
- */
-(function(root){var CoffeeScript=function(){function require(e){return require[e]}return require["./helpers"]=function(){var e={},t={exports:e};return function(){var t,n,i,r,s,o;e.starts=function(e,t,n){return t===e.substr(n,t.length)},e.ends=function(e,t,n){var i;return i=t.length,t===e.substr(e.length-i-(n||0),i)},e.repeat=s=function(e,t){var n;for(n="";t>0;)1&t&&(n+=e),t>>>=1,e+=e;return n},e.compact=function(e){var t,n,i,r;for(r=[],t=0,i=e.length;i>t;t++)n=e[t],n&&r.push(n);return r},e.count=function(e,t){var n,i;if(n=i=0,!t.length)return 1/0;for(;i=1+e.indexOf(t,i);)n++;return n},e.merge=function(e,t){return n(n({},e),t)},n=e.extend=function(e,t){var n,i;for(n in t)i=t[n],e[n]=i;return e},e.flatten=i=function(e){var t,n,r,s;for(n=[],r=0,s=e.length;s>r;r++)t=e[r],"[object Array]"===Object.prototype.toString.call(t)?n=n.concat(i(t)):n.push(t);return n},e.del=function(e,t){var n;return n=e[t],delete e[t],n},e.some=null!=(r=Array.prototype.some)?r:function(e){var t,n,i;for(n=0,i=this.length;i>n;n++)if(t=this[n],e(t))return!0;return!1},e.invertLiterate=function(e){var t,n,i;return i=!0,n=function(){var n,r,s,o;for(s=e.split("\n"),o=[],n=0,r=s.length;r>n;n++)t=s[n],i&&/^([ ]{4}|[ ]{0,3}\t)/.test(t)?o.push(t):(i=/^\s*$/.test(t))?o.push(t):o.push("# "+t);return o}(),n.join("\n")},t=function(e,t){return t?{first_line:e.first_line,first_column:e.first_column,last_line:t.last_line,last_column:t.last_column}:e},e.addLocationDataFn=function(e,n){return function(i){return"object"==typeof i&&i.updateLocationDataIfMissing&&i.updateLocationDataIfMissing(t(e,n)),i}},e.locationDataToString=function(e){var t;return"2"in e&&"first_line"in e[2]?t=e[2]:"first_line"in e&&(t=e),t?t.first_line+1+":"+(t.first_column+1)+"-"+(t.last_line+1+":"+(t.last_column+1)):"No location data"},e.baseFileName=function(e,t,n){var i,r;return null==t&&(t=!1),null==n&&(n=!1),r=n?/\\|\//:/\//,i=e.split(r),e=i[i.length-1],t&&e.indexOf(".")>=0?(i=e.split("."),i.pop(),"coffee"===i[i.length-1]&&i.length>1&&i.pop(),i.join(".")):e},e.isCoffee=function(e){return/\.((lit)?coffee|coffee\.md)$/.test(e)},e.isLiterate=function(e){return/\.(litcoffee|coffee\.md)$/.test(e)},e.throwSyntaxError=function(e,t){var n;throw n=new SyntaxError(e),n.location=t,n.toString=o,n.stack=""+n,n},e.updateSyntaxError=function(e,t,n){return e.toString===o&&(e.code||(e.code=t),e.filename||(e.filename=n),e.stack=""+e),e},o=function(){var e,t,n,i,r,o,a,c,l,h,u,p,d,f,m;return this.code&&this.location?(u=this.location,a=u.first_line,o=u.first_column,l=u.last_line,c=u.last_column,null==l&&(l=a),null==c&&(c=o),r=this.filename||"[stdin]",e=this.code.split("\n")[a],m=o,i=a===l?c+1:e.length,h=e.slice(0,m).replace(/[^\s]/g," ")+s("^",i-m),"undefined"!=typeof process&&null!==process&&(n=(null!=(p=process.stdout)?p.isTTY:void 0)&&!(null!=(d=process.env)?d.NODE_DISABLE_COLORS:void 0)),(null!=(f=this.colorful)?f:n)&&(t=function(e){return""+e+""},e=e.slice(0,m)+t(e.slice(m,i))+e.slice(i),h=t(h)),r+":"+(a+1)+":"+(o+1)+": error: "+this.message+"\n"+e+"\n"+h):Error.prototype.toString.call(this)},e.nameWhitespaceCharacter=function(e){switch(e){case" ":return"space";case"\n":return"newline";case"\r":return"carriage return";case" ":return"tab";default:return e}}}.call(this),t.exports}(),require["./rewriter"]=function(){var e={},t={exports:e};return function(){var t,n,i,r,s,o,a,c,l,h,u,p,d,f,m,g,v,b,y,k=[].indexOf||function(e){for(var t=0,n=this.length;n>t;t++)if(t in this&&this[t]===e)return t;return-1},w=[].slice;for(f=function(e,t,n){var i;return 
i=[e,t],i.generated=!0,n&&(i.origin=n),i},e.Rewriter=function(){function e(){}return e.prototype.rewrite=function(e){return this.tokens=e,this.removeLeadingNewlines(),this.closeOpenCalls(),this.closeOpenIndexes(),this.normalizeLines(),this.tagPostfixConditionals(),this.addImplicitBracesAndParens(),this.addLocationDataToGeneratedTokens(),this.tokens},e.prototype.scanTokens=function(e){var t,n,i;for(i=this.tokens,t=0;n=i[t];)t+=e.call(this,n,t,i);return!0},e.prototype.detectEnd=function(e,t,n){var i,o,a,c,l;for(l=this.tokens,i=0;c=l[e];){if(0===i&&t.call(this,c,e))return n.call(this,c,e);if(!c||0>i)return n.call(this,c,e-1);o=c[0],k.call(s,o)>=0?i+=1:(a=c[0],k.call(r,a)>=0&&(i-=1)),e+=1}return e-1},e.prototype.removeLeadingNewlines=function(){var e,t,n,i,r;for(i=this.tokens,e=t=0,n=i.length;n>t&&(r=i[e][0],"TERMINATOR"===r);e=++t);return e?this.tokens.splice(0,e):void 0},e.prototype.closeOpenCalls=function(){var e,t;return t=function(e,t){var n;return")"===(n=e[0])||"CALL_END"===n||"OUTDENT"===e[0]&&")"===this.tag(t-1)},e=function(e,t){return this.tokens["OUTDENT"===e[0]?t-1:t][0]="CALL_END"},this.scanTokens(function(n,i){return"CALL_START"===n[0]&&this.detectEnd(i+1,t,e),1})},e.prototype.closeOpenIndexes=function(){var e,t;return t=function(e){var t;return"]"===(t=e[0])||"INDEX_END"===t},e=function(e){return e[0]="INDEX_END"},this.scanTokens(function(n,i){return"INDEX_START"===n[0]&&this.detectEnd(i+1,t,e),1})},e.prototype.indexOfTag=function(){var e,t,n,i,r,s,o;for(t=arguments[0],r=arguments.length>=2?w.call(arguments,1):[],e=0,n=i=0,s=r.length;s>=0?s>i:i>s;n=s>=0?++i:--i){for(;"HERECOMMENT"===this.tag(t+n+e);)e+=2;if(null!=r[n]&&("string"==typeof r[n]&&(r[n]=[r[n]]),o=this.tag(t+n+e),0>k.call(r[n],o)))return-1}return t+n+e-1},e.prototype.looksObjectish=function(e){var t,n;return this.indexOfTag(e,"@",null,":")>-1||this.indexOfTag(e,null,":")>-1?!0:(n=this.indexOfTag(e,s),n>-1&&(t=null,this.detectEnd(n+1,function(e){var t;return t=e[0],k.call(r,t)>=0},function(e,n){return t=n}),":"===this.tag(t+1))?!0:!1)},e.prototype.findTagsBackwards=function(e,t){var n,i,o,a,c,l,h;for(n=[];e>=0&&(n.length||(a=this.tag(e),0>k.call(t,a)&&(c=this.tag(e),0>k.call(s,c)||this.tokens[e].generated)&&(l=this.tag(e),0>k.call(u,l))));)i=this.tag(e),k.call(r,i)>=0&&n.push(this.tag(e)),o=this.tag(e),k.call(s,o)>=0&&n.length&&n.pop(),e-=1;return h=this.tag(e),k.call(t,h)>=0},e.prototype.addImplicitBracesAndParens=function(){var e,t;return e=[],t=null,this.scanTokens(function(i,h,p){var d,m,g,v,b,y,w,T,C,F,E,N,L,x,S,D,R,A,I,_,O,$,j,M,B,V,P,U;if(U=i[0],E=(N=h>0?p[h-1]:[])[0],C=(p.length-1>h?p[h+1]:[])[0],j=function(){return e[e.length-1]},M=h,g=function(e){return h-M+e},v=function(){var e,t;return null!=(e=j())?null!=(t=e[2])?t.ours:void 0:void 0},b=function(){var e;return v()&&"("===(null!=(e=j())?e[0]:void 0)},w=function(){var e;return v()&&"{"===(null!=(e=j())?e[0]:void 0)},y=function(){var e;return v&&"CONTROL"===(null!=(e=j())?e[0]:void 0)},B=function(t){var n;return n=null!=t?t:h,e.push(["(",n,{ours:!0}]),p.splice(n,0,f("CALL_START","(")),null==t?h+=1:void 0},d=function(){return e.pop(),p.splice(h,0,f("CALL_END",")",["","end of input",i[2]])),h+=1},V=function(t,n){var r,s;return null==n&&(n=!0),r=null!=t?t:h,e.push(["{",r,{sameLine:!0,startsLine:n,ours:!0}]),s=new String("{"),s.generated=!0,p.splice(r,0,f("{",s,i)),null==t?h+=1:void 0},m=function(t){return t=null!=t?t:h,e.pop(),p.splice(t,0,f("}","}",i)),h+=1},b()&&("IF"===U||"TRY"===U||"FINALLY"===U||"CATCH"===U||"CLASS"===U||"SWITCH"===U))return 
e.push(["CONTROL",h,{ours:!0}]),g(1);if("INDENT"===U&&v()){if("=>"!==E&&"->"!==E&&"["!==E&&"("!==E&&","!==E&&"{"!==E&&"TRY"!==E&&"ELSE"!==E&&"="!==E)for(;b();)d();return y()&&e.pop(),e.push([U,h]),g(1)}if(k.call(s,U)>=0)return e.push([U,h]),g(1);if(k.call(r,U)>=0){for(;v();)b()?d():w()?m():e.pop();t=e.pop()}if((k.call(c,U)>=0&&i.spaced||"?"===U&&h>0&&!p[h-1].spaced)&&(k.call(o,C)>=0||k.call(l,C)>=0&&!(null!=(L=p[h+1])?L.spaced:void 0)&&!(null!=(x=p[h+1])?x.newLine:void 0)))return"?"===U&&(U=i[0]="FUNC_EXIST"),B(h+1),g(2);if(k.call(c,U)>=0&&this.indexOfTag(h+1,"INDENT")>-1&&this.looksObjectish(h+2)&&!this.findTagsBackwards(h,["CLASS","EXTENDS","IF","CATCH","SWITCH","LEADING_WHEN","FOR","WHILE","UNTIL"]))return B(h+1),e.push(["INDENT",h+2]),g(3);if(":"===U){for(I=function(){var e;switch(!1){case e=this.tag(h-1),0>k.call(r,e):return t[1];case"@"!==this.tag(h-2):return h-2;default:return h-1}}.call(this);"HERECOMMENT"===this.tag(I-2);)I-=2;return this.insideForDeclaration="FOR"===C,P=0===I||(S=this.tag(I-1),k.call(u,S)>=0)||p[I-1].newLine,j()&&(D=j(),$=D[0],O=D[1],("{"===$||"INDENT"===$&&"{"===this.tag(O-1))&&(P||","===this.tag(I-1)||"{"===this.tag(I-1)))?g(1):(V(I,!!P),g(2))}if(w()&&k.call(u,U)>=0&&(j()[2].sameLine=!1),T="OUTDENT"===E||N.newLine,k.call(a,U)>=0||k.call(n,U)>=0&&T)for(;v();)if(R=j(),$=R[0],O=R[1],A=R[2],_=A.sameLine,P=A.startsLine,b()&&","!==E)d();else if(w()&&!this.insideForDeclaration&&_&&"TERMINATOR"!==U&&":"!==E)m();else{if(!w()||"TERMINATOR"!==U||","===E||P&&this.looksObjectish(h+1))break;if("HERECOMMENT"===C)return g(1);m()}if(!(","!==U||this.looksObjectish(h+1)||!w()||this.insideForDeclaration||"TERMINATOR"===C&&this.looksObjectish(h+2)))for(F="OUTDENT"===C?1:0;w();)m(h+F);return g(1)})},e.prototype.addLocationDataToGeneratedTokens=function(){return this.scanTokens(function(e,t,n){var i,r,s,o,a,c;return e[2]?1:e.generated||e.explicit?("{"===e[0]&&(s=null!=(a=n[t+1])?a[2]:void 0)?(r=s.first_line,i=s.first_column):(o=null!=(c=n[t-1])?c[2]:void 0)?(r=o.last_line,i=o.last_column):r=i=0,e[2]={first_line:r,first_column:i,last_line:r,last_column:i},1):1})},e.prototype.normalizeLines=function(){var e,t,r,s,o;return o=r=s=null,t=function(e,t){var r,s,a,c;return";"!==e[1]&&(r=e[0],k.call(p,r)>=0)&&!("TERMINATOR"===e[0]&&(s=this.tag(t+1),k.call(i,s)>=0))&&!("ELSE"===e[0]&&"THEN"!==o)&&!!("CATCH"!==(a=e[0])&&"FINALLY"!==a||"->"!==o&&"=>"!==o)||(c=e[0],k.call(n,c)>=0&&this.tokens[t-1].newLine)},e=function(e,t){return this.tokens.splice(","===this.tag(t-1)?t-1:t,0,s)},this.scanTokens(function(n,a,c){var l,h,u,p,f,m;if(m=n[0],"TERMINATOR"===m){if("ELSE"===this.tag(a+1)&&"OUTDENT"!==this.tag(a-1))return c.splice.apply(c,[a,1].concat(w.call(this.indentation()))),1;if(u=this.tag(a+1),k.call(i,u)>=0)return c.splice(a,1),0}if("CATCH"===m)for(l=h=1;2>=h;l=++h)if("OUTDENT"===(p=this.tag(a+l))||"TERMINATOR"===p||"FINALLY"===p)return c.splice.apply(c,[a+l,0].concat(w.call(this.indentation()))),2+l;return k.call(d,m)>=0&&"INDENT"!==this.tag(a+1)&&("ELSE"!==m||"IF"!==this.tag(a+1))?(o=m,f=this.indentation(c[a]),r=f[0],s=f[1],"THEN"===o&&(r.fromThen=!0),c.splice(a+1,0,r),this.detectEnd(a+2,t,e),"THEN"===m&&c.splice(a,1),1):1})},e.prototype.tagPostfixConditionals=function(){var e,t,n;return n=null,t=function(e,t){var n,i;return i=e[0],n=this.tokens[t-1][0],"TERMINATOR"===i||"INDENT"===i&&0>k.call(d,n)},e=function(e){return"INDENT"!==e[0]||e.generated&&!e.fromThen?n[0]="POST_"+n[0]:void 
0},this.scanTokens(function(i,r){return"IF"!==i[0]?1:(n=i,this.detectEnd(r+1,t,e),1)})},e.prototype.indentation=function(e){var t,n;return t=["INDENT",2],n=["OUTDENT",2],e?(t.generated=n.generated=!0,t.origin=n.origin=e):t.explicit=n.explicit=!0,[t,n]},e.prototype.generate=f,e.prototype.tag=function(e){var t;return null!=(t=this.tokens[e])?t[0]:void 0},e}(),t=[["(",")"],["[","]"],["{","}"],["INDENT","OUTDENT"],["CALL_START","CALL_END"],["PARAM_START","PARAM_END"],["INDEX_START","INDEX_END"],["STRING_START","STRING_END"],["REGEX_START","REGEX_END"]],e.INVERSES=h={},s=[],r=[],m=0,v=t.length;v>m;m++)b=t[m],g=b[0],y=b[1],s.push(h[y]=g),r.push(h[g]=y);i=["CATCH","THEN","ELSE","FINALLY"].concat(r),c=["IDENTIFIER","SUPER",")","CALL_END","]","INDEX_END","@","THIS"],o=["IDENTIFIER","NUMBER","STRING","STRING_START","JS","REGEX","REGEX_START","NEW","PARAM_START","CLASS","IF","TRY","SWITCH","THIS","BOOL","NULL","UNDEFINED","UNARY","YIELD","UNARY_MATH","SUPER","THROW","@","->","=>","[","(","{","--","++"],l=["+","-"],a=["POST_IF","FOR","WHILE","UNTIL","WHEN","BY","LOOP","TERMINATOR"],d=["ELSE","->","=>","TRY","FINALLY","THEN"],p=["TERMINATOR","CATCH","FINALLY","ELSE","OUTDENT","LEADING_WHEN"],u=["TERMINATOR","INDENT","OUTDENT"],n=[".","?.","::","?::"]}.call(this),t.exports}(),require["./lexer"]=function(){var e={},t={exports:e};return function(){var t,n,i,r,s,o,a,c,l,h,u,p,d,f,m,g,v,b,y,k,w,T,C,F,E,N,L,x,S,D,R,A,I,_,O,$,j,M,B,V,P,U,G,H,q,X,W,Y,K,z,J,Q,Z,et,tt,nt,it,rt,st,ot,at,ct,lt,ht,ut=[].indexOf||function(e){for(var t=0,n=this.length;n>t;t++)if(t in this&&this[t]===e)return t;return-1};ot=require("./rewriter"),P=ot.Rewriter,w=ot.INVERSES,at=require("./helpers"),nt=at.count,lt=at.starts,tt=at.compact,ct=at.repeat,it=at.invertLiterate,st=at.locationDataToString,ht=at.throwSyntaxError,e.Lexer=S=function(){function e(){}return e.prototype.tokenize=function(e,t){var n,i,r,s;for(null==t&&(t={}),this.literate=t.literate,this.indent=0,this.baseIndent=0,this.indebt=0,this.outdebt=0,this.indents=[],this.ends=[],this.tokens=[],this.seenFor=!1,this.chunkLine=t.line||0,this.chunkColumn=t.column||0,e=this.clean(e),r=0;this.chunk=e.slice(r);)if(n=this.identifierToken()||this.commentToken()||this.whitespaceToken()||this.lineToken()||this.stringToken()||this.numberToken()||this.regexToken()||this.jsToken()||this.literalToken(),s=this.getLineAndColumnFromChunk(n),this.chunkLine=s[0],this.chunkColumn=s[1],r+=n,t.untilBalanced&&0===this.ends.length)return{tokens:this.tokens,index:r};return this.closeIndentation(),(i=this.ends.pop())&&this.error("missing "+i.tag,i.origin[2]),t.rewrite===!1?this.tokens:(new P).rewrite(this.tokens)},e.prototype.clean=function(e){return e.charCodeAt(0)===t&&(e=e.slice(1)),e=e.replace(/\r/g,"").replace(z,""),et.test(e)&&(e="\n"+e,this.chunkLine--),this.literate&&(e=it(e)),e},e.prototype.identifierToken=function(){var e,t,n,i,r,c,l,h,u,p,d,f,m,g,b,y;return(h=v.exec(this.chunk))?(l=h[0],r=h[1],t=h[2],c=r.length,u=void 
0,"own"===r&&"FOR"===this.tag()?(this.token("OWN",r),r.length):"from"===r&&"YIELD"===this.tag()?(this.token("FROM",r),r.length):(d=this.tokens,p=d[d.length-1],i=t||null!=p&&("."===(f=p[0])||"?."===f||"::"===f||"?::"===f||!p.spaced&&"@"===p[0]),b="IDENTIFIER",!i&&(ut.call(F,r)>=0||ut.call(a,r)>=0)&&(b=r.toUpperCase(),"WHEN"===b&&(m=this.tag(),ut.call(N,m)>=0)?b="LEADING_WHEN":"FOR"===b?this.seenFor=!0:"UNLESS"===b?b="IF":ut.call(J,b)>=0?b="UNARY":ut.call(B,b)>=0&&("INSTANCEOF"!==b&&this.seenFor?(b="FOR"+b,this.seenFor=!1):(b="RELATION","!"===this.value()&&(u=this.tokens.pop(),r="!"+r)))),ut.call(C,r)>=0&&(i?(b="IDENTIFIER",r=new String(r),r.reserved=!0):ut.call(V,r)>=0&&this.error("reserved word '"+r+"'",{length:r.length})),i||(ut.call(s,r)>=0&&(e=r,r=o[r]),b=function(){switch(r){case"!":return"UNARY";case"==":case"!=":return"COMPARE";case"&&":case"||":return"LOGIC";case"true":case"false":return"BOOL";case"break":case"continue":return"STATEMENT";default:return b}}()),y=this.token(b,r,0,c),e&&(y.origin=[b,e,y[2]]),y.variable=!i,u&&(g=[u[2].first_line,u[2].first_column],y[2].first_line=g[0],y[2].first_column=g[1]),t&&(n=l.lastIndexOf(":"),this.token(":",":",n,t.length)),l.length)):0},e.prototype.numberToken=function(){var e,t,n,i,r;return(n=I.exec(this.chunk))?(i=n[0],t=i.length,/^0[BOX]/.test(i)?this.error("radix prefix in '"+i+"' must be lowercase",{offset:1}):/E/.test(i)&&!/^0x/.test(i)?this.error("exponential notation in '"+i+"' must be indicated with a lowercase 'e'",{offset:i.indexOf("E")}):/^0\d*[89]/.test(i)?this.error("decimal literal '"+i+"' must not be prefixed with '0'",{length:t}):/^0\d+/.test(i)&&this.error("octal literal '"+i+"' must be prefixed with '0o'",{length:t}),(r=/^0o([0-7]+)/.exec(i))&&(i="0x"+parseInt(r[1],8).toString(16)),(e=/^0b([01]+)/.exec(i))&&(i="0x"+parseInt(e[1],2).toString(16)),this.token("NUMBER",i,0,t),t):0},e.prototype.stringToken=function(){var e,t,n,i,r,s,o,a,c,l,h,u,m,g,v,b;if(h=(Y.exec(this.chunk)||[])[0],!h)return 0;if(g=function(){switch(h){case"'":return W;case'"':return q;case"'''":return f;case'"""':return p}}(),s=3===h.length,u=this.matchWithInterpolations(g,h),b=u.tokens,r=u.index,e=b.length-1,n=h.charAt(0),s){for(a=null,i=function(){var e,t,n;for(n=[],o=e=0,t=b.length;t>e;o=++e)v=b[o],"NEOSTRING"===v[0]&&n.push(v[1]);return n}().join("#{}");l=d.exec(i);)t=l[1],(null===a||(m=t.length)>0&&a.length>m)&&(a=t);a&&(c=RegExp("^"+a,"gm")),this.mergeInterpolationTokens(b,{delimiter:n},function(t){return function(n,i){return n=t.formatString(n),0===i&&(n=n.replace(E,"")),i===e&&(n=n.replace(K,"")),c&&(n=n.replace(c,"")),n}}(this))}else this.mergeInterpolationTokens(b,{delimiter:n},function(t){return function(n,i){return n=t.formatString(n),n=n.replace(G,function(t,r){return 0===i&&0===r||i===e&&r+t.length===n.length?"":" "})}}(this));return r},e.prototype.commentToken=function(){var e,t,n;return(n=this.chunk.match(c))?(e=n[0],t=n[1],t&&((n=u.exec(e))&&this.error("block comments cannot contain "+n[0],{offset:n.index,length:n[0].length}),t.indexOf("\n")>=0&&(t=t.replace(RegExp("\\n"+ct(" ",this.indent),"g"),"\n")),this.token("HERECOMMENT",t,0,e.length)),e.length):0},e.prototype.jsToken=function(){var e,t;return"`"===this.chunk.charAt(0)&&(e=T.exec(this.chunk))?(this.token("JS",(t=e[0]).slice(1,-1),0,t.length),t.length):0},e.prototype.regexToken=function(){var e,t,n,r,s,o,a,c,l,h,u,p,d;switch(!1){case!(o=M.exec(this.chunk)):this.error("regular expressions cannot begin with 
"+o[2],{offset:o.index+o[1].length});break;case!(o=this.matchWithInterpolations(m,"///")):d=o.tokens,s=o.index;break;case!(o=$.exec(this.chunk)):if(p=o[0],e=o[1],t=o[2],this.validateEscapes(e,{isRegex:!0,offsetInChunk:1}),s=p.length,l=this.tokens,c=l[l.length-1],c)if(c.spaced&&(h=c[0],ut.call(i,h)>=0)){if(!t||O.test(p))return 0}else if(u=c[0],ut.call(A,u)>=0)return 0;t||this.error("missing / (unclosed regex)");break;default:return 0}switch(r=j.exec(this.chunk.slice(s))[0],n=s+r.length,a=this.makeToken("REGEX",null,0,n),!1){case!!Z.test(r):this.error("invalid regular expression flags "+r,{offset:s,length:r.length});break;case!(p||1===d.length):null==e&&(e=this.formatHeregex(d[0][1])),this.token("REGEX",""+this.makeDelimitedLiteral(e,{delimiter:"/"})+r,0,n,a);break;default:this.token("REGEX_START","(",0,0,a),this.token("IDENTIFIER","RegExp",0,0),this.token("CALL_START","(",0,0),this.mergeInterpolationTokens(d,{delimiter:'"',"double":!0},this.formatHeregex),r&&(this.token(",",",",s,0),this.token("STRING",'"'+r+'"',s,r.length)),this.token(")",")",n,0),this.token("REGEX_END",")",n,0)}return n},e.prototype.lineToken=function(){var e,t,n,i,r;if(!(n=R.exec(this.chunk)))return 0;if(t=n[0],this.seenFor=!1,r=t.length-1-t.lastIndexOf("\n"),i=this.unfinished(),r-this.indebt===this.indent)return i?this.suppressNewlines():this.newlineToken(0),t.length;if(r>this.indent){if(i)return this.indebt=r-this.indent,this.suppressNewlines(),t.length;if(!this.tokens.length)return this.baseIndent=this.indent=r,t.length;e=r-this.indent+this.outdebt,this.token("INDENT",e,t.length-r,r),this.indents.push(e),this.ends.push({tag:"OUTDENT"}),this.outdebt=this.indebt=0,this.indent=r}else this.baseIndent>r?this.error("missing indentation",{offset:t.length}):(this.indebt=0,this.outdentToken(this.indent-r,i,t.length));return t.length},e.prototype.outdentToken=function(e,t,n){var i,r,s,o;for(i=this.indent-e;e>0;)s=this.indents[this.indents.length-1],s?s===this.outdebt?(e-=this.outdebt,this.outdebt=0):this.outdebt>s?(this.outdebt-=s,e-=s):(r=this.indents.pop()+this.outdebt,n&&(o=this.chunk[n],ut.call(b,o)>=0)&&(i-=r-e,e=r),this.outdebt=0,this.pair("OUTDENT"),this.token("OUTDENT",e,0,n),e-=r):e=0;for(r&&(this.outdebt-=e);";"===this.value();)this.tokens.pop();return"TERMINATOR"===this.tag()||t||this.token("TERMINATOR","\n",n,0),this.indent=i,this},e.prototype.whitespaceToken=function(){var e,t,n,i;return(e=et.exec(this.chunk))||(t="\n"===this.chunk.charAt(0))?(i=this.tokens,n=i[i.length-1],n&&(n[e?"spaced":"newLine"]=!0),e?e[0].length:0):0},e.prototype.newlineToken=function(e){for(;";"===this.value();)this.tokens.pop();return"TERMINATOR"!==this.tag()&&this.token("TERMINATOR","\n",e,0),this},e.prototype.suppressNewlines=function(){return"\\"===this.value()&&this.tokens.pop(),this},e.prototype.literalToken=function(){var e,t,n,s,o,a,c,u,p,d;if((e=_.exec(this.chunk))?(d=e[0],r.test(d)&&this.tagParameters()):d=this.chunk.charAt(0),u=d,n=this.tokens,t=n[n.length-1],"="===d&&t&&(!t[1].reserved&&(s=t[1],ut.call(C,s)>=0)&&(t.origin&&(t=t.origin),this.error("reserved word '"+t[1]+"' can't be assigned",t[2])),"||"===(o=t[1])||"&&"===o))return t[0]="COMPOUND_ASSIGN",t[1]+="=",d.length;if(";"===d)this.seenFor=!1,u="TERMINATOR";else if(ut.call(D,d)>=0)u="MATH";else if(ut.call(l,d)>=0)u="COMPARE";else if(ut.call(h,d)>=0)u="COMPOUND_ASSIGN";else if(ut.call(J,d)>=0)u="UNARY";else if(ut.call(Q,d)>=0)u="UNARY_MATH";else if(ut.call(U,d)>=0)u="SHIFT";else if(ut.call(x,d)>=0||"?"===d&&(null!=t?t.spaced:void 0))u="LOGIC";else 
if(t&&!t.spaced)if("("===d&&(a=t[0],ut.call(i,a)>=0))"?"===t[0]&&(t[0]="FUNC_EXIST"),u="CALL_START";else if("["===d&&(c=t[0],ut.call(y,c)>=0))switch(u="INDEX_START",t[0]){case"?":t[0]="INDEX_SOAK"}switch(p=this.makeToken(u,d),d){case"(":case"{":case"[":this.ends.push({tag:w[d],origin:p});break;case")":case"}":case"]":this.pair(d)}return this.tokens.push(p),d.length},e.prototype.tagParameters=function(){var e,t,n,i;if(")"!==this.tag())return this;for(t=[],i=this.tokens,e=i.length,i[--e][0]="PARAM_END";n=i[--e];)switch(n[0]){case")":t.push(n);break;case"(":case"CALL_START":if(!t.length)return"("===n[0]?(n[0]="PARAM_START",this):this;t.pop()}return this},e.prototype.closeIndentation=function(){return this.outdentToken(this.indent)},e.prototype.matchWithInterpolations=function(t,n){var i,r,s,o,a,c,l,h,u,p,d,f,m,g,v;if(v=[],h=n.length,this.chunk.slice(0,h)!==n)return null;for(m=this.chunk.slice(h);;){if(g=t.exec(m)[0],this.validateEscapes(g,{isRegex:"/"===n.charAt(0),offsetInChunk:h}),v.push(this.makeToken("NEOSTRING",g,h)),m=m.slice(g.length),h+=g.length,"#{"!==m.slice(0,2))break;p=this.getLineAndColumnFromChunk(h+1),c=p[0],r=p[1],d=(new e).tokenize(m.slice(1),{line:c,column:r,untilBalanced:!0}),l=d.tokens,o=d.index,o+=1,u=l[0],i=l[l.length-1],u[0]=u[1]="(",i[0]=i[1]=")",i.origin=["","end of interpolation",i[2]],"TERMINATOR"===(null!=(f=l[1])?f[0]:void 0)&&l.splice(1,1),v.push(["TOKENS",l]),m=m.slice(o),h+=o}return m.slice(0,n.length)!==n&&this.error("missing "+n,{length:n.length}),s=v[0],a=v[v.length-1],s[2].first_column-=n.length,a[2].last_column+=n.length,0===a[1].length&&(a[2].last_column-=1),{tokens:v,index:h+n.length}},e.prototype.mergeInterpolationTokens=function(e,t,n){var i,r,s,o,a,c,l,h,u,p,d,f,m,g,v,b;for(e.length>1&&(u=this.token("STRING_START","(",0,0)),s=this.tokens.length,o=a=0,l=e.length;l>a;o=++a){switch(g=e[o],m=g[0],b=g[1],m){case"TOKENS":if(2===b.length)continue;h=b[0],v=b;break;case"NEOSTRING":if(i=n(g[1],o),0===i.length){if(0!==o)continue;r=this.tokens.length}2===o&&null!=r&&this.tokens.splice(r,2),g[0]="STRING",g[1]=this.makeDelimitedLiteral(i,t),h=g,v=[g]}this.tokens.length>s&&(p=this.token("+","+"),p[2]={first_line:h[2].first_line,first_column:h[2].first_column,last_line:h[2].first_line,last_column:h[2].first_column}),(d=this.tokens).push.apply(d,v)}return u?(c=e[e.length-1],u.origin=["STRING",null,{first_line:u[2].first_line,first_column:u[2].first_column,last_line:c[2].last_line,last_column:c[2].last_column}],f=this.token("STRING_END",")"),f[2]={first_line:c[2].last_line,first_column:c[2].last_column,last_line:c[2].last_line,last_column:c[2].last_column}):void 0},e.prototype.pair=function(e){var t,n,i,r,s;return i=this.ends,n=i[i.length-1],e!==(s=null!=n?n.tag:void 0)?("OUTDENT"!==s&&this.error("unmatched "+e),r=this.indents,t=r[r.length-1],this.outdentToken(t,!0),this.pair(e)):this.ends.pop()},e.prototype.getLineAndColumnFromChunk=function(e){var t,n,i,r,s;return 0===e?[this.chunkLine,this.chunkColumn]:(s=e>=this.chunk.length?this.chunk:this.chunk.slice(0,+(e-1)+1||9e9),i=nt(s,"\n"),t=this.chunkColumn,i>0?(r=s.split("\n"),n=r[r.length-1],t=n.length):t+=s.length,[this.chunkLine+i,t])},e.prototype.makeToken=function(e,t,n,i){var r,s,o,a,c;return null==n&&(n=0),null==i&&(i=t.length),s={},o=this.getLineAndColumnFromChunk(n),s.first_line=o[0],s.first_column=o[1],r=Math.max(0,i-1),a=this.getLineAndColumnFromChunk(n+r),s.last_line=a[0],s.last_column=a[1],c=[e,t,s]},e.prototype.token=function(e,t,n,i,r){var s;return 
s=this.makeToken(e,t,n,i),r&&(s.origin=r),this.tokens.push(s),s},e.prototype.tag=function(){var e,t;return e=this.tokens,t=e[e.length-1],null!=t?t[0]:void 0},e.prototype.value=function(){var e,t;return e=this.tokens,t=e[e.length-1],null!=t?t[1]:void 0},e.prototype.unfinished=function(){var e;return L.test(this.chunk)||"\\"===(e=this.tag())||"."===e||"?."===e||"?::"===e||"UNARY"===e||"MATH"===e||"UNARY_MATH"===e||"+"===e||"-"===e||"YIELD"===e||"**"===e||"SHIFT"===e||"RELATION"===e||"COMPARE"===e||"LOGIC"===e||"THROW"===e||"EXTENDS"===e},e.prototype.formatString=function(e){return e.replace(X,"$1")},e.prototype.formatHeregex=function(e){return e.replace(g,"$1$2")},e.prototype.validateEscapes=function(e,t){var n,i,r,s,o,a,c,l;return null==t&&(t={}),s=k.exec(e),!s||(s[0],n=s[1],a=s[2],i=s[3],l=s[4],t.isRegex&&a&&"0"!==a.charAt(0))?void 0:(o=a?"octal escape sequences are not allowed":"invalid escape sequence",r="\\"+(a||i||l),this.error(o+" "+r,{offset:(null!=(c=t.offsetInChunk)?c:0)+s.index+n.length,length:r.length}))},e.prototype.makeDelimitedLiteral=function(e,t){var n;return null==t&&(t={}),""===e&&"/"===t.delimiter&&(e="(?:)"),n=RegExp("(\\\\\\\\)|(\\\\0(?=[1-7]))|\\\\?("+t.delimiter+")|\\\\?(?:(\\n)|(\\r)|(\\u2028)|(\\u2029))|(\\\\.)","g"),e=e.replace(n,function(e,n,i,r,s,o,a,c,l){switch(!1){case!n:return t.double?n+n:n;case!i:return"\\x00";case!r:return"\\"+r;case!s:return"\\n";case!o:return"\\r";case!a:return"\\u2028";case!c:return"\\u2029";case!l:return t.double?"\\"+l:l}}),""+t.delimiter+e+t.delimiter},e.prototype.error=function(e,t){var n,i,r,s,o,a;return null==t&&(t={}),r="first_line"in t?t:(o=this.getLineAndColumnFromChunk(null!=(s=t.offset)?s:0),i=o[0],n=o[1],o,{first_line:i,first_column:n,last_column:n+(null!=(a=t.length)?a:1)-1}),ht(e,r)},e}(),F=["true","false","null","this","new","delete","typeof","in","instanceof","return","throw","break","continue","debugger","yield","if","else","switch","for","while","do","try","catch","finally","class","extends","super"],a=["undefined","then","unless","until","loop","of","by","when"],o={and:"&&",or:"||",is:"==",isnt:"!=",not:"!",yes:"true",no:"false",on:"true",off:"false"},s=function(){var e;e=[];for(rt in o)e.push(rt);return 
e}(),a=a.concat(s),V=["case","default","function","var","void","with","const","let","enum","export","import","native","implements","interface","package","private","protected","public","static"],H=["arguments","eval","yield*"],C=F.concat(V).concat(H),e.RESERVED=V.concat(F).concat(a).concat(H),e.STRICT_PROSCRIBED=H,t=65279,v=/^(?!\d)((?:(?!\s)[$\w\x7f-\uffff])+)([^\n\S]*:(?!:))?/,I=/^0b[01]+|^0o[0-7]+|^0x[\da-f]+|^\d*\.?\d+(?:e[+-]?\d+)?/i,_=/^(?:[-=]>|[-+*\/%<>&|^!?=]=|>>>=?|([-+:])\1|([&|<>*\/%])\2=?|\?(\.|::)|\.{2,3})/,et=/^[^\n\S]+/,c=/^###([^#][\s\S]*?)(?:###[^\n\S]*|###$)|^(?:\s*#(?!##[^#]).*)+/,r=/^[-=]>/,R=/^(?:\n[^\n\S]*)+/,T=/^`[^\\`]*(?:\\.[^\\`]*)*`/,Y=/^(?:'''|"""|'|")/,W=/^(?:[^\\']|\\[\s\S])*/,q=/^(?:[^\\"#]|\\[\s\S]|\#(?!\{))*/,f=/^(?:[^\\']|\\[\s\S]|'(?!''))*/,p=/^(?:[^\\"#]|\\[\s\S]|"(?!"")|\#(?!\{))*/,X=/((?:\\\\)+)|\\[^\S\n]*\n\s*/g,G=/\s*\n\s*/g,d=/\n+([^\n\S]*)(?=\S)/g,$=/^\/(?!\/)((?:[^[\/\n\\]|\\[^\n]|\[(?:\\[^\n]|[^\]\n\\])*\])*)(\/)?/,j=/^\w*/,Z=/^(?!.*(.).*\1)[imgy]*$/,m=/^(?:[^\\\/#]|\\[\s\S]|\/(?!\/\/)|\#(?!\{))*/,g=/((?:\\\\)+)|\\(\s)|\s+(?:#.*)?/g,M=/^(\/|\/{3}\s*)(\*)/,O=/^\/=?\s/,u=/\*\//,L=/^\s*(?:,|\??\.(?![.\d])|::)/,k=/((?:^|[^\\])(?:\\\\)*)\\(?:(0[0-7]|[1-7])|(x(?![\da-fA-F]{2}).{0,2})|(u(?![\da-fA-F]{4}).{0,4}))/,E=/^[^\n\S]*\n/,K=/\n[^\n\S]*$/,z=/\s+$/,h=["-=","+=","/=","*=","%=","||=","&&=","?=","<<=",">>=",">>>=","&=","^=","|=","**=","//=","%%="],J=["NEW","TYPEOF","DELETE","DO"],Q=["!","~"],x=["&&","||","&","|","^"],U=["<<",">>",">>>"],l=["==","!=","<",">","<=",">="],D=["*","/","%","//","%%"],B=["IN","OF","INSTANCEOF"],n=["TRUE","FALSE"],i=["IDENTIFIER",")","]","?","@","THIS","SUPER"],y=i.concat(["NUMBER","STRING","STRING_END","REGEX","REGEX_END","BOOL","NULL","UNDEFINED","}","::"]),A=y.concat(["++","--"]),N=["INDENT","OUTDENT","TERMINATOR"],b=[")","}","]"]}.call(this),t.exports}(),require["./parser"]=function(){var e={},t={exports:e},n=function(){function e(){this.yy={}}var t=function(e,t,n,i){for(n=n||{},i=e.length;i--;n[e[i]]=t);return 
n},n=[1,20],i=[1,75],r=[1,71],s=[1,76],o=[1,77],a=[1,73],c=[1,74],l=[1,50],h=[1,52],u=[1,53],p=[1,54],d=[1,55],f=[1,45],m=[1,46],g=[1,27],v=[1,60],b=[1,61],y=[1,70],k=[1,43],w=[1,26],T=[1,58],C=[1,59],F=[1,57],E=[1,38],N=[1,44],L=[1,56],x=[1,65],S=[1,66],D=[1,67],R=[1,68],A=[1,42],I=[1,64],_=[1,29],O=[1,30],$=[1,31],j=[1,32],M=[1,33],B=[1,34],V=[1,35],P=[1,78],U=[1,6,26,34,109],G=[1,88],H=[1,81],q=[1,80],X=[1,79],W=[1,82],Y=[1,83],K=[1,84],z=[1,85],J=[1,86],Q=[1,87],Z=[1,91],et=[1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,136,137,142,143,144,145,146,147,148],tt=[1,97],nt=[1,98],it=[1,99],rt=[1,100],st=[1,102],ot=[1,103],at=[1,96],ct=[2,115],lt=[1,6,25,26,34,56,61,64,73,74,75,76,78,80,81,85,91,92,93,98,100,109,111,112,113,117,118,133,136,137,142,143,144,145,146,147,148],ht=[2,82],ut=[1,108],pt=[2,61],dt=[1,112],ft=[1,117],mt=[1,118],gt=[1,120],vt=[1,6,25,26,34,46,56,61,64,73,74,75,76,78,80,81,85,91,92,93,98,100,109,111,112,113,117,118,133,136,137,142,143,144,145,146,147,148],bt=[2,79],yt=[1,6,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,136,137,142,143,144,145,146,147,148],kt=[1,155],wt=[1,157],Tt=[1,152],Ct=[1,6,25,26,34,46,56,61,64,73,74,75,76,78,80,81,85,87,91,92,93,98,100,109,111,112,113,117,118,133,136,137,140,141,142,143,144,145,146,147,148,149],Ft=[2,98],Et=[1,6,25,26,34,49,56,61,64,73,74,75,76,78,80,81,85,91,92,93,98,100,109,111,112,113,117,118,133,136,137,142,143,144,145,146,147,148],Nt=[1,6,25,26,34,46,49,56,61,64,73,74,75,76,78,80,81,85,87,91,92,93,98,100,109,111,112,113,117,118,124,125,133,136,137,140,141,142,143,144,145,146,147,148,149],Lt=[1,207],xt=[1,206],St=[1,6,25,26,34,38,56,61,64,73,74,75,76,78,80,81,85,91,92,93,98,100,109,111,112,113,117,118,133,136,137,142,143,144,145,146,147,148],Dt=[2,59],Rt=[1,217],At=[6,25,26,56,61],It=[6,25,26,46,56,61,64],_t=[1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,136,137,143,145,146,147,148],Ot=[1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133],$t=[73,74,75,76,78,81,91,92],jt=[1,236],Mt=[2,136],Bt=[1,6,25,26,34,46,56,61,64,73,74,75,76,78,80,81,85,91,92,93,98,100,109,111,112,113,117,118,124,125,133,136,137,142,143,144,145,146,147,148],Vt=[1,245],Pt=[6,25,26,61,93,98],Ut=[1,6,25,26,34,56,61,64,80,85,93,98,100,109,118,133],Gt=[1,6,25,26,34,56,61,64,80,85,93,98,100,109,112,118,133],Ht=[124,125],qt=[61,124,125],Xt=[1,256],Wt=[6,25,26,61,85],Yt=[6,25,26,49,61,85],Kt=[6,25,26,46,49,61,85],zt=[1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,136,137,145,146,147,148],Jt=[11,28,30,32,33,36,37,40,41,42,43,44,52,53,54,58,59,80,83,86,90,95,96,97,103,107,108,111,113,115,117,126,132,134,135,136,137,138,140,141],Qt=[2,125],Zt=[6,25,26],en=[2,60],tn=[1,270],nn=[1,271],rn=[1,6,25,26,34,56,61,64,80,85,93,98,100,105,106,109,111,112,113,117,118,128,130,133,136,137,142,143,144,145,146,147,148],sn=[26,128,130],on=[1,6,26,34,56,61,64,80,85,93,98,100,109,112,118,133],an=[2,74],cn=[1,293],ln=[1,294],hn=[1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,128,133,136,137,142,143,144,145,146,147,148],un=[1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,113,117,118,133],pn=[1,305],dn=[1,306],fn=[6,25,26,61],mn=[1,6,25,26,34,56,61,64,80,85,93,98,100,105,109,111,112,113,117,118,133,136,137,142,143,144,145,146,147,148],gn=[25,61],vn={trace:function(){},yy:{},symbols_:{error:2,Root:3,Body:4,Line:5,TERMINATOR:6,Expression:7,Statement:8,Return:9,Comment:10,STATEMENT:11,Value:12,Invocation:13,Code:14,Operation:15,Assign:16,If:17,Try:18,While:19,For:20,Switch:21,Class:22,Throw:23,
Block:24,INDENT:25,OUTDENT:26,Identifier:27,IDENTIFIER:28,AlphaNumeric:29,NUMBER:30,String:31,STRING:32,STRING_START:33,STRING_END:34,Regex:35,REGEX:36,REGEX_START:37,REGEX_END:38,Literal:39,JS:40,DEBUGGER:41,UNDEFINED:42,NULL:43,BOOL:44,Assignable:45,"=":46,AssignObj:47,ObjAssignable:48,":":49,SimpleObjAssignable:50,ThisProperty:51,RETURN:52,HERECOMMENT:53,PARAM_START:54,ParamList:55,PARAM_END:56,FuncGlyph:57,"->":58,"=>":59,OptComma:60,",":61,Param:62,ParamVar:63,"...":64,Array:65,Object:66,Splat:67,SimpleAssignable:68,Accessor:69,Parenthetical:70,Range:71,This:72,".":73,"?.":74,"::":75,"?::":76,Index:77,INDEX_START:78,IndexValue:79,INDEX_END:80,INDEX_SOAK:81,Slice:82,"{":83,AssignList:84,"}":85,CLASS:86,EXTENDS:87,OptFuncExist:88,Arguments:89,SUPER:90,FUNC_EXIST:91,CALL_START:92,CALL_END:93,ArgList:94,THIS:95,"@":96,"[":97,"]":98,RangeDots:99,"..":100,Arg:101,SimpleArgs:102,TRY:103,Catch:104,FINALLY:105,CATCH:106,THROW:107,"(":108,")":109,WhileSource:110,WHILE:111,WHEN:112,UNTIL:113,Loop:114,LOOP:115,ForBody:116,FOR:117,BY:118,ForStart:119,ForSource:120,ForVariables:121,OWN:122,ForValue:123,FORIN:124,FOROF:125,SWITCH:126,Whens:127,ELSE:128,When:129,LEADING_WHEN:130,IfBlock:131,IF:132,POST_IF:133,UNARY:134,UNARY_MATH:135,"-":136,"+":137,YIELD:138,FROM:139,"--":140,"++":141,"?":142,MATH:143,"**":144,SHIFT:145,COMPARE:146,LOGIC:147,RELATION:148,COMPOUND_ASSIGN:149,$accept:0,$end:1},terminals_:{2:"error",6:"TERMINATOR",11:"STATEMENT",25:"INDENT",26:"OUTDENT",28:"IDENTIFIER",30:"NUMBER",32:"STRING",33:"STRING_START",34:"STRING_END",36:"REGEX",37:"REGEX_START",38:"REGEX_END",40:"JS",41:"DEBUGGER",42:"UNDEFINED",43:"NULL",44:"BOOL",46:"=",49:":",52:"RETURN",53:"HERECOMMENT",54:"PARAM_START",56:"PARAM_END",58:"->",59:"=>",61:",",64:"...",73:".",74:"?.",75:"::",76:"?::",78:"INDEX_START",80:"INDEX_END",81:"INDEX_SOAK",83:"{",85:"}",86:"CLASS",87:"EXTENDS",90:"SUPER",91:"FUNC_EXIST",92:"CALL_START",93:"CALL_END",95:"THIS",96:"@",97:"[",98:"]",100:"..",103:"TRY",105:"FINALLY",106:"CATCH",107:"THROW",108:"(",109:")",111:"WHILE",112:"WHEN",113:"UNTIL",115:"LOOP",117:"FOR",118:"BY",122:"OWN",124:"FORIN",125:"FOROF",126:"SWITCH",128:"ELSE",130:"LEADING_WHEN",132:"IF",133:"POST_IF",134:"UNARY",135:"UNARY_MATH",136:"-",137:"+",138:"YIELD",139:"FROM",140:"--",141:"++",142:"?",143:"MATH",144:"**",145:"SHIFT",146:"COMPARE",147:"LOGIC",148:"RELATION",149:"COMPOUND_ASSIGN"},productions_:[0,[3,0],[3,1],[4,1],[4,3],[4,2],[5,1],[5,1],[8,1],[8,1],[8,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[24,2],[24,3],[27,1],[29,1],[29,1],[31,1],[31,3],[35,1],[35,3],[39,1],[39,1],[39,1],[39,1],[39,1],[39,1],[39,1],[16,3],[16,4],[16,5],[47,1],[47,3],[47,5],[47,3],[47,5],[47,1],[50,1],[50,1],[48,1],[48,1],[9,2],[9,1],[10,1],[14,5],[14,2],[57,1],[57,1],[60,0],[60,1],[55,0],[55,1],[55,3],[55,4],[55,6],[62,1],[62,2],[62,3],[62,1],[63,1],[63,1],[63,1],[63,1],[67,2],[68,1],[68,2],[68,2],[68,1],[45,1],[45,1],[45,1],[12,1],[12,1],[12,1],[12,1],[12,1],[69,2],[69,2],[69,2],[69,2],[69,1],[69,1],[77,3],[77,2],[79,1],[79,1],[66,4],[84,0],[84,1],[84,3],[84,4],[84,6],[22,1],[22,2],[22,3],[22,4],[22,2],[22,3],[22,4],[22,5],[13,3],[13,3],[13,1],[13,2],[88,0],[88,1],[89,2],[89,4],[72,1],[72,1],[51,2],[65,2],[65,4],[99,1],[99,1],[71,5],[82,3],[82,2],[82,2],[82,1],[94,1],[94,3],[94,4],[94,4],[94,6],[101,1],[101,1],[101,1],[102,1],[102,3],[18,2],[18,3],[18,4],[18,5],[104,3],[104,3],[104,2],[23,2],[70,3],[70,5],[110,2],[110,4],[110,2],[110,4],[19,2],[19,2],[19,2],[19,1],[114,2],[114,2],[20,2],[20,2],[20,2],[116,2],[11
6,4],[116,2],[119,2],[119,3],[123,1],[123,1],[123,1],[123,1],[121,1],[121,3],[120,2],[120,2],[120,4],[120,4],[120,4],[120,6],[120,6],[21,5],[21,7],[21,4],[21,6],[127,1],[127,2],[129,3],[129,4],[131,3],[131,5],[17,1],[17,3],[17,3],[17,3],[15,2],[15,2],[15,2],[15,2],[15,2],[15,2],[15,3],[15,2],[15,2],[15,2],[15,2],[15,2],[15,3],[15,3],[15,3],[15,3],[15,3],[15,3],[15,3],[15,3],[15,3],[15,5],[15,4],[15,3]],performAction:function(e,t,n,i,r,s,o){var a=s.length-1;
-switch(r){case 1:return this.$=i.addLocationDataFn(o[a],o[a])(new i.Block);case 2:return this.$=s[a];case 3:this.$=i.addLocationDataFn(o[a],o[a])(i.Block.wrap([s[a]]));break;case 4:this.$=i.addLocationDataFn(o[a-2],o[a])(s[a-2].push(s[a]));break;case 5:this.$=s[a-1];break;case 6:case 7:case 8:case 9:case 11:case 12:case 13:case 14:case 15:case 16:case 17:case 18:case 19:case 20:case 21:case 22:case 27:case 32:case 34:case 47:case 48:case 49:case 50:case 51:case 59:case 60:case 70:case 71:case 72:case 73:case 78:case 79:case 82:case 86:case 92:case 136:case 137:case 139:case 169:case 170:case 186:case 192:this.$=s[a];break;case 10:case 25:case 26:case 28:case 30:case 33:case 35:this.$=i.addLocationDataFn(o[a],o[a])(new i.Literal(s[a]));break;case 23:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Block);break;case 24:case 31:case 93:this.$=i.addLocationDataFn(o[a-2],o[a])(s[a-1]);break;case 29:case 149:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Parens(s[a-1]));break;case 36:this.$=i.addLocationDataFn(o[a],o[a])(new i.Undefined);break;case 37:this.$=i.addLocationDataFn(o[a],o[a])(new i.Null);break;case 38:this.$=i.addLocationDataFn(o[a],o[a])(new i.Bool(s[a]));break;case 39:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Assign(s[a-2],s[a]));break;case 40:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.Assign(s[a-3],s[a]));break;case 41:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Assign(s[a-4],s[a-1]));break;case 42:case 75:case 80:case 81:case 83:case 84:case 85:case 171:case 172:this.$=i.addLocationDataFn(o[a],o[a])(new i.Value(s[a]));break;case 43:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Assign(i.addLocationDataFn(o[a-2])(new i.Value(s[a-2])),s[a],"object",{operatorToken:i.addLocationDataFn(o[a-1])(new i.Literal(s[a-1]))}));break;case 44:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Assign(i.addLocationDataFn(o[a-4])(new i.Value(s[a-4])),s[a-1],"object",{operatorToken:i.addLocationDataFn(o[a-3])(new i.Literal(s[a-3]))}));break;case 45:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Assign(i.addLocationDataFn(o[a-2])(new i.Value(s[a-2])),s[a],null,{operatorToken:i.addLocationDataFn(o[a-1])(new i.Literal(s[a-1]))}));break;case 46:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Assign(i.addLocationDataFn(o[a-4])(new i.Value(s[a-4])),s[a-1],null,{operatorToken:i.addLocationDataFn(o[a-3])(new i.Literal(s[a-3]))}));break;case 52:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Return(s[a]));break;case 53:this.$=i.addLocationDataFn(o[a],o[a])(new i.Return);break;case 54:this.$=i.addLocationDataFn(o[a],o[a])(new i.Comment(s[a]));break;case 55:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Code(s[a-3],s[a],s[a-1]));break;case 56:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Code([],s[a],s[a-1]));break;case 57:this.$=i.addLocationDataFn(o[a],o[a])("func");break;case 58:this.$=i.addLocationDataFn(o[a],o[a])("boundfunc");break;case 61:case 98:this.$=i.addLocationDataFn(o[a],o[a])([]);break;case 62:case 99:case 131:case 173:this.$=i.addLocationDataFn(o[a],o[a])([s[a]]);break;case 63:case 100:case 132:this.$=i.addLocationDataFn(o[a-2],o[a])(s[a-2].concat(s[a]));break;case 64:case 101:case 133:this.$=i.addLocationDataFn(o[a-3],o[a])(s[a-3].concat(s[a]));break;case 65:case 102:case 135:this.$=i.addLocationDataFn(o[a-5],o[a])(s[a-5].concat(s[a-2]));break;case 66:this.$=i.addLocationDataFn(o[a],o[a])(new i.Param(s[a]));break;case 67:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Param(s[a-1],null,!0));break;case 68:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Param(s[a-2],s[a]));break;case 69:case 
138:this.$=i.addLocationDataFn(o[a],o[a])(new i.Expansion);break;case 74:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Splat(s[a-1]));break;case 76:this.$=i.addLocationDataFn(o[a-1],o[a])(s[a-1].add(s[a]));break;case 77:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Value(s[a-1],[].concat(s[a])));break;case 87:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Access(s[a]));break;case 88:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Access(s[a],"soak"));break;case 89:this.$=i.addLocationDataFn(o[a-1],o[a])([i.addLocationDataFn(o[a-1])(new i.Access(new i.Literal("prototype"))),i.addLocationDataFn(o[a])(new i.Access(s[a]))]);break;case 90:this.$=i.addLocationDataFn(o[a-1],o[a])([i.addLocationDataFn(o[a-1])(new i.Access(new i.Literal("prototype"),"soak")),i.addLocationDataFn(o[a])(new i.Access(s[a]))]);break;case 91:this.$=i.addLocationDataFn(o[a],o[a])(new i.Access(new i.Literal("prototype")));break;case 94:this.$=i.addLocationDataFn(o[a-1],o[a])(i.extend(s[a],{soak:!0}));break;case 95:this.$=i.addLocationDataFn(o[a],o[a])(new i.Index(s[a]));break;case 96:this.$=i.addLocationDataFn(o[a],o[a])(new i.Slice(s[a]));break;case 97:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.Obj(s[a-2],s[a-3].generated));break;case 103:this.$=i.addLocationDataFn(o[a],o[a])(new i.Class);break;case 104:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Class(null,null,s[a]));break;case 105:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Class(null,s[a]));break;case 106:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.Class(null,s[a-1],s[a]));break;case 107:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Class(s[a]));break;case 108:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Class(s[a-1],null,s[a]));break;case 109:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.Class(s[a-2],s[a]));break;case 110:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Class(s[a-3],s[a-1],s[a]));break;case 111:case 112:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Call(s[a-2],s[a],s[a-1]));break;case 113:this.$=i.addLocationDataFn(o[a],o[a])(new i.Call("super",[new i.Splat(new i.Literal("arguments"))]));break;case 114:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Call("super",s[a]));break;case 115:this.$=i.addLocationDataFn(o[a],o[a])(!1);break;case 116:this.$=i.addLocationDataFn(o[a],o[a])(!0);break;case 117:this.$=i.addLocationDataFn(o[a-1],o[a])([]);break;case 118:case 134:this.$=i.addLocationDataFn(o[a-3],o[a])(s[a-2]);break;case 119:case 120:this.$=i.addLocationDataFn(o[a],o[a])(new i.Value(new i.Literal("this")));break;case 121:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Value(i.addLocationDataFn(o[a-1])(new i.Literal("this")),[i.addLocationDataFn(o[a])(new i.Access(s[a]))],"this"));break;case 122:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Arr([]));break;case 123:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.Arr(s[a-2]));break;case 124:this.$=i.addLocationDataFn(o[a],o[a])("inclusive");break;case 125:this.$=i.addLocationDataFn(o[a],o[a])("exclusive");break;case 126:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Range(s[a-3],s[a-1],s[a-2]));break;case 127:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Range(s[a-2],s[a],s[a-1]));break;case 128:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Range(s[a-1],null,s[a]));break;case 129:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Range(null,s[a],s[a-1]));break;case 130:this.$=i.addLocationDataFn(o[a],o[a])(new i.Range(null,null,s[a]));break;case 140:this.$=i.addLocationDataFn(o[a-2],o[a])([].concat(s[a-2],s[a]));break;case 141:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Try(s[a]));break;case 
142:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Try(s[a-1],s[a][0],s[a][1]));break;case 143:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.Try(s[a-2],null,null,s[a]));break;case 144:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Try(s[a-3],s[a-2][0],s[a-2][1],s[a]));break;case 145:this.$=i.addLocationDataFn(o[a-2],o[a])([s[a-1],s[a]]);break;case 146:this.$=i.addLocationDataFn(o[a-2],o[a])([i.addLocationDataFn(o[a-1])(new i.Value(s[a-1])),s[a]]);break;case 147:this.$=i.addLocationDataFn(o[a-1],o[a])([null,s[a]]);break;case 148:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Throw(s[a]));break;case 150:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Parens(s[a-2]));break;case 151:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.While(s[a]));break;case 152:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.While(s[a-2],{guard:s[a]}));break;case 153:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.While(s[a],{invert:!0}));break;case 154:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.While(s[a-2],{invert:!0,guard:s[a]}));break;case 155:this.$=i.addLocationDataFn(o[a-1],o[a])(s[a-1].addBody(s[a]));break;case 156:case 157:this.$=i.addLocationDataFn(o[a-1],o[a])(s[a].addBody(i.addLocationDataFn(o[a-1])(i.Block.wrap([s[a-1]]))));break;case 158:this.$=i.addLocationDataFn(o[a],o[a])(s[a]);break;case 159:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.While(i.addLocationDataFn(o[a-1])(new i.Literal("true"))).addBody(s[a]));break;case 160:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.While(i.addLocationDataFn(o[a-1])(new i.Literal("true"))).addBody(i.addLocationDataFn(o[a])(i.Block.wrap([s[a]]))));break;case 161:case 162:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.For(s[a-1],s[a]));break;case 163:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.For(s[a],s[a-1]));break;case 164:this.$=i.addLocationDataFn(o[a-1],o[a])({source:i.addLocationDataFn(o[a])(new i.Value(s[a]))});break;case 165:this.$=i.addLocationDataFn(o[a-3],o[a])({source:i.addLocationDataFn(o[a-2])(new i.Value(s[a-2])),step:s[a]});break;case 166:this.$=i.addLocationDataFn(o[a-1],o[a])(function(){return s[a].own=s[a-1].own,s[a].name=s[a-1][0],s[a].index=s[a-1][1],s[a]}());break;case 167:this.$=i.addLocationDataFn(o[a-1],o[a])(s[a]);break;case 168:this.$=i.addLocationDataFn(o[a-2],o[a])(function(){return s[a].own=!0,s[a]}());break;case 174:this.$=i.addLocationDataFn(o[a-2],o[a])([s[a-2],s[a]]);break;case 175:this.$=i.addLocationDataFn(o[a-1],o[a])({source:s[a]});break;case 176:this.$=i.addLocationDataFn(o[a-1],o[a])({source:s[a],object:!0});break;case 177:this.$=i.addLocationDataFn(o[a-3],o[a])({source:s[a-2],guard:s[a]});break;case 178:this.$=i.addLocationDataFn(o[a-3],o[a])({source:s[a-2],guard:s[a],object:!0});break;case 179:this.$=i.addLocationDataFn(o[a-3],o[a])({source:s[a-2],step:s[a]});break;case 180:this.$=i.addLocationDataFn(o[a-5],o[a])({source:s[a-4],guard:s[a-2],step:s[a]});break;case 181:this.$=i.addLocationDataFn(o[a-5],o[a])({source:s[a-4],step:s[a-2],guard:s[a]});break;case 182:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Switch(s[a-3],s[a-1]));break;case 183:this.$=i.addLocationDataFn(o[a-6],o[a])(new i.Switch(s[a-5],s[a-3],s[a-1]));break;case 184:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.Switch(null,s[a-1]));break;case 185:this.$=i.addLocationDataFn(o[a-5],o[a])(new i.Switch(null,s[a-3],s[a-1]));break;case 187:this.$=i.addLocationDataFn(o[a-1],o[a])(s[a-1].concat(s[a]));break;case 188:this.$=i.addLocationDataFn(o[a-2],o[a])([[s[a-1],s[a]]]);break;case 189:this.$=i.addLocationDataFn(o[a-3],o[a])([[s[a-2],s[a-1]]]);break;case 
190:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.If(s[a-1],s[a],{type:s[a-2]}));break;case 191:this.$=i.addLocationDataFn(o[a-4],o[a])(s[a-4].addElse(i.addLocationDataFn(o[a-2],o[a])(new i.If(s[a-1],s[a],{type:s[a-2]}))));break;case 193:this.$=i.addLocationDataFn(o[a-2],o[a])(s[a-2].addElse(s[a]));break;case 194:case 195:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.If(s[a],i.addLocationDataFn(o[a-2])(i.Block.wrap([s[a-2]])),{type:s[a-1],statement:!0}));break;case 196:case 197:case 200:case 201:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Op(s[a-1],s[a]));break;case 198:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Op("-",s[a]));break;case 199:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Op("+",s[a]));break;case 202:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Op(s[a-2].concat(s[a-1]),s[a]));break;case 203:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Op("--",s[a]));break;case 204:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Op("++",s[a]));break;case 205:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Op("--",s[a-1],null,!0));break;case 206:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Op("++",s[a-1],null,!0));break;case 207:this.$=i.addLocationDataFn(o[a-1],o[a])(new i.Existence(s[a-1]));break;case 208:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Op("+",s[a-2],s[a]));break;case 209:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Op("-",s[a-2],s[a]));break;case 210:case 211:case 212:case 213:case 214:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Op(s[a-1],s[a-2],s[a]));break;case 215:this.$=i.addLocationDataFn(o[a-2],o[a])(function(){return"!"===s[a-1].charAt(0)?new i.Op(s[a-1].slice(1),s[a-2],s[a]).invert():new i.Op(s[a-1],s[a-2],s[a])}());break;case 216:this.$=i.addLocationDataFn(o[a-2],o[a])(new i.Assign(s[a-2],s[a],s[a-1]));break;case 217:this.$=i.addLocationDataFn(o[a-4],o[a])(new i.Assign(s[a-4],s[a-1],s[a-3]));break;case 218:this.$=i.addLocationDataFn(o[a-3],o[a])(new i.Assign(s[a-3],s[a],s[a-2]));break;case 219:this.$=i.addLocationDataFn(o[a-2],o[a])(new 
i.Extends(s[a-2],s[a]))}},table:[{1:[2,1],3:1,4:2,5:3,7:4,8:5,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{1:[3]},{1:[2,2],6:P},t(U,[2,3]),t(U,[2,6],{119:69,110:89,116:90,111:x,113:S,117:R,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(U,[2,7],{119:69,110:92,116:93,111:x,113:S,117:R,133:Z}),t(et,[2,11],{88:94,69:95,77:101,73:tt,74:nt,75:it,76:rt,78:st,81:ot,91:at,92:ct}),t(et,[2,12],{77:101,88:104,69:105,73:tt,74:nt,75:it,76:rt,78:st,81:ot,91:at,92:ct}),t(et,[2,13]),t(et,[2,14]),t(et,[2,15]),t(et,[2,16]),t(et,[2,17]),t(et,[2,18]),t(et,[2,19]),t(et,[2,20]),t(et,[2,21]),t(et,[2,22]),t(et,[2,8]),t(et,[2,9]),t(et,[2,10]),t(lt,ht,{46:[1,106]}),t(lt,[2,83]),t(lt,[2,84]),t(lt,[2,85]),t(lt,[2,86]),t([1,6,25,26,34,38,56,61,64,73,74,75,76,78,80,81,85,91,93,98,100,109,111,112,113,117,118,133,136,137,142,143,144,145,146,147,148],[2,113],{89:107,92:ut}),t([6,25,56,61],pt,{55:109,62:110,63:111,27:113,51:114,65:115,66:116,28:i,64:dt,83:y,96:ft,97:mt}),{24:119,25:gt},{7:121,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:123,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:124,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:125,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:127,8:126,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,139:[1,128],140:B,141:V},{12:130,13:131,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l
,41:h,42:u,43:p,44:d,45:132,51:63,65:47,66:48,68:129,70:23,71:24,72:25,83:y,90:w,95:T,96:C,97:F,108:L},{12:130,13:131,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:132,51:63,65:47,66:48,68:133,70:23,71:24,72:25,83:y,90:w,95:T,96:C,97:F,108:L},t(vt,bt,{87:[1,137],140:[1,134],141:[1,135],149:[1,136]}),t(et,[2,192],{128:[1,138]}),{24:139,25:gt},{24:140,25:gt},t(et,[2,158]),{24:141,25:gt},{7:142,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:[1,143],27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(yt,[2,103],{39:22,70:23,71:24,72:25,65:47,66:48,29:49,35:51,27:62,51:63,31:72,12:130,13:131,45:132,24:144,68:146,25:gt,28:i,30:r,32:s,33:o,36:a,37:c,40:l,41:h,42:u,43:p,44:d,83:y,87:[1,145],90:w,95:T,96:C,97:F,108:L}),{7:147,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t([1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,142,143,144,145,146,147,148],[2,53],{12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,9:18,10:19,45:21,39:22,70:23,71:24,72:25,57:28,68:36,131:37,110:39,114:40,116:41,65:47,66:48,29:49,35:51,27:62,51:63,119:69,31:72,8:122,7:148,11:n,28:i,30:r,32:s,33:o,36:a,37:c,40:l,41:h,42:u,43:p,44:d,52:f,53:m,54:g,58:v,59:b,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,115:D,126:A,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V}),t(et,[2,54]),t(vt,[2,80]),t(vt,[2,81]),t(lt,[2,32]),t(lt,[2,33]),t(lt,[2,34]),t(lt,[2,35]),t(lt,[2,36]),t(lt,[2,37]),t(lt,[2,38]),{4:149,5:3,7:4,8:5,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:[1,150],27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:151,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:kt,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,64:wt,65:47,66:48,67:156,68:36,70:23,71:24,72:25,83:y,86:k,90:w,94:153,95:T,96:C,97:F,98:Tt,101:154,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(lt,[2,119]),t(lt,[2,120],{27:158,28:i}),{25:[2,57]},{25:[2,58]},t(Ct,[2,75]),t(Ct,[2,78]),{7:159,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,14
0:B,141:V},{7:160,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:161,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:163,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,24:162,25:gt,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{27:168,28:i,51:169,65:170,66:171,71:164,83:y,96:ft,97:F,121:165,122:[1,166],123:167},{120:172,124:[1,173],125:[1,174]},t([6,25,61,85],Ft,{31:72,84:175,47:176,48:177,50:178,10:179,29:180,27:181,51:182,28:i,30:r,32:s,33:o,53:m,96:ft}),t(Et,[2,26]),t(Et,[2,27]),t(lt,[2,30]),{12:130,13:183,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:132,51:63,65:47,66:48,68:184,70:23,71:24,72:25,83:y,90:w,95:T,96:C,97:F,108:L},t(Nt,[2,25]),t(Et,[2,28]),{4:185,5:3,7:4,8:5,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(U,[2,5],{7:4,8:5,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,9:18,10:19,45:21,39:22,70:23,71:24,72:25,57:28,68:36,131:37,110:39,114:40,116:41,65:47,66:48,29:49,35:51,27:62,51:63,119:69,31:72,5:186,11:n,28:i,30:r,32:s,33:o,36:a,37:c,40:l,41:h,42:u,43:p,44:d,52:f,53:m,54:g,58:v,59:b,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,111:x,113:S,115:D,117:R,126:A,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V}),t(et,[2,207]),{7:187,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:188,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:189,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,1
7:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:190,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:191,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:192,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:193,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:194,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:195,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(et,[2,157]),t(et,[2,162]),{7:196,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(et,[2,156]),t(et,[2,161]),{89:197,92:ut},t(Ct,[2,76]),{92:[2,116]},{27:198,28:i},{27:199,28:i},t(Ct,[2,91],{27:200,28:i}),{27:201,28:i},t(Ct,[2,92]),{7:203,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9
,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,64:Lt,65:47,66:48,68:36,70:23,71:24,72:25,79:202,82:204,83:y,86:k,90:w,95:T,96:C,97:F,99:205,100:xt,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{77:208,78:st,81:ot},{89:209,92:ut},t(Ct,[2,77]),{6:[1,211],7:210,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:[1,212],27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(St,[2,114]),{7:215,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:kt,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,64:wt,65:47,66:48,67:156,68:36,70:23,71:24,72:25,83:y,86:k,90:w,93:[1,213],94:214,95:T,96:C,97:F,101:154,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t([6,25],Dt,{60:218,56:[1,216],61:Rt}),t(At,[2,62]),t(At,[2,66],{46:[1,220],64:[1,219]}),t(At,[2,69]),t(It,[2,70]),t(It,[2,71]),t(It,[2,72]),t(It,[2,73]),{27:158,28:i},{7:215,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:kt,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,64:wt,65:47,66:48,67:156,68:36,70:23,71:24,72:25,83:y,86:k,90:w,94:153,95:T,96:C,97:F,98:Tt,101:154,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(et,[2,56]),{4:222,5:3,7:4,8:5,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,26:[1,221],27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t([1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,136,137,143,144,145,146,147,148],[2,196],{119:69,110:89,116:90,142:X}),{110:92,111:x,113:S,116:93,117:R,119:69,133:Z},t(_t,[2,197],{119:69,110:89,116:90,142:X,144:Y}),t(_t,[2,198],{119:69,110:89,116:90,142:X,144:Y}),t(_t,[2,199],{119:69,110:89,116:90,142:X,144:Y}),t(et,[2,200],{119:69,110:92,116:93}),t(Ot,[2,201],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{7:223,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(et,[2,203],{73:bt,74:bt,75:bt,76:bt,78:bt,81:bt,91:bt,92:bt}),{69:95,73:tt,74:nt,75:it,76:rt,77:101,78:st,81:ot,88:94,91:at,92:ct},{69:105,73:tt,74:nt,75:it,76:rt,77:101,78:st,81:ot,88:104,91:at,92:ct
},t($t,ht),t(et,[2,204],{73:bt,74:bt,75:bt,76:bt,78:bt,81:bt,91:bt,92:bt}),t(et,[2,205]),t(et,[2,206]),{6:[1,226],7:224,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:[1,225],27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:227,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{24:228,25:gt,132:[1,229]},t(et,[2,141],{104:230,105:[1,231],106:[1,232]}),t(et,[2,155]),t(et,[2,163]),{25:[1,233],110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},{127:234,129:235,130:jt},t(et,[2,104]),{7:237,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(yt,[2,107],{24:238,25:gt,73:bt,74:bt,75:bt,76:bt,78:bt,81:bt,91:bt,92:bt,87:[1,239]}),t(Ot,[2,148],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Ot,[2,52],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{6:P,109:[1,240]},{4:241,5:3,7:4,8:5,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t([6,25,61,98],Mt,{119:69,110:89,116:90,99:242,64:[1,243],100:xt,111:x,113:S,117:R,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Bt,[2,122]),t([6,25,98],Dt,{60:244,61:Vt}),t(Pt,[2,131]),{7:215,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:kt,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,64:wt,65:47,66:48,67:156,68:36,70:23,71:24,72:25,83:y,86:k,90:w,94:246,95:T,96:C,97:F,101:154,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(Pt,[2,137]),t(Pt,[2,138]),t(Nt,[2,121]),{24:247,25:gt,110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},t(Ut,[2,151],{119:69,110:89,116:90,111:x,112:[1,248],113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Ut,[2,153],{119:69,110:89,116:90,111:x,112:[1,249],113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(et,[2,159]),t(Gt,[2,160],{119:69,110:89,116:90,111:x,113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t([1,6,25,
26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,133,136,137,142,143,144,145,146,147,148],[2,164],{118:[1,250]}),t(Ht,[2,167]),{27:168,28:i,51:169,65:170,66:171,83:y,96:ft,97:mt,121:251,123:167},t(Ht,[2,173],{61:[1,252]}),t(qt,[2,169]),t(qt,[2,170]),t(qt,[2,171]),t(qt,[2,172]),t(et,[2,166]),{7:253,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:254,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t([6,25,85],Dt,{60:255,61:Xt}),t(Wt,[2,99]),t(Wt,[2,42],{49:[1,257]}),t(Yt,[2,50],{46:[1,258]}),t(Wt,[2,47]),t(Yt,[2,51]),t(Kt,[2,48]),t(Kt,[2,49]),{38:[1,259],69:105,73:tt,74:nt,75:it,76:rt,77:101,78:st,81:ot,88:104,91:at,92:ct},t($t,bt),{6:P,34:[1,260]},t(U,[2,4]),t(zt,[2,208],{119:69,110:89,116:90,142:X,143:W,144:Y}),t(zt,[2,209],{119:69,110:89,116:90,142:X,143:W,144:Y}),t(_t,[2,210],{119:69,110:89,116:90,142:X,144:Y}),t(_t,[2,211],{119:69,110:89,116:90,142:X,144:Y}),t([1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,145,146,147,148],[2,212],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y}),t([1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,146,147],[2,213],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,148:Q}),t([1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,147],[2,214],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,148:Q}),t([1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,118,133,146,147,148],[2,215],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K}),t(Gt,[2,195],{119:69,110:89,116:90,111:x,113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Gt,[2,194],{119:69,110:89,116:90,111:x,113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(St,[2,111]),t(Ct,[2,87]),t(Ct,[2,88]),t(Ct,[2,89]),t(Ct,[2,90]),{80:[1,261]},{64:Lt,80:[2,95],99:262,100:xt,110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},{80:[2,96]},{7:263,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,80:[2,130],83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(Jt,[2,124]),t(Jt,Qt),t(Ct,[2,94]),t(St,[2,112]),t(Ot,[2,39],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{7:264,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,
113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:265,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(St,[2,117]),t([6,25,93],Dt,{60:266,61:Vt}),t(Pt,Mt,{119:69,110:89,116:90,64:[1,267],111:x,113:S,117:R,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{57:268,58:v,59:b},t(Zt,en,{63:111,27:113,51:114,65:115,66:116,62:269,28:i,64:dt,83:y,96:ft,97:mt}),{6:tn,25:nn},t(At,[2,67]),{7:272,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(rn,[2,23]),{6:P,26:[1,273]},t(Ot,[2,202],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Ot,[2,216],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{7:274,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:275,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(Ot,[2,219],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(et,[2,193]),{7:276,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(et,[2,142],{105:[1,277]}),{24:278,25:gt},{24:281,25:gt,27:279,28:i,66:280,83:y},{127:282,129:235,130:jt},{26:[1,283],128:[1,284],129:285,130:jt},t(sn,[2,186]),{7:287,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,102:286,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(on,[2,105],{119:69,110:89,116:90,24:288,25:gt,111:x,113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(et,[2,108]),{7
:289,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(lt,[2,149]),{6:P,26:[1,290]},{7:291,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t([11,28,30,32,33,36,37,40,41,42,43,44,52,53,54,58,59,83,86,90,95,96,97,103,107,108,111,113,115,117,126,132,134,135,136,137,138,140,141],Qt,{6:an,25:an,61:an,98:an}),{6:cn,25:ln,98:[1,292]},t([6,25,26,93,98],en,{12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,9:18,10:19,45:21,39:22,70:23,71:24,72:25,57:28,68:36,131:37,110:39,114:40,116:41,65:47,66:48,29:49,35:51,27:62,51:63,119:69,31:72,8:122,67:156,7:215,101:295,11:n,28:i,30:r,32:s,33:o,36:a,37:c,40:l,41:h,42:u,43:p,44:d,52:f,53:m,54:g,58:v,59:b,64:wt,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,111:x,113:S,115:D,117:R,126:A,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V}),t(Zt,Dt,{60:296,61:Vt}),t(hn,[2,190]),{7:297,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:298,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:299,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(Ht,[2,168]),{27:168,28:i,51:169,65:170,66:171,83:y,96:ft,97:mt,123:300},t([1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,113,117,133],[2,175],{119:69,110:89,116:90,112:[1,301],118:[1,302],136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(un,[2,176],{119:69,110:89,116:90,112:[1,303],136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{6:pn,25:dn,85:[1,304]},t([6,25,26,85],en,{31:72,48:177,50:178,10:179,29:180,27:181,51:182,47:307,28:i,30:r,32:s,33:o,53:m,96:ft}),{7:308,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:[1,309],27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,6
5:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:310,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:[1,311],27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(lt,[2,31]),t(Et,[2,29]),t(Ct,[2,93]),{7:312,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,80:[2,128],83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{80:[2,129],110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},t(Ot,[2,40],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{26:[1,313],110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},{6:cn,25:ln,93:[1,314]},t(Pt,an),{24:315,25:gt},t(At,[2,63]),{27:113,28:i,51:114,62:316,63:111,64:dt,65:115,66:116,83:y,96:ft,97:mt},t(fn,pt,{62:110,63:111,27:113,51:114,65:115,66:116,55:317,28:i,64:dt,83:y,96:ft,97:mt}),t(At,[2,68],{119:69,110:89,116:90,111:x,113:S,117:R,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(rn,[2,24]),{26:[1,318],110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},t(Ot,[2,218],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{24:319,25:gt,110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},{24:320,25:gt},t(et,[2,143]),{24:321,25:gt},{24:322,25:gt},t(mn,[2,147]),{26:[1,323],128:[1,324],129:285,130:jt},t(et,[2,184]),{24:325,25:gt},t(sn,[2,187]),{24:326,25:gt,61:[1,327]},t(gn,[2,139],{119:69,110:89,116:90,111:x,113:S,117:R,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(et,[2,106]),t(on,[2,109],{119:69,110:89,116:90,24:328,25:gt,111:x,113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{109:[1,329]},{98:[1,330],110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},t(Bt,[2,123]),{7:215,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,64:wt,65:47,66:48,67:156,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,101:331,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:215,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,25:kt,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,64:wt,65:47,66:48,67:156,68:36,70:23,71:24,72:25,83:y,86:k,90:w,94:332,95:T,96:C,97:F,101:154,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V
},t(Pt,[2,132]),{6:cn,25:ln,26:[1,333]},t(Gt,[2,152],{119:69,110:89,116:90,111:x,113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Gt,[2,154],{119:69,110:89,116:90,111:x,113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Gt,[2,165],{119:69,110:89,116:90,111:x,113:S,117:R,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Ht,[2,174]),{7:334,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:335,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:336,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(Bt,[2,97]),{10:179,27:181,28:i,29:180,30:r,31:72,32:s,33:o,47:337,48:177,50:178,51:182,53:m,96:ft},t(fn,Ft,{31:72,47:176,48:177,50:178,10:179,29:180,27:181,51:182,84:338,28:i,30:r,32:s,33:o,53:m,96:ft}),t(Wt,[2,100]),t(Wt,[2,43],{119:69,110:89,116:90,111:x,113:S,117:R,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{7:339,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(Wt,[2,45],{119:69,110:89,116:90,111:x,113:S,117:R,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{7:340,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{80:[2,127],110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},t(et,[2,41]),t(St,[2,118]),t(et,[2,55]),t(At,[2,64]),t(Zt,Dt,{60:341,61:Rt}),t(et,[2,217]),t(hn,[2,191]),t(et,[2,144]),t(mn,[2,145]),t(mn,[2,146]),t(et,[2,182]),{24:342,25:gt},{26:[1,343]},t(sn,[2,188],{6:[1,344]}),{7:345,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C
,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},t(et,[2,110]),t(lt,[2,150]),t(lt,[2,126]),t(Pt,[2,133]),t(Zt,Dt,{60:346,61:Vt}),t(Pt,[2,134]),t([1,6,25,26,34,56,61,64,80,85,93,98,100,109,111,112,113,117,133],[2,177],{119:69,110:89,116:90,118:[1,347],136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(un,[2,179],{119:69,110:89,116:90,112:[1,348],136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Ot,[2,178],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Wt,[2,101]),t(Zt,Dt,{60:349,61:Xt}),{26:[1,350],110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},{26:[1,351],110:89,111:x,113:S,116:90,117:R,119:69,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q},{6:tn,25:nn,26:[1,352]},{26:[1,353]},t(et,[2,185]),t(sn,[2,189]),t(gn,[2,140],{119:69,110:89,116:90,111:x,113:S,117:R,133:G,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),{6:cn,25:ln,26:[1,354]},{7:355,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{7:356,8:122,9:18,10:19,11:n,12:6,13:7,14:8,15:9,16:10,17:11,18:12,19:13,20:14,21:15,22:16,23:17,27:62,28:i,29:49,30:r,31:72,32:s,33:o,35:51,36:a,37:c,39:22,40:l,41:h,42:u,43:p,44:d,45:21,51:63,52:f,53:m,54:g,57:28,58:v,59:b,65:47,66:48,68:36,70:23,71:24,72:25,83:y,86:k,90:w,95:T,96:C,97:F,103:E,107:N,108:L,110:39,111:x,113:S,114:40,115:D,116:41,117:R,119:69,126:A,131:37,132:I,134:_,135:O,136:$,137:j,138:M,140:B,141:V},{6:pn,25:dn,26:[1,357]},t(Wt,[2,44]),t(Wt,[2,46]),t(At,[2,65]),t(et,[2,183]),t(Pt,[2,135]),t(Ot,[2,180],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Ot,[2,181],{119:69,110:89,116:90,136:H,137:q,142:X,143:W,144:Y,145:K,146:z,147:J,148:Q}),t(Wt,[2,102])],defaultActions:{60:[2,57],61:[2,58],96:[2,116],204:[2,96]},parseError:function(e,t){if(!t.recoverable)throw Error(e);
-this.trace(e)},parse:function(e){function t(){var e;return e=f.lex()||p,"number"!=typeof e&&(e=n.symbols_[e]||e),e}var n=this,i=[0],r=[null],s=[],o=this.table,a="",c=0,l=0,h=0,u=2,p=1,d=s.slice.call(arguments,1),f=Object.create(this.lexer),m={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(m.yy[g]=this.yy[g]);f.setInput(e,m.yy),m.yy.lexer=f,m.yy.parser=this,f.yylloc===void 0&&(f.yylloc={});var v=f.yylloc;s.push(v);var b=f.options&&f.options.ranges;this.parseError="function"==typeof m.yy.parseError?m.yy.parseError:Object.getPrototypeOf(this).parseError;for(var y,k,w,T,C,F,E,N,L,x={};;){if(w=i[i.length-1],this.defaultActions[w]?T=this.defaultActions[w]:((null===y||y===void 0)&&(y=t()),T=o[w]&&o[w][y]),T===void 0||!T.length||!T[0]){var S="";L=[];for(F in o[w])this.terminals_[F]&&F>u&&L.push("'"+this.terminals_[F]+"'");S=f.showPosition?"Parse error on line "+(c+1)+":\n"+f.showPosition()+"\nExpecting "+L.join(", ")+", got '"+(this.terminals_[y]||y)+"'":"Parse error on line "+(c+1)+": Unexpected "+(y==p?"end of input":"'"+(this.terminals_[y]||y)+"'"),this.parseError(S,{text:f.match,token:this.terminals_[y]||y,line:f.yylineno,loc:v,expected:L})}if(T[0]instanceof Array&&T.length>1)throw Error("Parse Error: multiple actions possible at state: "+w+", token: "+y);switch(T[0]){case 1:i.push(y),r.push(f.yytext),s.push(f.yylloc),i.push(T[1]),y=null,k?(y=k,k=null):(l=f.yyleng,a=f.yytext,c=f.yylineno,v=f.yylloc,h>0&&h--);break;case 2:if(E=this.productions_[T[1]][1],x.$=r[r.length-E],x._$={first_line:s[s.length-(E||1)].first_line,last_line:s[s.length-1].last_line,first_column:s[s.length-(E||1)].first_column,last_column:s[s.length-1].last_column},b&&(x._$.range=[s[s.length-(E||1)].range[0],s[s.length-1].range[1]]),C=this.performAction.apply(x,[a,l,c,m.yy,T[1],r,s].concat(d)),C!==void 0)return C;E&&(i=i.slice(0,2*-1*E),r=r.slice(0,-1*E),s=s.slice(0,-1*E)),i.push(this.productions_[T[1]][0]),r.push(x.$),s.push(x._$),N=o[i[i.length-2]][i[i.length-1]],i.push(N);break;case 3:return!0}}return!0}};return e.prototype=vn,vn.Parser=e,new e}();return require!==void 0&&e!==void 0&&(e.parser=n,e.Parser=n.Parser,e.parse=function(){return n.parse.apply(n,arguments)},e.main=function(t){t[1]||(console.log("Usage: "+t[0]+" FILE"),process.exit(1));var n=require("fs").readFileSync(require("path").normalize(t[1]),"utf8");return e.parser.parse(n)},t!==void 0&&require.main===t&&e.main(process.argv.slice(1))),t.exports}(),require["./scope"]=function(){var e={},t={exports:e};return function(){var t,n=[].indexOf||function(e){for(var t=0,n=this.length;n>t;t++)if(t in this&&this[t]===e)return t;return-1};e.Scope=t=function(){function e(e,t,n,i){var r,s;this.parent=e,this.expressions=t,this.method=n,this.referencedVars=i,this.variables=[{name:"arguments",type:"arguments"}],this.positions={},this.parent||(this.utilities={}),this.root=null!=(r=null!=(s=this.parent)?s.root:void 0)?r:this}return e.prototype.add=function(e,t,n){return this.shared&&!n?this.parent.add(e,t,n):Object.prototype.hasOwnProperty.call(this.positions,e)?this.variables[this.positions[e]].type=t:this.positions[e]=this.variables.push({name:e,type:t})-1},e.prototype.namedMethod=function(){var e;return(null!=(e=this.method)?e.name:void 0)||!this.parent?this.method:this.parent.namedMethod()},e.prototype.find=function(e){return this.check(e)?!0:(this.add(e,"var"),!1)},e.prototype.parameter=function(e){return this.shared&&this.parent.check(e,!0)?void 0:this.add(e,"param")},e.prototype.check=function(e){var 
t;return!!(this.type(e)||(null!=(t=this.parent)?t.check(e):void 0))},e.prototype.temporary=function(e,t,n){return null==n&&(n=!1),n?(t+parseInt(e,36)).toString(36).replace(/\d/g,"a"):e+(t||"")},e.prototype.type=function(e){var t,n,i,r;for(i=this.variables,t=0,n=i.length;n>t;t++)if(r=i[t],r.name===e)return r.type;return null},e.prototype.freeVariable=function(e,t){var i,r,s;for(null==t&&(t={}),i=0;;){if(s=this.temporary(e,i,t.single),!(this.check(s)||n.call(this.root.referencedVars,s)>=0))break;i++}return(null!=(r=t.reserve)?r:!0)&&this.add(s,"var",!0),s},e.prototype.assign=function(e,t){return this.add(e,{value:t,assigned:!0},!0),this.hasAssignments=!0},e.prototype.hasDeclarations=function(){return!!this.declaredVariables().length},e.prototype.declaredVariables=function(){var e;return function(){var t,n,i,r;for(i=this.variables,r=[],t=0,n=i.length;n>t;t++)e=i[t],"var"===e.type&&r.push(e.name);return r}.call(this).sort()},e.prototype.assignedVariables=function(){var e,t,n,i,r;for(n=this.variables,i=[],e=0,t=n.length;t>e;e++)r=n[e],r.type.assigned&&i.push(r.name+" = "+r.type.value);return i},e}()}.call(this),t.exports}(),require["./nodes"]=function(){var e={},t={exports:e};return function(){var t,n,i,r,s,o,a,c,l,h,u,p,d,f,m,g,v,b,y,k,w,T,C,F,E,N,L,x,S,D,R,A,I,_,O,$,j,M,B,V,P,U,G,H,q,X,W,Y,K,z,J,Q,Z,et,tt,nt,it,rt,st,ot,at,ct,lt,ht,ut,pt,dt,ft,mt,gt,vt,bt,yt,kt=function(e,t){function n(){this.constructor=e}for(var i in t)wt.call(t,i)&&(e[i]=t[i]);return n.prototype=t.prototype,e.prototype=new n,e.__super__=t.prototype,e},wt={}.hasOwnProperty,Tt=[].indexOf||function(e){for(var t=0,n=this.length;n>t;t++)if(t in this&&this[t]===e)return t;return-1},Ct=[].slice;Error.stackTraceLimit=1/0,P=require("./scope").Scope,dt=require("./lexer"),$=dt.RESERVED,V=dt.STRICT_PROSCRIBED,ft=require("./helpers"),et=ft.compact,rt=ft.flatten,it=ft.extend,ht=ft.merge,tt=ft.del,gt=ft.starts,nt=ft.ends,mt=ft.some,Z=ft.addLocationDataFn,lt=ft.locationDataToString,vt=ft.throwSyntaxError,e.extend=it,e.addLocationDataFn=Z,Q=function(){return!0},D=function(){return!1},X=function(){return this},S=function(){return this.negated=!this.negated,this},e.CodeFragment=l=function(){function e(e,t){var n;this.code=""+t,this.locationData=null!=e?e.locationData:void 0,this.type=(null!=e?null!=(n=e.constructor)?n.name:void 0:void 0)||"unknown"}return e.prototype.toString=function(){return""+this.code+(this.locationData?": "+lt(this.locationData):"")},e}(),st=function(e){var t;return function(){var n,i,r;for(r=[],n=0,i=e.length;i>n;n++)t=e[n],r.push(t.code);return r}().join("")},e.Base=r=function(){function e(){}return e.prototype.compile=function(e,t){return st(this.compileToFragments(e,t))},e.prototype.compileToFragments=function(e,t){var n;return e=it({},e),t&&(e.level=t),n=this.unfoldSoak(e)||this,n.tab=e.indent,e.level!==L&&n.isStatement(e)?n.compileClosure(e):n.compileNode(e)},e.prototype.compileClosure=function(e){var n,i,r,a,l,h,u;return(a=this.jumps())&&a.error("cannot use a pure statement in an expression"),e.sharedScope=!0,r=new c([],s.wrap([this])),n=[],((i=this.contains(at))||this.contains(ct))&&(n=[new x("this")],i?(l="apply",n.push(new x("arguments"))):l="call",r=new z(r,[new t(new x(l))])),h=new o(r,n).compileNode(e),(r.isGenerator||(null!=(u=r.base)?u.isGenerator:void 0))&&(h.unshift(this.makeCode("(yield* ")),h.push(this.makeCode(")"))),h},e.prototype.cache=function(e,t,n){var r,s,o;return r=null!=n?n(this):this.isComplex(),r?(s=new x(e.scope.freeVariable("ref")),o=new 
i(s,this),t?[o.compileToFragments(e,t),[this.makeCode(s.value)]]:[o,s]):(s=t?this.compileToFragments(e,t):this,[s,s])},e.prototype.cacheToCodeFragments=function(e){return[st(e[0]),st(e[1])]},e.prototype.makeReturn=function(e){var t;return t=this.unwrapAll(),e?new o(new x(e+".push"),[t]):new M(t)},e.prototype.contains=function(e){var t;return t=void 0,this.traverseChildren(!1,function(n){return e(n)?(t=n,!1):void 0}),t},e.prototype.lastNonComment=function(e){var t;for(t=e.length;t--;)if(!(e[t]instanceof h))return e[t];return null},e.prototype.toString=function(e,t){var n;return null==e&&(e=""),null==t&&(t=this.constructor.name),n="\n"+e+t,this.soak&&(n+="?"),this.eachChild(function(t){return n+=t.toString(e+q)}),n},e.prototype.eachChild=function(e){var t,n,i,r,s,o,a,c;if(!this.children)return this;for(a=this.children,i=0,s=a.length;s>i;i++)if(t=a[i],this[t])for(c=rt([this[t]]),r=0,o=c.length;o>r;r++)if(n=c[r],e(n)===!1)return this;return this},e.prototype.traverseChildren=function(e,t){return this.eachChild(function(n){var i;return i=t(n),i!==!1?n.traverseChildren(e,t):void 0})},e.prototype.invert=function(){return new I("!",this)},e.prototype.unwrapAll=function(){var e;for(e=this;e!==(e=e.unwrap()););return e},e.prototype.children=[],e.prototype.isStatement=D,e.prototype.jumps=D,e.prototype.isComplex=Q,e.prototype.isChainable=D,e.prototype.isAssignable=D,e.prototype.unwrap=X,e.prototype.unfoldSoak=D,e.prototype.assigns=D,e.prototype.updateLocationDataIfMissing=function(e){return this.locationData?this:(this.locationData=e,this.eachChild(function(t){return t.updateLocationDataIfMissing(e)}))},e.prototype.error=function(e){return vt(e,this.locationData)},e.prototype.makeCode=function(e){return new l(this,e)},e.prototype.wrapInBraces=function(e){return[].concat(this.makeCode("("),e,this.makeCode(")"))},e.prototype.joinFragmentArrays=function(e,t){var n,i,r,s,o;for(n=[],r=s=0,o=e.length;o>s;r=++s)i=e[r],r&&n.push(this.makeCode(t)),n=n.concat(i);return n},e}(),e.Block=s=function(e){function t(e){this.expressions=et(rt(e||[]))}return kt(t,e),t.prototype.children=["expressions"],t.prototype.push=function(e){return this.expressions.push(e),this},t.prototype.pop=function(){return this.expressions.pop()},t.prototype.unshift=function(e){return this.expressions.unshift(e),this},t.prototype.unwrap=function(){return 1===this.expressions.length?this.expressions[0]:this},t.prototype.isEmpty=function(){return!this.expressions.length},t.prototype.isStatement=function(e){var t,n,i,r;for(r=this.expressions,n=0,i=r.length;i>n;n++)if(t=r[n],t.isStatement(e))return!0;return!1},t.prototype.jumps=function(e){var t,n,i,r,s;for(s=this.expressions,n=0,r=s.length;r>n;n++)if(t=s[n],i=t.jumps(e))return i},t.prototype.makeReturn=function(e){var t,n;for(n=this.expressions.length;n--;)if(t=this.expressions[n],!(t instanceof h)){this.expressions[n]=t.makeReturn(e),t instanceof M&&!t.expression&&this.expressions.splice(n,1);break}return this},t.prototype.compileToFragments=function(e,n){return null==e&&(e={}),e.scope?t.__super__.compileToFragments.call(this,e,n):this.compileRoot(e)},t.prototype.compileNode=function(e){var n,i,r,s,o,a,c,l,h;for(this.tab=e.indent,h=e.level===L,i=[],l=this.expressions,s=o=0,a=l.length;a>o;s=++o)c=l[s],c=c.unwrapAll(),c=c.unfoldSoak(e)||c,c instanceof t?i.push(c.compileNode(e)):h?(c.front=!0,r=c.compileToFragments(e),c.isStatement(e)||(r.unshift(this.makeCode(""+this.tab)),r.push(this.makeCode(";"))),i.push(r)):i.push(c.compileToFragments(e,F));return 
h?this.spaced?[].concat(this.joinFragmentArrays(i,"\n\n"),this.makeCode("\n")):this.joinFragmentArrays(i,"\n"):(n=i.length?this.joinFragmentArrays(i,", "):[this.makeCode("void 0")],i.length>1&&e.level>=F?this.wrapInBraces(n):n)},t.prototype.compileRoot=function(e){var t,n,i,r,s,o,a,c,l,u,p;for(e.indent=e.bare?"":q,e.level=L,this.spaced=!0,e.scope=new P(null,this,null,null!=(l=e.referencedVars)?l:[]),u=e.locals||[],r=0,s=u.length;s>r;r++)o=u[r],e.scope.parameter(o);return a=[],e.bare||(c=function(){var e,n,r,s;for(r=this.expressions,s=[],i=e=0,n=r.length;n>e&&(t=r[i],t.unwrap()instanceof h);i=++e)s.push(t);return s}.call(this),p=this.expressions.slice(c.length),this.expressions=c,c.length&&(a=this.compileNode(ht(e,{indent:""})),a.push(this.makeCode("\n"))),this.expressions=p),n=this.compileWithDeclarations(e),e.bare?n:[].concat(a,this.makeCode("(function() {\n"),n,this.makeCode("\n}).call(this);\n"))},t.prototype.compileWithDeclarations=function(e){var t,n,i,r,s,o,a,c,l,u,p,d,f,m;for(r=[],c=[],l=this.expressions,s=o=0,a=l.length;a>o&&(i=l[s],i=i.unwrap(),i instanceof h||i instanceof x);s=++o);return e=ht(e,{level:L}),s&&(d=this.expressions.splice(s,9e9),u=[this.spaced,!1],m=u[0],this.spaced=u[1],p=[this.compileNode(e),m],r=p[0],this.spaced=p[1],this.expressions=d),c=this.compileNode(e),f=e.scope,f.expressions===this&&(n=e.scope.hasDeclarations(),t=f.hasAssignments,n||t?(s&&r.push(this.makeCode("\n")),r.push(this.makeCode(this.tab+"var ")),n&&r.push(this.makeCode(f.declaredVariables().join(", "))),t&&(n&&r.push(this.makeCode(",\n"+(this.tab+q))),r.push(this.makeCode(f.assignedVariables().join(",\n"+(this.tab+q))))),r.push(this.makeCode(";\n"+(this.spaced?"\n":"")))):r.length&&c.length&&r.push(this.makeCode("\n"))),r.concat(c)},t.wrap=function(e){return 1===e.length&&e[0]instanceof t?e[0]:new t(e)},t}(r),e.Literal=x=function(e){function t(e){this.value=e}return kt(t,e),t.prototype.makeReturn=function(){return this.isStatement()?this:t.__super__.makeReturn.apply(this,arguments)},t.prototype.isAssignable=function(){return g.test(this.value)},t.prototype.isStatement=function(){var e;return"break"===(e=this.value)||"continue"===e||"debugger"===e},t.prototype.isComplex=D,t.prototype.assigns=function(e){return e===this.value},t.prototype.jumps=function(e){return"break"!==this.value||(null!=e?e.loop:void 0)||(null!=e?e.block:void 0)?"continue"!==this.value||(null!=e?e.loop:void 0)?void 0:this:this},t.prototype.compileNode=function(e){var t,n,i;return n="this"===this.value?(null!=(i=e.scope.method)?i.bound:void 0)?e.scope.method.context:this.value:this.value.reserved?'"'+this.value+'"':this.value,t=this.isStatement()?""+this.tab+n+";":n,[this.makeCode(t)]},t.prototype.toString=function(){return' "'+this.value+'"'},t}(r),e.Undefined=function(e){function t(){return t.__super__.constructor.apply(this,arguments)}return kt(t,e),t.prototype.isAssignable=D,t.prototype.isComplex=D,t.prototype.compileNode=function(e){return[this.makeCode(e.level>=T?"(void 0)":"void 0")]},t}(r),e.Null=function(e){function t(){return t.__super__.constructor.apply(this,arguments)}return kt(t,e),t.prototype.isAssignable=D,t.prototype.isComplex=D,t.prototype.compileNode=function(){return[this.makeCode("null")]},t}(r),e.Bool=function(e){function t(e){this.val=e}return kt(t,e),t.prototype.isAssignable=D,t.prototype.isComplex=D,t.prototype.compileNode=function(){return[this.makeCode(this.val)]},t}(r),e.Return=M=function(e){function t(e){this.expression=e}return 
kt(t,e),t.prototype.children=["expression"],t.prototype.isStatement=Q,t.prototype.makeReturn=X,t.prototype.jumps=X,t.prototype.compileToFragments=function(e,n){var i,r;return i=null!=(r=this.expression)?r.makeReturn():void 0,!i||i instanceof t?t.__super__.compileToFragments.call(this,e,n):i.compileToFragments(e,n)},t.prototype.compileNode=function(e){var t,n,i;return t=[],n=null!=(i=this.expression)?"function"==typeof i.isYieldReturn?i.isYieldReturn():void 0:void 0,n||t.push(this.makeCode(this.tab+("return"+(this.expression?" ":"")))),this.expression&&(t=t.concat(this.expression.compileToFragments(e,N))),n||t.push(this.makeCode(";")),t},t}(r),e.Value=z=function(e){function t(e,n,i){return!n&&e instanceof t?e:(this.base=e,this.properties=n||[],i&&(this[i]=!0),this)}return kt(t,e),t.prototype.children=["base","properties"],t.prototype.add=function(e){return this.properties=this.properties.concat(e),this},t.prototype.hasProperties=function(){return!!this.properties.length},t.prototype.bareLiteral=function(e){return!this.properties.length&&this.base instanceof e},t.prototype.isArray=function(){return this.bareLiteral(n)},t.prototype.isRange=function(){return this.bareLiteral(j)},t.prototype.isComplex=function(){return this.hasProperties()||this.base.isComplex()},t.prototype.isAssignable=function(){return this.hasProperties()||this.base.isAssignable()},t.prototype.isSimpleNumber=function(){return this.bareLiteral(x)&&B.test(this.base.value)},t.prototype.isString=function(){return this.bareLiteral(x)&&b.test(this.base.value)},t.prototype.isRegex=function(){return this.bareLiteral(x)&&v.test(this.base.value)},t.prototype.isAtomic=function(){var e,t,n,i;for(i=this.properties.concat(this.base),e=0,t=i.length;t>e;e++)if(n=i[e],n.soak||n instanceof o)return!1;return!0},t.prototype.isNotCallable=function(){return this.isSimpleNumber()||this.isString()||this.isRegex()||this.isArray()||this.isRange()||this.isSplice()||this.isObject()},t.prototype.isStatement=function(e){return!this.properties.length&&this.base.isStatement(e)},t.prototype.assigns=function(e){return!this.properties.length&&this.base.assigns(e)},t.prototype.jumps=function(e){return!this.properties.length&&this.base.jumps(e)},t.prototype.isObject=function(e){return this.properties.length?!1:this.base instanceof A&&(!e||this.base.generated)},t.prototype.isSplice=function(){var e,t;return t=this.properties,e=t[t.length-1],e instanceof U},t.prototype.looksStatic=function(e){var t;return this.base.value===e&&1===this.properties.length&&"prototype"!==(null!=(t=this.properties[0].name)?t.value:void 0)},t.prototype.unwrap=function(){return this.properties.length?this:this.base},t.prototype.cacheReference=function(e){var n,r,s,o,a;return a=this.properties,s=a[a.length-1],2>this.properties.length&&!this.base.isComplex()&&!(null!=s?s.isComplex():void 0)?[this,this]:(n=new t(this.base,this.properties.slice(0,-1)),n.isComplex()&&(r=new x(e.scope.freeVariable("base")),n=new t(new O(new i(r,n)))),s?(s.isComplex()&&(o=new x(e.scope.freeVariable("name")),s=new w(new i(o,s.index)),o=new w(o)),[n.add(s),new t(r||n.base,[o||s])]):[n,r])},t.prototype.compileNode=function(e){var t,n,i,r,s;for(this.base.front=this.front,s=this.properties,t=this.base.compileToFragments(e,s.length?T:null),(this.base instanceof O||s.length)&&B.test(st(t))&&t.push(this.makeCode(".")),n=0,i=s.length;i>n;n++)r=s[n],t.push.apply(t,r.compileToFragments(e));return t},t.prototype.unfoldSoak=function(e){return null!=this.unfoldedSoak?this.unfoldedSoak:this.unfoldedSoak=function(n){return 
function(){var r,s,o,a,c,l,h,p,d,f;if(o=n.base.unfoldSoak(e))return(p=o.body.properties).push.apply(p,n.properties),o;for(d=n.properties,s=a=0,c=d.length;c>a;s=++a)if(l=d[s],l.soak)return l.soak=!1,r=new t(n.base,n.properties.slice(0,s)),f=new t(n.base,n.properties.slice(s)),r.isComplex()&&(h=new x(e.scope.freeVariable("ref")),r=new O(new i(h,r)),f.base=h),new y(new u(r),f,{soak:!0});return!1}}(this)()},t}(r),e.Comment=h=function(e){function t(e){this.comment=e}return kt(t,e),t.prototype.isStatement=Q,t.prototype.makeReturn=X,t.prototype.compileNode=function(e,t){var n,i;return i=this.comment.replace(/^(\s*)#(?=\s)/gm,"$1 *"),n="/*"+ut(i,this.tab)+(Tt.call(i,"\n")>=0?"\n"+this.tab:"")+" */",(t||e.level)===L&&(n=e.indent+n),[this.makeCode("\n"),this.makeCode(n)]},t}(r),e.Call=o=function(e){function n(e,t,n){this.args=null!=t?t:[],this.soak=n,this.isNew=!1,this.isSuper="super"===e,this.variable=this.isSuper?null:e,e instanceof z&&e.isNotCallable()&&e.error("literal is not a function")}return kt(n,e),n.prototype.children=["variable","args"],n.prototype.newInstance=function(){var e,t;return e=(null!=(t=this.variable)?t.base:void 0)||this.variable,e instanceof n&&!e.isNew?e.newInstance():this.isNew=!0,this},n.prototype.superReference=function(e){var n,r,s,o,a,c,l,h;return a=e.scope.namedMethod(),(null!=a?a.klass:void 0)?(o=a.klass,c=a.name,h=a.variable,o.isComplex()&&(s=new x(e.scope.parent.freeVariable("base")),r=new z(new O(new i(s,o))),h.base=r,h.properties.splice(0,o.properties.length)),(c.isComplex()||c instanceof w&&c.index.isAssignable())&&(l=new x(e.scope.parent.freeVariable("name")),c=new w(new i(l,c.index)),h.properties.pop(),h.properties.push(c)),n=[new t(new x("__super__"))],a["static"]&&n.push(new t(new x("constructor"))),n.push(null!=l?new w(l):c),new z(null!=s?s:o,n).compile(e)):(null!=a?a.ctor:void 0)?a.name+".__super__.constructor":this.error("cannot call super outside of an instance method.")},n.prototype.superThis=function(e){var t;return t=e.scope.method,t&&!t.klass&&t.context||"this"},n.prototype.unfoldSoak=function(e){var t,i,r,s,o,a,c,l,h;if(this.soak){if(this.variable){if(i=bt(e,this,"variable"))return i;c=new z(this.variable).cacheReference(e),s=c[0],h=c[1]}else s=new x(this.superReference(e)),h=new z(s);return h=new n(h,this.args),h.isNew=this.isNew,s=new x("typeof "+s.compile(e)+' === "function"'),new y(s,new z(h),{soak:!0})}for(t=this,a=[];;)if(t.variable instanceof n)a.push(t),t=t.variable;else{if(!(t.variable instanceof z))break;if(a.push(t),!((t=t.variable.base)instanceof n))break}for(l=a.reverse(),r=0,o=l.length;o>r;r++)t=l[r],i&&(t.variable instanceof n?t.variable=i:t.variable.base=i),i=bt(e,t,"variable");return i},n.prototype.compileNode=function(e){var t,n,i,r,s,o,a,c,l,h;if(null!=(l=this.variable)&&(l.front=this.front),r=G.compileSplattedArray(e,this.args,!0),r.length)return this.compileSplat(e,r);for(i=[],h=this.args,n=o=0,a=h.length;a>o;n=++o)t=h[n],n&&i.push(this.makeCode(", ")),i.push.apply(i,t.compileToFragments(e,F));return s=[],this.isSuper?(c=this.superReference(e)+(".call("+this.superThis(e)),i.length&&(c+=", "),s.push(this.makeCode(c))):(this.isNew&&s.push(this.makeCode("new ")),s.push.apply(s,this.variable.compileToFragments(e,T)),s.push(this.makeCode("("))),s.push.apply(s,i),s.push(this.makeCode(")")),s},n.prototype.compileSplat=function(e,t){var n,i,r,s,o,a;return this.isSuper?[].concat(this.makeCode(this.superReference(e)+".apply("+this.superThis(e)+", "),t,this.makeCode(")")):this.isNew?(s=this.tab+q,[].concat(this.makeCode("(function(func, 
args, ctor) {\n"+s+"ctor.prototype = func.prototype;\n"+s+"var child = new ctor, result = func.apply(child, args);\n"+s+"return Object(result) === result ? result : child;\n"+this.tab+"})("),this.variable.compileToFragments(e,F),this.makeCode(", "),t,this.makeCode(", function(){})"))):(n=[],i=new z(this.variable),(o=i.properties.pop())&&i.isComplex()?(a=e.scope.freeVariable("ref"),n=n.concat(this.makeCode("("+a+" = "),i.compileToFragments(e,F),this.makeCode(")"),o.compileToFragments(e))):(r=i.compileToFragments(e,T),B.test(st(r))&&(r=this.wrapInBraces(r)),o?(a=st(r),r.push.apply(r,o.compileToFragments(e))):a="null",n=n.concat(r)),n=n.concat(this.makeCode(".apply("+a+", "),t,this.makeCode(")")))},n}(r),e.Extends=d=function(e){function t(e,t){this.child=e,this.parent=t}return kt(t,e),t.prototype.children=["child","parent"],t.prototype.compileToFragments=function(e){return new o(new z(new x(yt("extend",e))),[this.child,this.parent]).compileToFragments(e)},t}(r),e.Access=t=function(e){function t(e,t){this.name=e,this.name.asKey=!0,this.soak="soak"===t}return kt(t,e),t.prototype.children=["name"],t.prototype.compileToFragments=function(e){var t;return t=this.name.compileToFragments(e),g.test(st(t))?t.unshift(this.makeCode(".")):(t.unshift(this.makeCode("[")),t.push(this.makeCode("]"))),t},t.prototype.isComplex=D,t}(r),e.Index=w=function(e){function t(e){this.index=e}return kt(t,e),t.prototype.children=["index"],t.prototype.compileToFragments=function(e){return[].concat(this.makeCode("["),this.index.compileToFragments(e,N),this.makeCode("]"))},t.prototype.isComplex=function(){return this.index.isComplex()},t}(r),e.Range=j=function(e){function t(e,t,n){this.from=e,this.to=t,this.exclusive="exclusive"===n,this.equals=this.exclusive?"":"="}return kt(t,e),t.prototype.children=["from","to"],t.prototype.compileVariables=function(e){var t,n,i,r,s,o;return e=ht(e,{top:!0}),t=tt(e,"isComplex"),n=this.cacheToCodeFragments(this.from.cache(e,F,t)),this.fromC=n[0],this.fromVar=n[1],i=this.cacheToCodeFragments(this.to.cache(e,F,t)),this.toC=i[0],this.toVar=i[1],(o=tt(e,"step"))&&(r=this.cacheToCodeFragments(o.cache(e,F,t)),this.step=r[0],this.stepVar=r[1]),s=[this.fromVar.match(R),this.toVar.match(R)],this.fromNum=s[0],this.toNum=s[1],this.stepVar?this.stepNum=this.stepVar.match(R):void 0},t.prototype.compileNode=function(e){var t,n,i,r,s,o,a,c,l,h,u,p,d,f;return this.fromVar||this.compileVariables(e),e.index?(a=this.fromNum&&this.toNum,s=tt(e,"index"),o=tt(e,"name"),l=o&&o!==s,f=s+" = "+this.fromC,this.toC!==this.toVar&&(f+=", "+this.toC),this.step!==this.stepVar&&(f+=", "+this.step),h=[s+" <"+this.equals,s+" >"+this.equals],c=h[0],r=h[1],n=this.stepNum?pt(this.stepNum[0])>0?c+" "+this.toVar:r+" "+this.toVar:a?(u=[pt(this.fromNum[0]),pt(this.toNum[0])],i=u[0],d=u[1],u,d>=i?c+" "+d:r+" "+d):(t=this.stepVar?this.stepVar+" > 0":this.fromVar+" <= "+this.toVar,t+" ? "+c+" "+this.toVar+" : "+r+" "+this.toVar),p=this.stepVar?s+" += "+this.stepVar:a?l?d>=i?"++"+s:"--"+s:d>=i?s+"++":s+"--":l?t+" ? ++"+s+" : --"+s:t+" ? 
"+s+"++ : "+s+"--",l&&(f=o+" = "+f),l&&(p=o+" = "+p),[this.makeCode(f+"; "+n+"; "+p)]):this.compileArray(e)},t.prototype.compileArray=function(e){var t,n,i,r,s,o,a,c,l,h,u,p,d;return this.fromNum&&this.toNum&&20>=Math.abs(this.fromNum-this.toNum)?(l=function(){p=[];for(var e=h=+this.fromNum,t=+this.toNum;t>=h?t>=e:e>=t;t>=h?e++:e--)p.push(e);return p}.apply(this),this.exclusive&&l.pop(),[this.makeCode("["+l.join(", ")+"]")]):(o=this.tab+q,s=e.scope.freeVariable("i",{single:!0}),u=e.scope.freeVariable("results"),c="\n"+o+u+" = [];",this.fromNum&&this.toNum?(e.index=s,n=st(this.compileNode(e))):(d=s+" = "+this.fromC+(this.toC!==this.toVar?", "+this.toC:""),i=this.fromVar+" <= "+this.toVar,n="var "+d+"; "+i+" ? "+s+" <"+this.equals+" "+this.toVar+" : "+s+" >"+this.equals+" "+this.toVar+"; "+i+" ? "+s+"++ : "+s+"--"),a="{ "+u+".push("+s+"); }\n"+o+"return "+u+";\n"+e.indent,r=function(e){return null!=e?e.contains(at):void 0},(r(this.from)||r(this.to))&&(t=", arguments"),[this.makeCode("(function() {"+c+"\n"+o+"for ("+n+")"+a+"}).apply(this"+(null!=t?t:"")+")")])},t}(r),e.Slice=U=function(e){function t(e){this.range=e,t.__super__.constructor.call(this)}return kt(t,e),t.prototype.children=["range"],t.prototype.compileNode=function(e){var t,n,i,r,s,o,a;return s=this.range,o=s.to,i=s.from,r=i&&i.compileToFragments(e,N)||[this.makeCode("0")],o&&(t=o.compileToFragments(e,N),n=st(t),(this.range.exclusive||-1!==+n)&&(a=", "+(this.range.exclusive?n:B.test(n)?""+(+n+1):(t=o.compileToFragments(e,T),"+"+st(t)+" + 1 || 9e9")))),[this.makeCode(".slice("+st(r)+(a||"")+")")]},t}(r),e.Obj=A=function(e){function n(e,t){this.generated=null!=t?t:!1,this.objects=this.properties=e||[]}return kt(n,e),n.prototype.children=["properties"],n.prototype.compileNode=function(e){var n,r,s,o,a,c,l,u,p,d,f,m,g,v,b,y,k,w,T,C,F;if(T=this.properties,this.generated)for(l=0,g=T.length;g>l;l++)y=T[l],y instanceof z&&y.error("cannot have an implicit value in an implicit object");for(r=p=0,v=T.length;v>p&&(w=T[r],!((w.variable||w).base instanceof O));r=++p);for(s=T.length>r,a=e.indent+=q,m=this.lastNonComment(this.properties),n=[],s&&(k=e.scope.freeVariable("obj"),n.push(this.makeCode("(\n"+a+k+" = "))),n.push(this.makeCode("{"+(0===T.length||0===r?"}":"\n"))),o=f=0,b=T.length;b>f;o=++f)w=T[o],o===r&&(0!==o&&n.push(this.makeCode("\n"+a+"}")),n.push(this.makeCode(",\n"))),u=o===T.length-1||o===r-1?"":w===m||w instanceof h?"\n":",\n",c=w instanceof h?"":a,s&&r>o&&(c+=q),w instanceof i&&("object"!==w.context&&w.operatorToken.error("unexpected "+w.operatorToken.value),w.variable instanceof z&&w.variable.hasProperties()&&w.variable.error("invalid object key")),w instanceof z&&w["this"]&&(w=new i(w.properties[0].name,w,"object")),w instanceof h||(r>o?(w instanceof i||(w=new i(w,w,"object")),(w.variable.base||w.variable).asKey=!0):(w instanceof i?(d=w.variable,F=w.value):(C=w.base.cache(e),d=C[0],F=C[1]),w=new i(new z(new x(k),[new t(d)]),F))),c&&n.push(this.makeCode(c)),n.push.apply(n,w.compileToFragments(e,L)),u&&n.push(this.makeCode(u));return s?n.push(this.makeCode(",\n"+a+k+"\n"+this.tab+")")):0!==T.length&&n.push(this.makeCode("\n"+this.tab+"}")),this.front&&!s?this.wrapInBraces(n):n},n.prototype.assigns=function(e){var t,n,i,r;for(r=this.properties,t=0,n=r.length;n>t;t++)if(i=r[t],i.assigns(e))return!0;return!1},n}(r),e.Arr=n=function(e){function t(e){this.objects=e||[]}return kt(t,e),t.prototype.children=["objects"],t.prototype.compileNode=function(e){var 
t,n,i,r,s,o,a;if(!this.objects.length)return[this.makeCode("[]")];if(e.indent+=q,t=G.compileSplattedArray(e,this.objects),t.length)return t;for(t=[],n=function(){var t,n,i,r;for(i=this.objects,r=[],t=0,n=i.length;n>t;t++)a=i[t],r.push(a.compileToFragments(e,F));return r}.call(this),r=s=0,o=n.length;o>s;r=++s)i=n[r],r&&t.push(this.makeCode(", ")),t.push.apply(t,i);return st(t).indexOf("\n")>=0?(t.unshift(this.makeCode("[\n"+e.indent)),t.push(this.makeCode("\n"+this.tab+"]"))):(t.unshift(this.makeCode("[")),t.push(this.makeCode("]"))),t},t.prototype.assigns=function(e){var t,n,i,r;for(r=this.objects,t=0,n=r.length;n>t;t++)if(i=r[t],i.assigns(e))return!0;return!1},t}(r),e.Class=a=function(e){function n(e,t,n){this.variable=e,this.parent=t,this.body=null!=n?n:new s,this.boundFuncs=[],this.body.classBody=!0}return kt(n,e),n.prototype.children=["variable","parent","body"],n.prototype.determineName=function(){var e,n,i;return this.variable?(n=this.variable.properties,i=n[n.length-1],e=i?i instanceof t&&i.name.value:this.variable.base.value,Tt.call(V,e)>=0&&this.variable.error("class variable name may not be "+e),e&&(e=g.test(e)&&e)):null},n.prototype.setContext=function(e){return this.body.traverseChildren(!1,function(t){return t.classBody?!1:t instanceof x&&"this"===t.value?t.value=e:t instanceof c&&t.bound?t.context=e:void 0})},n.prototype.addBoundFunctions=function(e){var n,i,r,s,o;for(o=this.boundFuncs,i=0,r=o.length;r>i;i++)n=o[i],s=new z(new x("this"),[new t(n)]).compile(e),this.ctor.body.unshift(new x(s+" = "+yt("bind",e)+"("+s+", this)"))},n.prototype.addProperties=function(e,n,r){var s,o,a,l,h,u;return u=e.base.properties.slice(0),l=function(){var e;for(e=[];o=u.shift();)o instanceof i&&(a=o.variable.base,delete o.context,h=o.value,"constructor"===a.value?(this.ctor&&o.error("cannot define more than one constructor in a class"),h.bound&&o.error("cannot define a constructor as a bound function"),h instanceof c?o=this.ctor=h:(this.externalCtor=r.classScope.freeVariable("class"),o=new i(new x(this.externalCtor),h))):o.variable["this"]?h["static"]=!0:(s=a.isComplex()?new w(a):new t(a),o.variable=new z(new x(n),[new t(new x("prototype")),s]),h instanceof c&&h.bound&&(this.boundFuncs.push(a),h.bound=!1))),e.push(o);return e}.call(this),et(l)},n.prototype.walkBody=function(e,t){return this.traverseChildren(!1,function(r){return function(o){var a,c,l,h,u,p,d;if(a=!0,o instanceof n)return!1;if(o instanceof s){for(d=c=o.expressions,l=h=0,u=d.length;u>h;l=++h)p=d[l],p instanceof i&&p.variable.looksStatic(e)?p.value["static"]=!0:p instanceof z&&p.isObject(!0)&&(a=!1,c[l]=r.addProperties(p,e,t));o.expressions=c=rt(c)}return a&&!(o instanceof n)}}(this))},n.prototype.hoistDirectivePrologue=function(){var e,t,n;for(t=0,e=this.body.expressions;(n=e[t])&&n instanceof h||n instanceof z&&n.isString();)++t;return this.directives=e.splice(0,t)},n.prototype.ensureConstructor=function(e){return this.ctor||(this.ctor=new c,this.externalCtor?this.ctor.body.push(new x(this.externalCtor+".apply(this, arguments)")):this.parent&&this.ctor.body.push(new x(e+".__super__.constructor.apply(this, arguments)")),this.ctor.body.makeReturn(),this.body.expressions.unshift(this.ctor)),this.ctor.ctor=this.ctor.name=e,this.ctor.klass=null,this.ctor.noReturn=!0},n.prototype.compileNode=function(e){var t,n,r,a,l,h,u,p,f;return(a=this.body.jumps())&&a.error("Class bodies cannot contain pure statements"),(n=this.body.contains(at))&&n.error("Class bodies shouldn't reference 
arguments"),u=this.determineName()||"_Class",u.reserved&&(u="_"+u),h=new x(u),r=new c([],s.wrap([this.body])),t=[],e.classScope=r.makeScope(e.scope),this.hoistDirectivePrologue(),this.setContext(u),this.walkBody(u,e),this.ensureConstructor(u),this.addBoundFunctions(e),this.body.spaced=!0,this.body.expressions.push(h),this.parent&&(f=new x(e.classScope.freeVariable("superClass",{reserve:!1})),this.body.expressions.unshift(new d(h,f)),r.params.push(new _(f)),t.push(this.parent)),(p=this.body.expressions).unshift.apply(p,this.directives),l=new O(new o(r,t)),this.variable&&(l=new i(this.variable,l)),l.compileToFragments(e)},n}(r),e.Assign=i=function(e){function n(e,t,n,i){var r,s,o;this.variable=e,this.value=t,this.context=n,null==i&&(i={}),this.param=i.param,this.subpattern=i.subpattern,this.operatorToken=i.operatorToken,o=s=this.variable.unwrapAll().value,r=Tt.call(V,o)>=0,r&&"object"!==this.context&&this.variable.error('variable name may not be "'+s+'"')
-}return kt(n,e),n.prototype.children=["variable","value"],n.prototype.isStatement=function(e){return(null!=e?e.level:void 0)===L&&null!=this.context&&Tt.call(this.context,"?")>=0},n.prototype.assigns=function(e){return this["object"===this.context?"value":"variable"].assigns(e)},n.prototype.unfoldSoak=function(e){return bt(e,this,"variable")},n.prototype.compileNode=function(e){var t,n,i,r,s,o,a,l,h,u,p,d,f,m;if(i=this.variable instanceof z){if(this.variable.isArray()||this.variable.isObject())return this.compilePatternMatch(e);if(this.variable.isSplice())return this.compileSplice(e);if("||="===(l=this.context)||"&&="===l||"?="===l)return this.compileConditional(e);if("**="===(h=this.context)||"//="===h||"%%="===h)return this.compileSpecialMath(e)}return this.value instanceof c&&(this.value["static"]?(this.value.klass=this.variable.base,this.value.name=this.variable.properties[0],this.value.variable=this.variable):(null!=(u=this.variable.properties)?u.length:void 0)>=2&&(p=this.variable.properties,o=p.length>=3?Ct.call(p,0,r=p.length-2):(r=0,[]),a=p[r++],s=p[r++],"prototype"===(null!=(d=a.name)?d.value:void 0)&&(this.value.klass=new z(this.variable.base,o),this.value.name=s,this.value.variable=this.variable))),this.context||(m=this.variable.unwrapAll(),m.isAssignable()||this.variable.error('"'+this.variable.compile(e)+'" cannot be assigned'),("function"==typeof m.hasProperties?m.hasProperties():void 0)||(this.param?e.scope.add(m.value,"var"):e.scope.find(m.value))),f=this.value.compileToFragments(e,F),i&&this.variable.base instanceof A&&(this.variable.front=!0),n=this.variable.compileToFragments(e,F),"object"===this.context?n.concat(this.makeCode(": "),f):(t=n.concat(this.makeCode(" "+(this.context||"=")+" "),f),F>=e.level?t:this.wrapInBraces(t))},n.prototype.compilePatternMatch=function(e){var i,r,s,o,a,c,l,h,u,d,f,m,v,b,y,k,T,C,N,S,D,R,A,_,O,j,M,B;if(_=e.level===L,j=this.value,y=this.variable.base.objects,!(k=y.length))return s=j.compileToFragments(e),e.level>=E?this.wrapInBraces(s):s;if(b=y[0],1===k&&b instanceof p&&b.error("Destructuring assignment has no target"),u=this.variable.isObject(),_&&1===k&&!(b instanceof G))return o=null,b instanceof n&&"object"===b.context?(C=b,N=C.variable,h=N.base,b=C.value,b instanceof n&&(o=b.value,b=b.variable)):(b instanceof n&&(o=b.value,b=b.variable),h=u?b["this"]?b.properties[0].name:b:new x(0)),i=g.test(h.unwrap().value),j=new z(j),j.properties.push(new(i?t:w)(h)),S=b.unwrap().value,Tt.call($,S)>=0&&b.error("assignment to a reserved word: "+b.compile(e)),o&&(j=new I("?",j,o)),new n(b,j,null,{param:this.param}).compileToFragments(e,L);for(M=j.compileToFragments(e,F),B=st(M),r=[],a=!1,(!g.test(B)||this.variable.assigns(B))&&(r.push([this.makeCode((T=e.scope.freeVariable("ref"))+" = ")].concat(Ct.call(M))),M=[this.makeCode(T)],B=T),l=f=0,m=y.length;m>f;l=++f){if(b=y[l],h=l,!a&&b instanceof G)v=b.name.unwrap().value,b=b.unwrap(),O=k+" <= "+B+".length ? 
"+yt("slice",e)+".call("+B+", "+l,(A=k-l-1)?(d=e.scope.freeVariable("i",{single:!0}),O+=", "+d+" = "+B+".length - "+A+") : ("+d+" = "+l+", [])"):O+=") : []",O=new x(O),a=d+"++";else{if(!a&&b instanceof p){(A=k-l-1)&&(1===A?a=B+".length - 1":(d=e.scope.freeVariable("i",{single:!0}),O=new x(d+" = "+B+".length - "+A),a=d+"++",r.push(O.compileToFragments(e,F))));continue}(b instanceof G||b instanceof p)&&b.error("multiple splats/expansions are disallowed in an assignment"),o=null,b instanceof n&&"object"===b.context?(D=b,R=D.variable,h=R.base,b=D.value,b instanceof n&&(o=b.value,b=b.variable)):(b instanceof n&&(o=b.value,b=b.variable),h=u?b["this"]?b.properties[0].name:b:new x(a||h)),v=b.unwrap().value,i=g.test(h.unwrap().value),O=new z(new x(B),[new(i?t:w)(h)]),o&&(O=new I("?",O,o))}null!=v&&Tt.call($,v)>=0&&b.error("assignment to a reserved word: "+b.compile(e)),r.push(new n(b,O,null,{param:this.param,subpattern:!0}).compileToFragments(e,F))}return _||this.subpattern||r.push(M),c=this.joinFragmentArrays(r,", "),F>e.level?c:this.wrapInBraces(c)},n.prototype.compileConditional=function(e){var t,i,r,s;return r=this.variable.cacheReference(e),i=r[0],s=r[1],!i.properties.length&&i.base instanceof x&&"this"!==i.base.value&&!e.scope.check(i.base.value)&&this.variable.error('the variable "'+i.base.value+"\" can't be assigned with "+this.context+" because it has not been declared before"),Tt.call(this.context,"?")>=0?(e.isExistentialEquals=!0,new y(new u(i),s,{type:"if"}).addElse(new n(s,this.value,"=")).compileToFragments(e)):(t=new I(this.context.slice(0,-1),i,new n(s,this.value,"=")).compileToFragments(e),F>=e.level?t:this.wrapInBraces(t))},n.prototype.compileSpecialMath=function(e){var t,i,r;return i=this.variable.cacheReference(e),t=i[0],r=i[1],new n(t,new I(this.context.slice(0,-1),r,this.value)).compileToFragments(e)},n.prototype.compileSplice=function(e){var t,n,i,r,s,o,a,c,l,h,u,p;return a=this.variable.properties.pop().range,i=a.from,h=a.to,n=a.exclusive,o=this.variable.compile(e),i?(c=this.cacheToCodeFragments(i.cache(e,E)),r=c[0],s=c[1]):r=s="0",h?i instanceof z&&i.isSimpleNumber()&&h instanceof z&&h.isSimpleNumber()?(h=h.compile(e)-s,n||(h+=1)):(h=h.compile(e,T)+" - "+s,n||(h+=" + 1")):h="9e9",l=this.value.cache(e,F),u=l[0],p=l[1],t=[].concat(this.makeCode("[].splice.apply("+o+", ["+r+", "+h+"].concat("),u,this.makeCode(")), "),p),e.level>L?this.wrapInBraces(t):t},n}(r),e.Code=c=function(e){function t(e,t,n){this.params=e||[],this.body=t||new s,this.bound="boundfunc"===n,this.isGenerator=!!this.body.contains(function(e){var t;return e instanceof I&&("yield"===(t=e.operator)||"yield*"===t)})}return kt(t,e),t.prototype.children=["params","body"],t.prototype.isStatement=function(){return!!this.ctor},t.prototype.jumps=D,t.prototype.makeScope=function(e){return new P(e,this.body,this)},t.prototype.compileNode=function(e){var r,a,c,l,h,u,d,f,m,g,v,b,k,w,C,F,E,N,L,S,D,R,A,O,$,j,M,B,V,P,U,G,H;if(this.bound&&(null!=(A=e.scope.method)?A.bound:void 0)&&(this.context=e.scope.method.context),this.bound&&!this.context)return this.context="_this",H=new t([new _(new x(this.context))],new s([this])),a=new o(H,[new x("this")]),a.updateLocationDataIfMissing(this.locationData),a.compileNode(e);for(e.scope=tt(e,"classScope")||this.makeScope(e.scope),e.scope.shared=tt(e,"sharedScope"),e.indent+=q,delete e.bare,delete e.isExistentialEquals,L=[],l=[],O=this.params,u=0,m=O.length;m>u;u++)N=O[u],N instanceof p||e.scope.parameter(N.asReference(e));for($=this.params,d=0,g=$.length;g>d;d++)if(N=$[d],N.splat||N 
instanceof p){for(j=this.params,f=0,v=j.length;v>f;f++)E=j[f],E instanceof p||!E.name.value||e.scope.add(E.name.value,"var",!0);V=new i(new z(new n(function(){var t,n,i,r;for(i=this.params,r=[],n=0,t=i.length;t>n;n++)E=i[n],r.push(E.asReference(e));return r}.call(this))),new z(new x("arguments")));break}for(M=this.params,F=0,b=M.length;b>F;F++)N=M[F],N.isComplex()?(U=R=N.asReference(e),N.value&&(U=new I("?",R,N.value)),l.push(new i(new z(N.name),U,"=",{param:!0}))):(R=N,N.value&&(C=new x(R.name.value+" == null"),U=new i(new z(N.name),N.value,"="),l.push(new y(C,U)))),V||L.push(R);for(G=this.body.isEmpty(),V&&l.unshift(V),l.length&&(B=this.body.expressions).unshift.apply(B,l),h=S=0,k=L.length;k>S;h=++S)E=L[h],L[h]=E.compileToFragments(e),e.scope.parameter(st(L[h]));for(P=[],this.eachParamName(function(e,t){return Tt.call(P,e)>=0&&t.error("multiple parameters named "+e),P.push(e)}),G||this.noReturn||this.body.makeReturn(),c="function",this.isGenerator&&(c+="*"),this.ctor&&(c+=" "+this.name),c+="(",r=[this.makeCode(c)],h=D=0,w=L.length;w>D;h=++D)E=L[h],h&&r.push(this.makeCode(", ")),r.push.apply(r,E);return r.push(this.makeCode(") {")),this.body.isEmpty()||(r=r.concat(this.makeCode("\n"),this.body.compileWithDeclarations(e),this.makeCode("\n"+this.tab))),r.push(this.makeCode("}")),this.ctor?[this.makeCode(this.tab)].concat(Ct.call(r)):this.front||e.level>=T?this.wrapInBraces(r):r},t.prototype.eachParamName=function(e){var t,n,i,r,s;for(r=this.params,s=[],t=0,n=r.length;n>t;t++)i=r[t],s.push(i.eachName(e));return s},t.prototype.traverseChildren=function(e,n){return e?t.__super__.traverseChildren.call(this,e,n):void 0},t}(r),e.Param=_=function(e){function t(e,t,n){var i,r,s;this.name=e,this.value=t,this.splat=n,r=i=this.name.unwrapAll().value,Tt.call(V,r)>=0&&this.name.error('parameter name "'+i+'" is not allowed'),this.name instanceof A&&this.name.generated&&(s=this.name.objects[0].operatorToken,s.error("unexpected "+s.value))}return kt(t,e),t.prototype.children=["name","value"],t.prototype.compileToFragments=function(e){return this.name.compileToFragments(e,F)},t.prototype.asReference=function(e){var t,n;return this.reference?this.reference:(n=this.name,n["this"]?(t=n.properties[0].name.value,t.reserved&&(t="_"+t),n=new x(e.scope.freeVariable(t))):n.isComplex()&&(n=new x(e.scope.freeVariable("arg"))),n=new z(n),this.splat&&(n=new G(n)),n.updateLocationDataIfMissing(this.locationData),this.reference=n)},t.prototype.isComplex=function(){return this.name.isComplex()},t.prototype.eachName=function(e,t){var n,r,s,o,a,c;if(null==t&&(t=this.name),n=function(t){return e("@"+t.properties[0].name.value,t)},t instanceof x)return e(t.value,t);if(t instanceof z)return n(t);for(c=t.objects,r=0,s=c.length;s>r;r++)a=c[r],a instanceof i&&null==a.context&&(a=a.variable),a instanceof i?this.eachName(e,a.value.unwrap()):a instanceof G?(o=a.name.unwrap(),e(o.value,o)):a instanceof z?a.isArray()||a.isObject()?this.eachName(e,a.base):a["this"]?n(a):e(a.base.value,a.base):a instanceof p||a.error("illegal parameter "+a.compile())},t}(r),e.Splat=G=function(e){function t(e){this.name=e.compile?e:new x(e)}return kt(t,e),t.prototype.children=["name"],t.prototype.isAssignable=Q,t.prototype.assigns=function(e){return this.name.assigns(e)},t.prototype.compileToFragments=function(e){return this.name.compileToFragments(e)},t.prototype.unwrap=function(){return this.name},t.compileSplattedArray=function(e,n,i){var r,s,o,a,c,l,h,u,p,d,f;for(h=-1;(f=n[++h])&&!(f instanceof t););if(h>=n.length)return[];if(1===n.length)return 
f=n[0],c=f.compileToFragments(e,F),i?c:[].concat(f.makeCode(yt("slice",e)+".call("),c,f.makeCode(")"));for(r=n.slice(h),l=u=0,d=r.length;d>u;l=++u)f=r[l],o=f.compileToFragments(e,F),r[l]=f instanceof t?[].concat(f.makeCode(yt("slice",e)+".call("),o,f.makeCode(")")):[].concat(f.makeCode("["),o,f.makeCode("]"));return 0===h?(f=n[0],a=f.joinFragmentArrays(r.slice(1),", "),r[0].concat(f.makeCode(".concat("),a,f.makeCode(")"))):(s=function(){var t,i,r,s;for(r=n.slice(0,h),s=[],t=0,i=r.length;i>t;t++)f=r[t],s.push(f.compileToFragments(e,F));return s}(),s=n[0].joinFragmentArrays(s,", "),a=n[h].joinFragmentArrays(r,", "),p=n[n.length-1],[].concat(n[0].makeCode("["),s,n[h].makeCode("].concat("),a,p.makeCode(")")))},t}(r),e.Expansion=p=function(e){function t(){return t.__super__.constructor.apply(this,arguments)}return kt(t,e),t.prototype.isComplex=D,t.prototype.compileNode=function(){return this.error("Expansion must be used inside a destructuring assignment or parameter list")},t.prototype.asReference=function(){return this},t.prototype.eachName=function(){},t}(r),e.While=J=function(e){function t(e,t){this.condition=(null!=t?t.invert:void 0)?e.invert():e,this.guard=null!=t?t.guard:void 0}return kt(t,e),t.prototype.children=["condition","guard","body"],t.prototype.isStatement=Q,t.prototype.makeReturn=function(e){return e?t.__super__.makeReturn.apply(this,arguments):(this.returns=!this.jumps({loop:!0}),this)},t.prototype.addBody=function(e){return this.body=e,this},t.prototype.jumps=function(){var e,t,n,i,r;if(e=this.body.expressions,!e.length)return!1;for(t=0,i=e.length;i>t;t++)if(r=e[t],n=r.jumps({loop:!0}))return n;return!1},t.prototype.compileNode=function(e){var t,n,i,r;return e.indent+=q,r="",n=this.body,n.isEmpty()?n=this.makeCode(""):(this.returns&&(n.makeReturn(i=e.scope.freeVariable("results")),r=""+this.tab+i+" = [];\n"),this.guard&&(n.expressions.length>1?n.expressions.unshift(new y(new O(this.guard).invert(),new x("continue"))):this.guard&&(n=s.wrap([new y(this.guard,n)]))),n=[].concat(this.makeCode("\n"),n.compileToFragments(e,L),this.makeCode("\n"+this.tab))),t=[].concat(this.makeCode(r+this.tab+"while ("),this.condition.compileToFragments(e,N),this.makeCode(") {"),n,this.makeCode("}")),this.returns&&t.push(this.makeCode("\n"+this.tab+"return "+i+";")),t},t}(r),e.Op=I=function(e){function n(e,t,n,i){if("in"===e)return new k(t,n);if("do"===e)return this.generateDo(t);if("new"===e){if(t instanceof o&&!t["do"]&&!t.isNew)return t.newInstance();(t instanceof c&&t.bound||t["do"])&&(t=new O(t))}return this.operator=r[e]||e,this.first=t,this.second=n,this.flip=!!i,this}var r,s;return kt(n,e),r={"==":"===","!=":"!==",of:"in",yieldfrom:"yield*"},s={"!==":"===","===":"!=="},n.prototype.children=["first","second"],n.prototype.isSimpleNumber=D,n.prototype.isYield=function(){var e;return"yield"===(e=this.operator)||"yield*"===e},n.prototype.isYieldReturn=function(){return this.isYield()&&this.first instanceof M},n.prototype.isUnary=function(){return!this.second},n.prototype.isComplex=function(){var e;return!(this.isUnary()&&("+"===(e=this.operator)||"-"===e)&&this.first instanceof z&&this.first.isSimpleNumber())},n.prototype.isChainable=function(){var e;return"<"===(e=this.operator)||">"===e||">="===e||"<="===e||"==="===e||"!=="===e},n.prototype.invert=function(){var e,t,i,r,o;if(this.isChainable()&&this.first.isChainable()){for(e=!0,t=this;t&&t.operator;)e&&(e=t.operator in s),t=t.first;if(!e)return new 
O(this).invert();for(t=this;t&&t.operator;)t.invert=!t.invert,t.operator=s[t.operator],t=t.first;return this}return(r=s[this.operator])?(this.operator=r,this.first.unwrap()instanceof n&&this.first.invert(),this):this.second?new O(this).invert():"!"===this.operator&&(i=this.first.unwrap())instanceof n&&("!"===(o=i.operator)||"in"===o||"instanceof"===o)?i:new n("!",this)},n.prototype.unfoldSoak=function(e){var t;return("++"===(t=this.operator)||"--"===t||"delete"===t)&&bt(e,this,"first")},n.prototype.generateDo=function(e){var t,n,r,s,a,l,h,u;for(l=[],n=e instanceof i&&(h=e.value.unwrap())instanceof c?h:e,u=n.params||[],r=0,s=u.length;s>r;r++)a=u[r],a.value?(l.push(a.value),delete a.value):l.push(a);return t=new o(e,l),t["do"]=!0,t},n.prototype.compileNode=function(e){var t,n,i,r,s,o;if(n=this.isChainable()&&this.first.isChainable(),n||(this.first.front=this.front),"delete"===this.operator&&e.scope.check(this.first.unwrapAll().value)&&this.error("delete operand may not be argument or var"),("--"===(r=this.operator)||"++"===r)&&(s=this.first.unwrapAll().value,Tt.call(V,s)>=0)&&this.error('cannot increment/decrement "'+this.first.unwrapAll().value+'"'),this.isYield())return this.compileYield(e);if(this.isUnary())return this.compileUnary(e);if(n)return this.compileChain(e);switch(this.operator){case"?":return this.compileExistence(e);case"**":return this.compilePower(e);case"//":return this.compileFloorDivision(e);case"%%":return this.compileModulo(e);default:return i=this.first.compileToFragments(e,E),o=this.second.compileToFragments(e,E),t=[].concat(i,this.makeCode(" "+this.operator+" "),o),E>=e.level?t:this.wrapInBraces(t)}},n.prototype.compileChain=function(e){var t,n,i,r;return i=this.first.second.cache(e),this.first.second=i[0],r=i[1],n=this.first.compileToFragments(e,E),t=n.concat(this.makeCode(" "+(this.invert?"&&":"||")+" "),r.compileToFragments(e),this.makeCode(" "+this.operator+" "),this.second.compileToFragments(e,E)),this.wrapInBraces(t)},n.prototype.compileExistence=function(e){var t,n;return this.first.isComplex()?(n=new x(e.scope.freeVariable("ref")),t=new O(new i(n,this.first))):(t=this.first,n=t),new y(new u(t),n,{type:"if"}).addElse(this.second).compileToFragments(e)},n.prototype.compileUnary=function(e){var t,i,r;return i=[],t=this.operator,i.push([this.makeCode(t)]),"!"===t&&this.first instanceof u?(this.first.negated=!this.first.negated,this.first.compileToFragments(e)):e.level>=T?new O(this).compileToFragments(e):(r="+"===t||"-"===t,("new"===t||"typeof"===t||"delete"===t||r&&this.first instanceof n&&this.first.operator===t)&&i.push([this.makeCode(" ")]),(r&&this.first instanceof n||"new"===t&&this.first.isStatement(e))&&(this.first=new O(this.first)),i.push(this.first.compileToFragments(e,E)),this.flip&&i.reverse(),this.joinFragmentArrays(i,""))},n.prototype.compileYield=function(e){var t,n;return n=[],t=this.operator,null==e.scope.parent&&this.error("yield statements must occur within a function generator."),Tt.call(Object.keys(this.first),"expression")>=0&&!(this.first instanceof W)?this.isYieldReturn()?n.push(this.first.compileToFragments(e,L)):null!=this.first.expression&&n.push(this.first.expression.compileToFragments(e,E)):(n.push([this.makeCode("("+t+" ")]),n.push(this.first.compileToFragments(e,E)),n.push([this.makeCode(")")])),this.joinFragmentArrays(n,"")},n.prototype.compilePower=function(e){var n;return n=new z(new x("Math"),[new t(new x("pow"))]),new o(n,[this.first,this.second]).compileToFragments(e)},n.prototype.compileFloorDivision=function(e){var 
i,r;return r=new z(new x("Math"),[new t(new x("floor"))]),i=new n("/",this.first,this.second),new o(r,[i]).compileToFragments(e)},n.prototype.compileModulo=function(e){var t;return t=new z(new x(yt("modulo",e))),new o(t,[this.first,this.second]).compileToFragments(e)},n.prototype.toString=function(e){return n.__super__.toString.call(this,e,this.constructor.name+" "+this.operator)},n}(r),e.In=k=function(e){function t(e,t){this.object=e,this.array=t}return kt(t,e),t.prototype.children=["object","array"],t.prototype.invert=S,t.prototype.compileNode=function(e){var t,n,i,r,s;if(this.array instanceof z&&this.array.isArray()&&this.array.base.objects.length){for(s=this.array.base.objects,n=0,i=s.length;i>n;n++)if(r=s[n],r instanceof G){t=!0;break}if(!t)return this.compileOrTest(e)}return this.compileLoopTest(e)},t.prototype.compileOrTest=function(e){var t,n,i,r,s,o,a,c,l,h,u,p;for(c=this.object.cache(e,E),u=c[0],a=c[1],l=this.negated?[" !== "," && "]:[" === "," || "],t=l[0],n=l[1],p=[],h=this.array.base.objects,i=s=0,o=h.length;o>s;i=++s)r=h[i],i&&p.push(this.makeCode(n)),p=p.concat(i?a:u,this.makeCode(t),r.compileToFragments(e,T));return E>e.level?p:this.wrapInBraces(p)},t.prototype.compileLoopTest=function(e){var t,n,i,r;return i=this.object.cache(e,F),r=i[0],n=i[1],t=[].concat(this.makeCode(yt("indexOf",e)+".call("),this.array.compileToFragments(e,F),this.makeCode(", "),n,this.makeCode(") "+(this.negated?"< 0":">= 0"))),st(r)===st(n)?t:(t=r.concat(this.makeCode(", "),t),F>e.level?t:this.wrapInBraces(t))},t.prototype.toString=function(e){return t.__super__.toString.call(this,e,this.constructor.name+(this.negated?"!":""))},t}(r),e.Try=Y=function(e){function t(e,t,n,i){this.attempt=e,this.errorVariable=t,this.recovery=n,this.ensure=i}return kt(t,e),t.prototype.children=["attempt","recovery","ensure"],t.prototype.isStatement=Q,t.prototype.jumps=function(e){var t;return this.attempt.jumps(e)||(null!=(t=this.recovery)?t.jumps(e):void 0)},t.prototype.makeReturn=function(e){return this.attempt&&(this.attempt=this.attempt.makeReturn(e)),this.recovery&&(this.recovery=this.recovery.makeReturn(e)),this},t.prototype.compileNode=function(e){var t,n,r,s,o;return e.indent+=q,o=this.attempt.compileToFragments(e,L),t=this.recovery?(r=e.scope.freeVariable("error"),s=new x(r),this.errorVariable?this.recovery.unshift(new i(this.errorVariable,s)):void 0,[].concat(this.makeCode(" catch ("),s.compileToFragments(e),this.makeCode(") {\n"),this.recovery.compileToFragments(e,L),this.makeCode("\n"+this.tab+"}"))):this.ensure||this.recovery?[]:[this.makeCode(" catch ("+r+") {}")],n=this.ensure?[].concat(this.makeCode(" finally {\n"),this.ensure.compileToFragments(e,L),this.makeCode("\n"+this.tab+"}")):[],[].concat(this.makeCode(this.tab+"try {\n"),o,this.makeCode("\n"+this.tab+"}"),t,n)},t}(r),e.Throw=W=function(e){function t(e){this.expression=e}return kt(t,e),t.prototype.children=["expression"],t.prototype.isStatement=Q,t.prototype.jumps=D,t.prototype.makeReturn=X,t.prototype.compileNode=function(e){return[].concat(this.makeCode(this.tab+"throw "),this.expression.compileToFragments(e),this.makeCode(";"))},t}(r),e.Existence=u=function(e){function t(e){this.expression=e}return kt(t,e),t.prototype.children=["expression"],t.prototype.invert=S,t.prototype.compileNode=function(e){var t,n,i,r;return this.expression.front=this.front,i=this.expression.compile(e,E),g.test(i)&&!e.scope.check(i)?(r=this.negated?["===","||"]:["!==","&&"],t=r[0],n=r[1],i="typeof "+i+" "+t+' "undefined" '+n+" "+i+" "+t+" null"):i=i+" 
"+(this.negated?"==":"!=")+" null",[this.makeCode(C>=e.level?i:"("+i+")")]},t}(r),e.Parens=O=function(e){function t(e){this.body=e}return kt(t,e),t.prototype.children=["body"],t.prototype.unwrap=function(){return this.body},t.prototype.isComplex=function(){return this.body.isComplex()},t.prototype.compileNode=function(e){var t,n,i;return n=this.body.unwrap(),n instanceof z&&n.isAtomic()?(n.front=this.front,n.compileToFragments(e)):(i=n.compileToFragments(e,N),t=E>e.level&&(n instanceof I||n instanceof o||n instanceof f&&n.returns),t?i:this.wrapInBraces(i))},t}(r),e.For=f=function(e){function t(e,t){var n;this.source=t.source,this.guard=t.guard,this.step=t.step,this.name=t.name,this.index=t.index,this.body=s.wrap([e]),this.own=!!t.own,this.object=!!t.object,this.object&&(n=[this.index,this.name],this.name=n[0],this.index=n[1]),this.index instanceof z&&this.index.error("index cannot be a pattern matching expression"),this.range=this.source instanceof z&&this.source.base instanceof j&&!this.source.properties.length,this.pattern=this.name instanceof z,this.range&&this.index&&this.index.error("indexes do not apply to range loops"),this.range&&this.pattern&&this.name.error("cannot pattern match over range loops"),this.own&&!this.object&&this.name.error("cannot use own with for-in"),this.returns=!1}return kt(t,e),t.prototype.children=["body","source","guard","step"],t.prototype.compileNode=function(e){var t,n,r,o,a,c,l,h,u,p,d,f,m,v,b,k,w,T,C,E,N,S,D,A,I,_,$,j,B,V,P,U,G,H;return t=s.wrap([this.body]),D=t.expressions,T=D[D.length-1],(null!=T?T.jumps():void 0)instanceof M&&(this.returns=!1),B=this.range?this.source.base:this.source,j=e.scope,this.pattern||(E=this.name&&this.name.compile(e,F)),v=this.index&&this.index.compile(e,F),E&&!this.pattern&&j.find(E),v&&j.find(v),this.returns&&($=j.freeVariable("results")),b=this.object&&v||j.freeVariable("i",{single:!0}),k=this.range&&E||v||b,w=k!==b?k+" = ":"",this.step&&!this.range&&(A=this.cacheToCodeFragments(this.step.cache(e,F,ot)),V=A[0],U=A[1],P=U.match(R)),this.pattern&&(E=b),H="",d="",l="",f=this.tab+q,this.range?p=B.compileToFragments(ht(e,{index:b,name:E,step:this.step,isComplex:ot})):(G=this.source.compile(e,F),!E&&!this.own||g.test(G)||(l+=""+this.tab+(S=j.freeVariable("ref"))+" = "+G+";\n",G=S),E&&!this.pattern&&(N=E+" = "+G+"["+k+"]"),this.object||(V!==U&&(l+=""+this.tab+V+";\n"),this.step&&P&&(u=0>pt(P[0]))||(C=j.freeVariable("len")),a=""+w+b+" = 0, "+C+" = "+G+".length",c=""+w+b+" = "+G+".length - 1",r=b+" < "+C,o=b+" >= 0",this.step?(P?u&&(r=o,a=c):(r=U+" > 0 ? "+r+" : "+o,a="("+U+" > 0 ? 
("+a+") : "+c+")"),m=b+" += "+U):m=""+(k!==b?"++"+b:b+"++"),p=[this.makeCode(a+"; "+r+"; "+w+m)])),this.returns&&(I=""+this.tab+$+" = [];\n",_="\n"+this.tab+"return "+$+";",t.makeReturn($)),this.guard&&(t.expressions.length>1?t.expressions.unshift(new y(new O(this.guard).invert(),new x("continue"))):this.guard&&(t=s.wrap([new y(this.guard,t)]))),this.pattern&&t.expressions.unshift(new i(this.name,new x(G+"["+k+"]"))),h=[].concat(this.makeCode(l),this.pluckDirectCall(e,t)),N&&(H="\n"+f+N+";"),this.object&&(p=[this.makeCode(k+" in "+G)],this.own&&(d="\n"+f+"if (!"+yt("hasProp",e)+".call("+G+", "+k+")) continue;")),n=t.compileToFragments(ht(e,{indent:f}),L),n&&n.length>0&&(n=[].concat(this.makeCode("\n"),n,this.makeCode("\n"))),[].concat(h,this.makeCode(""+(I||"")+this.tab+"for ("),p,this.makeCode(") {"+d+H),n,this.makeCode(this.tab+"}"+(_||"")))},t.prototype.pluckDirectCall=function(e,t){var n,r,s,a,l,h,u,p,d,f,m,g,v,b,y,k;for(r=[],d=t.expressions,l=h=0,u=d.length;u>h;l=++h)s=d[l],s=s.unwrapAll(),s instanceof o&&(k=null!=(f=s.variable)?f.unwrapAll():void 0,(k instanceof c||k instanceof z&&(null!=(m=k.base)?m.unwrapAll():void 0)instanceof c&&1===k.properties.length&&("call"===(g=null!=(v=k.properties[0].name)?v.value:void 0)||"apply"===g))&&(a=(null!=(b=k.base)?b.unwrapAll():void 0)||k,p=new x(e.scope.freeVariable("fn")),n=new z(p),k.base&&(y=[n,k],k.base=y[0],n=y[1]),t.expressions[l]=new o(n,s.args),r=r.concat(this.makeCode(this.tab),new i(p,a).compileToFragments(e,L),this.makeCode(";\n"))));return r},t}(J),e.Switch=H=function(e){function t(e,t,n){this.subject=e,this.cases=t,this.otherwise=n}return kt(t,e),t.prototype.children=["subject","cases","otherwise"],t.prototype.isStatement=Q,t.prototype.jumps=function(e){var t,n,i,r,s,o,a,c;for(null==e&&(e={block:!0}),o=this.cases,i=0,s=o.length;s>i;i++)if(a=o[i],n=a[0],t=a[1],r=t.jumps(e))return r;return null!=(c=this.otherwise)?c.jumps(e):void 0},t.prototype.makeReturn=function(e){var t,n,i,r,o;for(r=this.cases,t=0,n=r.length;n>t;t++)i=r[t],i[1].makeReturn(e);return e&&(this.otherwise||(this.otherwise=new s([new x("void 0")]))),null!=(o=this.otherwise)&&o.makeReturn(e),this},t.prototype.compileNode=function(e){var t,n,i,r,s,o,a,c,l,h,u,p,d,f,m,g;for(c=e.indent+q,l=e.indent=c+q,o=[].concat(this.makeCode(this.tab+"switch ("),this.subject?this.subject.compileToFragments(e,N):this.makeCode("false"),this.makeCode(") {\n")),f=this.cases,a=h=0,p=f.length;p>h;a=++h){for(m=f[a],r=m[0],t=m[1],g=rt([r]),u=0,d=g.length;d>u;u++)i=g[u],this.subject||(i=i.invert()),o=o.concat(this.makeCode(c+"case "),i.compileToFragments(e,N),this.makeCode(":\n"));if((n=t.compileToFragments(e,L)).length>0&&(o=o.concat(n,this.makeCode("\n"))),a===this.cases.length-1&&!this.otherwise)break;s=this.lastNonComment(t.expressions),s instanceof M||s instanceof x&&s.jumps()&&"debugger"!==s.value||o.push(i.makeCode(l+"break;\n"))}return this.otherwise&&this.otherwise.expressions.length&&o.push.apply(o,[this.makeCode(c+"default:\n")].concat(Ct.call(this.otherwise.compileToFragments(e,L)),[this.makeCode("\n")])),o.push(this.makeCode(this.tab+"}")),o},t}(r),e.If=y=function(e){function t(e,t,n){this.body=t,null==n&&(n={}),this.condition="unless"===n.type?e.invert():e,this.elseBody=null,this.isChain=!1,this.soak=n.soak}return kt(t,e),t.prototype.children=["condition","body","elseBody"],t.prototype.bodyNode=function(){var e;return null!=(e=this.body)?e.unwrap():void 0},t.prototype.elseBodyNode=function(){var e;return null!=(e=this.elseBody)?e.unwrap():void 
0},t.prototype.addElse=function(e){return this.isChain?this.elseBodyNode().addElse(e):(this.isChain=e instanceof t,this.elseBody=this.ensureBlock(e),this.elseBody.updateLocationDataIfMissing(e.locationData)),this},t.prototype.isStatement=function(e){var t;return(null!=e?e.level:void 0)===L||this.bodyNode().isStatement(e)||(null!=(t=this.elseBodyNode())?t.isStatement(e):void 0)},t.prototype.jumps=function(e){var t;return this.body.jumps(e)||(null!=(t=this.elseBody)?t.jumps(e):void 0)},t.prototype.compileNode=function(e){return this.isStatement(e)?this.compileStatement(e):this.compileExpression(e)},t.prototype.makeReturn=function(e){return e&&(this.elseBody||(this.elseBody=new s([new x("void 0")]))),this.body&&(this.body=new s([this.body.makeReturn(e)])),this.elseBody&&(this.elseBody=new s([this.elseBody.makeReturn(e)])),this},t.prototype.ensureBlock=function(e){return e instanceof s?e:new s([e])},t.prototype.compileStatement=function(e){var n,i,r,s,o,a,c;return r=tt(e,"chainChild"),(o=tt(e,"isExistentialEquals"))?new t(this.condition.invert(),this.elseBodyNode(),{type:"if"}).compileToFragments(e):(c=e.indent+q,s=this.condition.compileToFragments(e,N),i=this.ensureBlock(this.body).compileToFragments(ht(e,{indent:c})),a=[].concat(this.makeCode("if ("),s,this.makeCode(") {\n"),i,this.makeCode("\n"+this.tab+"}")),r||a.unshift(this.makeCode(this.tab)),this.elseBody?(n=a.concat(this.makeCode(" else ")),this.isChain?(e.chainChild=!0,n=n.concat(this.elseBody.unwrap().compileToFragments(e,L))):n=n.concat(this.makeCode("{\n"),this.elseBody.compileToFragments(ht(e,{indent:c}),L),this.makeCode("\n"+this.tab+"}")),n):a)},t.prototype.compileExpression=function(e){var t,n,i,r;return i=this.condition.compileToFragments(e,C),n=this.bodyNode().compileToFragments(e,F),t=this.elseBodyNode()?this.elseBodyNode().compileToFragments(e,F):[this.makeCode("void 0")],r=i.concat(this.makeCode(" ? 
"),n,this.makeCode(" : "),t),e.level>=C?this.wrapInBraces(r):r},t.prototype.unfoldSoak=function(){return this.soak&&this},t}(r),K={extend:function(e){return"function(child, parent) { for (var key in parent) { if ("+yt("hasProp",e)+".call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }"},bind:function(){return"function(fn, me){ return function(){ return fn.apply(me, arguments); }; }"},indexOf:function(){return"[].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; }"},modulo:function(){return"function(a, b) { return (+a % (b = +b) + b) % b; }"},hasProp:function(){return"{}.hasOwnProperty"},slice:function(){return"[].slice"}},L=1,N=2,F=3,C=4,E=5,T=6,q=" ",g=/^(?!\d)[$\w\x7f-\uffff]+$/,B=/^[+-]?\d+$/,m=/^[+-]?0x[\da-f]+/i,R=/^[+-]?(?:0x[\da-f]+|\d*\.?\d+(?:e[+-]?\d+)?)$/i,b=/^['"]/,v=/^\//,yt=function(e,t){var n,i;return i=t.scope.root,e in i.utilities?i.utilities[e]:(n=i.freeVariable(e),i.assign(n,K[e](t)),i.utilities[e]=n)},ut=function(e,t){return e=e.replace(/\n/g,"$&"+t),e.replace(/\s+$/,"")},pt=function(e){return null==e?0:e.match(m)?parseInt(e,16):parseFloat(e)},at=function(e){return e instanceof x&&"arguments"===e.value&&!e.asKey},ct=function(e){return e instanceof x&&"this"===e.value&&!e.asKey||e instanceof c&&e.bound||e instanceof o&&e.isSuper},ot=function(e){return e.isComplex()||("function"==typeof e.isAssignable?e.isAssignable():void 0)},bt=function(e,t,n){var i;if(i=t[n].unfoldSoak(e))return t[n]=i.body,i.body=new z(t),i}}.call(this),t.exports}(),require["./sourcemap"]=function(){var e={},t={exports:e};return function(){var e,n;e=function(){function e(e){this.line=e,this.columns=[]}return e.prototype.add=function(e,t,n){var i,r;return r=t[0],i=t[1],null==n&&(n={}),this.columns[e]&&n.noReplace?void 0:this.columns[e]={line:this.line,column:e,sourceLine:r,sourceColumn:i}},e.prototype.sourceLocation=function(e){for(var t;!((t=this.columns[e])||0>=e);)e--;return t&&[t.sourceLine,t.sourceColumn]},e}(),n=function(){function t(){this.lines=[]}var n,i,r,s;return t.prototype.add=function(t,n,i){var r,s,o,a;return null==i&&(i={}),o=n[0],s=n[1],a=(r=this.lines)[o]||(r[o]=new e(o)),a.add(s,t,i)},t.prototype.sourceLocation=function(e){var t,n,i;for(n=e[0],t=e[1];!((i=this.lines[n])||0>=n);)n--;return i&&i.sourceLocation(t)},t.prototype.generate=function(e,t){var n,i,r,s,o,a,c,l,h,u,p,d,f,m,g,v;for(null==e&&(e={}),null==t&&(t=null),v=0,s=0,a=0,o=0,d=!1,n="",f=this.lines,u=i=0,c=f.length;c>i;u=++i)if(h=f[u])for(m=h.columns,r=0,l=m.length;l>r;r++)if(p=m[r]){for(;p.line>v;)s=0,d=!1,n+=";",v++;d&&(n+=",",d=!1),n+=this.encodeVlq(p.column-s),s=p.column,n+=this.encodeVlq(0),n+=this.encodeVlq(p.sourceLine-a),a=p.sourceLine,n+=this.encodeVlq(p.sourceColumn-o),o=p.sourceColumn,d=!0}return g={version:3,file:e.generatedFile||"",sourceRoot:e.sourceRoot||"",sources:e.sourceFiles||[""],names:[],mappings:n},e.inline&&(g.sourcesContent=[t]),JSON.stringify(g,null,2)},r=5,i=1<<r,s=i-1,t.prototype.encodeVlq=function(e){var t,n,o,a;for(t="",o=0>e?1:0,a=(Math.abs(e)<<1)+o;a||!t;)n=a&s,a>>=r,a&&(n|=i),t+=this.encodeBase64(n);return t},n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",t.prototype.encodeBase64=function(e){return n[e]||function(){throw Error("Cannot Base64 encode value: "+e)
-}()},t}(),t.exports=n}.call(this),t.exports}(),require["./coffee-script"]=function(){var e={},t={exports:e};return function(){var t,n,i,r,s,o,a,c,l,h,u,p,d,f,m,g,v,b,y={}.hasOwnProperty,k=[].indexOf||function(e){for(var t=0,n=this.length;n>t;t++)if(t in this&&this[t]===e)return t;return-1};if(a=require("fs"),v=require("vm"),f=require("path"),t=require("./lexer").Lexer,d=require("./parser").parser,l=require("./helpers"),n=require("./sourcemap"),e.VERSION="1.10.0",e.FILE_EXTENSIONS=[".coffee",".litcoffee",".coffee.md"],e.helpers=l,b=function(e){return function(t,n){var i,r;null==n&&(n={});try{return e.call(this,t,n)}catch(r){if(i=r,"string"!=typeof t)throw i;throw l.updateSyntaxError(i,t,n.filename)}}},e.compile=r=b(function(e,t){var i,r,s,o,a,c,h,u,f,m,g,v,b,y,k;for(v=l.merge,o=l.extend,t=o({},t),t.sourceMap&&(g=new n),k=p.tokenize(e,t),t.referencedVars=function(){var e,t,n;for(n=[],e=0,t=k.length;t>e;e++)y=k[e],y.variable&&n.push(y[1]);return n}(),c=d.parse(k).compileToFragments(t),s=0,t.header&&(s+=1),t.shiftLine&&(s+=1),r=0,f="",u=0,m=c.length;m>u;u++)a=c[u],t.sourceMap&&(a.locationData&&!/^[;\s]*$/.test(a.code)&&g.add([a.locationData.first_line,a.locationData.first_column],[s,r],{noReplace:!0}),b=l.count(a.code,"\n"),s+=b,b?r=a.code.length-(a.code.lastIndexOf("\n")+1):r+=a.code.length),f+=a.code;return t.header&&(h="Generated by CoffeeScript "+this.VERSION,f="// "+h+"\n"+f),t.sourceMap?(i={js:f},i.sourceMap=g,i.v3SourceMap=g.generate(t,e),i):f}),e.tokens=b(function(e,t){return p.tokenize(e,t)}),e.nodes=b(function(e,t){return"string"==typeof e?d.parse(p.tokenize(e,t)):d.parse(e)}),e.run=function(e,t){var n,i,s,o;return null==t&&(t={}),s=require.main,s.filename=process.argv[1]=t.filename?a.realpathSync(t.filename):".",s.moduleCache&&(s.moduleCache={}),i=t.filename?f.dirname(a.realpathSync(t.filename)):a.realpathSync("."),s.paths=require("module")._nodeModulePaths(i),(!l.isCoffee(s.filename)||require.extensions)&&(n=r(e,t),e=null!=(o=n.js)?o:n),s._compile(e,s.filename)},e.eval=function(e,t){var n,i,s,o,a,c,l,h,u,p,d,m,g,b,k,w,T;if(null==t&&(t={}),e=e.trim()){if(o=null!=(m=v.Script.createContext)?m:v.createContext,c=null!=(g=v.isContext)?g:function(){return t.sandbox instanceof o().constructor},o){if(null!=t.sandbox){if(c(t.sandbox))w=t.sandbox;else{w=o(),b=t.sandbox;for(h in b)y.call(b,h)&&(T=b[h],w[h]=T)}w.global=w.root=w.GLOBAL=w}else w=global;if(w.__filename=t.filename||"eval",w.__dirname=f.dirname(w.__filename),w===global&&!w.module&&!w.require){for(n=require("module"),w.module=i=new n(t.modulename||"eval"),w.require=s=function(e){return n._load(e,i,!0)},i.filename=w.__filename,k=Object.getOwnPropertyNames(require),a=0,u=k.length;u>a;a++)d=k[a],"paths"!==d&&"arguments"!==d&&"caller"!==d&&(s[d]=require[d]);s.paths=i.paths=n._nodeModulePaths(process.cwd()),s.resolve=function(e){return n._resolveFilename(e,i)}}}p={};for(h in t)y.call(t,h)&&(T=t[h],p[h]=T);return p.bare=!0,l=r(e,p),w===global?v.runInThisContext(l):v.runInContext(l,w)}},e.register=function(){return require("./register")},require.extensions)for(m=this.FILE_EXTENSIONS,h=0,u=m.length;u>h;h++)s=m[h],null==(i=require.extensions)[s]&&(i[s]=function(){throw Error("Use CoffeeScript.register() or require the coffee-script/register module to require "+s+" files.")});e._compileFile=function(e,t){var n,i,s,o,c;null==t&&(t=!1),o=a.readFileSync(e,"utf8"),c=65279===o.charCodeAt(0)?o.substring(1):o;try{n=r(c,{filename:e,sourceMap:t,literate:l.isLiterate(e)})}catch(s){throw i=s,l.updateSyntaxError(i,c,e)}return n},p=new 
t,d.lexer={lex:function(){var e,t;return t=d.tokens[this.pos++],t?(e=t[0],this.yytext=t[1],this.yylloc=t[2],d.errorToken=t.origin||t,this.yylineno=this.yylloc.first_line):e="",e},setInput:function(e){return d.tokens=e,this.pos=0},upcomingInput:function(){return""}},d.yy=require("./nodes"),d.yy.parseError=function(e,t){var n,i,r,s,o,a;return o=t.token,s=d.errorToken,a=d.tokens,i=s[0],r=s[1],n=s[2],r=function(){switch(!1){case s!==a[a.length-1]:return"end of input";case"INDENT"!==i&&"OUTDENT"!==i:return"indentation";case"IDENTIFIER"!==i&&"NUMBER"!==i&&"STRING"!==i&&"STRING_START"!==i&&"REGEX"!==i&&"REGEX_START"!==i:return i.replace(/_START$/,"").toLowerCase();default:return l.nameWhitespaceCharacter(r)}}(),l.throwSyntaxError("unexpected "+r,n)},o=function(e,t){var n,i,r,s,o,a,c,l,h,u,p,d;return s=void 0,r="",e.isNative()?r="native":(e.isEval()?(s=e.getScriptNameOrSourceURL(),s||(r=e.getEvalOrigin()+", ")):s=e.getFileName(),s||(s="<anonymous>"),l=e.getLineNumber(),i=e.getColumnNumber(),u=t(s,l,i),r=u?s+":"+u[0]+":"+u[1]:s+":"+l+":"+i),o=e.getFunctionName(),a=e.isConstructor(),c=!(e.isToplevel()||a),c?(h=e.getMethodName(),d=e.getTypeName(),o?(p=n="",d&&o.indexOf(d)&&(p=d+"."),h&&o.indexOf("."+h)!==o.length-h.length-1&&(n=" [as "+h+"]"),""+p+o+n+" ("+r+")"):d+"."+(h||"<anonymous>")+" ("+r+")"):a?"new "+(o||"<anonymous>")+" ("+r+")":o?o+" ("+r+")":r},g={},c=function(t){var n,i;if(g[t])return g[t];if(i=null!=f?f.extname(t):void 0,!(0>k.call(e.FILE_EXTENSIONS,i)))return n=e._compileFile(t,!0),g[t]=n.sourceMap},Error.prepareStackTrace=function(t,n){var i,r,s;return s=function(e,t,n){var i,r;return r=c(e),r&&(i=r.sourceLocation([t-1,n-1])),i?[i[0]+1,i[1]+1]:null},r=function(){var t,r,a;for(a=[],t=0,r=n.length;r>t&&(i=n[t],i.getFunction()!==e.run);t++)a.push(" at "+o(i,s));return a}(),""+t+"\n"+r.join("\n")+"\n"}}.call(this),t.exports}(),require["./browser"]=function(){var exports={},module={exports:exports};return function(){var CoffeeScript,compile,runScripts,indexOf=[].indexOf||function(e){for(var t=0,n=this.length;n>t;t++)if(t in this&&this[t]===e)return t;return-1};CoffeeScript=require("./coffee-script"),CoffeeScript.require=require,compile=CoffeeScript.compile,CoffeeScript.eval=function(code,options){return null==options&&(options={}),null==options.bare&&(options.bare=!0),eval(compile(code,options))},CoffeeScript.run=function(e,t){return null==t&&(t={}),t.bare=!0,t.shiftLine=!0,Function(compile(e,t))()},"undefined"!=typeof window&&null!==window&&("undefined"!=typeof btoa&&null!==btoa&&"undefined"!=typeof JSON&&null!==JSON&&"undefined"!=typeof unescape&&null!==unescape&&"undefined"!=typeof encodeURIComponent&&null!==encodeURIComponent&&(compile=function(e,t){var n,i,r;return null==t&&(t={}),t.sourceMap=!0,t.inline=!0,i=CoffeeScript.compile(e,t),n=i.js,r=i.v3SourceMap,n+"\n//# sourceMappingURL=data:application/json;base64,"+btoa(unescape(encodeURIComponent(r)))+"\n//# sourceURL=coffeescript"}),CoffeeScript.load=function(e,t,n,i){var r;return null==n&&(n={}),null==i&&(i=!1),n.sourceFiles=[e],r=window.ActiveXObject?new window.ActiveXObject("Microsoft.XMLHTTP"):new window.XMLHttpRequest,r.open("GET",e,!0),"overrideMimeType"in r&&r.overrideMimeType("text/plain"),r.onreadystatechange=function(){var s,o;if(4===r.readyState){if(0!==(o=r.status)&&200!==o)throw Error("Could not load "+e);if(s=[r.responseText,n],i||CoffeeScript.run.apply(CoffeeScript,s),t)return t(s)}},r.send(null)},runScripts=function(){var 
e,t,n,i,r,s,o,a,c,l,h;for(h=window.document.getElementsByTagName("script"),t=["text/coffeescript","text/literate-coffeescript"],e=function(){var e,n,i,r;for(r=[],e=0,n=h.length;n>e;e++)c=h[e],i=c.type,indexOf.call(t,i)>=0&&r.push(c);return r}(),s=0,n=function(){var t;return t=e[s],t instanceof Array?(CoffeeScript.run.apply(CoffeeScript,t),s++,n()):void 0},i=function(i,r){var s,o;return s={literate:i.type===t[1]},o=i.src||i.getAttribute("data-src"),o?CoffeeScript.load(o,function(t){return e[r]=t,n()},s,!0):(s.sourceFiles=["embedded"],e[r]=[i.innerHTML,s])},r=o=0,a=e.length;a>o;r=++o)l=e[r],i(l,r);return n()},window.addEventListener?window.addEventListener("DOMContentLoaded",runScripts,!1):window.attachEvent("onload",runScripts))}.call(this),module.exports}(),require["./coffee-script"]}();"function"==typeof define&&define.amd?define(function(){return CoffeeScript}):root.CoffeeScript=CoffeeScript})(this); \ No newline at end of file
diff --git a/share/server/dreyfus.js b/share/server/dreyfus.js
deleted file mode 100644
index 1d8a029d4..000000000
--- a/share/server/dreyfus.js
+++ /dev/null
@@ -1,62 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-var Dreyfus = (function() {
-
- var index_results = []; // holds temporary emitted values during index
-
- function handleIndexError(err, doc) {
- if (err == "fatal_error") {
- throw(["error", "map_runtime_error", "function raised 'fatal_error'"]);
- } else if (err[0] == "fatal") {
- throw(err);
- }
- var message = "function raised exception " + err.toSource();
- if (doc) message += " with doc._id " + doc._id;
- log(message);
- };
-
- return {
- index: function(name, value, options) {
- if (typeof name !== 'string') {
- throw({name: 'TypeError', message: 'name must be a string not ' + typeof name});
- }
- if (name.substring(0, 1) === '_') {
- throw({name: 'ReservedName', message: 'name must not start with an underscore'});
- }
- if (typeof value !== 'string' && typeof value !== 'number' && typeof value !== 'boolean') {
- throw({name: 'TypeError', message: 'value must be a string, a number or boolean not ' + typeof value});
- }
- if (options && typeof options !== 'object') {
- throw({name: 'TypeError', message: 'options must be an object not ' + typeof options});
- }
- index_results.push([name, value, options || {}]);
- },
-
- indexDoc: function(doc) {
- Couch.recursivelySeal(doc);
- var buf = [];
- for (var fun in State.funs) {
- index_results = [];
- try {
- State.funs[fun](doc);
- buf.push(index_results);
- } catch (err) {
- handleIndexError(err, doc);
- buf.push([]);
- }
- }
- print(JSON.stringify(buf));
- }
-
- }
-})();
diff --git a/share/server/filter.js b/share/server/filter.js
deleted file mode 100644
index 84f5cfc09..000000000
--- a/share/server/filter.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-var Filter = (function() {
-
- var view_emit = false;
-
- return {
- emit : function(key, value) {
- view_emit = true;
- },
- filter : function(fun, ddoc, args) {
- var results = [];
- var docs = args[0];
- var req = args[1];
- for (var i=0; i < docs.length; i++) {
- results.push((fun.apply(ddoc, [docs[i], req]) && true) || false);
- };
- respond([true, results]);
- },
- filter_view : function(fun, ddoc, args) {
- // recompile
- var sandbox = create_filter_sandbox();
- var source = fun.toSource();
- fun = evalcx(source, sandbox);
-
- var results = [];
- var docs = args[0];
- for (var i=0; i < docs.length; i++) {
- view_emit = false;
- fun(docs[i]);
- results.push((view_emit && true) || false);
- };
- respond([true, results]);
- }
- }
-})();
diff --git a/share/server/json2.js b/share/server/json2.js
deleted file mode 100644
index a1a3b170c..000000000
--- a/share/server/json2.js
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
- http://www.JSON.org/json2.js
- 2010-03-20
-
- Public Domain.
-
- NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-
- See http://www.JSON.org/js.html
-
-
- This code should be minified before deployment.
- See http://javascript.crockford.com/jsmin.html
-
- USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
- NOT CONTROL.
-
-
- This file creates a global JSON object containing two methods: stringify
- and parse.
-
- JSON.stringify(value, replacer, space)
- value any JavaScript value, usually an object or array.
-
- replacer an optional parameter that determines how object
- values are stringified for objects. It can be a
- function or an array of strings.
-
- space an optional parameter that specifies the indentation
- of nested structures. If it is omitted, the text will
- be packed without extra whitespace. If it is a number,
- it will specify the number of spaces to indent at each
- level. If it is a string (such as '\t' or '&nbsp;'),
- it contains the characters used to indent at each level.
-
- This method produces a JSON text from a JavaScript value.
-
- When an object value is found, if the object contains a toJSON
- method, its toJSON method will be called and the result will be
- stringified. A toJSON method does not serialize: it returns the
- value represented by the name/value pair that should be serialized,
- or undefined if nothing should be serialized. The toJSON method
- will be passed the key associated with the value, and this will be
- bound to the value
-
- For example, this would serialize Dates as ISO strings.
-
- Date.prototype.toJSON = function (key) {
- function f(n) {
- // Format integers to have at least two digits.
- return n < 10 ? '0' + n : n;
- }
-
- return this.getUTCFullYear() + '-' +
- f(this.getUTCMonth() + 1) + '-' +
- f(this.getUTCDate()) + 'T' +
- f(this.getUTCHours()) + ':' +
- f(this.getUTCMinutes()) + ':' +
- f(this.getUTCSeconds()) + 'Z';
- };
-
- You can provide an optional replacer method. It will be passed the
- key and value of each member, with this bound to the containing
- object. The value that is returned from your method will be
- serialized. If your method returns undefined, then the member will
- be excluded from the serialization.
-
- If the replacer parameter is an array of strings, then it will be
- used to select the members to be serialized. It filters the results
- such that only members with keys listed in the replacer array are
- stringified.
-
- Values that do not have JSON representations, such as undefined or
- functions, will not be serialized. Such values in objects will be
- dropped; in arrays they will be replaced with null. You can use
- a replacer function to replace those with JSON values.
- JSON.stringify(undefined) returns undefined.
-
- The optional space parameter produces a stringification of the
- value that is filled with line breaks and indentation to make it
- easier to read.
-
- If the space parameter is a non-empty string, then that string will
- be used for indentation. If the space parameter is a number, then
- the indentation will be that many spaces.
-
- Example:
-
- text = JSON.stringify(['e', {pluribus: 'unum'}]);
- // text is '["e",{"pluribus":"unum"}]'
-
-
- text = JSON.stringify(['e', {pluribus: 'unum'}], null, '\t');
- // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
-
- text = JSON.stringify([new Date()], function (key, value) {
- return this[key] instanceof Date ?
- 'Date(' + this[key] + ')' : value;
- });
- // text is '["Date(---current time---)"]'
-
-
- JSON.parse(text, reviver)
- This method parses a JSON text to produce an object or array.
- It can throw a SyntaxError exception.
-
- The optional reviver parameter is a function that can filter and
- transform the results. It receives each of the keys and values,
- and its return value is used instead of the original value.
- If it returns what it received, then the structure is not modified.
- If it returns undefined then the member is deleted.
-
- Example:
-
- // Parse the text. Values that look like ISO date strings will
- // be converted to Date objects.
-
- myData = JSON.parse(text, function (key, value) {
- var a;
- if (typeof value === 'string') {
- a =
-/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
- if (a) {
- return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4],
- +a[5], +a[6]));
- }
- }
- return value;
- });
-
- myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) {
- var d;
- if (typeof value === 'string' &&
- value.slice(0, 5) === 'Date(' &&
- value.slice(-1) === ')') {
- d = new Date(value.slice(5, -1));
- if (d) {
- return d;
- }
- }
- return value;
- });
-
-
- This is a reference implementation. You are free to copy, modify, or
- redistribute.
-*/
-
-/*jslint evil: true, strict: false */
-
-/*members "", "\b", "\t", "\n", "\f", "\r", "\"", JSON, "\\", apply,
- call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
- getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
- lastIndex, length, parse, prototype, push, replace, slice, stringify,
- test, toJSON, toString, valueOf
-*/
-
-
-// Create a JSON object only if one does not already exist. We create the
-// methods in a closure to avoid creating global variables.
-
-if (!this.JSON) {
- this.JSON = {};
-}
-
-(function () {
-
- function f(n) {
- // Format integers to have at least two digits.
- return n < 10 ? '0' + n : n;
- }
-
- if (typeof Date.prototype.toJSON !== 'function') {
-
- Date.prototype.toJSON = function (key) {
-
- return isFinite(this.valueOf()) ?
- this.getUTCFullYear() + '-' +
- f(this.getUTCMonth() + 1) + '-' +
- f(this.getUTCDate()) + 'T' +
- f(this.getUTCHours()) + ':' +
- f(this.getUTCMinutes()) + ':' +
- f(this.getUTCSeconds()) + 'Z' : null;
- };
-
- String.prototype.toJSON =
- Number.prototype.toJSON =
- Boolean.prototype.toJSON = function (key) {
- return this.valueOf();
- };
- }
-
- var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
- escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
- gap,
- indent,
- meta = { // table of character substitutions
- '\b': '\\b',
- '\t': '\\t',
- '\n': '\\n',
- '\f': '\\f',
- '\r': '\\r',
- '"' : '\\"',
- '\\': '\\\\'
- },
- rep;
-
-
- function quote(string) {
-
-// If the string contains no control characters, no quote characters, and no
-// backslash characters, then we can safely slap some quotes around it.
-// Otherwise we must also replace the offending characters with safe escape
-// sequences.
-
- escapable.lastIndex = 0;
- return escapable.test(string) ?
- '"' + string.replace(escapable, function (a) {
- var c = meta[a];
- return typeof c === 'string' ? c :
- '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
- }) + '"' :
- '"' + string + '"';
- }
-
-
- function str(key, holder) {
-
-// Produce a string from holder[key].
-
- var i, // The loop counter.
- k, // The member key.
- v, // The member value.
- length,
- mind = gap,
- partial,
- value = holder[key];
-
-// If the value has a toJSON method, call it to obtain a replacement value.
-
- if (value && typeof value === 'object' &&
- typeof value.toJSON === 'function') {
- value = value.toJSON(key);
- }
-
-// If we were called with a replacer function, then call the replacer to
-// obtain a replacement value.
-
- if (typeof rep === 'function') {
- value = rep.call(holder, key, value);
- }
-
-// What happens next depends on the value's type.
-
- switch (typeof value) {
- case 'string':
- return quote(value);
-
- case 'number':
-
-// JSON numbers must be finite. Encode non-finite numbers as null.
-
- return isFinite(value) ? String(value) : 'null';
-
- case 'boolean':
- case 'null':
-
-// If the value is a boolean or null, convert it to a string. Note:
-// typeof null does not produce 'null'. The case is included here in
-// the remote chance that this gets fixed someday.
-
- return String(value);
-
-// If the type is 'object', we might be dealing with an object or an array or
-// null.
-
- case 'object':
-
-// Due to a specification blunder in ECMAScript, typeof null is 'object',
-// so watch out for that case.
-
- if (!value) {
- return 'null';
- }
-
-// Make an array to hold the partial results of stringifying this object value.
-
- gap += indent;
- partial = [];
-
-// Is the value an array?
-
- if (Object.prototype.toString.apply(value) === '[object Array]') {
-
-// The value is an array. Stringify every element. Use null as a placeholder
-// for non-JSON values.
-
- length = value.length;
- for (i = 0; i < length; i += 1) {
- partial[i] = str(i, value) || 'null';
- }
-
-// Join all of the elements together, separated with commas, and wrap them in
-// brackets.
-
- v = partial.length === 0 ? '[]' :
- gap ? '[\n' + gap +
- partial.join(',\n' + gap) + '\n' +
- mind + ']' :
- '[' + partial.join(',') + ']';
- gap = mind;
- return v;
- }
-
-// If the replacer is an array, use it to select the members to be stringified.
-
- if (rep && typeof rep === 'object') {
- length = rep.length;
- for (i = 0; i < length; i += 1) {
- k = rep[i];
- if (typeof k === 'string') {
- v = str(k, value);
- if (v) {
- partial.push(quote(k) + (gap ? ': ' : ':') + v);
- }
- }
- }
- } else {
-
-// Otherwise, iterate through all of the keys in the object.
-
- for (k in value) {
- if (Object.hasOwnProperty.call(value, k)) {
- v = str(k, value);
- if (v) {
- partial.push(quote(k) + (gap ? ': ' : ':') + v);
- }
- }
- }
- }
-
-// Join all of the member texts together, separated with commas,
-// and wrap them in braces.
-
- v = partial.length === 0 ? '{}' :
- gap ? '{\n' + gap + partial.join(',\n' + gap) + '\n' +
- mind + '}' : '{' + partial.join(',') + '}';
- gap = mind;
- return v;
- }
- }
-
-// If the JSON object does not yet have a stringify method, give it one.
-
- if (typeof JSON.stringify !== 'function') {
- JSON.stringify = function (value, replacer, space) {
-
-// The stringify method takes a value and an optional replacer, and an optional
-// space parameter, and returns a JSON text. The replacer can be a function
-// that can replace values, or an array of strings that will select the keys.
-// A default replacer method can be provided. Use of the space parameter can
-// produce text that is more easily readable.
-
- var i;
- gap = '';
- indent = '';
-
-// If the space parameter is a number, make an indent string containing that
-// many spaces.
-
- if (typeof space === 'number') {
- for (i = 0; i < space; i += 1) {
- indent += ' ';
- }
-
-// If the space parameter is a string, it will be used as the indent string.
-
- } else if (typeof space === 'string') {
- indent = space;
- }
-
-// If there is a replacer, it must be a function or an array.
-// Otherwise, throw an error.
-
- rep = replacer;
- if (replacer && typeof replacer !== 'function' &&
- (typeof replacer !== 'object' ||
- typeof replacer.length !== 'number')) {
- throw new Error('JSON.stringify');
- }
-
-// Make a fake root object containing our value under the key of ''.
-// Return the result of stringifying the value.
-
- return str('', {'': value});
- };
- }
-
-
-// If the JSON object does not yet have a parse method, give it one.
-
- if (typeof JSON.parse !== 'function') {
- JSON.parse = function (text, reviver) {
-
-// The parse method takes a text and an optional reviver function, and returns
-// a JavaScript value if the text is a valid JSON text.
-
- var j;
-
- function walk(holder, key) {
-
-// The walk method is used to recursively walk the resulting structure so
-// that modifications can be made.
-
- var k, v, value = holder[key];
- if (value && typeof value === 'object') {
- for (k in value) {
- if (Object.hasOwnProperty.call(value, k)) {
- v = walk(value, k);
- if (v !== undefined) {
- value[k] = v;
- } else {
- delete value[k];
- }
- }
- }
- }
- return reviver.call(holder, key, value);
- }
-
-
-// Parsing happens in four stages. In the first stage, we replace certain
-// Unicode characters with escape sequences. JavaScript handles many characters
-// incorrectly, either silently deleting them, or treating them as line endings.
-
- text = String(text);
- cx.lastIndex = 0;
- if (cx.test(text)) {
- text = text.replace(cx, function (a) {
- return '\\u' +
- ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
- });
- }
-
-// In the second stage, we run the text against regular expressions that look
-// for non-JSON patterns. We are especially concerned with '()' and 'new'
-// because they can cause invocation, and '=' because it can cause mutation.
-// But just to be safe, we want to reject all unexpected forms.
-
-// We split the second stage into 4 regexp operations in order to work around
-// crippling inefficiencies in IE's and Safari's regexp engines. First we
-// replace the JSON backslash pairs with '@' (a non-JSON character). Second, we
-// replace all simple value tokens with ']' characters. Third, we delete all
-// open brackets that follow a colon or comma or that begin the text. Finally,
-// we look to see that the remaining characters are only whitespace or ']' or
-// ',' or ':' or '{' or '}'. If that is so, then the text is safe for eval.
-
- if (/^[\],:{}\s]*$/.
-test(text.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, '@').
-replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, ']').
-replace(/(?:^|:|,)(?:\s*\[)+/g, ''))) {
-
-// In the third stage we use the eval function to compile the text into a
-// JavaScript structure. The '{' operator is subject to a syntactic ambiguity
-// in JavaScript: it can begin a block or an object literal. We wrap the text
-// in parens to eliminate the ambiguity.
-
- j = eval('(' + text + ')');
-
-// In the optional fourth stage, we recursively walk the new structure, passing
-// each name/value pair to a reviver function for possible transformation.
-
- return typeof reviver === 'function' ?
- walk({'': j}, '') : j;
- }
-
-// If the text is not JSON parseable, then a SyntaxError is thrown.
-
- throw new SyntaxError('JSON.parse');
- };
- }
-}());
diff --git a/share/server/loop.js b/share/server/loop.js
deleted file mode 100644
index 91dd1d6b0..000000000
--- a/share/server/loop.js
+++ /dev/null
@@ -1,167 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-function create_sandbox() {
- try {
- // if possible, use evalcx (not always available)
- var sandbox = evalcx('');
- sandbox.emit = Views.emit;
- sandbox.sum = Views.sum;
- sandbox.log = log;
- sandbox.toJSON = JSON.stringify;
- sandbox.JSON = JSON;
- sandbox.provides = Mime.provides;
- sandbox.registerType = Mime.registerType;
- sandbox.start = Render.start;
- sandbox.send = Render.send;
- sandbox.getRow = Render.getRow;
- sandbox.isArray = isArray;
- sandbox.index = Dreyfus.index;
- } catch (e) {
- var sandbox = {};
- }
- return sandbox;
-};
-
-function create_filter_sandbox() {
- var sandbox = create_sandbox();
- sandbox.emit = Filter.emit;
- return sandbox;
-};
-
-// Commands are in the form of json arrays:
-// ["commandname",..optional args...]\n
-//
-// Responses are json values followed by a new line ("\n")
-
-var DDoc = (function() {
- var ddoc_dispatch = {
- "lists" : Render.list,
- "shows" : Render.show,
- "filters" : Filter.filter,
- "views" : Filter.filter_view,
- "updates" : Render.update,
- "validate_doc_update" : Validate.validate,
- "rewrites" : Render.rewrite
- };
- var ddocs = {};
- return {
- ddoc : function() {
- var args = [];
- for (var i=0; i < arguments.length; i++) {
- args.push(arguments[i]);
- };
- var ddocId = args.shift();
- if (ddocId == "new") {
- // get the real ddocId.
- ddocId = args.shift();
- // store the ddoc, functions are lazily compiled.
- ddocs[ddocId] = args.shift();
- print("true");
- } else {
- // Couch makes sure we know this ddoc already.
- var ddoc = ddocs[ddocId];
- if (!ddoc) throw(["fatal", "query_protocol_error", "uncached design doc: "+ddocId]);
- var funPath = args.shift();
- var cmd = funPath[0];
- // the first member of the fun path determines the type of operation
- var funArgs = args.shift();
- if (ddoc_dispatch[cmd]) {
- // get the function, call the command with it
- var point = ddoc;
- for (var i=0; i < funPath.length; i++) {
- if (i+1 == funPath.length) {
- var fun = point[funPath[i]];
- if (!fun) {
- throw(["error","not_found",
- "missing " + funPath[0] + " function " + funPath[i] +
- " on design doc " + ddocId]);
- }
- if (typeof fun != "function") {
- fun = Couch.compileFunction(fun, ddoc, funPath.join('.'));
- // cache the compiled fun on the ddoc
- point[funPath[i]] = fun;
- };
- } else {
- point = point[funPath[i]];
- }
- };
-
- // run the correct responder with the cmd body
- ddoc_dispatch[cmd].apply(null, [fun, ddoc, funArgs]);
- } else {
- // unknown command, quit and hope the restarted version is better
- throw(["fatal", "unknown_command", "unknown ddoc command '" + cmd + "'"]);
- }
- }
- }
- };
-})();
-
-var Loop = function() {
- var line, cmd, cmdkey, dispatch = {
- "ddoc" : DDoc.ddoc,
- // "view" : Views.handler,
- "reset" : State.reset,
- "add_fun" : State.addFun,
- "add_lib" : State.addLib,
- "map_doc" : Views.mapDoc,
- "index_doc": Dreyfus.indexDoc,
- "reduce" : Views.reduce,
- "rereduce" : Views.rereduce
- };
- function handleError(e) {
- var type = e[0];
- if (type == "fatal") {
- e[0] = "error"; // we tell the client it was a fatal error by dying
- respond(e);
- quit(-1);
- } else if (type == "error") {
- respond(e);
- } else if (e.error && e.reason) {
- // compatibility with old error format
- respond(["error", e.error, e.reason]);
- } else if (e.name) {
- respond(["error", e.name, e]);
- } else {
- respond(["error","unnamed_error", e.toSource()]);
- }
- };
- while (line = readline()) {
- cmd = JSON.parse(line);
- State.line_length = line.length;
- try {
- cmdkey = cmd.shift();
- if (dispatch[cmdkey]) {
- // run the correct responder with the cmd body
- dispatch[cmdkey].apply(null, cmd);
- } else {
- // unknown command, quit and hope the restarted version is better
- throw(["fatal", "unknown_command", "unknown command '" + cmdkey + "'"]);
- }
- } catch(e) {
- handleError(e);
- }
- };
-};
-
-// Seal all the globals to prevent modification.
-seal(Couch, true);
-seal(JSON, true);
-seal(Mime, true);
-seal(Render, true);
-seal(Filter, true);
-seal(Views, true);
-seal(isArray, true);
-seal(log, true);
-
-Loop();
diff --git a/share/server/mimeparse.js b/share/server/mimeparse.js
deleted file mode 100644
index 40be7821d..000000000
--- a/share/server/mimeparse.js
+++ /dev/null
@@ -1,158 +0,0 @@
-// mimeparse.js
-//
-// This module provides basic functions for handling mime-types. It can
-// handle matching mime-types against a list of media-ranges. See section
-// 14.1 of the HTTP specification [RFC 2616] for a complete explanation.
-//
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
-//
-// A port to JavaScript of Joe Gregorio's MIME-Type Parser:
-//
-// http://code.google.com/p/mimeparse/
-//
-// Ported by J. Chris Anderson <jchris@apache.org>, targeting the Spidermonkey runtime.
-//
-// To run the tests, open mimeparse-js-test.html in a browser.
-// Ported from version 0.1.2
-// Comments are mostly excerpted from the original.
-
-var Mimeparse = (function() {
- // private helpers
- function strip(string) {
- return string.replace(/^\s+/, '').replace(/\s+$/, '');
- };
-
- function parseRanges(ranges) {
- var parsedRanges = [], rangeParts = ranges.split(",");
- for (var i=0; i < rangeParts.length; i++) {
- parsedRanges.push(publicMethods.parseMediaRange(rangeParts[i]));
- };
- return parsedRanges;
- };
-
- var publicMethods = {
- // Carves up a mime-type and returns an Array of the
- // [type, subtype, params] where "params" is a Hash of all
- // the parameters for the media range.
- //
- // For example, the media range "application/xhtml;q=0.5" would
- // get parsed into:
- //
- // ["application", "xhtml", { "q" : "0.5" }]
- parseMimeType : function(mimeType) {
- var fullType, typeParts, params = {}, parts = mimeType.split(';');
- for (var i=0; i < parts.length; i++) {
- var p = parts[i].split('=');
- if (p.length == 2) {
- params[strip(p[0])] = strip(p[1]);
- }
- };
- fullType = parts[0].replace(/^\s+/, '').replace(/\s+$/, '');
- if (fullType == '*') fullType = '*/*';
- typeParts = fullType.split('/');
- return [typeParts[0], typeParts[1], params];
- },
-
- // Carves up a media range and returns an Array of the
- // [type, subtype, params] where "params" is a Object with
- // all the parameters for the media range.
- //
- // For example, the media range "application/*;q=0.5" would
- // get parsed into:
- //
- // ["application", "*", { "q" : "0.5" }]
- //
- // In addition this function also guarantees that there
- // is a value for "q" in the params dictionary, filling it
- // in with a proper default if necessary.
- parseMediaRange : function(range) {
- var q, parsedType = this.parseMimeType(range);
- if (!parsedType[2]['q']) {
- parsedType[2]['q'] = '1';
- } else {
- q = parseFloat(parsedType[2]['q']);
- if (isNaN(q)) {
- parsedType[2]['q'] = '1';
- } else if (q > 1 || q < 0) {
- parsedType[2]['q'] = '1';
- }
- }
- return parsedType;
- },
-
- // Find the best match for a given mime-type against
- // a list of media_ranges that have already been
- // parsed by parseMediaRange(). Returns an array of
- // the fitness value and the value of the 'q' quality
- // parameter of the best match, or (-1, 0) if no match
- // was found. Just as for qualityParsed(), 'parsed_ranges'
- // must be a list of parsed media ranges.
- fitnessAndQualityParsed : function(mimeType, parsedRanges) {
- var bestFitness = -1, bestFitQ = 0, target = this.parseMediaRange(mimeType);
- var targetType = target[0], targetSubtype = target[1], targetParams = target[2];
-
- for (var i=0; i < parsedRanges.length; i++) {
- var parsed = parsedRanges[i];
- var type = parsed[0], subtype = parsed[1], params = parsed[2];
- if ((type == targetType || type == "*" || targetType == "*") &&
- (subtype == targetSubtype || subtype == "*" || targetSubtype == "*")) {
- var matchCount = 0;
- for (var param in targetParams) {
- if (param != 'q' && params[param] && params[param] == targetParams[param]) {
- matchCount += 1;
- }
- }
-
- var fitness = (type == targetType) ? 100 : 0;
- fitness += (subtype == targetSubtype) ? 10 : 0;
- fitness += matchCount;
-
- if (fitness > bestFitness) {
- bestFitness = fitness;
- bestFitQ = params["q"];
- }
- }
- };
- return [bestFitness, parseFloat(bestFitQ)];
- },
-
- // Find the best match for a given mime-type against
- // a list of media_ranges that have already been
- // parsed by parseMediaRange(). Returns the
- // 'q' quality parameter of the best match, 0 if no
-    // match was found. This function behaves the same as quality()
- // except that 'parsedRanges' must be a list of
- // parsed media ranges.
- qualityParsed : function(mimeType, parsedRanges) {
- return this.fitnessAndQualityParsed(mimeType, parsedRanges)[1];
- },
-
- // Returns the quality 'q' of a mime-type when compared
- // against the media-ranges in ranges. For example:
- //
- // >>> Mimeparse.quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
- // 0.7
- quality : function(mimeType, ranges) {
- return this.qualityParsed(mimeType, parseRanges(ranges));
- },
-
- // Takes a list of supported mime-types and finds the best
- // match for all the media-ranges listed in header. The value of
- // header must be a string that conforms to the format of the
- // HTTP Accept: header. The value of 'supported' is a list of
- // mime-types.
- //
- // >>> bestMatch(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
- // 'text/xml'
- bestMatch : function(supported, header) {
- var parsedHeader = parseRanges(header);
- var weighted = [];
- for (var i=0; i < supported.length; i++) {
- weighted.push([publicMethods.fitnessAndQualityParsed(supported[i], parsedHeader), i, supported[i]]);
- };
- weighted.sort();
- return weighted[weighted.length-1][0][1] ? weighted[weighted.length-1][2] : '';
- }
- };
- return publicMethods;
-})();
diff --git a/share/server/render.js b/share/server/render.js
deleted file mode 100644
index 078a6491b..000000000
--- a/share/server/render.js
+++ /dev/null
@@ -1,400 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-
-var Mime = (function() {
- // registerType(name, mime-type, mime-type, ...)
- //
- // Available in query server sandbox. TODO: The list is cleared on reset.
- // This registers a particular name with the set of mimetypes it can handle.
- // Whoever registers last wins.
- //
- // Example:
- // registerType("html", "text/html; charset=utf-8");
-
- var mimesByKey = {};
- var keysByMime = {};
- function registerType() {
- var mimes = [], key = arguments[0];
- for (var i=1; i < arguments.length; i++) {
- mimes.push(arguments[i]);
- };
- mimesByKey[key] = mimes;
- for (var i=0; i < mimes.length; i++) {
- keysByMime[mimes[i]] = key;
- };
- }
-
- // Some default types
- // Ported from Ruby on Rails
- // Build list of Mime types for HTTP responses
- // http://www.iana.org/assignments/media-types/
- // http://dev.rubyonrails.org/svn/rails/trunk/actionpack/lib/action_controller/mime_types.rb
-
- registerType("all", "*/*");
- registerType("text", "text/plain; charset=utf-8", "txt");
- registerType("html", "text/html; charset=utf-8");
- registerType("xhtml", "application/xhtml+xml", "xhtml");
- registerType("xml", "application/xml", "text/xml", "application/x-xml");
- registerType("js", "text/javascript", "application/javascript", "application/x-javascript");
- registerType("css", "text/css");
- registerType("ics", "text/calendar");
- registerType("csv", "text/csv");
- registerType("rss", "application/rss+xml");
- registerType("atom", "application/atom+xml");
- registerType("yaml", "application/x-yaml", "text/yaml");
- // just like Rails
- registerType("multipart_form", "multipart/form-data");
- registerType("url_encoded_form", "application/x-www-form-urlencoded");
- // http://www.ietf.org/rfc/rfc4627.txt
- registerType("json", "application/json", "text/x-json");
-
-
- var providesUsed = false;
- var mimeFuns = [];
- var responseContentType = null;
-
- function provides(type, fun) {
- providesUsed = true;
- mimeFuns.push([type, fun]);
- };
-
- function resetProvides() {
- // set globals
- providesUsed = false;
- mimeFuns = [];
- responseContentType = null;
- };
-
- function runProvides(req, ddoc) {
- var supportedMimes = [], bestFun, bestKey = null, accept = req.headers["Accept"];
- if (req.query && req.query.format) {
- bestKey = req.query.format;
- responseContentType = mimesByKey[bestKey][0];
- } else if (accept) {
- // log("using accept header: "+accept);
- mimeFuns.reverse().forEach(function(mimeFun) {
- var mimeKey = mimeFun[0];
- if (mimesByKey[mimeKey]) {
- supportedMimes = supportedMimes.concat(mimesByKey[mimeKey]);
- }
- });
- responseContentType = Mimeparse.bestMatch(supportedMimes, accept);
- bestKey = keysByMime[responseContentType];
- } else {
- // just do the first one
- bestKey = mimeFuns[0][0];
- responseContentType = mimesByKey[bestKey][0];
- }
-
- if (bestKey) {
- for (var i=0; i < mimeFuns.length; i++) {
- if (mimeFuns[i][0] == bestKey) {
- bestFun = mimeFuns[i][1];
- break;
- }
- };
- };
-
- if (bestFun) {
- return bestFun.call(ddoc);
- } else {
- var supportedTypes = mimeFuns.map(function(mf) {
- return mimesByKey[mf[0]].join(', ') || mf[0];
- });
- throw(["error","not_acceptable",
- "Content-Type "+(accept||bestKey)+" not supported, try one of: "+supportedTypes.join(', ')]);
- }
- };
-
-
- return {
- registerType : registerType,
- provides : provides,
- resetProvides : resetProvides,
- runProvides : runProvides,
- providesUsed : function () {
- return providesUsed;
- },
- responseContentType : function () {
- return responseContentType;
- }
- };
-})();
-
-
-
-
-////
-//// Render dispatcher
-////
-////
-////
-////
-
-var Render = (function() {
- var new_header = false;
- var chunks = [];
-
-
- // Start chunks
- var startResp = {};
- function start(resp) {
- startResp = resp || {};
- new_header = true;
- };
-
- function sendStart() {
- startResp = applyContentType((startResp || {}), Mime.responseContentType());
- respond(["start", chunks, startResp]);
- chunks = [];
- startResp = {};
- new_header = false;
- }
-
- function applyContentType(resp, responseContentType) {
- resp["headers"] = resp["headers"] || {};
- if (responseContentType) {
- resp["headers"]["Content-Type"] = resp["headers"]["Content-Type"] || responseContentType;
- }
- return resp;
- }
-
- function send(chunk) {
- chunks.push(chunk.toString());
- };
-
- function blowChunks(label) {
- if (new_header) {
- respond([label||"chunks", chunks, startResp]);
- new_header = false;
- }
- else {
- respond([label||"chunks", chunks]);
- }
- chunks = [];
- };
-
- var gotRow = false, lastRow = false;
- function getRow() {
- if (lastRow) return null;
- if (!gotRow) {
- gotRow = true;
- sendStart();
- } else {
- blowChunks();
- }
- var json = JSON.parse(readline());
- if (json[0] == "list_end") {
- lastRow = true;
- return null;
- }
- if (json[0] != "list_row") {
- throw(["fatal", "list_error", "not a row '" + json[0] + "'"]);
- }
- return json[1];
- };
-
-
- function maybeWrapResponse(resp) {
- var type = typeof resp;
- if ((type == "string") || (type == "xml")) {
- return {body:resp};
- } else {
- return resp;
- }
- };
-
- // from http://javascript.crockford.com/remedial.html
- function typeOf(value) {
- var s = typeof value;
- if (s === 'object') {
- if (value) {
- if (value instanceof Array) {
- s = 'array';
- }
- } else {
- s = 'null';
- }
- }
- return s;
- };
-
- function isDocRequestPath(info) {
- var path = info.path;
- return path.length > 5;
- };
-
- function runShow(fun, ddoc, args) {
- try {
- resetList();
- Mime.resetProvides();
- var resp = fun.apply(ddoc, args) || {};
- resp = maybeWrapResponse(resp);
-
- // handle list() style API
- if (chunks.length && chunks.length > 0) {
- resp.headers = resp.headers || {};
- for(var header in startResp) {
- resp.headers[header] = startResp[header];
- }
- resp.body = chunks.join("") + (resp.body || "");
- resetList();
- }
-
- if (Mime.providesUsed()) {
- var provided_resp = Mime.runProvides(args[1], ddoc) || {};
- provided_resp = maybeWrapResponse(provided_resp);
- resp.body = (resp.body || "") + chunks.join("");
- resp.body += provided_resp.body || "";
- resp = applyContentType(resp, Mime.responseContentType());
- resetList();
- }
-
- var type = typeOf(resp);
- if (type == 'object' || type == 'string') {
- respond(["resp", maybeWrapResponse(resp)]);
- } else {
- throw(["error", "render_error", "undefined response from show function"]);
- }
- } catch(e) {
- if (args[0] === null && isDocRequestPath(args[1])) {
- throw(["error", "not_found", "document not found"]);
- } else {
- renderError(e, fun.toString());
- }
- }
- };
-
- function runUpdate(fun, ddoc, args) {
- try {
- var method = args[1].method;
- // for analytics logging applications you might want to remove the next line
- if (method == "GET") throw(["error","method_not_allowed","Update functions do not allow GET"]);
- var result = fun.apply(ddoc, args);
- var doc = result[0];
- var resp = result[1];
- var type = typeOf(resp);
- if (type == 'object' || type == 'string') {
- respond(["up", doc, maybeWrapResponse(resp)]);
- } else {
- throw(["error", "render_error", "undefined response from update function"]);
- }
- } catch(e) {
- renderError(e, fun.toString());
- }
- };
-
- function resetList() {
- gotRow = false;
- lastRow = false;
- chunks = [];
- startResp = {};
- new_header = false;
- };
-
- function runList(listFun, ddoc, args) {
- try {
- Mime.resetProvides();
- resetList();
- var head = args[0];
- var req = args[1];
- var tail = listFun.apply(ddoc, args);
-
- if (Mime.providesUsed()) {
- tail = Mime.runProvides(req, ddoc);
- }
- if (!gotRow) getRow();
- if (typeof tail != "undefined") {
- chunks.push(tail);
- }
- blowChunks("end");
- } catch(e) {
- renderError(e, listFun.toString());
- }
- };
-
- function runRewrite(fun, ddoc, args) {
- var result;
- try {
- result = fun.apply(ddoc, args);
- } catch(error) {
- renderError(error, fun.toString(), "rewrite_error");
- }
-
- if (!result) {
- respond(["no_dispatch_rule"]);
- return;
- }
-
- if (typeof result === "string") {
- result = {path: result, method: args[0].method};
- }
- respond(["ok", result]);
- }
-
- function renderError(e, funSrc, errType) {
- if (e.error && e.reason || e[0] == "error" || e[0] == "fatal") {
- throw(e);
- } else {
- var logMessage = "function raised error: " +
- e.toSource() + " \n" +
- "stacktrace: " + e.stack;
- log(logMessage);
- throw(["error", errType || "render_error", logMessage]);
- }
- };
-
- function escapeHTML(string) {
- return string && string.replace(/&/g, "&amp;")
- .replace(/</g, "&lt;")
- .replace(/>/g, "&gt;");
- };
-
-
- return {
- start : start,
- send : send,
- getRow : getRow,
- show : function(fun, ddoc, args) {
- // var showFun = Couch.compileFunction(funSrc);
- runShow(fun, ddoc, args);
- },
- update : function(fun, ddoc, args) {
- // var upFun = Couch.compileFunction(funSrc);
- runUpdate(fun, ddoc, args);
- },
- list : function(fun, ddoc, args) {
- runList(fun, ddoc, args);
- },
- rewrite : function(fun, ddoc, args) {
- runRewrite(fun, ddoc, args);
- }
- };
-})();
-
-// send = Render.send;
-// getRow = Render.getRow;
-// start = Render.start;
-
-// unused. this will be handled in the Erlang side of things.
-// function htmlRenderError(e, funSrc) {
-// var msg = ["<html><body><h1>Render Error</h1>",
-// "<p>JavaScript function raised error: ",
-// e.toString(),
-// "</p><h2>Stacktrace:</h2><code><pre>",
-// escapeHTML(e.stack),
-// "</pre></code><h2>Function source:</h2><code><pre>",
-// escapeHTML(funSrc),
-// "</pre></code></body></html>"].join('');
-// return {body:msg};
-// };
diff --git a/share/server/rewrite_fun.js b/share/server/rewrite_fun.js
deleted file mode 100644
index bbfb39223..000000000
--- a/share/server/rewrite_fun.js
+++ /dev/null
@@ -1,20 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-//
-// Based on the normalizeFunction which can be
-// found here:
-//
-// https://github.com/dmunch/couch-chakra/blob/master/js/normalizeFunction.js
-
-function rewriteFunInt(source) {
- return source;
-}
diff --git a/share/server/state.js b/share/server/state.js
deleted file mode 100644
index ff553dd57..000000000
--- a/share/server/state.js
+++ /dev/null
@@ -1,31 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-var State = {
- reset : function(config) {
- // clear the globals and run gc
- State.funs = [];
- State.lib = null;
- State.query_config = config || {};
- gc();
- print("true"); // indicates success
- },
- addFun : function(newFun) {
- // Compile to a function and add it to funs array
- State.funs.push(Couch.compileFunction(newFun, {views : {lib : State.lib}}));
- print("true");
- },
- addLib : function(lib) {
- State.lib = lib;
- print("true");
- }
-};
diff --git a/share/server/util.js b/share/server/util.js
deleted file mode 100644
index f570acebd..000000000
--- a/share/server/util.js
+++ /dev/null
@@ -1,157 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-var resolveModule = function(names, mod, root) {
- if (names.length == 0) {
- if (typeof mod.current != "string") {
- throw ["error","invalid_require_path",
- 'Must require a JavaScript string, not: '+(typeof mod.current)];
- }
- return {
- current : mod.current,
- parent : mod.parent,
- id : mod.id,
- exports : {}
- };
- }
- // we need to traverse the path
- var n = names.shift();
- if (n == '..') {
- if (!(mod.parent && mod.parent.parent)) {
- throw ["error", "invalid_require_path", 'Object has no parent '+JSON.stringify(mod.current)];
- }
- return resolveModule(names, {
- id : mod.id.slice(0, mod.id.lastIndexOf('/')),
- parent : mod.parent.parent,
- current : mod.parent.current
- });
- } else if (n == '.') {
- if (!mod.parent) {
- throw ["error", "invalid_require_path", 'Object has no parent '+JSON.stringify(mod.current)];
- }
- return resolveModule(names, {
- parent : mod.parent,
- current : mod.current,
- id : mod.id
- });
- } else if (root) {
- mod = {current : root};
- }
- if (mod.current[n] === undefined) {
- throw ["error", "invalid_require_path", 'Object has no property "'+n+'". '+JSON.stringify(mod.current)];
- }
- return resolveModule(names, {
- current : mod.current[n],
- parent : mod,
- id : mod.id ? mod.id + '/' + n : n
- });
-};
-
-var Couch = {
- // moving this away from global so we can move to json2.js later
- compileFunction : function(source, ddoc, name) {
- if (!source) throw(["error","not_found","missing function"]);
-
- var functionObject = null;
- var sandbox = create_sandbox();
-
- var require = function(name, module) {
- module = module || {};
- var newModule = resolveModule(name.split('/'), module.parent, ddoc);
- if (!ddoc._module_cache.hasOwnProperty(newModule.id)) {
- // create empty exports object before executing the module,
- // stops circular requires from filling the stack
- ddoc._module_cache[newModule.id] = {};
- var s = "(function (module, exports, require) { " + newModule.current + "\n });";
- try {
- var func = sandbox ? evalcx(s, sandbox, newModule.id) : eval(s);
- func.apply(sandbox, [newModule, newModule.exports, function(name) {
- return require(name, newModule);
- }]);
- } catch(e) {
- throw [
- "error",
- "compilation_error",
- "Module require('" +name+ "') raised error " + e.toSource()
- ];
- }
- ddoc._module_cache[newModule.id] = newModule.exports;
- }
- return ddoc._module_cache[newModule.id];
- };
-
- if (ddoc) {
- sandbox.require = require;
- if (!ddoc._module_cache) ddoc._module_cache = {};
- }
-
- try {
- if(typeof CoffeeScript === "undefined") {
- var rewrittenFun = rewriteFunInt(source);
- functionObject = evalcx(rewrittenFun, sandbox, name);
- } else {
- var transpiled = CoffeeScript.compile(source, {bare: true});
- functionObject = evalcx(transpiled, sandbox, name);
- }
- } catch (err) {
- throw([
- "error",
- "compilation_error",
- err.toSource() + " (" + source + ")"
- ]);
- };
- if (typeof(functionObject) == "function") {
- return functionObject;
- } else {
- throw(["error","compilation_error",
- "Expression does not eval to a function. (" + source.toString() + ")"]);
- };
- },
- recursivelySeal : function(obj) {
- // seal() is broken in current Spidermonkey
- try {
- seal(obj);
- } catch (x) {
- // Sealing of arrays broken in some SpiderMonkey versions.
- // https://bugzilla.mozilla.org/show_bug.cgi?id=449657
- }
- for (var propname in obj) {
- if (typeof obj[propname] == "object") {
- arguments.callee(obj[propname]);
- }
- }
- }
-};
-
-// prints the object as JSON, and rescues and logs any JSON.stringify() related errors
-function respond(obj) {
- try {
- print(JSON.stringify(obj));
- } catch(e) {
- log("Error converting object to JSON: " + e.toString());
- log("error on obj: "+ obj.toSource());
- }
-};
-
-function log(message) {
- // idea: query_server_config option for log level
- if (typeof message == "xml") {
- message = message.toXMLString();
- } else if (typeof message != "string") {
- message = JSON.stringify(message);
- }
- respond(["log", String(message)]);
-};
-
-function isArray(obj) {
- return toString.call(obj) === "[object Array]";
-}
diff --git a/share/server/validate.js b/share/server/validate.js
deleted file mode 100644
index 5b50e5473..000000000
--- a/share/server/validate.js
+++ /dev/null
@@ -1,25 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-var Validate = {
- validate : function(fun, ddoc, args) {
- try {
- fun.apply(ddoc, args);
- respond(1);
- } catch (error) {
- if (error.name && error.stack) {
- throw error;
- }
- respond(error);
- }
- }
-};
diff --git a/share/server/views.js b/share/server/views.js
deleted file mode 100644
index 32d65e457..000000000
--- a/share/server/views.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-
-
-var Views = (function() {
-
- var map_results = []; // holds temporary emitted values during doc map
-
- function runReduce(reduceFuns, keys, values, rereduce) {
- var code_size = 0;
- for (var i in reduceFuns) {
- var fun_body = reduceFuns[i];
- code_size += fun_body.length;
- reduceFuns[i] = Couch.compileFunction(fun_body);
- };
- var reductions = new Array(reduceFuns.length);
- for(var i = 0; i < reduceFuns.length; i++) {
- try {
- reductions[i] = reduceFuns[i](keys, values, rereduce);
- } catch (err) {
- handleViewError(err);
- // if the error is not fatal, ignore the results and continue
- reductions[i] = null;
- }
- };
- var reduce_line = JSON.stringify(reductions);
- var reduce_length = reduce_line.length;
-    var input_length = State.line_length - code_size;
- // TODO make reduce_limit config into a number
- if (State.query_config && State.query_config.reduce_limit &&
- reduce_length > 4096 && ((reduce_length * 2) > input_length)) {
- var log_message = [
- "Reduce output must shrink more rapidly:",
- "input size:", input_length,
- "output size:", reduce_length
- ].join(" ");
- if (State.query_config.reduce_limit === "log") {
- log("reduce_overflow_error: " + log_message);
- print("[true," + reduce_line + "]");
- } else {
- throw(["error", "reduce_overflow_error", log_message]);
- };
- } else {
- print("[true," + reduce_line + "]");
- }
- };
-
- function handleViewError(err, doc) {
- if (err == "fatal_error") {
- // Only if it's a "fatal_error" do we exit. What's a fatal error?
- // That's for the query to decide.
- //
- // This will make it possible for queries to completely error out,
- // by catching their own local exception and rethrowing a
- // fatal_error. But by default if they don't do error handling we
- // just eat the exception and carry on.
- //
- // In this case we abort map processing but don't destroy the
- // JavaScript process. If you need to destroy the JavaScript
- // process, throw the error form matched by the block below.
- throw(["error", "map_runtime_error", "function raised 'fatal_error'"]);
- } else if (err[0] == "fatal") {
- // Throwing errors of the form ["fatal","error_key","reason"]
- // will kill the OS process. This is not normally what you want.
- throw(err);
- }
- var message = "function raised exception " + err.toSource();
- if (doc) message += " with doc._id " + doc._id;
- log(message);
- };
-
- return {
- // view helper functions
- emit : function(key, value) {
- map_results.push([key, value]);
- },
- sum : function(values) {
- var rv = 0;
- for (var i in values) {
- rv += values[i];
- }
- return rv;
- },
- reduce : function(reduceFuns, kvs) {
- var keys = new Array(kvs.length);
- var values = new Array(kvs.length);
- for(var i = 0; i < kvs.length; i++) {
- keys[i] = kvs[i][0];
- values[i] = kvs[i][1];
- }
- runReduce(reduceFuns, keys, values, false);
- },
- rereduce : function(reduceFuns, values) {
- runReduce(reduceFuns, null, values, true);
- },
- mapDoc : function(doc) {
- // Compute all the map functions against the document.
- //
- // Each function can output multiple key/value pairs for each document.
- //
- // Example output of map_doc after three functions set by add_fun cmds:
- // [
- // [["Key","Value"]], <- fun 1 returned 1 key value
- // [], <- fun 2 returned 0 key values
- // [["Key1","Value1"],["Key2","Value2"]] <- fun 3 returned 2 key values
- // ]
- //
-
- Couch.recursivelySeal(doc);
-
- var buf = [];
- for (var fun in State.funs) {
- map_results = [];
- try {
- State.funs[fun](doc);
- buf.push(map_results);
- } catch (err) {
- handleViewError(err, doc);
- // If the error is not fatal, we treat the doc as if it
- // did not emit anything, by buffering an empty array.
- buf.push([]);
- }
- }
- print(JSON.stringify(buf));
- }
- };
-})();
diff --git a/src/chttpd/LICENSE b/src/chttpd/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/chttpd/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/chttpd/include/chttpd.hrl b/src/chttpd/include/chttpd.hrl
deleted file mode 100644
index a7f9aaac1..000000000
--- a/src/chttpd/include/chttpd.hrl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--record(httpd_resp, {
- end_ts,
- code,
- response,
- status,
- nonce,
- should_log = true,
- reason
-}).
-
--define(is_hex(C), (
- (C >= $0 andalso C =< $9) orelse
- (C >= $a andalso C =< $f) orelse
- (C >= $A andalso C =< $F)
-)).
diff --git a/src/chttpd/include/chttpd_cors.hrl b/src/chttpd/include/chttpd_cors.hrl
deleted file mode 100644
index 1988d7b21..000000000
--- a/src/chttpd/include/chttpd_cors.hrl
+++ /dev/null
@@ -1,81 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--define(SUPPORTED_HEADERS, [
- "accept",
- "accept-language",
- "authorization",
- "content-length",
- "content-range",
- "content-type",
- "destination",
- "expires",
- "if-match",
- "last-modified",
- "origin",
- "pragma",
- "x-couch-full-commit",
- "x-couch-id",
- "x-couch-persist",
- "x-couchdb-www-authenticate",
- "x-http-method-override",
- "x-requested-with",
- "x-couchdb-vhost-path"
-]).
-
-
--define(SUPPORTED_METHODS, [
- "CONNECT",
- "COPY",
- "DELETE",
- "GET",
- "HEAD",
- "OPTIONS",
- "POST",
- "PUT",
- "TRACE"
-]).
-
-
-%% as defined in http://www.w3.org/TR/cors/#terminology
--define(SIMPLE_HEADERS, [
- "cache-control",
- "content-language",
- "content-type",
- "expires",
- "last-modified",
- "pragma"
-]).
-
-
--define(COUCH_HEADERS, [
- "accept-ranges",
- "etag",
- "server",
- "x-couch-request-id",
- "x-couch-update-newrev",
- "x-couchdb-body-time"
-]).
-
-
--define(SIMPLE_CONTENT_TYPE_VALUES, [
- "application/x-www-form-urlencoded",
- "multipart/form-data",
- "text/plain"
-]).
-
-
--define(CORS_DEFAULT_MAX_AGE, 600).
-
-
--define(CORS_DEFAULT_ALLOW_CREDENTIALS, false).
diff --git a/src/chttpd/priv/stats_descriptions.cfg b/src/chttpd/priv/stats_descriptions.cfg
deleted file mode 100644
index f54231ce3..000000000
--- a/src/chttpd/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-% Style guide for descriptions: Start with a lowercase letter & do not add
-% a trailing full-stop / period
-% Please keep this in alphabetical order
-
-{[couchdb, httpd, aborted_requests], [
- {type, counter},
- {desc, <<"number of aborted requests">>}
-]}.
-{[couchdb, dbinfo], [
- {type, histogram},
- {desc, <<"distribution of latencies for calls to retrieve DB info">>}
-]}.
diff --git a/src/chttpd/rebar.config b/src/chttpd/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/chttpd/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/chttpd/src/chttpd.app.src b/src/chttpd/src/chttpd.app.src
deleted file mode 100644
index 3526745df..000000000
--- a/src/chttpd/src/chttpd.app.src
+++ /dev/null
@@ -1,33 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
- {application, chttpd, [
- {description, "HTTP interface for CouchDB cluster"},
- {vsn, git},
- {registered, [
- chttpd_sup,
- chttpd,
- chttpd_auth_cache,
- chttpd_auth_cache_lru
- ]},
- {applications, [
- kernel,
- stdlib,
- couch_log,
- couch_stats,
- config,
- couch,
- ets_lru,
- fabric
- ]},
- {mod, {chttpd_app,[]}}
-]}.
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
deleted file mode 100644
index 48280e80c..000000000
--- a/src/chttpd/src/chttpd.erl
+++ /dev/null
@@ -1,1622 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("chttpd/include/chttpd.hrl").
-
--export([
- start_link/0, start_link/1, start_link/2,
- stop/0,
- handle_request/1,
- handle_request_int/1,
- primary_header_value/2,
- header_value/2, header_value/3,
- qs_value/2,
- qs_value/3,
- qs/1,
- qs_json_value/3,
- path/1,
- absolute_uri/2,
- body_length/1,
- verify_is_server_admin/1,
- unquote/1,
- quote/1,
- recv/2,
- recv_chunked/4,
- error_info/1,
- parse_form/1,
- json_body/1,
- json_body_obj/1,
- body/1,
- doc_etag/1,
- make_etag/1,
- etag_respond/3,
- etag_match/2,
- partition/1,
- serve_file/3, serve_file/4,
- server_header/0,
- start_chunked_response/3,
- send_chunk/2,
- start_response_length/4,
- send/2,
- start_json_response/2,
- start_json_response/3,
- end_json_response/1,
- send_response/4,
- send_response_no_cors/4,
- send_method_not_allowed/2,
- send_error/2, send_error/4,
- send_redirect/2,
- send_chunked_error/2,
- send_json/2, send_json/3, send_json/4,
- validate_ctype/2
-]).
-
--export([authenticate_request/3]).
-
--export([
- start_delayed_json_response/2, start_delayed_json_response/3, start_delayed_json_response/4,
- start_delayed_chunked_response/3, start_delayed_chunked_response/4,
- send_delayed_chunk/2,
- send_delayed_last_chunk/1,
- send_delayed_error/2,
- end_delayed_json_response/1,
- get_delayed_req/1
-]).
-
--export([
- chunked_response_buffer_size/0,
- close_delayed_json_object/4
-]).
-
--record(delayed_resp, {
- start_fun,
- req,
- code,
- headers,
- chunks,
- resp = nil,
- buffer_response = false
-}).
-
--define(DEFAULT_SERVER_OPTIONS, "[{recbuf, undefined}]").
--define(DEFAULT_SOCKET_OPTIONS, "[{sndbuf, 262144}, {nodelay, true}]").
-
-start_link() ->
- start_link(http).
-start_link(http) ->
- Port = config:get("chttpd", "port", "5984"),
- start_link(?MODULE, [{port, Port}]);
-start_link(https) ->
- Port = config:get("ssl", "port", "6984"),
- {ok, Ciphers} = couch_util:parse_term(config:get("ssl", "ciphers", "undefined")),
- {ok, Versions} = couch_util:parse_term(config:get("ssl", "tls_versions", "undefined")),
- {ok, SecureRenegotiate} = couch_util:parse_term(
- config:get("ssl", "secure_renegotiate", "undefined")
- ),
- ServerOpts0 =
- [
- {cacertfile, config:get("ssl", "cacert_file", undefined)},
- {keyfile, config:get("ssl", "key_file", undefined)},
- {certfile, config:get("ssl", "cert_file", undefined)},
- {password, config:get("ssl", "password", undefined)},
- {secure_renegotiate, SecureRenegotiate},
- {versions, Versions},
- {ciphers, Ciphers}
- ],
-
- case
- (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
- couch_util:get_value(certfile, ServerOpts0) == undefined)
- of
- true ->
- io:format("SSL enabled but PEM certificates are missing.", []),
- throw({error, missing_certs});
- false ->
- ok
- end,
-
- ServerOpts = [Opt || {_, V} = Opt <- ServerOpts0, V /= undefined],
-
- ClientOpts =
- case config:get("ssl", "verify_ssl_certificates", "false") of
- "false" ->
- [];
- "true" ->
- FailIfNoPeerCert =
- case config:get("ssl", "fail_if_no_peer_cert", "false") of
- "false" -> false;
- "true" -> true
- end,
- [
- {depth,
- list_to_integer(
- config:get(
- "ssl",
- "ssl_certificate_max_depth",
- "1"
- )
- )},
- {fail_if_no_peer_cert, FailIfNoPeerCert},
- {verify, verify_peer}
- ] ++
- case config:get("ssl", "verify_fun", undefined) of
- undefined -> [];
- SpecStr -> [{verify_fun, couch_httpd:make_arity_3_fun(SpecStr)}]
- end
- end,
- SslOpts = ServerOpts ++ ClientOpts,
-
- Options0 =
- [
- {port, Port},
- {ssl, true},
- {ssl_opts, SslOpts}
- ],
- CustomServerOpts = get_server_options("httpsd"),
- Options = merge_server_options(Options0, CustomServerOpts),
- start_link(https, Options).
-
-start_link(Name, Options) ->
- IP =
- case config:get("chttpd", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
- ok = couch_httpd:validate_bind_address(IP),
-
- set_auth_handlers(),
-
- Options1 =
- Options ++
- [
- {loop, fun ?MODULE:handle_request/1},
- {name, Name},
- {ip, IP}
- ],
- ServerOpts = get_server_options("chttpd"),
- Options2 = merge_server_options(Options1, ServerOpts),
- case mochiweb_http:start(Options2) of
- {ok, Pid} ->
- {ok, Pid};
- {error, Reason} ->
- io:format("Failure to start Mochiweb: ~s~n", [Reason]),
- {error, Reason}
- end.
-
-get_server_options(Module) ->
- ServerOptsCfg =
- case Module of
- "chttpd" ->
- config:get(
- Module,
- "server_options",
- ?DEFAULT_SERVER_OPTIONS
- );
- _ ->
- config:get(Module, "server_options", "[]")
- end,
- {ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
- ServerOpts.
-
-merge_server_options(A, B) ->
- lists:keymerge(1, lists:sort(A), lists:sort(B)).
-
-stop() ->
- catch mochiweb_http:stop(https),
- mochiweb_http:stop(?MODULE).
-
-handle_request(MochiReq0) ->
- erlang:put(?REWRITE_COUNT, 0),
- MochiReq = couch_httpd_vhost:dispatch_host(MochiReq0),
- handle_request_int(MochiReq).
-
-handle_request_int(MochiReq) ->
- Begin = os:timestamp(),
- SocketOptsCfg = config:get(
- "chttpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS
- ),
- {ok, SocketOpts} = couch_util:parse_term(SocketOptsCfg),
- ok = mochiweb_socket:setopts(MochiReq:get(socket), SocketOpts),
-
- % for the path, use the raw path with the query string and fragment
- % removed, but URL quoting left intact
- RawUri = MochiReq:get(raw_path),
- {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
- % get requested path
- RequestedPath =
- case MochiReq:get_header_value("x-couchdb-vhost-path") of
- undefined ->
- case MochiReq:get_header_value("x-couchdb-requested-path") of
- undefined -> RawUri;
- R -> R
- end;
- P ->
- P
- end,
-
- Peer = MochiReq:get(peer),
-
- Method1 =
- case MochiReq:get(method) of
- % already an atom
- Meth when is_atom(Meth) -> Meth;
- % Non-standard HTTP verbs aren't atoms (COPY, MOVE, etc.), so convert when
- % possible (the atom exists only if some loaded module already references it).
- Meth -> couch_util:to_existing_atom(Meth)
- end,
- increment_method_stats(Method1),
-
- % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
- MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
- Method2 =
- case
- lists:member(MethodOverride, [
- "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "COPY"
- ])
- of
- true ->
- couch_log:notice("MethodOverride: ~s (real method was ~s)", [
- MethodOverride, Method1
- ]),
- case Method1 of
- 'POST' ->
- couch_util:to_existing_atom(MethodOverride);
- _ ->
- % Ignore X-HTTP-Method-Override when the original verb isn't POST.
- % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
- % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
- Method1
- end;
- _ ->
- Method1
- end,
-
- % alias HEAD to GET as mochiweb takes care of stripping the body
- Method =
- case Method2 of
- 'HEAD' -> 'GET';
- Other -> Other
- end,
-
- Nonce = couch_util:to_hex(crypto:strong_rand_bytes(5)),
-
- HttpReq0 = #httpd{
- mochi_req = MochiReq,
- begin_ts = Begin,
- peer = Peer,
- original_method = Method1,
- nonce = Nonce,
- method = Method,
- path_parts = [
- list_to_binary(unquote(Part))
- || Part <- string:tokens(Path, "/")
- ],
- requested_path_parts = [
- ?l2b(unquote(Part))
- || Part <- string:tokens(RequestedPath, "/")
- ]
- },
-
- % put small token on heap to keep requests synced to backend calls
- erlang:put(nonce, Nonce),
-
- % suppress duplicate log
- erlang:put(dont_log_request, true),
- erlang:put(dont_log_response, true),
-
- {HttpReq2, Response} =
- case before_request(HttpReq0) of
- {ok, HttpReq1} ->
- process_request(HttpReq1);
- {error, Response0} ->
- {HttpReq0, Response0}
- end,
-
- {Status, Code, Reason, Resp} = split_response(Response),
-
- HttpResp = #httpd_resp{
- code = Code,
- status = Status,
- response = Resp,
- nonce = HttpReq2#httpd.nonce,
- reason = Reason
- },
-
- case after_request(HttpReq2, HttpResp) of
- #httpd_resp{status = ok, response = Resp} ->
- {ok, Resp};
- #httpd_resp{status = aborted, reason = Reason} ->
- couch_log:error("Response abnormally terminated: ~p", [Reason]),
- exit({shutdown, Reason})
- end.
-
-before_request(HttpReq) ->
- try
- chttpd_stats:init(),
- chttpd_plugin:before_request(HttpReq)
- catch ?STACKTRACE(ErrorType, Error, Stack)
- {error, catch_error(HttpReq, ErrorType, Error, Stack)}
- end.
-
-after_request(HttpReq, HttpResp0) ->
- {ok, HttpResp1} =
- try
- chttpd_plugin:after_request(HttpReq, HttpResp0)
- catch ?STACKTRACE(_ErrorType, Error, Stack)
- send_error(HttpReq, {Error, nil, Stack}),
- {ok, HttpResp0#httpd_resp{status = aborted}}
- end,
- HttpResp2 = update_stats(HttpReq, HttpResp1),
- chttpd_stats:report(HttpReq, HttpResp2),
- maybe_log(HttpReq, HttpResp2),
- HttpResp2.
-
-process_request(#httpd{mochi_req = MochiReq} = HttpReq) ->
- HandlerKey =
- case HttpReq#httpd.path_parts of
- [] -> <<>>;
- [Key|_] -> ?l2b(quote(Key))
- end,
-
- RawUri = MochiReq:get(raw_path),
-
- try
- couch_httpd:validate_host(HttpReq),
- check_request_uri_length(RawUri),
- check_url_encoding(RawUri),
- case chttpd_cors:maybe_handle_preflight_request(HttpReq) of
- not_preflight ->
- case chttpd_auth:authenticate(HttpReq, fun authenticate_request/1) of
- #httpd{} = Req ->
- handle_req_after_auth(HandlerKey, Req);
- Response ->
- {HttpReq, Response}
- end;
- Response ->
- {HttpReq, Response}
- end
- catch ?STACKTRACE(ErrorType, Error, Stack)
- {HttpReq, catch_error(HttpReq, ErrorType, Error, Stack)}
- end.
-
-handle_req_after_auth(HandlerKey, HttpReq) ->
- try
- HandlerFun = chttpd_handlers:url_handler(HandlerKey,
- fun chttpd_db:handle_request/1),
- AuthorizedReq = chttpd_auth:authorize(possibly_hack(HttpReq),
- fun chttpd_auth_request:authorize_request/1),
- {AuthorizedReq, HandlerFun(AuthorizedReq)}
- catch ?STACKTRACE(ErrorType, Error, Stack)
- {HttpReq, catch_error(HttpReq, ErrorType, Error, Stack)}
- end.
-
-catch_error(_HttpReq, throw, {http_head_abort, Resp}, _Stack) ->
- {ok, Resp};
-catch_error(_HttpReq, throw, {http_abort, Resp, Reason}, _Stack) ->
- {aborted, Resp, Reason};
-catch_error(HttpReq, throw, {invalid_json, _}, _Stack) ->
- send_error(HttpReq, {bad_request, "invalid UTF-8 JSON"});
-catch_error(HttpReq, exit, {mochiweb_recv_error, E}, _Stack) ->
- #httpd{
- mochi_req = MochiReq,
- peer = Peer,
- original_method = Method
- } = HttpReq,
- couch_log:notice("mochiweb_recv_error for ~s - ~p ~s - ~p", [
- Peer,
- Method,
- MochiReq:get(raw_path),
- E
- ]),
- exit({shutdown, E});
-catch_error(HttpReq, exit, {uri_too_long, _}, _Stack) ->
- send_error(HttpReq, request_uri_too_long);
-catch_error(HttpReq, exit, {body_too_large, _}, _Stack) ->
- send_error(HttpReq, request_entity_too_large);
-catch_error(HttpReq, throw, Error, _Stack) ->
- send_error(HttpReq, Error);
-catch_error(HttpReq, error, database_does_not_exist, _Stack) ->
- send_error(HttpReq, database_does_not_exist);
-catch_error(HttpReq, Tag, Error, Stack) ->
- % TODO improve logging and metrics collection for client disconnects
- case {Tag, Error, Stack} of
- {exit, normal, [{mochiweb_request, send, _, _} | _]} ->
- % Client disconnect (R15+)
- exit(shutdown);
- {exit, {shutdown, _}, [{mochiweb_request, send, _, _} | _]} ->
- % Client disconnect (R15+)
- exit(shutdown);
- _Else ->
- send_error(HttpReq, {Error, nil, Stack})
- end.
-
-split_response({ok, #delayed_resp{resp = Resp}}) ->
- {ok, Resp:get(code), undefined, Resp};
-split_response({ok, Resp}) ->
- {ok, Resp:get(code), undefined, Resp};
-split_response({aborted, Resp, AbortReason}) ->
- {aborted, Resp:get(code), AbortReason, Resp}.
-
-update_stats(HttpReq, #httpd_resp{end_ts = undefined} = Res) ->
- update_stats(HttpReq, Res#httpd_resp{end_ts = os:timestamp()});
-update_stats(#httpd{begin_ts = BeginTime}, #httpd_resp{} = Res) ->
- #httpd_resp{status = Status, end_ts = EndTime} = Res,
- RequestTime = timer:now_diff(EndTime, BeginTime) / 1000,
- couch_stats:update_histogram([couchdb, request_time], RequestTime),
- case Status of
- ok ->
- couch_stats:increment_counter([couchdb, httpd, requests]);
- aborted ->
- couch_stats:increment_counter([couchdb, httpd, aborted_requests])
- end,
- Res.
-
-maybe_log(#httpd{} = HttpReq, #httpd_resp{should_log = true} = HttpResp) ->
- #httpd{
- mochi_req = MochiReq,
- begin_ts = BeginTime,
- original_method = Method,
- peer = Peer
- } = HttpReq,
- #httpd_resp{
- end_ts = EndTime,
- code = Code,
- status = Status
- } = HttpResp,
- User = get_user(HttpReq),
- Host = MochiReq:get_header_value("Host"),
- RawUri = MochiReq:get(raw_path),
- RequestTime = timer:now_diff(EndTime, BeginTime) / 1000,
- couch_log:notice("~s ~s ~s ~s ~s ~B ~p ~B", [
- Host,
- Peer,
- User,
- Method,
- RawUri,
- Code,
- Status,
- round(RequestTime)
- ]);
-maybe_log(_HttpReq, #httpd_resp{should_log = false}) ->
- ok.
-
-%% HACK: replication currently handles two forms of input, #db{} style
-%% and #http_db style. We need a third that makes use of fabric. #db{}
-%% works fine for replicating the dbs and nodes database because they
-%% aren't sharded. So for now when a local db is specified as the source or
-%% the target, it's hacked to make it a full URL and treated as a remote.
-possibly_hack(#httpd{path_parts = [<<"_replicate">>]} = Req) ->
- {Props0} = chttpd:json_body_obj(Req),
- Props1 = fix_uri(Req, Props0, <<"source">>),
- Props2 = fix_uri(Req, Props1, <<"target">>),
- Req#httpd{req_body = {Props2}};
-possibly_hack(Req) ->
- Req.
-
-check_request_uri_length(Uri) ->
- check_request_uri_length(
- Uri,
- chttpd_util:get_chttpd_config("max_uri_length")
- ).
-
-check_request_uri_length(_Uri, undefined) ->
- ok;
-check_request_uri_length(Uri, MaxUriLen) when is_list(MaxUriLen) ->
- case length(Uri) > list_to_integer(MaxUriLen) of
- true ->
- throw(request_uri_too_long);
- false ->
- ok
- end.
-
-check_url_encoding([]) ->
- ok;
-check_url_encoding([$%, A, B | Rest]) when ?is_hex(A), ?is_hex(B) ->
- check_url_encoding(Rest);
-check_url_encoding([$% | _]) ->
- throw({bad_request, invalid_url_encoding});
-check_url_encoding([_ | Rest]) ->
- check_url_encoding(Rest).
-
-fix_uri(Req, Props, Type) ->
- case replication_uri(Type, Props) of
- undefined ->
- Props;
- Uri0 ->
- case is_http(Uri0) of
- true ->
- Props;
- false ->
- Uri = make_uri(Req, quote(Uri0)),
- [{Type, Uri} | proplists:delete(Type, Props)]
- end
- end.
-
-replication_uri(Type, PostProps) ->
- case couch_util:get_value(Type, PostProps) of
- {Props} ->
- couch_util:get_value(<<"url">>, Props);
- Else ->
- Else
- end.
-
-is_http(<<"http://", _/binary>>) ->
- true;
-is_http(<<"https://", _/binary>>) ->
- true;
-is_http(_) ->
- false.
-
-make_uri(Req, Raw) ->
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- Url = list_to_binary([
- "http://",
- config:get("httpd", "bind_address"),
- ":",
- Port,
- "/",
- Raw
- ]),
- Headers = [
- {<<"authorization">>, ?l2b(header_value(Req, "authorization", ""))},
- {<<"cookie">>, ?l2b(extract_cookie(Req))}
- ],
- {[{<<"url">>, Url}, {<<"headers">>, {Headers}}]}.
-
-extract_cookie(#httpd{mochi_req = MochiReq}) ->
- case MochiReq:get_cookie_value("AuthSession") of
- undefined ->
- "";
- AuthSession ->
- "AuthSession=" ++ AuthSession
- end.
-%%% end hack
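For illustration only (the host, port and database name below are assumptions, not values from the deleted code): a local database named in a _replicate body is rewritten by the hack above into a remote-style endpoint, while http:// and https:// URLs pass through unchanged.

    %% Hypothetical sketch: roughly what fix_uri/3 produces for {"source": "mydb"}
    %% on a node bound to 127.0.0.1:5984 with no Authorization header and no
    %% AuthSession cookie.
    example_fixed_source() ->
        {[
            {<<"url">>, <<"http://127.0.0.1:5984/mydb">>},
            {<<"headers">>, {[
                {<<"authorization">>, <<>>},
                {<<"cookie">>, <<>>}
            ]}}
        ]}.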
-
-%% erlfmt-ignore
-set_auth_handlers() ->
- AuthenticationDefault = "{chttpd_auth, cookie_authentication_handler},
- {chttpd_auth, default_authentication_handler}",
- AuthenticationSrcs = couch_httpd:make_fun_spec_strs(
- config:get("chttpd", "authentication_handlers", AuthenticationDefault)),
- AuthHandlers = lists:map(
- fun(A) -> {auth_handler_name(A), couch_httpd:make_arity_1_fun(A)} end, AuthenticationSrcs),
- AuthenticationFuns = AuthHandlers ++ [
- fun chttpd_auth:party_mode_handler/1 %% must be last
- ],
- ok = application:set_env(chttpd, auth_handlers, AuthenticationFuns).
-
-% SpecStr is a string like "{my_module, my_fun}"
-% Takes the first token of the function name, before the first '_', as the auth handler name
-% e.g.
-% chttpd_auth:default_authentication_handler: default
-% chttpd_auth:cookie_authentication_handler: cookie
-% couch_http_auth:proxy_authentication_handler: proxy
-%
-% couch_http:auth_handler_name can't be used here, since it assumes the name
-% of the auth handler to be the 6th token split by [\\W_]
-% - this only works for modules with exactly two underscores in their name
-% - is not very robust (a space after the ',' is assumed)
-auth_handler_name(SpecStr) ->
- {ok, {_, Fun}} = couch_util:parse_term(SpecStr),
- hd(binary:split(atom_to_binary(Fun, latin1), <<"_">>)).
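A small sketch of the naming scheme described above (hypothetical test, not in the deleted file; it relies on the eunit header that the TEST block at the end of this module already includes).

    auth_handler_name_examples_test() ->
        %% The handler name is the function-name token before the first "_".
        ?assertEqual(<<"cookie">>,
            auth_handler_name("{chttpd_auth, cookie_authentication_handler}")),
        ?assertEqual(<<"default">>,
            auth_handler_name("{chttpd_auth, default_authentication_handler}")).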
-
-authenticate_request(Req) ->
- {ok, AuthenticationFuns} = application:get_env(chttpd, auth_handlers),
- authenticate_request(Req, chttpd_auth_cache, AuthenticationFuns).
-
-authenticate_request(#httpd{} = Req0, AuthModule, AuthFuns) ->
- Req = Req0#httpd{
- auth_module = AuthModule,
- authentication_handlers = AuthFuns
- },
- authenticate_request(Req, AuthFuns).
-
-% Try authentication handlers in order until one returns a result
-authenticate_request(#httpd{user_ctx = #user_ctx{}} = Req, _AuthFuns) ->
- Req;
-authenticate_request(#httpd{} = Req, [{Name, AuthFun} | Rest]) ->
- authenticate_request(maybe_set_handler(AuthFun(Req), Name), Rest);
-authenticate_request(#httpd{} = Req, [AuthFun | Rest]) ->
- authenticate_request(AuthFun(Req), Rest);
-authenticate_request(Response, _AuthFuns) ->
- Response.
-
-maybe_set_handler(#httpd{user_ctx = #user_ctx{} = UserCtx} = Req, Name) ->
- Req#httpd{user_ctx = UserCtx#user_ctx{handler = Name}};
-maybe_set_handler(Else, _) ->
- Else.
-
-increment_method_stats(Method) ->
- couch_stats:increment_counter([couchdb, httpd_request_methods, Method]).
-
-% Utilities
-
-partition(Path) ->
- mochiweb_util:partition(Path, "/").
-
-header_value(#httpd{mochi_req = MochiReq}, Key) ->
- MochiReq:get_header_value(Key).
-
-header_value(#httpd{mochi_req = MochiReq}, Key, Default) ->
- case MochiReq:get_header_value(Key) of
- undefined -> Default;
- Value -> Value
- end.
-
-primary_header_value(#httpd{mochi_req = MochiReq}, Key) ->
- MochiReq:get_primary_header_value(Key).
-
-serve_file(Req, RelativePath, DocumentRoot) ->
- serve_file(Req, RelativePath, DocumentRoot, []).
-
-serve_file(Req0, RelativePath0, DocumentRoot0, ExtraHeaders) ->
- couch_httpd:serve_file(Req0, RelativePath0, DocumentRoot0, ExtraHeaders).
-
-qs_value(Req, Key) ->
- qs_value(Req, Key, undefined).
-
-qs_value(Req, Key, Default) ->
- couch_util:get_value(Key, qs(Req), Default).
-
-qs_json_value(Req, Key, Default) ->
- case qs_value(Req, Key, Default) of
- Default ->
- Default;
- Result ->
- ?JSON_DECODE(Result)
- end.
-
-qs(#httpd{mochi_req = MochiReq, qs = undefined}) ->
- MochiReq:parse_qs();
-qs(#httpd{qs = QS}) ->
- QS.
-
-path(#httpd{mochi_req = MochiReq}) ->
- MochiReq:get(path).
-
-absolute_uri(#httpd{mochi_req = MochiReq, absolute_uri = undefined}, Path) ->
- XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"
- ),
- Host =
- case MochiReq:get_header_value(XHost) of
- undefined ->
- case MochiReq:get_header_value("Host") of
- undefined ->
- {ok, {Address, Port}} =
- case MochiReq:get(socket) of
- {ssl, SslSocket} -> ssl:sockname(SslSocket);
- Socket -> inet:sockname(Socket)
- end,
- inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
- Value1 ->
- Value1
- end;
- Value ->
- Value
- end,
- XSsl = chttpd_util:get_chttpd_config("x_forwarded_ssl", "X-Forwarded-Ssl"),
- Scheme =
- case MochiReq:get_header_value(XSsl) of
- "on" ->
- "https";
- _ ->
- XProto = chttpd_util:get_chttpd_config(
- "x_forwarded_proto", "X-Forwarded-Proto"
- ),
- case MochiReq:get_header_value(XProto) of
- % Restrict to "https" and "http" schemes only
- "https" ->
- "https";
- _ ->
- case MochiReq:get(scheme) of
- https ->
- "https";
- http ->
- "http"
- end
- end
- end,
- Scheme ++ "://" ++ Host ++ Path;
-absolute_uri(#httpd{absolute_uri = URI}, Path) ->
- URI ++ Path.
-
-unquote(UrlEncodedString) ->
- case config:get_boolean("chttpd", "decode_plus_to_space", true) of
- true -> mochiweb_util:unquote(UrlEncodedString);
- false -> mochiweb_util:unquote_path(UrlEncodedString)
- end.
-
-quote(UrlDecodedString) ->
- mochiweb_util:quote_plus(UrlDecodedString).
-
-parse_form(#httpd{mochi_req = MochiReq}) ->
- mochiweb_multipart:parse_form(MochiReq).
-
-recv(#httpd{mochi_req = MochiReq}, Len) ->
- MochiReq:recv(Len).
-
-recv_chunked(#httpd{mochi_req = MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
- % ChunkFun is called once per chunk as ChunkFun({Length, Binary}, State);
- % the final call is made with Length == 0.
- MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
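A minimal sketch (hypothetical helper, not part of the deleted module) of a ChunkFun that collects a streamed body; it assumes mochiweb's stream_body/3 returns the final accumulator state.

    collect_body(HttpReq) ->
        MaxChunkSize = 65536,
        recv_chunked(HttpReq, MaxChunkSize, fun
            %% The final call arrives with Length == 0: return the assembled body.
            ({0, _}, Acc) -> iolist_to_binary(lists:reverse(Acc));
            ({_Length, Bin}, Acc) -> [Bin | Acc]
        end, []).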
-
-body_length(#httpd{mochi_req = MochiReq}) ->
- MochiReq:get(body_length).
-
-body(#httpd{mochi_req = MochiReq, req_body = ReqBody}) ->
- case ReqBody of
- undefined ->
- % Maximum size of document PUT request body (4GB)
- MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296
- ),
- Begin = os:timestamp(),
- try
- MochiReq:recv_body(MaxSize)
- after
- T = timer:now_diff(os:timestamp(), Begin) div 1000,
- put(body_time, T)
- end;
- _Else ->
- ReqBody
- end.
-
-validate_ctype(Req, Ctype) ->
- couch_httpd:validate_ctype(Req, Ctype).
-
-json_body(#httpd{req_body = undefined} = Httpd) ->
- case body(Httpd) of
- undefined ->
- throw({bad_request, "Missing request body"});
- Body ->
- ?JSON_DECODE(maybe_decompress(Httpd, Body))
- end;
-json_body(#httpd{req_body = ReqBody}) ->
- ReqBody.
-
-json_body_obj(Httpd) ->
- case json_body(Httpd) of
- {Props} -> {Props};
- _Else -> throw({bad_request, "Request body must be a JSON object"})
- end.
-
-doc_etag(#doc{id = Id, body = Body, revs = {Start, [DiskRev | _]}}) ->
- couch_httpd:doc_etag(Id, Body, {Start, DiskRev}).
-
-make_etag(Term) ->
- <<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
- list_to_binary(io_lib:format("\"~.36B\"", [SigInt])).
-
-etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
- etag_match(Req, binary_to_list(CurrentEtag));
-etag_match(Req, CurrentEtag) ->
- EtagsToMatch0 = string:tokens(
- chttpd:header_value(Req, "If-None-Match", ""), ", "
- ),
- EtagsToMatch = lists:map(fun strip_weak_prefix/1, EtagsToMatch0),
- lists:member(CurrentEtag, EtagsToMatch).
-
-strip_weak_prefix([$W, $/ | Etag]) ->
- Etag;
-strip_weak_prefix(Etag) ->
- Etag.
-
-etag_respond(Req, CurrentEtag, RespFun) ->
- case etag_match(Req, CurrentEtag) of
- true ->
- % the client has this in their cache.
- Headers = [{"ETag", CurrentEtag}],
- chttpd:send_response(Req, 304, Headers, <<>>);
- false ->
- % Run the function.
- RespFun()
- end.
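A short usage sketch (hypothetical handler code, not from the deleted file): compute an ETag for the response data and let etag_respond/3 short-circuit with a 304 when the client already holds it.

    send_json_with_etag(Req, Data) ->
        Etag = make_etag(Data),
        etag_respond(Req, Etag, fun() ->
            send_json(Req, 200, [{"ETag", Etag}], Data)
        end).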
-
-verify_is_server_admin(#httpd{user_ctx = #user_ctx{roles = Roles}}) ->
- case lists:member(<<"_admin">>, Roles) of
- true -> ok;
- false -> throw({unauthorized, <<"You are not a server admin.">>})
- end.
-
-start_response_length(#httpd{mochi_req = MochiReq} = Req, Code, Headers0, Length) ->
- Headers1 = basic_headers(Req, Headers0),
- Resp = handle_response(Req, Code, Headers1, Length, start_response_length),
- case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
- end,
- {ok, Resp}.
-
-send(Resp, Data) ->
- Resp:send(Data),
- {ok, Resp}.
-
-start_chunked_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
- Headers1 = basic_headers(Req, Headers0),
- Resp = handle_response(Req, Code, Headers1, chunked, respond),
- case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
- end,
- {ok, Resp}.
-
-send_chunk({remote, _Pid, _Ref} = Resp, Data) ->
- couch_httpd:send_chunk(Resp, Data);
-send_chunk(Resp, Data) ->
- Resp:write_chunk(Data),
- {ok, Resp}.
-
-send_response(Req, Code, Headers0, Body) ->
- Headers1 = [timing(), reqid() | Headers0],
- couch_httpd:send_response(Req, Code, Headers1, Body).
-
-send_response_no_cors(Req, Code, Headers0, Body) ->
- Headers1 = [timing(), reqid() | Headers0],
- couch_httpd:send_response_no_cors(Req, Code, Headers1, Body).
-
-send_method_not_allowed(Req, Methods) ->
- send_error(
- Req,
- 405,
- [{"Allow", Methods}],
- <<"method_not_allowed">>,
- ?l2b("Only " ++ Methods ++ " allowed"),
- []
- ).
-
-send_json(Req, Value) ->
- send_json(Req, 200, Value).
-
-send_json(Req, Code, Value) ->
- send_json(Req, Code, [], Value).
-
-send_json(Req, Code, Headers0, Value) ->
- Headers1 = [timing(), reqid() | Headers0],
- couch_httpd:send_json(Req, Code, Headers1, Value).
-
-start_json_response(Req, Code) ->
- start_json_response(Req, Code, []).
-
-start_json_response(Req, Code, Headers0) ->
- Headers1 = [timing(), reqid() | Headers0],
- couch_httpd:start_json_response(Req, Code, Headers1).
-
-end_json_response(Resp) ->
- couch_httpd:end_json_response(Resp).
-
-start_delayed_json_response(Req, Code) ->
- start_delayed_json_response(Req, Code, []).
-
-start_delayed_json_response(Req, Code, Headers) ->
- start_delayed_json_response(Req, Code, Headers, "").
-
-start_delayed_json_response(Req, Code, Headers, FirstChunk) ->
- {ok, #delayed_resp{
- start_fun = fun start_json_response/3,
- req = Req,
- code = Code,
- headers = Headers,
- chunks = [FirstChunk],
- buffer_response = buffer_response(Req)
- }}.
-
-start_delayed_chunked_response(Req, Code, Headers) ->
- start_delayed_chunked_response(Req, Code, Headers, "").
-
-start_delayed_chunked_response(Req, Code, Headers, FirstChunk) ->
- {ok, #delayed_resp{
- start_fun = fun start_chunked_response/3,
- req = Req,
- code = Code,
- headers = Headers,
- chunks = [FirstChunk],
- buffer_response = buffer_response(Req)
- }}.
-
-send_delayed_chunk(#delayed_resp{buffer_response = false} = DelayedResp, Chunk) ->
- {ok, #delayed_resp{resp = Resp} = DelayedResp1} =
- start_delayed_response(DelayedResp),
- {ok, Resp} = send_chunk(Resp, Chunk),
- {ok, DelayedResp1};
-send_delayed_chunk(#delayed_resp{buffer_response = true} = DelayedResp, Chunk) ->
- #delayed_resp{chunks = Chunks} = DelayedResp,
- {ok, DelayedResp#delayed_resp{chunks = [Chunk | Chunks]}}.
-
-send_delayed_last_chunk(Req) ->
- send_delayed_chunk(Req, []).
-
-send_delayed_error(#delayed_resp{req = Req, resp = nil} = DelayedResp, Reason) ->
- {Code, ErrorStr, ReasonStr} = error_info(Reason),
- {ok, Resp} = send_error(Req, Code, ErrorStr, ReasonStr),
- {ok, DelayedResp#delayed_resp{resp = Resp}};
-send_delayed_error(#delayed_resp{resp = Resp, req = Req}, Reason) ->
- update_timeout_stats(Reason, Req),
- log_error_with_stack_trace(Reason),
- throw({http_abort, Resp, Reason}).
-
-close_delayed_json_object(Resp, Buffer, Terminator, 0) ->
- % Use a separate chunk to close the streamed array to maintain strict
- % compatibility with earlier versions. See COUCHDB-2724
- {ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
- send_delayed_chunk(R1, Terminator);
-close_delayed_json_object(Resp, Buffer, Terminator, _Threshold) ->
- send_delayed_chunk(Resp, [Buffer | Terminator]).
-
-end_delayed_json_response(#delayed_resp{buffer_response = false} = DelayedResp) ->
- {ok, #delayed_resp{resp = Resp}} =
- start_delayed_response(DelayedResp),
- end_json_response(Resp);
-end_delayed_json_response(#delayed_resp{buffer_response = true} = DelayedResp) ->
- #delayed_resp{
- start_fun = StartFun,
- req = Req,
- code = Code,
- headers = Headers,
- chunks = Chunks
- } = DelayedResp,
- {ok, Resp} = StartFun(Req, Code, Headers),
- lists:foreach(
- fun
- ([]) -> ok;
- (Chunk) -> send_chunk(Resp, Chunk)
- end,
- lists:reverse(Chunks)
- ),
- end_json_response(Resp).
-
-get_delayed_req(#delayed_resp{req = #httpd{mochi_req = MochiReq}}) ->
- MochiReq;
-get_delayed_req(Resp) ->
- Resp:get(request).
-
-start_delayed_response(#delayed_resp{resp = nil} = DelayedResp) ->
- #delayed_resp{
- start_fun = StartFun,
- req = Req,
- code = Code,
- headers = Headers,
- chunks = [FirstChunk]
- } = DelayedResp,
- {ok, Resp} = StartFun(Req, Code, Headers),
- case FirstChunk of
- "" -> ok;
- _ -> {ok, Resp} = send_chunk(Resp, FirstChunk)
- end,
- {ok, DelayedResp#delayed_resp{resp = Resp}};
-start_delayed_response(#delayed_resp{} = DelayedResp) ->
- {ok, DelayedResp}.
-
-buffer_response(Req) ->
- case chttpd:qs_value(Req, "buffer_response") of
- "false" ->
- false;
- "true" ->
- true;
- _ ->
- config:get_boolean("chttpd", "buffer_response", false)
- end.
-
-error_info({Error, Reason}) when is_list(Reason) ->
- error_info({Error, couch_util:to_binary(Reason)});
-error_info(bad_request) ->
- {400, <<"bad_request">>, <<>>};
-error_info({bad_request, Reason}) ->
- {400, <<"bad_request">>, Reason};
-error_info({bad_request, Error, Reason}) ->
- {400, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
-error_info({query_parse_error, Reason}) ->
- {400, <<"query_parse_error">>, Reason};
-error_info(database_does_not_exist) ->
- {404, <<"not_found">>, <<"Database does not exist.">>};
-error_info(not_found) ->
- {404, <<"not_found">>, <<"missing">>};
-error_info({not_found, Reason}) ->
- {404, <<"not_found">>, Reason};
-error_info({filter_fetch_error, Reason}) ->
- {404, <<"not_found">>, Reason};
-error_info(ddoc_updated) ->
- {404, <<"not_found">>, <<"Design document was updated or deleted.">>};
-error_info({not_acceptable, Reason}) ->
- {406, <<"not_acceptable">>, Reason};
-error_info(conflict) ->
- {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({conflict, _}) ->
- {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({partition_overflow, DocId}) ->
- Descr = <<
- "Partition limit exceeded due to update on '", DocId/binary, "'"
- >>,
- {403, <<"partition_overflow">>, Descr};
-error_info({{not_found, missing}, {_, _}}) ->
- {409, <<"not_found">>, <<"missing_rev">>};
-error_info({forbidden, Error, Msg}) ->
- {403, Error, Msg};
-error_info({forbidden, Msg}) ->
- {403, <<"forbidden">>, Msg};
-error_info({unauthorized, Msg}) ->
- {401, <<"unauthorized">>, Msg};
-error_info(file_exists) ->
- {412, <<"file_exists">>, <<
- "The database could not be "
- "created, the file already exists."
- >>};
-error_info({error, {nodedown, Reason}}) ->
- {412, <<"nodedown">>, Reason};
-error_info({maintenance_mode, Node}) ->
- {412, <<"nodedown">>, Node};
-error_info({maintenance_mode, nil, Node}) ->
- {412, <<"nodedown">>, Node};
-error_info({w_quorum_not_met, Reason}) ->
- {500, <<"write_quorum_not_met">>, Reason};
-error_info(request_uri_too_long) ->
- {414, <<"too_long">>, <<"the request uri is too long">>};
-error_info({bad_ctype, Reason}) ->
- {415, <<"bad_content_type">>, Reason};
-error_info(requested_range_not_satisfiable) ->
- {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
-error_info({error, {illegal_database_name, Name}}) ->
- Message =
- <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
- "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
- "are allowed. Must begin with a letter.">>,
- {400, <<"illegal_database_name">>, Message};
-error_info({illegal_docid, Reason}) ->
- {400, <<"illegal_docid">>, Reason};
-error_info({illegal_partition, Reason}) ->
- {400, <<"illegal_partition">>, Reason};
-error_info({_DocID, {illegal_docid, DocID}}) ->
- {400, <<"illegal_docid">>, DocID};
-error_info({error, {database_name_too_long, DbName}}) ->
- {400, <<"database_name_too_long">>,
- <<"At least one path segment of `", DbName/binary, "` is too long.">>};
-error_info({doc_validation, Reason}) ->
- {400, <<"doc_validation">>, Reason};
-error_info({missing_stub, Reason}) ->
- {412, <<"missing_stub">>, Reason};
-error_info(request_entity_too_large) ->
- {413, <<"too_large">>, <<"the request entity is too large">>};
-error_info({request_entity_too_large, {attachment, AttName}}) ->
- {413, <<"attachment_too_large">>, AttName};
-error_info({request_entity_too_large, DocID}) ->
- {413, <<"document_too_large">>, DocID};
-error_info({error, security_migration_updates_disabled}) ->
- {503, <<"security_migration">>, <<
- "Updates to security docs are disabled during "
- "security migration."
- >>};
-error_info(all_workers_died) ->
- {503, <<"service unvailable">>, <<
- "Nodes are unable to service this "
- "request due to overloading or maintenance mode."
- >>};
-error_info(not_implemented) ->
- {501, <<"not_implemented">>, <<"this feature is not yet implemented">>};
-error_info(timeout) ->
- {500, <<"timeout">>, <<
- "The request could not be processed in a reasonable"
- " amount of time."
- >>};
-error_info({service_unavailable, Reason}) ->
- {503, <<"service unavailable">>, Reason};
-error_info({timeout, _Reason}) ->
- error_info(timeout);
-error_info({'EXIT', {Error, _Stack}}) ->
- error_info(Error);
-error_info({Error, null}) ->
- error_info(Error);
-error_info({_Error, _Reason} = Error) ->
- maybe_handle_error(Error);
-error_info({Error, nil, _Stack}) ->
- error_info(Error);
-error_info({Error, Reason, _Stack}) ->
- error_info({Error, Reason});
-error_info(Error) ->
- maybe_handle_error(Error).
-
-maybe_handle_error(Error) ->
- case chttpd_plugin:handle_error(Error) of
- {_Code, _Reason, _Description} = Result ->
- Result;
- {shutdown, Err} ->
- exit({shutdown, Err});
- {Err, Reason} ->
- {500, couch_util:to_binary(Err), couch_util:to_binary(Reason)};
- normal ->
- exit(normal);
- shutdown ->
- exit(shutdown);
- Err ->
- {500, <<"unknown_error">>, couch_util:to_binary(Err)}
- end.
-
-error_headers(#httpd{mochi_req = MochiReq} = Req, 401 = Code, ErrorStr, ReasonStr) ->
- % this is where the basic auth popup is triggered
- case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
- undefined ->
- case chttpd_util:get_chttpd_config("WWW-Authenticate") of
- undefined ->
- % If the client is a browser and the basic auth popup isn't turned on,
- % redirect to the session page.
- case ErrorStr of
- <<"unauthorized">> ->
- case
- chttpd_util:get_chttpd_auth_config(
- "authentication_redirect", "/_utils/session.html"
- )
- of
- undefined ->
- {Code, []};
- AuthRedirect ->
- case
- chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false
- )
- of
- true ->
- % always send the browser popup header when require_valid_user is set
- {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
- false ->
- case
- MochiReq:accepts_content_type("application/json")
- of
- true ->
- {Code, []};
- false ->
- case
- MochiReq:accepts_content_type("text/html")
- of
- true ->
- % Redirect to the path the user requested, not
- % the one that is used internally.
- UrlReturnRaw =
- case
- MochiReq:get_header_value(
- "x-couchdb-vhost-path"
- )
- of
- undefined ->
- MochiReq:get(path);
- VHostPath ->
- VHostPath
- end,
- RedirectLocation = lists:flatten([
- AuthRedirect,
- "?return=",
- couch_util:url_encode(UrlReturnRaw),
- "&reason=",
- couch_util:url_encode(ReasonStr)
- ]),
- {302, [
- {"Location",
- absolute_uri(
- Req, RedirectLocation
- )}
- ]};
- false ->
- {Code, []}
- end
- end
- end
- end;
- _Else ->
- {Code, []}
- end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
- end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
- end;
-error_headers(_, Code, _, _) ->
- {Code, []}.
-
-send_error(#httpd{} = Req, Error) ->
- update_timeout_stats(Error, Req),
-
- {Code, ErrorStr, ReasonStr} = error_info(Error),
- {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
- send_error(Req, Code1, Headers, ErrorStr, ReasonStr, json_stack(Error)).
-
-send_error(#httpd{} = Req, Code, ErrorStr, ReasonStr) ->
- update_timeout_stats(ErrorStr, Req),
- send_error(Req, Code, [], ErrorStr, ReasonStr, []).
-
-send_error(Req, Code, Headers, ErrorStr, ReasonStr, []) ->
- send_json(
- Req,
- Code,
- Headers,
- {[
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}
- ]}
- );
-send_error(Req, Code, Headers, ErrorStr, ReasonStr, Stack) ->
- log_error_with_stack_trace({ErrorStr, ReasonStr, Stack}),
- send_json(
- Req,
- Code,
- [stack_trace_id(Stack) | Headers],
- {[
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}
- | case Stack of
- [] -> [];
- _ -> [{<<"ref">>, stack_hash(Stack)}]
- end
- ]}
- ).
-
-update_timeout_stats(<<"timeout">>, #httpd{requested_path_parts = PathParts}) ->
- update_timeout_stats(PathParts);
-update_timeout_stats(timeout, #httpd{requested_path_parts = PathParts}) ->
- update_timeout_stats(PathParts);
-update_timeout_stats(_, _) ->
- ok.
-
-update_timeout_stats([
- _,
- <<"_partition">>,
- _,
- <<"_design">>,
- _,
- <<"_view">>
- | _
-]) ->
- couch_stats:increment_counter([couchdb, httpd, partition_view_timeouts]);
-update_timeout_stats([_, <<"_partition">>, _, <<"_find">> | _]) ->
- couch_stats:increment_counter([couchdb, httpd, partition_find_timeouts]);
-update_timeout_stats([_, <<"_partition">>, _, <<"_explain">> | _]) ->
- couch_stats:increment_counter([couchdb, httpd, partition_explain_timeouts]);
-update_timeout_stats([_, <<"_partition">>, _, <<"_all_docs">> | _]) ->
- couch_stats:increment_counter([couchdb, httpd, partition_all_docs_timeouts]);
-update_timeout_stats([_, <<"_design">>, _, <<"_view">> | _]) ->
- couch_stats:increment_counter([couchdb, httpd, view_timeouts]);
-update_timeout_stats([_, <<"_find">> | _]) ->
- couch_stats:increment_counter([couchdb, httpd, find_timeouts]);
-update_timeout_stats([_, <<"_explain">> | _]) ->
- couch_stats:increment_counter([couchdb, httpd, explain_timeouts]);
-update_timeout_stats([_, <<"_all_docs">> | _]) ->
- couch_stats:increment_counter([couchdb, httpd, all_docs_timeouts]);
-update_timeout_stats(_) ->
- ok.
-
-% give list functions the option to output HTML or other raw errors
-send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
- send_chunk(Resp, Reason),
- send_chunk(Resp, []);
-send_chunked_error(Resp, Error) ->
- Stack = json_stack(Error),
- log_error_with_stack_trace(Error),
- {Code, ErrorStr, ReasonStr} = error_info(Error),
- JsonError =
- {[
- {<<"code">>, Code},
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}
- | case Stack of
- [] -> [];
- _ -> [{<<"ref">>, stack_hash(Stack)}]
- end
- ]},
- send_chunk(Resp, ?l2b([$\n, ?JSON_ENCODE(JsonError), $\n])),
- send_chunk(Resp, []).
-
-send_redirect(Req, Path) ->
- Headers = [{"Location", chttpd:absolute_uri(Req, Path)}],
- send_response(Req, 301, Headers, <<>>).
-
-server_header() ->
- couch_httpd:server_header().
-
-timing() ->
- case get(body_time) of
- undefined ->
- {"X-CouchDB-Body-Time", "0"};
- Time ->
- {"X-CouchDB-Body-Time", integer_to_list(Time)}
- end.
-
-reqid() ->
- {"X-Couch-Request-ID", get(nonce)}.
-
-json_stack({bad_request, _, _}) ->
- [];
-json_stack({_Error, _Reason, Stack}) when is_list(Stack) ->
- lists:map(fun json_stack_item/1, Stack);
-json_stack(_) ->
- [].
-
-json_stack_item({M, F, A}) ->
- list_to_binary(io_lib:format("~s:~s/~B", [M, F, json_stack_arity(A)]));
-json_stack_item({M, F, A, L}) ->
- case proplists:get_value(line, L) of
- undefined ->
- json_stack_item({M, F, A});
- Line ->
- list_to_binary(
- io_lib:format(
- "~s:~s/~B L~B",
- [M, F, json_stack_arity(A), Line]
- )
- )
- end;
-json_stack_item(_) ->
- <<"bad entry in stacktrace">>.
-
-json_stack_arity(A) ->
- if
- is_integer(A) -> A;
- is_list(A) -> length(A);
- true -> 0
- end.
-
-maybe_decompress(Httpd, Body) ->
- case header_value(Httpd, "Content-Encoding", "identity") of
- "gzip" ->
- try
- zlib:gunzip(Body)
- catch
- error:data_error ->
- throw({bad_request, "Request body is not properly gzipped."})
- end;
- "identity" ->
- Body;
- Else ->
- throw({bad_ctype, [Else, " is not a supported content encoding."]})
- end.
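A hedged client-side sketch (not part of the deleted module; Url and Json are caller-supplied, and it assumes OTP's inets application is running): a gzip-compressed body must arrive with a Content-Encoding: gzip header, otherwise the content coding is rejected as above.

    put_gzipped_doc(Url, Json) when is_list(Url), is_binary(Json) ->
        Body = zlib:gzip(Json),
        Headers = [{"Content-Encoding", "gzip"}],
        httpc:request(put, {Url, Headers, "application/json", Body}, [], []).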
-
-log_error_with_stack_trace({bad_request, _, _}) ->
- ok;
-log_error_with_stack_trace({Error, Reason, Stack}) ->
- EFmt =
- if
- is_binary(Error) -> "~s";
- true -> "~w"
- end,
- RFmt =
- if
- is_binary(Reason) -> "~s";
- true -> "~w"
- end,
- Fmt = "req_err(~w) " ++ EFmt ++ " : " ++ RFmt ++ "~n ~p",
- couch_log:error(Fmt, [stack_hash(Stack), Error, Reason, Stack]);
-log_error_with_stack_trace(_) ->
- ok.
-
-stack_trace_id(Stack) ->
- {"X-Couch-Stack-Hash", stack_hash(Stack)}.
-
-stack_hash(Stack) ->
- erlang:crc32(term_to_binary(Stack)).
-
-%% @doc CouchDB uses a chunked transfer-encoding to stream responses to
-%% _all_docs, _changes, _view and other similar requests. This configuration
-%% value sets the maximum size of a chunk; the system will buffer rows in the
-%% response until it reaches this threshold and then send all the rows in one
-%% chunk to improve network efficiency. The default value is chosen so that
-%% the assembled chunk fits into the default Ethernet frame size (some reserved
-%% padding is necessary to accommodate the reporting of the chunk length). Set
-%% this value to 0 to restore the older behavior of sending each row in a
-%% dedicated chunk.
-chunked_response_buffer_size() ->
- chttpd_util:get_chttpd_config_integer("chunked_response_buffer", 1490).
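A brief sketch of how the threshold is meant to be consumed (hypothetical caller; the terminator string is an assumption; close_delayed_json_object/4 is defined earlier in this module): with a threshold of 0 the terminator goes out in its own chunk, otherwise it is appended to the buffered rows.

    finish_streamed_rows(Resp, Buffer) ->
        Threshold = chunked_response_buffer_size(),
        %% "\r\n]}" is an assumed terminator for a streamed JSON response object.
        close_delayed_json_object(Resp, Buffer, <<"\r\n]}">>, Threshold).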
-
-basic_headers(Req, Headers0) ->
- Headers =
- Headers0 ++
- server_header() ++
- couch_httpd_auth:cookie_auth_header(Req, Headers0),
- Headers1 = chttpd_cors:headers(Req, Headers),
- Headers2 = chttpd_xframe_options:header(Req, Headers1),
- Headers3 = [reqid(), timing() | Headers2],
- chttpd_prefer_header:maybe_return_minimal(Req, Headers3).
-
-handle_response(Req0, Code0, Headers0, Args0, Type) ->
- {ok, {Req1, Code1, Headers1, Args1}} =
- chttpd_plugin:before_response(Req0, Code0, Headers0, Args0),
- couch_stats:increment_counter([couchdb, httpd_status_codes, Code1]),
- respond_(Req1, Code1, Headers1, Args1, Type).
-
-respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response) ->
- MochiReq:start_response({Code, Headers});
-respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
- MochiReq:Type({Code, Headers, Args}).
-
-get_user(#httpd{user_ctx = #user_ctx{name = null}}) ->
- % admin party
- "undefined";
-get_user(#httpd{user_ctx = #user_ctx{name = User}}) ->
- couch_util:url_encode(User);
-get_user(#httpd{user_ctx = undefined}) ->
- "undefined".
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-check_url_encoding_pass_test_() ->
- [
- ?_assertEqual(ok, check_url_encoding("/dbname")),
- ?_assertEqual(ok, check_url_encoding("/dbname/doc_id")),
- ?_assertEqual(ok, check_url_encoding("/dbname/doc_id?rev=1-abcdefgh")),
- ?_assertEqual(ok, check_url_encoding("/dbname%25")),
- ?_assertEqual(ok, check_url_encoding("/dbname/doc_id%25")),
- ?_assertEqual(ok, check_url_encoding("/dbname%25%3a")),
- ?_assertEqual(ok, check_url_encoding("/dbname/doc_id%25%3a")),
- ?_assertEqual(ok, check_url_encoding("/user%2Fdbname")),
- ?_assertEqual(ok, check_url_encoding("/user%2Fdbname/doc_id")),
- ?_assertEqual(ok, check_url_encoding("/dbname/escaped%25doc_id")),
- ?_assertEqual(ok, check_url_encoding("/dbname/doc%2eid")),
- ?_assertEqual(ok, check_url_encoding("/dbname/doc%2Eid")),
- ?_assertEqual(ok, check_url_encoding("/dbname-with-dash")),
- ?_assertEqual(ok, check_url_encoding("/dbname/doc_id-with-dash"))
- ].
-
-check_url_encoding_fail_test_() ->
- [
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%?rev=1-abcdefgh")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%2")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/user%2Fdbname%")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/user%2Fdbname/doc_id%")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("%")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/%")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/%2")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2%3A")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%%3Ae")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2g")
- ),
- ?_assertThrow(
- {bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%g2")
- )
- ].
-
-log_format_test() ->
- ?assertEqual(
- "127.0.0.1:15984 127.0.0.1 undefined "
- "GET /_cluster_setup 201 ok 10000",
- test_log_request("/_cluster_setup", undefined)
- ),
- ?assertEqual(
- "127.0.0.1:15984 127.0.0.1 user_foo "
- "GET /_all_dbs 201 ok 10000",
- test_log_request("/_all_dbs", #user_ctx{name = <<"user_foo">>})
- ),
-
- %% Utf8Name = unicode:characters_to_binary(Something),
- Utf8User = <<227, 130, 136, 227, 129, 134, 227, 129, 147, 227, 129, 157>>,
- ?assertEqual(
- "127.0.0.1:15984 127.0.0.1 %E3%82%88%E3%81%86%E3%81%93%E3%81%9D "
- "GET /_all_dbs 201 ok 10000",
- test_log_request("/_all_dbs", #user_ctx{name = Utf8User})
- ),
- ok.
-
-test_log_request(RawPath, UserCtx) ->
- Headers = mochiweb_headers:make([{"HOST", "127.0.0.1:15984"}]),
- MochiReq = mochiweb_request:new(socket, [], 'POST', RawPath, version, Headers),
- Req = #httpd{
- mochi_req = MochiReq,
- begin_ts = {1458, 588713, 124003},
- original_method = 'GET',
- peer = "127.0.0.1",
- nonce = "nonce",
- user_ctx = UserCtx
- },
- Resp = #httpd_resp{
- end_ts = {1458, 588723, 124303},
- code = 201,
- status = ok
- },
- ok = meck:new(couch_log, [passthrough]),
- ok = meck:expect(couch_log, notice, fun(Format, Args) ->
- lists:flatten(io_lib:format(Format, Args))
- end),
- Message = maybe_log(Req, Resp),
- ok = meck:unload(couch_log),
- Message.
-
-handle_req_after_auth_test() ->
- Headers = mochiweb_headers:make([{"HOST", "127.0.0.1:15984"}]),
- MochiReq = mochiweb_request:new(
- socket,
- [],
- 'PUT',
- "/newdb",
- version,
- Headers
- ),
- UserCtx = #user_ctx{name = <<"retain_user">>},
- Roles = [<<"_reader">>],
- AuthorizedCtx = #user_ctx{name = <<"retain_user">>, roles = Roles},
- Req = #httpd{
- mochi_req = MochiReq,
- begin_ts = {1458, 588713, 124003},
- original_method = 'PUT',
- peer = "127.0.0.1",
- nonce = "nonce",
- user_ctx = UserCtx
- },
- AuthorizedReq = Req#httpd{user_ctx = AuthorizedCtx},
- ok = meck:new(chttpd_handlers, [passthrough]),
- ok = meck:new(chttpd_auth, [passthrough]),
- ok = meck:expect(chttpd_handlers, url_handler, fun(_Key, _Fun) ->
- fun(_Req) -> handled_authorized_req end
- end),
- ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
- AuthorizedReq
- end),
- ?assertEqual(
- {AuthorizedReq, handled_authorized_req},
- handle_req_after_auth(foo_key, Req)
- ),
- ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
- meck:exception(throw, {http_abort, resp, some_reason})
- end),
- ?assertEqual(
- {Req, {aborted, resp, some_reason}},
- handle_req_after_auth(foo_key, Req)
- ),
- ok = meck:unload(chttpd_handlers),
- ok = meck:unload(chttpd_auth).
-
--endif.
diff --git a/src/chttpd/src/chttpd_app.erl b/src/chttpd/src/chttpd_app.erl
deleted file mode 100644
index d7a5aef86..000000000
--- a/src/chttpd/src/chttpd_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, StartArgs) ->
- chttpd_sup:start_link(StartArgs).
-
-stop(_State) ->
- ok.
diff --git a/src/chttpd/src/chttpd_auth.erl b/src/chttpd/src/chttpd_auth.erl
deleted file mode 100644
index 20b5a05f1..000000000
--- a/src/chttpd/src/chttpd_auth.erl
+++ /dev/null
@@ -1,98 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_auth).
-
--export([authenticate/2]).
--export([authorize/2]).
-
--export([default_authentication_handler/1]).
--export([cookie_authentication_handler/1]).
--export([proxy_authentication_handler/1]).
--export([jwt_authentication_handler/1]).
--export([party_mode_handler/1]).
-
--export([handle_session_req/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SERVICE_ID, chttpd_auth).
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-authenticate(HttpReq, Default) ->
- maybe_handle(authenticate, [HttpReq], Default).
-
-authorize(HttpReq, Default) ->
- maybe_handle(authorize, [HttpReq], Default).
-
-%% ------------------------------------------------------------------
-%% Default callbacks
-%% ------------------------------------------------------------------
-
-default_authentication_handler(Req) ->
- couch_httpd_auth:default_authentication_handler(Req, chttpd_auth_cache).
-
-cookie_authentication_handler(Req) ->
- couch_httpd_auth:cookie_authentication_handler(Req, chttpd_auth_cache).
-
-proxy_authentication_handler(Req) ->
- couch_httpd_auth:proxy_authentication_handler(Req).
-
-jwt_authentication_handler(Req) ->
- couch_httpd_auth:jwt_authentication_handler(Req).
-
-party_mode_handler(#httpd{method = 'POST', path_parts = [<<"_session">>]} = Req) ->
- % See #1947 - users should always be able to attempt a login
- Req#httpd{user_ctx = #user_ctx{}};
-party_mode_handler(#httpd{path_parts = [<<"_up">>]} = Req) ->
- RequireValidUser = config:get_boolean("chttpd", "require_valid_user", false),
- RequireValidUserExceptUp = config:get_boolean(
- "chttpd", "require_valid_user_except_for_up", false
- ),
- require_valid_user(Req, RequireValidUser andalso not RequireValidUserExceptUp);
-party_mode_handler(Req) ->
- RequireValidUser = config:get_boolean("chttpd", "require_valid_user", false),
- RequireValidUserExceptUp = config:get_boolean(
- "chttpd", "require_valid_user_except_for_up", false
- ),
- require_valid_user(Req, RequireValidUser orelse RequireValidUserExceptUp).
-
-require_valid_user(_Req, true) ->
- throw({unauthorized, <<"Authentication required.">>});
-require_valid_user(Req, false) ->
- case config:get("admins") of
- [] ->
- Req#httpd{user_ctx = ?ADMIN_USER};
- _ ->
- Req#httpd{user_ctx = #user_ctx{}}
- end.
-
-handle_session_req(Req) ->
- couch_httpd_auth:handle_session_req(Req, chttpd_auth_cache).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-maybe_handle(Func, Args, Default) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
- no_decision when is_function(Default) ->
- apply(Default, Args);
- no_decision ->
- Default;
- {decided, Result} ->
- Result
- end.
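-
- %% Illustrative sketch, not from the original file: how the couch_epi
- %% dispatch in maybe_handle/3 is typically exercised. If no plugin
- %% registered under ?SERVICE_ID returns a decision, the caller-supplied
- %% default is applied to the same arguments, e.g.:
- %%   authenticate(Req, fun ?MODULE:default_authentication_handler/1),
- %%   authorize(Req, fun(R) -> R end).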
diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
deleted file mode 100644
index 2173eca95..000000000
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ /dev/null
@@ -1,267 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_auth_cache).
--behaviour(gen_server).
-
--export([start_link/0, get_user_creds/2, update_user_creds/3, dbname/0]).
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
--export([listen_for_changes/1, changes_callback/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_js_functions.hrl").
-
--define(CACHE, chttpd_auth_cache_lru).
-
--record(state, {
- changes_pid,
- last_seq = "0"
-}).
-
-%% public functions
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_user_creds(Req, UserName) when is_list(UserName) ->
- get_user_creds(Req, ?l2b(UserName));
-get_user_creds(_Req, UserName) when is_binary(UserName) ->
- Resp =
- case couch_auth_cache:get_admin(UserName) of
- nil ->
- get_from_cache(UserName);
- Props ->
- case get_from_cache(UserName) of
- nil ->
- Props;
- UserProps when is_list(UserProps) ->
- couch_auth_cache:add_roles(
- Props,
- couch_util:get_value(<<"roles">>, UserProps)
- )
- end
- end,
- maybe_validate_user_creds(Resp).
-
-update_user_creds(_Req, UserDoc, _Ctx) ->
- {_, Ref} = spawn_monitor(fun() ->
- case fabric:update_doc(dbname(), UserDoc, []) of
- {ok, _} ->
- exit(ok);
- Else ->
- exit(Else)
- end
- end),
- receive
- {'DOWN', Ref, _, _, ok} ->
- ok;
- {'DOWN', Ref, _, _, Else} ->
- Else
- end.
-
-get_from_cache(UserName) ->
- try ets_lru:lookup_d(?CACHE, UserName) of
- {ok, Props} ->
- couch_stats:increment_counter([couchdb, auth_cache_hits]),
- couch_log:debug("cache hit for ~s", [UserName]),
- Props;
- _ ->
- maybe_increment_auth_cache_miss(UserName),
- case load_user_from_db(UserName) of
- nil ->
- nil;
- Props ->
- ets_lru:insert(?CACHE, UserName, Props),
- Props
- end
- catch
- error:badarg ->
- maybe_increment_auth_cache_miss(UserName),
- load_user_from_db(UserName)
- end.
-
-maybe_increment_auth_cache_miss(UserName) ->
- Admins = config:get("admins"),
- case lists:keymember(?b2l(UserName), 1, Admins) of
- false ->
- couch_stats:increment_counter([couchdb, auth_cache_misses]),
- couch_log:debug("cache miss for ~s", [UserName]);
- _True ->
- ok
- end.
-
-%% gen_server callbacks
-
-init([]) ->
- self() ! {start_listener, 0},
- {ok, #state{}}.
-
-handle_call(_Call, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid = Pid} = State) ->
- Seq =
- case Reason of
- {seq, EndSeq} ->
- EndSeq;
- {database_does_not_exist, _} ->
- couch_log:notice(
- "~p changes listener died because the _users database does not exist. Create the database to silence this notice.",
- [?MODULE]
- ),
- 0;
- _ ->
- couch_log:notice("~p changes listener died ~r", [?MODULE, Reason]),
- 0
- end,
- erlang:send_after(5000, self(), {start_listener, Seq}),
- {noreply, State#state{last_seq = Seq}};
-handle_info({start_listener, Seq}, State) ->
- {noreply, State#state{changes_pid = spawn_changes(Seq)}};
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-terminate(_Reason, #state{changes_pid = Pid}) when is_pid(Pid) ->
- exit(Pid, kill);
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, #state{} = State, _Extra) ->
- {ok, State}.
-
-%% private functions
-
-spawn_changes(Since) ->
- {Pid, _} = spawn_monitor(?MODULE, listen_for_changes, [Since]),
- Pid.
-
-listen_for_changes(Since) ->
- ensure_auth_ddoc_exists(dbname(), <<"_design/_auth">>),
- CBFun = fun ?MODULE:changes_callback/2,
- Args = #changes_args{
- feed = "continuous",
- since = Since,
- heartbeat = true,
- filter = {default, main_only}
- },
- fabric:changes(dbname(), CBFun, Since, Args).
-
-changes_callback(waiting_for_updates, Acc) ->
- {ok, Acc};
-changes_callback(start, Since) ->
- {ok, Since};
-changes_callback({stop, EndSeq, _Pending}, _) ->
- exit({seq, EndSeq});
-changes_callback({change, {Change}}, _) ->
- case couch_util:get_value(id, Change) of
- <<"_design/", _/binary>> ->
- ok;
- DocId ->
- UserName = username(DocId),
- couch_log:debug("Invalidating cached credentials for ~s", [UserName]),
- ets_lru:remove(?CACHE, UserName)
- end,
- {ok, couch_util:get_value(seq, Change)};
-changes_callback(timeout, Acc) ->
- {ok, Acc};
-changes_callback({error, _}, EndSeq) ->
- exit({seq, EndSeq}).
-
-load_user_from_db(UserName) ->
- try fabric:open_doc(dbname(), docid(UserName), [?ADMIN_CTX, ejson_body, conflicts]) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- Props;
- _Else ->
- couch_log:debug("no record of user ~s", [UserName]),
- nil
- catch
- error:database_does_not_exist ->
- nil
- end.
-
-dbname() ->
- config:get("chttpd_auth", "authentication_db", "_users").
-
-docid(UserName) ->
- <<"org.couchdb.user:", UserName/binary>>.
-
-username(<<"org.couchdb.user:", UserName/binary>>) ->
- UserName.
-
-ensure_auth_ddoc_exists(DbName, DDocId) ->
- case fabric:open_doc(DbName, DDocId, [?ADMIN_CTX, ejson_body]) of
- {not_found, _Reason} ->
- {ok, AuthDesign} = couch_auth_cache:auth_design_doc(DDocId),
- update_doc_ignoring_conflict(DbName, AuthDesign, [?ADMIN_CTX]);
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
- ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
- ok;
- _ ->
- Props1 = lists:keyreplace(
- <<"validate_doc_update">>,
- 1,
- Props,
- {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
- ),
- update_doc_ignoring_conflict(DbName, couch_doc:from_json_obj({Props1}), [
- ?ADMIN_CTX
- ])
- end;
- {error, Reason} ->
- couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [
- DbName, DDocId, Reason
- ]),
- ok
- end,
- ok.
-
-update_doc_ignoring_conflict(DbName, Doc, Options) ->
- try
- fabric:update_doc(DbName, Doc, Options)
- catch
- throw:conflict ->
- ok
- end.
-
-maybe_validate_user_creds(nil) ->
- nil;
- % throws if UserCreds includes a _conflicts member
- % returns {ok, UserCreds, nil} otherwise
-maybe_validate_user_creds(UserCreds) ->
- AllowConflictedUserDocs = config:get_boolean(
- "chttpd_auth", "allow_conflicted_user_docs", false
- ),
- case {couch_util:get_value(<<"_conflicts">>, UserCreds), AllowConflictedUserDocs} of
- {undefined, _} ->
- {ok, UserCreds, nil};
- {_, true} ->
- {ok, UserCreds, nil};
- {_ConflictList, false} ->
- throw(
- {unauthorized,
- <<"User document conflicts must be resolved before the document",
- " is used for authentication purposes.">>}
- )
- end.
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
deleted file mode 100644
index 301cf8e7d..000000000
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ /dev/null
@@ -1,156 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_auth_request).
--export([authorize_request/1]).
--include_lib("couch/include/couch_db.hrl").
-
-authorize_request(#httpd{auth = Auth, user_ctx = Ctx} = Req) ->
- try
- authorize_request_int(Req)
- catch
- throw:{forbidden, Msg} ->
- case {Auth, Ctx} of
- {{cookie_auth_failed, {Error, Reason}}, _} ->
- throw({forbidden, {Error, Reason}});
- {_, #user_ctx{name = null}} ->
- throw({unauthorized, Msg});
- {_, _} ->
- throw({forbidden, Msg})
- end
- end.
-
-authorize_request_int(#httpd{path_parts = []} = Req) ->
- Req;
-authorize_request_int(#httpd{path_parts = [<<"favicon.ico">> | _]} = Req) ->
- Req;
-authorize_request_int(#httpd{path_parts = [<<"_all_dbs">> | _]} = Req) ->
- case config:get_boolean("chttpd", "admin_only_all_dbs", true) of
- true -> require_admin(Req);
- false -> Req
- end;
-authorize_request_int(#httpd{path_parts = [<<"_dbs_info">> | _]} = Req) ->
- Req;
-authorize_request_int(#httpd{path_parts = [<<"_replicator">>], method = 'PUT'} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_replicator">>], method = 'DELETE'} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_replicator">>, <<"_all_docs">> | _]} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_replicator">>, <<"_changes">> | _]} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_replicator">> | _]} = Req) ->
- db_authorization_check(Req);
-authorize_request_int(#httpd{path_parts = [<<"_reshard">> | _]} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_users">>], method = 'PUT'} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_users">>], method = 'DELETE'} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_users">>, <<"_all_docs">> | _]} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_users">>, <<"_changes">> | _]} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [<<"_users">> | _]} = Req) ->
- db_authorization_check(Req);
-authorize_request_int(#httpd{path_parts = [<<"_", _/binary>> | _]} = Req) ->
- server_authorization_check(Req);
-authorize_request_int(#httpd{path_parts = [_DbName], method = 'PUT'} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [_DbName], method = 'DELETE'} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [_DbName, <<"_compact">> | _]} = Req) ->
- require_db_admin(Req);
-authorize_request_int(#httpd{path_parts = [_DbName, <<"_view_cleanup">>]} = Req) ->
- require_db_admin(Req);
-authorize_request_int(#httpd{path_parts = [_DbName, <<"_sync_shards">>]} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [_DbName, <<"_purge">>]} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [_DbName, <<"_purged_infos_limit">>]} = Req) ->
- require_admin(Req);
-authorize_request_int(#httpd{path_parts = [_DbName | _]} = Req) ->
- db_authorization_check(Req).
-
-server_authorization_check(#httpd{path_parts = [<<"_up">>]} = Req) ->
- Req;
-server_authorization_check(#httpd{path_parts = [<<"_uuids">>]} = Req) ->
- Req;
-server_authorization_check(#httpd{path_parts = [<<"_session">>]} = Req) ->
- Req;
-server_authorization_check(#httpd{path_parts = [<<"_replicate">>]} = Req) ->
- Req;
-server_authorization_check(#httpd{path_parts = [<<"_stats">>]} = Req) ->
- Req;
-server_authorization_check(#httpd{path_parts = [<<"_active_tasks">>]} = Req) ->
- Req;
-server_authorization_check(#httpd{path_parts = [<<"_dbs_info">>]} = Req) ->
- Req;
-server_authorization_check(#httpd{method = Method, path_parts = [<<"_utils">> | _]} = Req) when
- Method =:= 'HEAD' orelse Method =:= 'GET'
-->
- Req;
-server_authorization_check(#httpd{path_parts = [<<"_node">>, _, <<"_stats">> | _]} = Req) ->
- require_metrics(Req);
-server_authorization_check(#httpd{path_parts = [<<"_node">>, _, <<"_system">> | _]} = Req) ->
- require_metrics(Req);
-server_authorization_check(#httpd{path_parts = [<<"_node">>, _, <<"_prometheus">> | _]} = Req) ->
- require_metrics(Req);
-server_authorization_check(#httpd{path_parts = [<<"_", _/binary>> | _]} = Req) ->
- require_admin(Req).
-
-db_authorization_check(#httpd{path_parts = [DbName | _], user_ctx = Ctx} = Req) ->
- {_} = fabric:get_security(DbName, [{user_ctx, Ctx}]),
- Req.
-
-require_metrics(#httpd{user_ctx = #user_ctx{roles = UserRoles}} = Req) ->
- IsAdmin = lists:member(<<"_admin">>, UserRoles),
- IsMetrics = lists:member(<<"_metrics">>, UserRoles),
- case {IsAdmin, IsMetrics} of
- {true, _} -> Req;
- {_, true} -> Req;
- _ -> throw({unauthorized, <<"You are not a server admin or read-only metrics user">>})
- end.
-
-require_admin(Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- Req.
-
-require_db_admin(#httpd{path_parts = [DbName | _], user_ctx = Ctx} = Req) ->
- Sec = fabric:get_security(DbName, [{user_ctx, Ctx}]),
-
- case is_db_admin(Ctx, Sec) of
- true -> Req;
- false -> throw({unauthorized, <<"You are not a server or db admin.">>})
- end.
-
-is_db_admin(#user_ctx{name = UserName, roles = UserRoles}, {Security}) ->
- {Admins} = couch_util:get_value(<<"admins">>, Security, {[]}),
- Names = couch_util:get_value(<<"names">>, Admins, []),
- Roles = couch_util:get_value(<<"roles">>, Admins, []),
- case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
- true -> true;
- false -> check_security(names, UserName, Names)
- end.
-
-check_security(roles, [], _) ->
- false;
-check_security(roles, UserRoles, Roles) ->
- UserRolesSet = ordsets:from_list(UserRoles),
- RolesSet = ordsets:from_list(Roles),
- not ordsets:is_disjoint(UserRolesSet, RolesSet);
-check_security(names, _, []) ->
- false;
-check_security(names, null, _) ->
- false;
-check_security(names, UserName, Names) ->
- lists:member(UserName, Names).
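-
- %% Worked example, illustrative and not part of the original module: with
- %% a security object whose admins section is {[{<<"roles">>, [<<"dev">>]}]},
- %% is_db_admin/2 grants access when the user holds the implicit <<"_admin">>
- %% role, any configured admin role, or a listed admin name:
- %%   check_security(roles, [<<"dev">>], [<<"_admin">>, <<"dev">>])    -> true
- %%   check_security(roles, [<<"reader">>], [<<"_admin">>, <<"dev">>]) -> false
- %%   check_security(names, <<"alice">>, [<<"alice">>])                -> true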
diff --git a/src/chttpd/src/chttpd_cors.erl b/src/chttpd/src/chttpd_cors.erl
deleted file mode 100644
index 70d3163ec..000000000
--- a/src/chttpd/src/chttpd_cors.erl
+++ /dev/null
@@ -1,414 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_cors).
-
--export([
- maybe_handle_preflight_request/1,
- maybe_handle_preflight_request/2,
- headers/2,
- headers/4
-]).
--export([
- is_cors_enabled/1,
- get_cors_config/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("chttpd/include/chttpd_cors.hrl").
-
-%% http://www.w3.org/TR/cors/#resource-preflight-requests
-
-maybe_handle_preflight_request(#httpd{method = Method}) when Method /= 'OPTIONS' ->
- not_preflight;
-maybe_handle_preflight_request(Req) ->
- case maybe_handle_preflight_request(Req, get_cors_config(Req)) of
- not_preflight ->
- not_preflight;
- {ok, PreflightHeaders} ->
- chttpd:send_response_no_cors(Req, 204, PreflightHeaders, <<>>)
- end.
-
-maybe_handle_preflight_request(#httpd{} = Req, Config) ->
- case is_cors_enabled(Config) of
- true ->
- case preflight_request(Req, Config) of
- {ok, PreflightHeaders} ->
- {ok, PreflightHeaders};
- not_preflight ->
- not_preflight;
- UnknownError ->
- couch_log:error(
- "Unknown response of chttpd_cors:preflight_request(~p): ~p",
- [Req, UnknownError]
- ),
- not_preflight
- end;
- false ->
- not_preflight
- end.
-
-preflight_request(Req, Config) ->
- case get_origin(Req) of
- undefined ->
- %% If the Origin header is not present terminate this set of
- %% steps. The request is outside the scope of this specification.
- %% http://www.w3.org/TR/cors/#resource-preflight-requests
- not_preflight;
- Origin ->
- AcceptedOrigins = get_accepted_origins(Req, Config),
- AcceptAll = lists:member(<<"*">>, AcceptedOrigins),
-
- HandlerFun = fun() ->
- handle_preflight_request(Req, Config, Origin)
- end,
-
- %% We either need to accept all origins or have it listed
- %% in our origins. Origin can only contain a single origin
- %% as the user agent will not follow redirects [1]. If the
- %% value of the Origin header is not a case-sensitive
- %% match for any of the values in list of origins do not
- %% set any additional headers and terminate this set
- %% of steps [1].
- %%
- %% [1]: http://www.w3.org/TR/cors/#resource-preflight-requests
- %%
- %% TODO: Square against multi origin Security Considerations and the
- %% Vary header
- %%
- case AcceptAll orelse lists:member(Origin, AcceptedOrigins) of
- true -> HandlerFun();
- false -> not_preflight
- end
- end.
-
-handle_preflight_request(Req, Config, Origin) ->
- case chttpd:header_value(Req, "Access-Control-Request-Method") of
- undefined ->
- %% If there is no Access-Control-Request-Method header
- %% or if parsing failed, do not set any additional headers
- %% and terminate this set of steps. The request is outside
- %% the scope of this specification.
- %% http://www.w3.org/TR/cors/#resource-preflight-requests
- not_preflight;
- Method ->
- SupportedMethods = get_origin_config(
- Config,
- Origin,
- <<"allow_methods">>,
- ?SUPPORTED_METHODS
- ),
-
- SupportedHeaders = get_origin_config(
- Config,
- Origin,
- <<"allow_headers">>,
- ?SUPPORTED_HEADERS
- ),
-
- %% get max age
- MaxAge = couch_util:get_value(
- <<"max_age">>,
- Config,
- ?CORS_DEFAULT_MAX_AGE
- ),
-
- PreflightHeaders0 = maybe_add_credentials(Config, Origin, [
- {"Access-Control-Allow-Origin", binary_to_list(Origin)},
- {"Access-Control-Max-Age", MaxAge},
- {"Access-Control-Allow-Methods", string:join(SupportedMethods, ", ")}
- ]),
-
- case lists:member(Method, SupportedMethods) of
- true ->
- %% method ok, check headers
- AccessHeaders = chttpd:header_value(
- Req,
- "Access-Control-Request-Headers"
- ),
- {FinalReqHeaders, ReqHeaders} =
- case AccessHeaders of
- undefined ->
- {"", []};
- "" ->
- {"", []};
- Headers ->
- %% transform the header list into something we
- %% can check; make sure everything is a list
- RH = [
- to_lower(H)
- || H <- split_headers(Headers)
- ],
- {Headers, RH}
- end,
- %% check if headers are supported
- case ReqHeaders -- SupportedHeaders of
- [] ->
- PreflightHeaders =
- PreflightHeaders0 ++
- [{"Access-Control-Allow-Headers", FinalReqHeaders}],
- {ok, PreflightHeaders};
- _ ->
- not_preflight
- end;
- false ->
- %% If method is not a case-sensitive match for any of
- %% the values in list of methods do not set any additional
- %% headers and terminate this set of steps.
- %% http://www.w3.org/TR/cors/#resource-preflight-requests
- not_preflight
- end
- end.
-
-headers(Req, RequestHeaders) ->
- case get_origin(Req) of
- undefined ->
- %% If the Origin header is not present terminate
- %% this set of steps. The request is outside the scope
- %% of this specification.
- %% http://www.w3.org/TR/cors/#resource-processing-model
- RequestHeaders;
- Origin ->
- headers(Req, RequestHeaders, Origin, get_cors_config(Req))
- end.
-
-headers(_Req, RequestHeaders, undefined, _Config) ->
- RequestHeaders;
-headers(Req, RequestHeaders, Origin, Config) when is_list(Origin) ->
- headers(Req, RequestHeaders, ?l2b(string:to_lower(Origin)), Config);
-headers(Req, RequestHeaders, Origin, Config) ->
- case is_cors_enabled(Config) of
- true ->
- AcceptedOrigins = get_accepted_origins(Req, Config),
- CorsHeaders = handle_headers(Config, Origin, AcceptedOrigins),
- ExposedCouchHeaders = couch_util:get_value(
- <<"exposed_headers">>, Config, ?COUCH_HEADERS
- ),
- maybe_apply_headers(CorsHeaders, RequestHeaders, ExposedCouchHeaders);
- false ->
- RequestHeaders
- end.
-
-maybe_apply_headers([], RequestHeaders, _ExposedCouchHeaders) ->
- RequestHeaders;
-maybe_apply_headers(CorsHeaders, RequestHeaders, ExposedCouchHeaders) ->
- %% Find all non ?SIMPLE_HEADERS and non ?SIMPLE_CONTENT_TYPE_VALUES,
- %% expose those through Access-Control-Expose-Headers, allowing
- %% the client to access them in the browser. Also append in
- %% ?COUCH_HEADERS, as further headers may be added later that
- %% need to be exposed.
- %% return: CorsHeaders ++ RequestHeaders ++ ACEH
-
- ExposedHeaders0 = simple_headers([K || {K, _V} <- RequestHeaders]),
-
- %% If Content-Type is not in ExposedHeaders, and the Content-Type
- %% is not a member of ?SIMPLE_CONTENT_TYPE_VALUES, then add it
- %% into the list of ExposedHeaders
- ContentType = proplists:get_value("content-type", ExposedHeaders0),
- IncludeContentType =
- case ContentType of
- undefined ->
- false;
- _ ->
- lists:member(string:to_lower(ContentType), ?SIMPLE_CONTENT_TYPE_VALUES)
- end,
- ExposedHeaders =
- case IncludeContentType of
- false ->
- ["content-type" | lists:delete("content-type", ExposedHeaders0)];
- true ->
- ExposedHeaders0
- end,
-
- %% ExposedCouchHeaders may get added later, so expose them by default
- ACEH = [
- {"Access-Control-Expose-Headers", string:join(ExposedHeaders ++ ExposedCouchHeaders, ", ")}
- ],
- CorsHeaders ++ RequestHeaders ++ ACEH.
-
-simple_headers(Headers) ->
- LCHeaders = [to_lower(H) || H <- Headers],
- lists:filter(fun(H) -> lists:member(H, ?SIMPLE_HEADERS) end, LCHeaders).
-
-to_lower(String) when is_binary(String) ->
- to_lower(?b2l(String));
-to_lower(String) ->
- string:to_lower(String).
-
-handle_headers(_Config, _Origin, []) ->
- [];
-handle_headers(Config, Origin, AcceptedOrigins) ->
- AcceptAll = lists:member(<<"*">>, AcceptedOrigins),
- case AcceptAll orelse lists:member(Origin, AcceptedOrigins) of
- true ->
- make_cors_header(Config, Origin);
- false ->
- %% If the value of the Origin header is not a
- %% case-sensitive match for any of the values
- %% in list of origins, do not set any additional
- %% headers and terminate this set of steps.
- %% http://www.w3.org/TR/cors/#resource-requests
- []
- end.
-
-make_cors_header(Config, Origin) ->
- Headers = [{"Access-Control-Allow-Origin", binary_to_list(Origin)}],
- maybe_add_credentials(Config, Origin, Headers).
-
-%% util
-
-maybe_add_credentials(Config, Origin, Headers) ->
- case allow_credentials(Config, Origin) of
- false ->
- Headers;
- true ->
- Headers ++ [{"Access-Control-Allow-Credentials", "true"}]
- end.
-
-allow_credentials(_Config, <<"*">>) ->
- false;
-allow_credentials(Config, Origin) ->
- get_origin_config(
- Config,
- Origin,
- <<"allow_credentials">>,
- ?CORS_DEFAULT_ALLOW_CREDENTIALS
- ).
-
-get_cors_config(#httpd{cors_config = undefined, mochi_req = MochiReq}) ->
- Host = couch_httpd_vhost:host(MochiReq),
-
- EnableCors = chttpd_util:get_chttpd_config_boolean("enable_cors", false),
- AllowCredentials = cors_config(Host, "credentials", "false") =:= "true",
-
- AllowHeaders =
- case cors_config(Host, "headers", undefined) of
- undefined ->
- ?SUPPORTED_HEADERS;
- AllowHeaders0 ->
- [to_lower(H) || H <- split_list(AllowHeaders0)]
- end,
- AllowMethods =
- case cors_config(Host, "methods", undefined) of
- undefined ->
- ?SUPPORTED_METHODS;
- AllowMethods0 ->
- split_list(AllowMethods0)
- end,
- ExposedHeaders =
- case cors_config(Host, "exposed_headers", undefined) of
- undefined ->
- ?COUCH_HEADERS;
- ExposedHeaders0 ->
- [to_lower(H) || H <- split_list(ExposedHeaders0)]
- end,
- MaxAge = cors_config(Host, "max_age", ?CORS_DEFAULT_MAX_AGE),
- Origins0 = binary_split_list(cors_config(Host, "origins", [])),
- Origins = [{O, {[]}} || O <- Origins0],
- [
- {<<"enable_cors">>, EnableCors},
- {<<"allow_credentials">>, AllowCredentials},
- {<<"allow_methods">>, AllowMethods},
- {<<"allow_headers">>, AllowHeaders},
- {<<"exposed_headers">>, ExposedHeaders},
- {<<"max_age">>, MaxAge},
- {<<"origins">>, {Origins}}
- ];
-get_cors_config(#httpd{cors_config = Config}) ->
- Config.
-
-cors_config(Host, Key, Default) ->
- config:get(
- cors_section(Host),
- Key,
- config:get("cors", Key, Default)
- ).
-
-cors_section(HostValue) ->
- HostPort = maybe_strip_scheme(HostValue),
- Host = hd(string:tokens(HostPort, ":")),
- "cors:" ++ Host.
-
-maybe_strip_scheme(Host) ->
- case string:str(Host, "://") of
- 0 -> Host;
- N -> string:substr(Host, N + 3)
- end.
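-
- %% Worked example, not in the original source: cors_section/1 strips any
- %% scheme and port from the Host value before building the per-host config
- %% section name, so both of these resolve to the same section:
- %%   cors_section("https://example.com:5984") -> "cors:example.com"
- %%   cors_section("example.com:5984")         -> "cors:example.com"
- %% cors_config/3 then consults that section before falling back to the
- %% global "cors" section.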
-
-is_cors_enabled(Config) ->
- case get(disable_couch_httpd_cors) of
- undefined ->
- put(disable_couch_httpd_cors, true);
- _ ->
- ok
- end,
- couch_util:get_value(<<"enable_cors">>, Config, false).
-
-%% Get a list of {Origin, OriginConfig} tuples
-%% ie: get_origin_configs(Config) ->
-%% [
-%% {<<"http://foo.com">>,
-%% {
-%% [
-%% {<<"allow_credentials">>, true},
-%% {<<"allow_methods">>, [<<"POST">>]}
-%% ]
-%% }
-%% },
-%% {<<"http://baz.com">>, {[]}}
-%% ]
-get_origin_configs(Config) ->
- {Origins} = couch_util:get_value(<<"origins">>, Config, {[]}),
- Origins.
-
-%% Get config for an individual Origin
-%% ie: get_origin_config(Config, <<"http://foo.com">>) ->
-%% [
-%% {<<"allow_credentials">>, true},
-%% {<<"allow_methods">>, [<<"POST">>]}
-%% ]
-get_origin_config(Config, Origin) ->
- OriginConfigs = get_origin_configs(Config),
- {OriginConfig} = couch_util:get_value(Origin, OriginConfigs, {[]}),
- OriginConfig.
-
-%% Get config of a single key for an individual Origin
-%% ie: get_origin_config(Config, <<"http://foo.com">>, <<"allow_methods">>, [])
-%% [<<"POST">>]
-get_origin_config(Config, Origin, Key, Default) ->
- OriginConfig = get_origin_config(Config, Origin),
- couch_util:get_value(
- Key,
- OriginConfig,
- couch_util:get_value(Key, Config, Default)
- ).
-
-get_origin(Req) ->
- case chttpd:header_value(Req, "Origin") of
- undefined ->
- undefined;
- Origin ->
- ?l2b(Origin)
- end.
-
-get_accepted_origins(_Req, Config) ->
- lists:map(fun({K, _V}) -> K end, get_origin_configs(Config)).
-
-split_list(S) ->
- re:split(S, "\\s*,\\s*", [trim, {return, list}]).
-
-binary_split_list(S) ->
- [list_to_binary(E) || E <- split_list(S)].
-
-split_headers(H) ->
- re:split(H, ",\\s*", [{return, list}, trim]).
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
deleted file mode 100644
index 4392df194..000000000
--- a/src/chttpd/src/chttpd_db.erl
+++ /dev/null
@@ -1,2696 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_db).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--export([
- handle_request/1,
- handle_compact_req/2,
- handle_design_req/2,
- db_req/2,
- couch_doc_open/4,
- handle_changes_req/2,
- update_doc_result_to_json/1, update_doc_result_to_json/2,
- handle_design_info_req/3,
- handle_view_cleanup_req/2,
- update_doc/4,
- http_code_from_status/1,
- handle_partition_req/2
-]).
-
--import(
- chttpd,
- [
- send_json/2, send_json/3, send_json/4,
- send_method_not_allowed/2,
- start_json_response/2,
- send_chunk/2,
- end_json_response/1,
- start_chunked_response/3,
- absolute_uri/2,
- send/2,
- start_response_length/4
- ]
-).
-
--record(doc_query_args, {
- options = [],
- rev = nil,
- open_revs = [],
- update_type = interactive_edit,
- atts_since = nil
-}).
-
-% Accumulator for changes_callback function
--record(cacc, {
- etag,
- feed,
- mochi,
- prepend = "",
- responding = false,
- chunks_sent = 0,
- buffer = [],
- bufsize = 0,
- threshold
-}).
-
--define(IS_ALL_DOCS(T),
- (T == <<"_all_docs">> orelse
- T == <<"_local_docs">> orelse
- T == <<"_design_docs">>)
-).
-
--define(IS_MANGO(T),
- (T == <<"_index">> orelse
- T == <<"_find">> orelse
- T == <<"_explain">>)
-).
-
-% Database request handlers
-handle_request(#httpd{path_parts = [DbName | RestParts], method = Method} = Req) ->
- case {Method, RestParts} of
- {'PUT', []} ->
- create_db_req(Req, DbName);
- {'DELETE', []} ->
- % if we get ?rev=... the user is using a faulty script where the
- % document id is empty by accident. Let them recover safely.
- case chttpd:qs_value(Req, "rev", false) of
- false ->
- delete_db_req(Req, DbName);
- _Rev ->
- throw(
- {bad_request,
- "You tried to DELETE a database with a ?=rev parameter. " ++
- "Did you mean to DELETE a document instead?"}
- )
- end;
- {_, []} ->
- do_db_req(Req, fun db_req/2);
- {_, [SecondPart | _]} ->
- Handler = chttpd_handlers:db_handler(SecondPart, fun db_req/2),
- do_db_req(Req, Handler)
- end.
-
-handle_changes_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- case chttpd:body_length(Req) of
- 0 ->
- handle_changes_req1(Req, Db);
- _ ->
- {JsonProps} = chttpd:json_body_obj(Req),
- handle_changes_req1(Req#httpd{req_body = {JsonProps}}, Db)
- end;
-handle_changes_req(#httpd{method = 'GET'} = Req, Db) ->
- handle_changes_req1(Req, Db);
-handle_changes_req(#httpd{path_parts = [_, <<"_changes">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_changes_req1(#httpd{} = Req, Db) ->
- #changes_args{filter = Raw, style = Style} = Args0 = parse_changes_query(Req),
- ChangesArgs = Args0#changes_args{
- filter_fun = couch_changes:configure_filter(Raw, Style, Req, Db),
- db_open_options = [{user_ctx, couch_db:get_user_ctx(Db)}]
- },
- Max = chttpd:chunked_response_buffer_size(),
- case ChangesArgs#changes_args.feed of
- "normal" ->
- T0 = os:timestamp(),
- {ok, Info} = fabric:get_db_info(Db),
- Suffix = mem3:shard_suffix(Db),
- Etag = chttpd:make_etag({Info, Suffix}),
- DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
- couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
- chttpd:etag_respond(Req, Etag, fun() ->
- Acc0 = #cacc{
- feed = normal,
- etag = Etag,
- mochi = Req,
- threshold = Max
- },
- fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
- end);
- Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
- couch_stats:increment_counter([couchdb, httpd, clients_requesting_changes]),
- Acc0 = #cacc{
- feed = list_to_atom(Feed),
- mochi = Req,
- threshold = Max
- },
- try
- fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
- after
- couch_stats:decrement_counter([couchdb, httpd, clients_requesting_changes])
- end;
- _ ->
- Msg = <<"Supported `feed` types: normal, continuous, live, longpoll, eventsource">>,
- throw({bad_request, Msg})
- end.
-
-% callbacks for continuous feed (newline-delimited JSON Objects)
-changes_callback(start, #cacc{feed = continuous} = Acc) ->
- {ok, Resp} = chttpd:start_delayed_json_response(Acc#cacc.mochi, 200),
- {ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, Change}, #cacc{feed = continuous} = Acc) ->
- chttpd_stats:incr_rows(),
- Data = [?JSON_ENCODE(Change) | "\n"],
- Len = iolist_size(Data),
- maybe_flush_changes_feed(Acc, Data, Len);
-changes_callback({stop, EndSeq, Pending}, #cacc{feed = continuous} = Acc) ->
- #cacc{mochi = Resp, buffer = Buf} = Acc,
- Row =
- {[
- {<<"last_seq">>, EndSeq},
- {<<"pending">>, Pending}
- ]},
- Data = [Buf, ?JSON_ENCODE(Row) | "\n"],
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Data),
- chttpd:end_delayed_json_response(Resp1);
-% callbacks for eventsource feed (newline-delimited eventsource Objects)
-changes_callback(start, #cacc{feed = eventsource} = Acc) ->
- #cacc{mochi = Req} = Acc,
- Headers = [
- {"Content-Type", "text/event-stream"},
- {"Cache-Control", "no-cache"}
- ],
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, Headers),
- {ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, {ChangeProp} = Change}, #cacc{feed = eventsource} = Acc) ->
- chttpd_stats:incr_rows(),
- Seq = proplists:get_value(seq, ChangeProp),
- Chunk = [
- "data: ",
- ?JSON_ENCODE(Change),
- "\n",
- "id: ",
- ?JSON_ENCODE(Seq),
- "\n\n"
- ],
- Len = iolist_size(Chunk),
- maybe_flush_changes_feed(Acc, Chunk, Len);
-changes_callback(timeout, #cacc{feed = eventsource} = Acc) ->
- #cacc{mochi = Resp, chunks_sent = ChunksSent} = Acc,
- Chunk = "event: heartbeat\ndata: \n\n",
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Chunk),
- {ok, Acc#cacc{mochi = Resp1, chunks_sent = ChunksSent + 1}};
-changes_callback({stop, _EndSeq}, #cacc{feed = eventsource} = Acc) ->
- #cacc{mochi = Resp, buffer = Buf} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Buf),
- chttpd:end_delayed_json_response(Resp1);
-% callbacks for longpoll and normal (single JSON Object)
-changes_callback(start, #cacc{feed = normal} = Acc) ->
- #cacc{etag = Etag, mochi = Req} = Acc,
- FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(
- Req,
- 200,
- [{"ETag", Etag}],
- FirstChunk
- ),
- {ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback(start, Acc) ->
- #cacc{mochi = Req} = Acc,
- FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [], FirstChunk),
- {ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, Change}, Acc) ->
- chttpd_stats:incr_rows(),
- Data = [Acc#cacc.prepend, ?JSON_ENCODE(Change)],
- Len = iolist_size(Data),
- maybe_flush_changes_feed(Acc, Data, Len);
-changes_callback({stop, EndSeq, Pending}, Acc) ->
- #cacc{buffer = Buf, mochi = Resp, threshold = Max} = Acc,
- Terminator = [
- "\n],\n\"last_seq\":",
- ?JSON_ENCODE(EndSeq),
- ",\"pending\":",
- ?JSON_ENCODE(Pending),
- "}\n"
- ],
- {ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, Terminator, Max),
- chttpd:end_delayed_json_response(Resp1);
-changes_callback(waiting_for_updates, #cacc{buffer = []} = Acc) ->
- #cacc{mochi = Resp, chunks_sent = ChunksSent} = Acc,
- case ChunksSent > 0 of
- true ->
- {ok, Acc};
- false ->
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, <<"\n">>),
- {ok, Acc#cacc{mochi = Resp1, chunks_sent = 1}}
- end;
-changes_callback(waiting_for_updates, Acc) ->
- #cacc{buffer = Buf, mochi = Resp, chunks_sent = ChunksSent} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Buf),
- {ok, Acc#cacc{
- buffer = [],
- bufsize = 0,
- mochi = Resp1,
- chunks_sent = ChunksSent + 1
- }};
-changes_callback(timeout, Acc) ->
- #cacc{mochi = Resp, chunks_sent = ChunksSent} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\n"),
- {ok, Acc#cacc{mochi = Resp1, chunks_sent = ChunksSent + 1}};
-changes_callback({error, Reason}, #cacc{mochi = #httpd{}} = Acc) ->
- #cacc{mochi = Req} = Acc,
- chttpd:send_error(Req, Reason);
-changes_callback({error, Reason}, #cacc{feed = normal, responding = false} = Acc) ->
- #cacc{mochi = Req} = Acc,
- chttpd:send_error(Req, Reason);
-changes_callback({error, Reason}, Acc) ->
- chttpd:send_delayed_error(Acc#cacc.mochi, Reason).
-
-maybe_flush_changes_feed(#cacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
- Size > 0 andalso (Size + Len) > Max
-->
- #cacc{buffer = Buffer, mochi = Resp} = Acc,
- {ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
- {ok, Acc#cacc{prepend = ",\r\n", buffer = Data, bufsize = Len, mochi = R1}};
-maybe_flush_changes_feed(Acc0, Data, Len) ->
- #cacc{buffer = Buf, bufsize = Size, chunks_sent = ChunksSent} = Acc0,
- Acc = Acc0#cacc{
- prepend = ",\r\n",
- buffer = [Buf | Data],
- bufsize = Size + Len,
- chunks_sent = ChunksSent + 1
- },
- {ok, Acc}.
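-
- %% Illustrative note, not part of the original module: with a threshold of,
- %% say, 100 bytes, a 60-byte change is only buffered; when the next change
- %% would push a non-empty buffer past 100 bytes, the buffered data is sent
- %% as one delayed chunk and the new data starts the next buffer, so small
- %% rows do not each become their own HTTP chunk.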
-
-handle_compact_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- case Req#httpd.path_parts of
- [_DbName, <<"_compact">>] ->
- ok = fabric:compact(Db),
- send_json(Req, 202, {[{ok, true}]});
- [DbName, <<"_compact">>, DesignName | _] ->
- case ddoc_cache:open(DbName, <<"_design/", DesignName/binary>>) of
- {ok, _DDoc} ->
- ok = fabric:compact(Db, DesignName),
- send_json(Req, 202, {[{ok, true}]});
- Error ->
- throw(Error)
- end
- end;
-handle_compact_req(Req, _Db) ->
- send_method_not_allowed(Req, "POST").
-
-handle_view_cleanup_req(Req, Db) ->
- ok = fabric:cleanup_index_files_all_nodes(Db),
- send_json(Req, 202, {[{ok, true}]}).
-
-handle_partition_req(#httpd{path_parts = [_, _]} = _Req, _Db) ->
- throw({bad_request, invalid_partition_req});
-handle_partition_req(#httpd{method = 'GET', path_parts = [_, _, PartId]} = Req, Db) ->
- couch_partition:validate_partition(PartId),
- case couch_db:is_partitioned(Db) of
- true ->
- {ok, PartitionInfo} = fabric:get_partition_info(Db, PartId),
- send_json(Req, {PartitionInfo});
- false ->
- throw({bad_request, <<"database is not partitioned">>})
- end;
-handle_partition_req(
- #httpd{
- method = 'POST',
- path_parts = [_, <<"_partition">>, <<"_", _/binary>>]
- },
- _Db
-) ->
- Msg = <<"Partition must not start with an underscore">>,
- throw({illegal_partition, Msg});
-handle_partition_req(#httpd{path_parts = [_, _, _]} = Req, _Db) ->
- send_method_not_allowed(Req, "GET");
-handle_partition_req(#httpd{path_parts = [DbName, _, PartId | Rest]} = Req, Db) ->
- case couch_db:is_partitioned(Db) of
- true ->
- couch_partition:validate_partition(PartId),
- QS = chttpd:qs(Req),
- PartIdStr = ?b2l(PartId),
- QSPartIdStr = couch_util:get_value("partition", QS, PartIdStr),
- if
- QSPartIdStr == PartIdStr ->
- ok;
- true ->
- Msg = <<"Conflicting value for `partition` in query string">>,
- throw({bad_request, Msg})
- end,
- NewQS = lists:ukeysort(1, [{"partition", PartIdStr} | QS]),
- NewReq = Req#httpd{
- path_parts = [DbName | Rest],
- qs = NewQS
- },
- update_partition_stats(Rest),
- case Rest of
- [OP | _] when OP == <<"_all_docs">> orelse ?IS_MANGO(OP) ->
- case chttpd_handlers:db_handler(OP, fun db_req/2) of
- Handler when is_function(Handler, 2) ->
- Handler(NewReq, Db);
- _ ->
- chttpd:send_error(Req, not_found)
- end;
- [<<"_design">>, _Name, <<"_", _/binary>> | _] ->
- handle_design_req(NewReq, Db);
- _ ->
- chttpd:send_error(Req, not_found)
- end;
- false ->
- throw({bad_request, <<"database is not partitioned">>})
- end;
-handle_partition_req(Req, _Db) ->
- chttpd:send_error(Req, not_found).
-
-update_partition_stats(PathParts) ->
- case PathParts of
- [<<"_design">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_view_requests]);
- [<<"_all_docs">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_all_docs_requests]);
- [<<"_find">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_find_requests]);
- [<<"_explain">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_explain_requests]);
- _ ->
- % ignore paths that do not match
- ok
- end.
-
-handle_design_req(
- #httpd{
- path_parts = [_DbName, _Design, Name, <<"_", _/binary>> = Action | _Rest]
- } = Req,
- Db
-) ->
- DbName = mem3:dbname(couch_db:name(Db)),
- case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of
- {ok, DDoc} ->
- Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
- Handler(Req, Db, DDoc);
- Error ->
- throw(Error)
- end;
-handle_design_req(Req, Db) ->
- db_req(Req, Db).
-
-bad_action_req(#httpd{path_parts = [_, _, Name | FileNameParts]} = Req, Db, _DDoc) ->
- db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts).
-
-handle_design_info_req(#httpd{method = 'GET'} = Req, Db, #doc{} = DDoc) ->
- [_, _, Name, _] = Req#httpd.path_parts,
- {ok, GroupInfoList} = fabric:get_view_group_info(Db, DDoc),
- send_json(
- Req,
- 200,
- {[
- {name, Name},
- {view_index, {GroupInfoList}}
- ]}
- );
-handle_design_info_req(Req, _Db, _DDoc) ->
- send_method_not_allowed(Req, "GET").
-
-create_db_req(#httpd{} = Req, DbName) ->
- couch_httpd:verify_is_server_admin(Req),
- ShardsOpt = parse_shards_opt(Req),
- EngineOpt = parse_engine_opt(Req),
- DbProps = parse_partitioned_opt(Req),
- Options = lists:append([ShardsOpt, [{props, DbProps}], EngineOpt]),
- DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
- case fabric:create_db(DbName, Options) of
- ok ->
- send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
- accepted ->
- send_json(Req, 202, [{"Location", DocUrl}], {[{ok, true}]});
- {error, file_exists} ->
- chttpd:send_error(Req, file_exists);
- Error ->
- throw(Error)
- end.
-
-delete_db_req(#httpd{} = Req, DbName) ->
- couch_httpd:verify_is_server_admin(Req),
- case fabric:delete_db(DbName, []) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- accepted ->
- send_json(Req, 202, {[{ok, true}]});
- Error ->
- throw(Error)
- end.
-
-do_db_req(#httpd{path_parts = [DbName | _], user_ctx = Ctx} = Req, Fun) ->
- Shard = hd(mem3:shards(DbName)),
- Props = couch_util:get_value(props, Shard#shard.opts, []),
- Opts =
- case Ctx of
- undefined ->
- [{props, Props}];
- #user_ctx{} ->
- [{user_ctx, Ctx}, {props, Props}]
- end,
- {ok, Db} = couch_db:clustered_db(DbName, Opts),
- Fun(Req, Db).
-
-db_req(#httpd{method = 'GET', path_parts = [DbName]} = Req, _Db) ->
- % measure the time required to fetch the db info, see if it's worth it
- T0 = os:timestamp(),
- {ok, DbInfo} = fabric:get_db_info(DbName),
- DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
- couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
- send_json(Req, {DbInfo});
-db_req(#httpd{method = 'POST', path_parts = [DbName], user_ctx = Ctx} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
-
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx, Ctx}, {w, W}],
-
- Doc = couch_db:doc_from_json_obj_validate(Db, chttpd:json_body(Req)),
- validate_attachment_names(Doc),
- Doc2 =
- case Doc#doc.id of
- <<"">> ->
- Doc#doc{id = couch_uuids:new(), revs = {0, []}};
- _ ->
- Doc
- end,
- DocId = Doc2#doc.id,
- case chttpd:qs_value(Req, "batch") of
- "ok" ->
- % async_batching
- spawn(fun() ->
- case catch (fabric:update_doc(Db, Doc2, Options)) of
- {ok, _} ->
- chttpd_stats:incr_writes(),
- ok;
- {accepted, _} ->
- chttpd_stats:incr_writes(),
- ok;
- Error ->
- couch_log:debug("Batch doc error (~s): ~p", [DocId, Error])
- end
- end),
-
- send_json(
- Req,
- 202,
- [],
- {[
- {ok, true},
- {id, DocId}
- ]}
- );
- _Normal ->
- % normal
- DocUrl = absolute_uri(Req, [
- $/,
- couch_util:url_encode(DbName),
- $/,
- couch_util:url_encode(DocId)
- ]),
- case fabric:update_doc(Db, Doc2, Options) of
- {ok, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
- end,
- send_json(
- Req,
- HttpCode,
- [{"Location", DocUrl}],
- {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]}
- )
- end;
-db_req(#httpd{path_parts = [_DbName]} = Req, _Db) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-db_req(
- #httpd{
- method = 'POST',
- path_parts = [DbName, <<"_ensure_full_commit">>],
- user_ctx = Ctx
- } = Req,
- _Db
-) ->
- chttpd:validate_ctype(Req, "application/json"),
- %% use fabric call to trigger a database_does_not_exist exception
- %% for missing databases that'd return error 404 from chttpd
- %% get_security used to prefer shards on the same node over other nodes
- fabric:get_security(DbName, [{user_ctx, Ctx}]),
- CreationTime = mem3:shard_creation_time(DbName),
- send_json(
- Req,
- 201,
- {[
- {ok, true},
- {instance_start_time, CreationTime}
- ]}
- );
-db_req(#httpd{path_parts = [_, <<"_ensure_full_commit">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_bulk_docs">>], user_ctx = Ctx} = Req, Db) ->
- couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
- chttpd:validate_ctype(Req, "application/json"),
- {JsonProps} = chttpd:json_body_obj(Req),
- DocsArray =
- case couch_util:get_value(<<"docs">>, JsonProps) of
- undefined ->
- throw({bad_request, <<"POST body must include `docs` parameter.">>});
- DocsArray0 when not is_list(DocsArray0) ->
- throw({bad_request, <<"`docs` parameter must be an array.">>});
- DocsArray0 ->
- DocsArray0
- end,
- couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
- W =
- case couch_util:get_value(<<"w">>, JsonProps) of
- Value when is_integer(Value) ->
- integer_to_list(Value);
- _ ->
- chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db)))
- end,
- case chttpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit, {user_ctx, Ctx}, {w, W}];
- "false" ->
- Options = [delay_commit, {user_ctx, Ctx}, {w, W}];
- _ ->
- Options = [{user_ctx, Ctx}, {w, W}]
- end,
- NewEdits = couch_util:get_value(<<"new_edits">>, JsonProps, true),
- Docs = lists:map(
- fun(JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
- validate_revs(Doc, NewEdits),
- validate_attachment_names(Doc),
- case Doc#doc.id of
- <<>> -> Doc#doc{id = couch_uuids:new()};
- _ -> Doc
- end
- end,
- DocsArray
- ),
- case NewEdits of
- true ->
- Options2 =
- case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
- true -> [all_or_nothing | Options];
- _ -> Options
- end,
- case fabric:update_docs(Db, Docs, Options2) of
- {ok, Results} ->
- % output the results
- chttpd_stats:incr_writes(length(Results)),
- DocResults = lists:zipwith(
- fun update_doc_result_to_json/2,
- Docs,
- Results
- ),
- send_json(Req, 201, DocResults);
- {accepted, Results} ->
- % output the results
- chttpd_stats:incr_writes(length(Results)),
- DocResults = lists:zipwith(
- fun update_doc_result_to_json/2,
- Docs,
- Results
- ),
- send_json(Req, 202, DocResults);
- {error, Results} ->
- % output the results
- chttpd_stats:incr_writes(length(Results)),
- DocResults = lists:zipwith(
- fun update_doc_result_to_json/2,
- Docs,
- Results
- ),
- send_json(Req, 500, DocResults);
- {aborted, Errors} ->
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 417, ErrorsJson)
- end;
- false ->
- case fabric:update_docs(Db, Docs, [replicated_changes | Options]) of
- {ok, Errors} ->
- chttpd_stats:incr_writes(length(Docs)),
- ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 201, ErrorsJson);
- {accepted, Errors} ->
- chttpd_stats:incr_writes(length(Docs)),
- ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 202, ErrorsJson);
- {error, Errors} ->
- chttpd_stats:incr_writes(length(Docs)),
- ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 500, ErrorsJson)
- end;
- _ ->
- throw({bad_request, <<"`new_edits` parameter must be a boolean.">>})
- end;
-db_req(#httpd{path_parts = [_, <<"_bulk_docs">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(
- #httpd{
- method = 'POST',
- path_parts = [_, <<"_bulk_get">>],
- mochi_req = MochiReq
- } = Req,
- Db
-) ->
- couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
- couch_httpd:validate_ctype(Req, "application/json"),
- {JsonProps} = chttpd:json_body_obj(Req),
- case couch_util:get_value(<<"docs">>, JsonProps) of
- undefined ->
- throw({bad_request, <<"Missing JSON list of 'docs'.">>});
- Docs ->
- #doc_query_args{
- options = Options0
- } = bulk_get_parse_doc_query(Req),
- Options = [{user_ctx, Req#httpd.user_ctx} | Options0],
-
- AcceptJson = MochiReq:accepts_content_type("application/json"),
- AcceptMixedMp = MochiReq:accepts_content_type("multipart/mixed"),
- AcceptRelatedMp = MochiReq:accepts_content_type("multipart/related"),
- AcceptMp = not AcceptJson andalso (AcceptMixedMp orelse AcceptRelatedMp),
- case AcceptMp of
- false ->
- {ok, Resp} = start_json_response(Req, 200),
- send_chunk(Resp, <<"{\"results\": [">>),
- lists:foldl(
- fun(Doc, Sep) ->
- {DocId, Results, Options1} = bulk_get_open_doc_revs(
- Db,
- Doc,
- Options
- ),
- bulk_get_send_docs_json(Resp, DocId, Results, Options1, Sep),
- <<",">>
- end,
- <<"">>,
- Docs
- ),
- send_chunk(Resp, <<"]}">>),
- end_json_response(Resp);
- true ->
- OuterBoundary = bulk_get_multipart_boundary(),
- MpType =
- case AcceptMixedMp of
- true ->
- "multipart/mixed";
- _ ->
- "multipart/related"
- end,
- CType =
- {"Content-Type",
- MpType ++ "; boundary=\"" ++
- ?b2l(OuterBoundary) ++ "\""},
- {ok, Resp} = start_chunked_response(Req, 200, [CType]),
- lists:foldl(
- fun(Doc, _Pre) ->
- case bulk_get_open_doc_revs(Db, Doc, Options) of
- {_, {ok, []}, _Options1} ->
- ok;
- {_, {ok, Results}, Options1} ->
- send_docs_multipart_bulk_get(
- Results,
- Options1,
- OuterBoundary,
- Resp
- );
- {DocId, {error, {RevId, Error, Reason}}, _Options1} ->
- Json = ?JSON_ENCODE(
- {[
- {<<"id">>, DocId},
- {<<"rev">>, RevId},
- {<<"error">>, Error},
- {<<"reason">>, Reason}
- ]}
- ),
- couch_httpd:send_chunk(Resp, [
- <<"\r\n--", OuterBoundary/binary>>,
- <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json
- ])
- end
- end,
- <<"">>,
- Docs
- ),
- case Docs of
- [] ->
- ok;
- _ ->
- couch_httpd:send_chunk(
- Resp, <<"\r\n", "--", OuterBoundary/binary, "--\r\n">>
- )
- end,
- couch_httpd:last_chunk(Resp)
- end
- end;
-db_req(#httpd{path_parts = [_, <<"_bulk_get">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_purge">>]} = Req, Db) ->
- couch_stats:increment_counter([couchdb, httpd, purge_requests]),
- chttpd:validate_ctype(Req, "application/json"),
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}],
- {IdsRevs} = chttpd:json_body_obj(Req),
- IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
- MaxIds = config:get_integer("purge", "max_document_id_number", 100),
- case length(IdsRevs2) =< MaxIds of
- false -> throw({bad_request, "Exceeded maximum number of documents."});
- true -> ok
- end,
- RevsLen = lists:foldl(
- fun({_Id, Revs}, Acc) ->
- length(Revs) + Acc
- end,
- 0,
- IdsRevs2
- ),
- MaxRevs = config:get_integer("purge", "max_revisions_number", 1000),
- case RevsLen =< MaxRevs of
- false -> throw({bad_request, "Exceeded maximum number of revisions."});
- true -> ok
- end,
- couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)),
- Results2 =
- case fabric:purge_docs(Db, IdsRevs2, Options) of
- {ok, Results} ->
- chttpd_stats:incr_writes(length(Results)),
- Results;
- {accepted, Results} ->
- chttpd_stats:incr_writes(length(Results)),
- Results
- end,
- {Code, Json} = purge_results_to_json(IdsRevs2, Results2),
- send_json(Req, Code, {[{<<"purge_seq">>, null}, {<<"purged">>, {Json}}]});
-db_req(#httpd{path_parts = [_, <<"_purge">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'GET', path_parts = [_, OP]} = Req, Db) when ?IS_ALL_DOCS(OP) ->
- case chttpd:qs_json_value(Req, "keys", nil) of
- Keys when is_list(Keys) ->
- all_docs_view(Req, Db, Keys, OP);
- nil ->
- all_docs_view(Req, Db, undefined, OP);
- _ ->
- throw({bad_request, "`keys` parameter must be an array."})
- end;
-db_req(
- #httpd{
- method = 'POST',
- path_parts = [_, OP, <<"queries">>]
- } = Req,
- Db
-) when ?IS_ALL_DOCS(OP) ->
- Props = chttpd:json_body_obj(Req),
- case couch_mrview_util:get_view_queries(Props) of
- undefined ->
- throw({bad_request, <<"POST body must include `queries` parameter.">>});
- Queries ->
- multi_all_docs_view(Req, Db, OP, Queries)
- end;
-db_req(
- #httpd{path_parts = [_, OP, <<"queries">>]} = Req,
- _Db
-) when ?IS_ALL_DOCS(OP) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'POST', path_parts = [_, OP]} = Req, Db) when ?IS_ALL_DOCS(OP) ->
- chttpd:validate_ctype(Req, "application/json"),
- {Fields} = chttpd:json_body_obj(Req),
- case couch_util:get_value(<<"keys">>, Fields, nil) of
- Keys when is_list(Keys) ->
- all_docs_view(Req, Db, Keys, OP);
- nil ->
- all_docs_view(Req, Db, undefined, OP);
- _ ->
- throw({bad_request, "`keys` body member must be an array."})
- end;
-db_req(#httpd{path_parts = [_, OP]} = Req, _Db) when ?IS_ALL_DOCS(OP) ->
- send_method_not_allowed(Req, "GET,HEAD,POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_missing_revs">>]} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- {JsonDocIdRevs} = chttpd:json_body_obj(Req),
- case fabric:get_missing_revs(Db, JsonDocIdRevs) of
- {error, Reason} ->
- chttpd:send_error(Req, Reason);
- {ok, Results} ->
- Results2 = [
- {Id, couch_doc:revs_to_strs(Revs)}
- || {Id, Revs, _} <- Results
- ],
- send_json(
- Req,
- {[
- {missing_revs, {Results2}}
- ]}
- )
- end;
-db_req(#httpd{path_parts = [_, <<"_missing_revs">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_revs_diff">>]} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- {JsonDocIdRevs} = chttpd:json_body_obj(Req),
- case fabric:get_missing_revs(Db, JsonDocIdRevs) of
- {error, Reason} ->
- chttpd:send_error(Req, Reason);
- {ok, Results} ->
- Results2 =
- lists:map(
- fun({Id, MissingRevs, PossibleAncestors}) ->
- {Id, {
- [{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
- if
- PossibleAncestors == [] ->
- [];
- true ->
- [
- {possible_ancestors,
- couch_doc:revs_to_strs(PossibleAncestors)}
- ]
- end
- }}
- end,
- Results
- ),
- send_json(Req, {Results2})
- end;
-db_req(#httpd{path_parts = [_, <<"_revs_diff">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(
- #httpd{method = 'PUT', path_parts = [_, <<"_security">>], user_ctx = Ctx} = Req,
- Db
-) ->
- DbName = ?b2l(couch_db:name(Db)),
- validate_security_can_be_edited(DbName),
- SecObj = chttpd:json_body(Req),
- case fabric:set_security(Db, SecObj, [{user_ctx, Ctx}]) of
- ok ->
- send_json(Req, {[{<<"ok">>, true}]});
- Else ->
- throw(Else)
- end;
-db_req(#httpd{method = 'GET', path_parts = [_, <<"_security">>]} = Req, Db) ->
- send_json(Req, fabric:get_security(Db));
-db_req(#httpd{path_parts = [_, <<"_security">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "PUT,GET");
-db_req(
- #httpd{method = 'PUT', path_parts = [_, <<"_revs_limit">>], user_ctx = Ctx} = Req,
- Db
-) ->
- Limit = chttpd:json_body(Req),
- ok = fabric:set_revs_limit(Db, Limit, [{user_ctx, Ctx}]),
- send_json(Req, {[{<<"ok">>, true}]});
-db_req(#httpd{method = 'GET', path_parts = [_, <<"_revs_limit">>]} = Req, Db) ->
- send_json(Req, fabric:get_revs_limit(Db));
-db_req(#httpd{path_parts = [_, <<"_revs_limit">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "PUT,GET");
-db_req(#httpd{method = 'PUT', path_parts = [_, <<"_purged_infos_limit">>]} = Req, Db) ->
- Options = [{user_ctx, Req#httpd.user_ctx}],
- case chttpd:json_body(Req) of
- Limit when is_integer(Limit), Limit > 0 ->
- case fabric:set_purge_infos_limit(Db, Limit, Options) of
- ok ->
- send_json(Req, {[{<<"ok">>, true}]});
- Error ->
- throw(Error)
- end;
- _ ->
- throw({bad_request, "`purge_infos_limit` must be positive integer"})
- end;
-db_req(#httpd{method = 'GET', path_parts = [_, <<"_purged_infos_limit">>]} = Req, Db) ->
- send_json(Req, fabric:get_purge_infos_limit(Db));
-% Special case to enable using an unencoded slash in the URL of design docs,
-% as slashes in document IDs must otherwise be URL encoded.
-db_req(
- #httpd{
- method = 'GET', mochi_req = MochiReq, path_parts = [_DbName, <<"_design/", _/binary>> | _]
- } = Req,
- _Db
-) ->
- [Head | Tail] = re:split(MochiReq:get(raw_path), "_design%2F", [{return, list}, caseless]),
- chttpd:send_redirect(Req, Head ++ "_design/" ++ Tail);
-db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name]} = Req, Db) ->
- db_doc_req(Req, Db, <<"_design/", Name/binary>>);
-db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name | FileNameParts]} = Req, Db) ->
- db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts);
-% Special case to allow for accessing local documents without %2F
-% encoding the docid. Throws out requests that don't have the second
-% path part or that specify an attachment name.
-db_req(#httpd{path_parts = [_DbName, <<"_local">>]}, _Db) ->
- throw({bad_request, <<"Invalid _local document id.">>});
-db_req(#httpd{path_parts = [_DbName, <<"_local/">>]}, _Db) ->
- throw({bad_request, <<"Invalid _local document id.">>});
-db_req(#httpd{path_parts = [_DbName, <<"_local">>, Name]} = Req, Db) ->
- db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-db_req(#httpd{path_parts = [_DbName, <<"_local">> | _Rest]}, _Db) ->
- throw({bad_request, <<"_local documents do not accept attachments.">>});
-db_req(#httpd{path_parts = [_, DocId]} = Req, Db) ->
- db_doc_req(Req, Db, DocId);
-db_req(#httpd{method = 'DELETE', path_parts = [_, DocId | FileNameParts]} = Req, Db) ->
- chttpd:body(Req),
- db_attachment_req(Req, Db, DocId, FileNameParts);
-db_req(#httpd{path_parts = [_, DocId | FileNameParts]} = Req, Db) ->
- db_attachment_req(Req, Db, DocId, FileNameParts).
-
-multi_all_docs_view(Req, Db, OP, Queries) ->
- Args0 = couch_mrview_http:parse_params(Req, undefined),
- Args1 = Args0#mrargs{view_type = map},
- ArgQueries = lists:map(
- fun({Query}) ->
- QueryArg1 = couch_mrview_http:parse_params(
- Query,
- undefined,
- Args1,
- [decoded]
- ),
- QueryArgs2 = fabric_util:validate_all_docs_args(Db, QueryArg1),
- set_namespace(OP, QueryArgs2)
- end,
- Queries
- ),
- Options = [{user_ctx, Req#httpd.user_ctx}],
- VAcc0 = #vacc{db = Db, req = Req, prepend = "\r\n"},
- FirstChunk = "{\"results\":[",
- {ok, Resp0} = chttpd:start_delayed_json_response(
- VAcc0#vacc.req,
- 200,
- [],
- FirstChunk
- ),
- VAcc1 = VAcc0#vacc{resp = Resp0},
- VAcc2 = lists:foldl(
- fun(Args, Acc0) ->
- {ok, Acc1} = fabric:all_docs(
- Db,
- Options,
- fun view_cb/2,
- Acc0,
- Args
- ),
- Acc1
- end,
- VAcc1,
- ArgQueries
- ),
- {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
- chttpd:end_delayed_json_response(Resp1).
-
-all_docs_view(Req, Db, Keys, OP) ->
- Args0 = couch_mrview_http:parse_body_and_query(Req, Keys),
- Args1 = Args0#mrargs{view_type = map},
- Args2 = fabric_util:validate_all_docs_args(Db, Args1),
- Args3 = set_namespace(OP, Args2),
- Options = [{user_ctx, Req#httpd.user_ctx}],
- Max = chttpd:chunked_response_buffer_size(),
- VAcc = #vacc{db = Db, req = Req, threshold = Max},
- {ok, Resp} = fabric:all_docs(Db, Options, fun view_cb/2, VAcc, Args3),
- {ok, Resp#vacc.resp}.
-
-view_cb({row, Row} = Msg, Acc) ->
- case lists:keymember(doc, 1, Row) of
- true -> chttpd_stats:incr_reads();
- false -> ok
- end,
- chttpd_stats:incr_rows(),
- couch_mrview_http:view_cb(Msg, Acc);
-view_cb(Msg, Acc) ->
- couch_mrview_http:view_cb(Msg, Acc).
-
-db_doc_req(#httpd{method = 'DELETE'} = Req, Db, DocId) ->
- % check for the existence of the doc to handle the 404 case.
- couch_doc_open(Db, DocId, nil, []),
- case chttpd:qs_value(Req, "rev") of
- undefined ->
- Body = {[{<<"_deleted">>, true}]};
- Rev ->
- Body = {[{<<"_rev">>, ?l2b(Rev)}, {<<"_deleted">>, true}]}
- end,
- Doc = couch_doc_from_req(Req, Db, DocId, Body),
- send_updated_doc(Req, Db, DocId, Doc);
-db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
- #doc_query_args{
- rev = Rev0,
- open_revs = Revs,
- options = Options0,
- atts_since = AttsSince
- } = parse_doc_query(Req),
- Options = [{user_ctx, Req#httpd.user_ctx} | Options0],
- case Revs of
- [] ->
- Options2 =
- if
- AttsSince /= nil ->
- [{atts_since, AttsSince}, attachments | Options];
- true ->
- Options
- end,
- Rev =
- case lists:member(latest, Options) of
- % couch_doc_open will open the winning rev regardless of the rev passed
- % https://docs.couchdb.org/en/stable/api/document/common.html?highlight=latest#get--db-docid
- true -> nil;
- false -> Rev0
- end,
- Doc = couch_doc_open(Db, DocId, Rev, Options2),
- send_doc(Req, Doc, Options2);
- _ ->
- case fabric:open_revs(Db, DocId, Revs, Options) of
- {ok, []} when Revs == all ->
- chttpd:send_error(Req, {not_found, missing});
- {ok, Results} ->
- chttpd_stats:incr_reads(length(Results)),
- case MochiReq:accepts_content_type("multipart/mixed") of
- false ->
- {ok, Resp} = start_json_response(Req, 200),
- send_chunk(Resp, "["),
- % We loop through the docs. The first time through, the separator
- % is the empty string; a comma is used on subsequent iterations.
- lists:foldl(
- fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- {{not_found, missing}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- end,
- % AccSeparator now has a comma
- ","
- end,
- "",
- Results
- ),
- send_chunk(Resp, "]"),
- end_json_response(Resp);
- true ->
- send_docs_multipart(Req, Results, Options)
- end;
- {error, Error} ->
- chttpd:send_error(Req, Error)
- end
- end;
-db_doc_req(#httpd{method = 'POST', user_ctx = Ctx} = Req, Db, DocId) ->
- couch_httpd:validate_referer(Req),
- couch_db:validate_docid(Db, DocId),
- chttpd:validate_ctype(Req, "multipart/form-data"),
-
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx, Ctx}, {w, W}],
-
- Form = couch_httpd:parse_form(Req),
- case proplists:is_defined("_doc", Form) of
- true ->
- Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
- Doc = couch_doc_from_req(Req, Db, DocId, Json);
- false ->
- Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
- Doc =
- case fabric:open_revs(Db, DocId, [Rev], []) of
- {ok, [{ok, Doc0}]} ->
- chttpd_stats:incr_reads(),
- Doc0;
- {error, Error} ->
- throw(Error)
- end
- end,
- UpdatedAtts = [
- couch_att:new([
- {name, validate_attachment_name(Name)},
- {type, list_to_binary(ContentType)},
- {data, Content}
- ])
- || {Name, {ContentType, _}, Content} <-
- proplists:get_all_values("_attachments", Form)
- ],
- #doc{atts = OldAtts} = Doc,
- OldAtts2 = lists:flatmap(
- fun(Att) ->
- OldName = couch_att:fetch(name, Att),
- case [1 || A <- UpdatedAtts, couch_att:fetch(name, A) == OldName] of
- % the attachment wasn't in the UpdatedAtts, return it
- [] -> [Att];
- % the attachment was in the UpdatedAtts, drop it
- _ -> []
- end
- end,
- OldAtts
- ),
- NewDoc = Doc#doc{
- atts = UpdatedAtts ++ OldAtts2
- },
- case fabric:update_doc(Db, NewDoc, Options) of
- {ok, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
- end,
- send_json(
- Req,
- HttpCode,
- [{"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}],
- {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]}
- );
-db_doc_req(#httpd{method = 'PUT', user_ctx = Ctx} = Req, Db, DocId) ->
- #doc_query_args{
- update_type = UpdateType
- } = parse_doc_query(Req),
- DbName = couch_db:name(Db),
- couch_db:validate_docid(Db, DocId),
-
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx, Ctx}, {w, W}],
-
- Loc = absolute_uri(Req, [
- $/,
- couch_util:url_encode(DbName),
- $/,
- couch_util:url_encode(DocId)
- ]),
- RespHeaders = [{"Location", Loc}],
- case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
- ("multipart/related;" ++ _) = ContentType ->
- couch_httpd:check_max_request_length(Req),
- couch_httpd_multipart:num_mp_writers(mem3:n(mem3:dbname(DbName), DocId)),
- {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
- ContentType,
- fun() -> receive_request_data(Req) end
- ),
- Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
- try
- {HttpCode, RespHeaders1, RespBody} = update_doc_req(
- Req,
- Db,
- DocId,
- Doc,
- RespHeaders,
- UpdateType
- ),
- WaitFun(),
- send_json(Req, HttpCode, RespHeaders1, RespBody)
- catch
- throw:Err ->
- % Document rejected by a validate_doc_update function.
- couch_httpd_multipart:abort_multipart_stream(Parser),
- throw(Err)
- end;
- _Else ->
- case chttpd:qs_value(Req, "batch") of
- "ok" ->
- % batch
- Doc = couch_doc_from_req(Req, Db, DocId, chttpd:json_body(Req)),
-
- spawn(fun() ->
- case catch (fabric:update_doc(Db, Doc, Options)) of
- {ok, _} ->
- chttpd_stats:incr_writes(),
- ok;
- {accepted, _} ->
- chttpd_stats:incr_writes(),
- ok;
- Error ->
- couch_log:notice("Batch doc error (~s): ~p", [DocId, Error])
- end
- end),
- send_json(
- Req,
- 202,
- [],
- {[
- {ok, true},
- {id, DocId}
- ]}
- );
- _Normal ->
- % normal
- Body = chttpd:json_body(Req),
- Doc = couch_doc_from_req(Req, Db, DocId, Body),
- send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
- end
- end;
-db_doc_req(#httpd{method = 'COPY', user_ctx = Ctx} = Req, Db, SourceDocId) ->
- SourceRev =
- case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
- missing_rev -> nil;
- Rev -> Rev
- end,
- {TargetDocId0, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
- TargetDocId = list_to_binary(chttpd:unquote(TargetDocId0)),
- % open old doc
- Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
- % save new doc
- case
- fabric:update_doc(
- Db,
- Doc#doc{id = TargetDocId, revs = TargetRevs},
- [{user_ctx, Ctx}]
- )
- of
- {ok, NewTargetRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewTargetRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
- end,
- % respond
- DbName = couch_db:name(Db),
- {PartRes} = update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}),
- Loc = absolute_uri(
- Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(TargetDocId)
- ),
- send_json(
- Req,
- HttpCode,
- [
- {"Location", Loc},
- {"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}
- ],
- {PartRes}
- );
-db_doc_req(Req, _Db, _DocId) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
-send_doc(Req, Doc, Options) ->
- case Doc#doc.meta of
- [] ->
- DiskEtag = couch_httpd:doc_etag(Doc),
- % output etag only when we have no meta
- chttpd:etag_respond(Req, DiskEtag, fun() ->
- send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
- end);
- _ ->
- send_doc_efficiently(Req, Doc, [], Options)
- end.
-
-send_doc_efficiently(Req, #doc{atts = []} = Doc, Headers, Options) ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req, #doc{atts = Atts} = Doc, Headers, Options) ->
- case lists:member(attachments, Options) of
- true ->
- Refs = monitor_attachments(Atts),
- try
- case MochiReq:accepts_content_type("multipart/related") of
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
- true ->
- Boundary = couch_uuids:random(),
- JsonBytes = ?JSON_ENCODE(
- couch_doc:to_json_obj(
- Doc,
- [attachments, follows, att_encoding_info | Options]
- )
- ),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
- Boundary, JsonBytes, Atts, true
- ),
- CType = {"Content-Type", ContentType},
- {ok, Resp} = start_response_length(Req, 200, [CType | Headers], Len),
- couch_doc:doc_to_multi_part_stream(
- Boundary,
- JsonBytes,
- Atts,
- fun(Data) -> couch_httpd:send(Resp, Data) end,
- true
- )
- end
- after
- demonitor_refs(Refs)
- end;
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
- end.
-
-send_docs_multipart_bulk_get(Results, Options0, OuterBoundary, Resp) ->
- InnerBoundary = bulk_get_multipart_boundary(),
- Options = [attachments, follows, att_encoding_info | Options0],
- lists:foreach(
- fun
- ({ok, #doc{id = Id, revs = Revs, atts = Atts} = Doc}) ->
- Refs = monitor_attachments(Doc#doc.atts),
- try
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>),
- case Atts of
- [] ->
- couch_httpd:send_chunk(
- Resp, <<"\r\nContent-Type: application/json\r\n\r\n">>
- );
- _ ->
- lists:foreach(
- fun(Header) -> couch_httpd:send_chunk(Resp, Header) end,
- bulk_get_multipart_headers(Revs, Id, InnerBoundary)
- )
- end,
- couch_doc:doc_to_multi_part_stream(
- InnerBoundary,
- JsonBytes,
- Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data) end,
- true
- )
- after
- demonitor_refs(Refs)
- end;
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE(
- {[
- {<<"rev">>, RevStr},
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, <<"missing">>}
- ]}
- ),
- couch_httpd:send_chunk(
- Resp,
- [
- <<"\r\n--", OuterBoundary/binary>>,
- <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json
- ]
- )
- end,
- Results
- ).
-
-send_docs_multipart(Req, Results, Options1) ->
- OuterBoundary = couch_uuids:random(),
- InnerBoundary = couch_uuids:random(),
- Options = [attachments, follows, att_encoding_info | Options1],
- CType = {"Content-Type", "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
- {ok, Resp} = start_chunked_response(Req, 200, [CType]),
- couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
- lists:foreach(
- fun
- ({ok, #doc{atts = Atts} = Doc}) ->
- Refs = monitor_attachments(Doc#doc.atts),
- try
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
- InnerBoundary, JsonBytes, Atts, true
- ),
- couch_httpd:send_chunk(
- Resp, <<"\r\nContent-Type: ", ContentType/binary, "\r\n\r\n">>
- ),
- couch_doc:doc_to_multi_part_stream(
- InnerBoundary,
- JsonBytes,
- Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data) end,
- true
- ),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
- after
- demonitor_refs(Refs)
- end;
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- couch_httpd:send_chunk(
- Resp,
- [
- <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json,
- <<"\r\n--", OuterBoundary/binary>>
- ]
- )
- end,
- Results
- ),
- couch_httpd:send_chunk(Resp, <<"--">>),
- couch_httpd:last_chunk(Resp).
-
-bulk_get_multipart_headers({0, []}, Id, Boundary) ->
- [
- <<"\r\nX-Doc-Id: ", Id/binary>>,
- <<"\r\nContent-Type: multipart/related; boundary=", Boundary/binary, "\r\n\r\n">>
- ];
-bulk_get_multipart_headers({Start, [FirstRevId | _]}, Id, Boundary) ->
- RevStr = couch_doc:rev_to_str({Start, FirstRevId}),
- [
- <<"\r\nX-Doc-Id: ", Id/binary>>,
- <<"\r\nX-Rev-Id: ", RevStr/binary>>,
- <<"\r\nContent-Type: multipart/related; boundary=", Boundary/binary, "\r\n\r\n">>
- ].
-
-bulk_get_multipart_boundary() ->
- Unique = couch_uuids:random(),
- <<"--", Unique/binary>>.
-
-receive_request_data(Req) ->
- receive_request_data(Req, chttpd:body_length(Req)).
-
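-% For a chunked body, recv_chunked delivers every chunk to this process as a
-% message; return the first chunk together with a fun that yields the next
-% one, so the caller can pull the body incrementally.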
-receive_request_data(Req, Len) when Len == chunked ->
- Ref = make_ref(),
- ChunkFun = fun({_Length, Binary}, _State) ->
- self() ! {chunk, Ref, Binary}
- end,
- couch_httpd:recv_chunked(Req, 4096, ChunkFun, ok),
- GetChunk = fun GC() ->
- receive
- {chunk, Ref, Binary} -> {Binary, GC}
- end
- end,
- {
- receive
- {chunk, Ref, Binary} -> Binary
- end,
- GetChunk
- };
-receive_request_data(Req, LenLeft) when LenLeft > 0 ->
- Len = erlang:min(4096, LenLeft),
- Data = chttpd:recv(Req, Len),
- {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
-receive_request_data(_Req, _) ->
- throw(<<"expected more data">>).
-
-update_doc_result_to_json({error, _} = Error) ->
- {_Code, Err, Msg} = chttpd:error_info(Error),
- {[
- {error, Err},
- {reason, Msg}
- ]};
-update_doc_result_to_json({{Id, Rev}, Error}) ->
- {_Code, Err, Msg} = chttpd:error_info(Error),
- {[
- {id, Id},
- {rev, couch_doc:rev_to_str(Rev)},
- {error, Err},
- {reason, Msg}
- ]}.
-
-update_doc_result_to_json(#doc{id = DocId}, Result) ->
- update_doc_result_to_json(DocId, Result);
-update_doc_result_to_json(DocId, {ok, NewRev}) ->
- {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
-update_doc_result_to_json(DocId, {accepted, NewRev}) ->
- {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}, {accepted, true}]};
-update_doc_result_to_json(DocId, Error) ->
- {_Code, ErrorStr, Reason} = chttpd:error_info(Error),
- {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
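-% Fold purge results into a JSON-ready list. The status code starts at 201 and
-% is raised to 202 when any purge was merely accepted, or to the error's code
-% when a purge failed.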
-purge_results_to_json([], []) ->
- {201, []};
-purge_results_to_json([{DocId, _Revs} | RIn], [{ok, PRevs} | ROut]) ->
- {Code, Results} = purge_results_to_json(RIn, ROut),
- couch_stats:increment_counter([couchdb, document_purges, success]),
- {Code, [{DocId, couch_doc:revs_to_strs(PRevs)} | Results]};
-purge_results_to_json([{DocId, _Revs} | RIn], [{accepted, PRevs} | ROut]) ->
- {Code, Results} = purge_results_to_json(RIn, ROut),
- couch_stats:increment_counter([couchdb, document_purges, success]),
- NewResults = [{DocId, couch_doc:revs_to_strs(PRevs)} | Results],
- {erlang:max(Code, 202), NewResults};
-purge_results_to_json([{DocId, _Revs} | RIn], [Error | ROut]) ->
- {Code, Results} = purge_results_to_json(RIn, ROut),
- {NewCode, ErrorStr, Reason} = chttpd:error_info(Error),
- couch_stats:increment_counter([couchdb, document_purges, failure]),
- NewResults = [{DocId, {[{error, ErrorStr}, {reason, Reason}]}} | Results],
- {erlang:max(NewCode, Code), NewResults}.
-
-send_updated_doc(Req, Db, DocId, Json) ->
- send_updated_doc(Req, Db, DocId, Json, []).
-
-send_updated_doc(Req, Db, DocId, Doc, Headers) ->
- send_updated_doc(Req, Db, DocId, Doc, Headers, interactive_edit).
-
-send_updated_doc(Req, Db, DocId, Doc, Headers, Type) ->
- {Code, Headers1, Body} = update_doc_req(Req, Db, DocId, Doc, Headers, Type),
- send_json(Req, Code, Headers1, Body).
-
-update_doc_req(Req, Db, DocId, Doc, Headers, UpdateType) ->
- #httpd{user_ctx = Ctx} = Req,
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options =
- case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- [full_commit, UpdateType, {user_ctx, Ctx}, {w, W}];
- "false" ->
- [delay_commit, UpdateType, {user_ctx, Ctx}, {w, W}];
- _ ->
- [UpdateType, {user_ctx, Ctx}, {w, W}]
- end,
- {Status, {etag, Etag}, Body} = update_doc(Db, DocId, Doc, Options),
- HttpCode = http_code_from_status(Status),
- ResponseHeaders = [{"ETag", Etag} | Headers],
- {HttpCode, ResponseHeaders, Body}.
-
-http_code_from_status(Status) ->
- case Status of
- accepted ->
- 202;
- created ->
- 201;
- ok ->
- 200
- end.
-
-update_doc(Db, DocId, #doc{deleted = Deleted, body = DocBody} = Doc, Options) ->
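- % Run the fabric update in a monitored helper process so a crash there does
- % not take down the request process; throws, errors, and exits are forwarded
- % via the 'DOWN' message and re-raised in the caller below.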
- {_, Ref} = spawn_monitor(fun() ->
- try fabric:update_doc(Db, Doc, Options) of
- Resp ->
- exit({exit_ok, Resp})
- catch
- throw:Reason ->
- exit({exit_throw, Reason});
- error:Reason ->
- exit({exit_error, Reason});
- exit:Reason ->
- exit({exit_exit, Reason})
- end
- end),
- Result =
- receive
- {'DOWN', Ref, _, _, {exit_ok, Ret}} ->
- Ret;
- {'DOWN', Ref, _, _, {exit_throw, Reason}} ->
- throw(Reason);
- {'DOWN', Ref, _, _, {exit_error, Reason}} ->
- erlang:error(Reason);
- {'DOWN', Ref, _, _, {exit_exit, Reason}} ->
- erlang:exit(Reason)
- end,
-
- case Result of
- {ok, NewRev} ->
- Accepted = false;
- {accepted, NewRev} ->
- Accepted = true
- end,
- Etag = couch_httpd:doc_etag(DocId, DocBody, NewRev),
- Status =
- case {Accepted, Deleted} of
- {true, _} ->
- accepted;
- {false, true} ->
- ok;
- {false, false} ->
- created
- end,
- NewRevStr = couch_doc:rev_to_str(NewRev),
- Body = {[{ok, true}, {id, DocId}, {rev, NewRevStr}]},
- {Status, {etag, Etag}, Body}.
-
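-% Build a #doc{} from the request, reconciling the revision given in the body
-% (#doc.revs), the "rev" query parameter, and the If-Match header; conflicting
-% values are rejected with a bad_request.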
-couch_doc_from_req(Req, _Db, DocId, #doc{revs = Revs} = Doc) ->
- validate_attachment_names(Doc),
- Rev =
- case chttpd:qs_value(Req, "rev") of
- undefined ->
- undefined;
- QSRev ->
- couch_doc:parse_rev(QSRev)
- end,
- Revs2 =
- case Revs of
- {Start, [RevId | _]} ->
- if
- Rev /= undefined andalso Rev /= {Start, RevId} ->
- throw(
- {bad_request,
- "Document rev from request body and query "
- "string have different values"}
- );
- true ->
- case extract_header_rev(Req, {Start, RevId}) of
- missing_rev -> {0, []};
- _ -> Revs
- end
- end;
- _ ->
- case extract_header_rev(Req, Rev) of
- missing_rev -> {0, []};
- {Pos, RevId2} -> {Pos, [RevId2]}
- end
- end,
- Doc#doc{id = DocId, revs = Revs2};
-couch_doc_from_req(Req, Db, DocId, Json) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, Json),
- couch_doc_from_req(Req, Db, DocId, Doc).
-
-% Useful for debugging
-% couch_doc_open(Db, DocId) ->
-% couch_doc_open(Db, DocId, nil, []).
-
-couch_doc_open(Db, DocId, Rev, Options0) ->
- Options = [{user_ctx, couch_db:get_user_ctx(Db)} | Options0],
- case Rev of
- % open most recent rev
- nil ->
- case fabric:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- chttpd_stats:incr_reads(),
- Doc;
- Error ->
- throw(Error)
- end;
- % open a specific rev (deletions come back as stubs)
- _ ->
- case fabric:open_revs(Db, DocId, [Rev], Options) of
- {ok, [{ok, Doc}]} ->
- chttpd_stats:incr_reads(),
- Doc;
- {ok, [{{not_found, missing}, Rev}]} ->
- throw(not_found);
- {ok, [Else]} ->
- throw(Else);
- {error, Error} ->
- throw(Error)
- end
- end.
-
-get_existing_attachment(Atts, FileName) ->
- % Check that the attachment exists; if not, throw not_found
- case [A || A <- Atts, couch_att:fetch(name, A) == FileName] of
- [] -> throw({not_found, "Document is missing attachment"});
- [Att] -> Att
- end.
-
-% Attachment request handlers
-
-db_attachment_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId, FileNameParts) ->
- FileName = list_to_binary(
- mochiweb_util:join(
- lists:map(
- fun binary_to_list/1,
- FileNameParts
- ),
- "/"
- )
- ),
- #doc_query_args{
- rev = Rev,
- options = Options
- } = parse_doc_query(Req),
- #doc{
- atts = Atts
- } = Doc = couch_doc_open(Db, DocId, Rev, Options),
- Att = get_existing_attachment(Atts, FileName),
- [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch(
- [type, encoding, disk_len, att_len, md5], Att
- ),
- Refs = monitor_attachments(Att),
- try
- Etag =
- case Md5 of
- <<>> -> chttpd:doc_etag(Doc);
- _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
- end,
- ReqAcceptsAttEnc = lists:member(
- atom_to_list(Enc),
- couch_httpd:accepted_encodings(Req)
- ),
- Headers0 =
- [
- {"ETag", Etag},
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", binary_to_list(Type)}
- ] ++
- case ReqAcceptsAttEnc of
- true when Enc =/= identity ->
- % RFC 2616 says that the 'identity' encoding should not be used in
- % the Content-Encoding header
- [{"Content-Encoding", atom_to_list(Enc)}];
- _ ->
- []
- end ++
- case Enc of
- identity ->
- [{"Accept-Ranges", "bytes"}];
- _ ->
- [{"Accept-Ranges", "none"}]
- end,
- Headers = chttpd_util:maybe_add_csp_header("attachments", Headers0, "sandbox"),
- Len =
- case {Enc, ReqAcceptsAttEnc} of
- {identity, _} ->
- % stored and served in identity form
- DiskLen;
- {_, false} when DiskLen =/= AttLen ->
- % Stored encoded, but client doesn't accept the encoding we used,
- % so we need to decode on the fly. DiskLen is the identity length
- % of the attachment.
- DiskLen;
- {_, true} ->
- % Stored and served encoded. AttLen is the encoded length.
- AttLen;
- _ ->
- % We received an encoded attachment and stored it as such, so we
- % don't know the identity length. The client doesn't accept the
- % encoding, and since we cannot serve a correct Content-Length
- % header we'll fall back to a chunked response.
- undefined
- end,
- AttFun =
- case ReqAcceptsAttEnc of
- false ->
- fun couch_att:foldl_decode/3;
- true ->
- fun couch_att:foldl/3
- end,
- chttpd:etag_respond(
- Req,
- Etag,
- fun() ->
- case Len of
- undefined ->
- {ok, Resp} = start_chunked_response(Req, 200, Headers),
- AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- couch_httpd:last_chunk(Resp);
- _ ->
- Ranges = parse_ranges(MochiReq:get(range), Len),
- case {Enc, Ranges} of
- {identity, [{From, To}]} ->
- Headers1 =
- [{"Content-Range", make_content_range(From, To, Len)}] ++
- Headers,
- {ok, Resp} = start_response_length(
- Req, 206, Headers1, To - From + 1
- ),
- couch_att:range_foldl(
- Att,
- From,
- To + 1,
- fun(Seg, _) -> send(Resp, Seg) end,
- {ok, Resp}
- );
- {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
- send_ranges_multipart(Req, Type, Len, Att, Ranges);
- _ ->
- Headers1 =
- Headers ++
- if
- Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
- [
- {"Content-MD5",
- base64:encode(couch_att:fetch(md5, Att))}
- ];
- true ->
- []
- end,
- {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
- AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
- end
- end
- end
- )
- after
- demonitor_refs(Refs)
- end;
-db_attachment_req(#httpd{method = Method, user_ctx = Ctx} = Req, Db, DocId, FileNameParts) when
- (Method == 'PUT') or (Method == 'DELETE')
-->
- FileName = validate_attachment_name(
- mochiweb_util:join(
- lists:map(
- fun binary_to_list/1,
- FileNameParts
- ),
- "/"
- )
- ),
-
- NewAtt =
- case Method of
- 'DELETE' ->
- [];
- _ ->
- MimeType =
- case couch_httpd:header_value(Req, "Content-Type") of
- % We could throw an error here or guess the type from the FileName.
- % For now, just give it a default.
- undefined -> <<"application/octet-stream">>;
- CType -> list_to_binary(CType)
- end,
- Data = fabric:att_receiver(Req, couch_db:name(Db), chttpd:body_length(Req)),
- ContentLen =
- case couch_httpd:header_value(Req, "Content-Length") of
- undefined -> undefined;
- Length -> list_to_integer(Length)
- end,
- ContentEnc = string:to_lower(
- string:strip(
- couch_httpd:header_value(Req, "Content-Encoding", "identity")
- )
- ),
- Encoding =
- case ContentEnc of
- "identity" ->
- identity;
- "gzip" ->
- gzip;
- _ ->
- throw({
- bad_ctype,
- "Only gzip and identity content-encodings are supported"
- })
- end,
- [
- couch_att:new([
- {name, FileName},
- {type, MimeType},
- {data, Data},
- {att_len, ContentLen},
- {md5, get_md5_header(Req)},
- {encoding, Encoding}
- ])
- ]
- end,
-
- Doc =
- case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
- % make the new doc
- missing_rev ->
- if
- Method =/= 'DELETE' ->
- ok;
- true ->
- % check for the existence of the doc and attachment
- CurrDoc = #doc{} = couch_doc_open(Db, DocId, nil, []),
- get_existing_attachment(CurrDoc#doc.atts, FileName)
- end,
- couch_db:validate_docid(Db, DocId),
- #doc{id = DocId};
- Rev ->
- case fabric:open_revs(Db, DocId, [Rev], [{user_ctx, Ctx}]) of
- {ok, [{ok, Doc0}]} ->
- chttpd_stats:incr_reads(),
- if
- Method =/= 'DELETE' ->
- ok;
- true ->
- % check if attachment exists
- get_existing_attachment(Doc0#doc.atts, FileName)
- end,
- Doc0;
- {ok, [Error]} ->
- throw(Error);
- {error, Error} ->
- throw(Error)
- end
- end,
-
- #doc{atts = Atts} = Doc,
- DocEdited = Doc#doc{
- atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
- },
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- case fabric:update_doc(Db, DocEdited, [{user_ctx, Ctx}, {w, W}]) of
- {ok, UpdatedRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, UpdatedRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
- end,
- erlang:put(mochiweb_request_recv, true),
- DbName = couch_db:name(Db),
-
- {Status, Headers} =
- case Method of
- 'DELETE' ->
- {200, []};
- _ ->
- {HttpCode, [
- {"Location",
- absolute_uri(Req, [
- $/,
- DbName,
- $/,
- couch_util:url_encode(DocId),
- $/,
- couch_util:url_encode(FileName)
- ])}
- ]}
- end,
- send_json(
- Req,
- Status,
- Headers,
- {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(UpdatedRev)}
- ]}
- );
-db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
-
-send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
- Boundary = couch_uuids:random(),
- CType = {"Content-Type", "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
- {ok, Resp} = start_chunked_response(Req, 206, [CType]),
- couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
- lists:foreach(
- fun({From, To}) ->
- ContentRange = make_content_range(From, To, Len),
- couch_httpd:send_chunk(
- Resp,
- <<"\r\nContent-Type: ", ContentType/binary, "\r\n", "Content-Range: ",
- ContentRange/binary, "\r\n", "\r\n">>
- ),
- couch_att:range_foldl(
- Att,
- From,
- To + 1,
- fun(Seg, _) -> send_chunk(Resp, Seg) end,
- {ok, Resp}
- ),
- couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
- end,
- Ranges
- ),
- couch_httpd:send_chunk(Resp, <<"--">>),
- couch_httpd:last_chunk(Resp),
- {ok, Resp}.
-
-parse_ranges(undefined, _Len) ->
- undefined;
-parse_ranges(fail, _Len) ->
- undefined;
-parse_ranges(Ranges, Len) ->
- parse_ranges(Ranges, Len, []).
-
-parse_ranges([], _Len, Acc) ->
- lists:reverse(Acc);
-parse_ranges([{0, none} | _], _Len, _Acc) ->
- undefined;
-parse_ranges([{From, To} | _], _Len, _Acc) when
- is_integer(From) andalso is_integer(To) andalso To < From
-->
- throw(requested_range_not_satisfiable);
-parse_ranges([{From, To} | Rest], Len, Acc) when
- is_integer(To) andalso To >= Len
-->
- parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
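-% A {none, To} range means "the last To bytes" of the attachment; {From, none}
-% means from byte From through the end.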
-parse_ranges([{none, To} | Rest], Len, Acc) ->
- parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none} | Rest], Len, Acc) ->
- parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, To} | Rest], Len, Acc) ->
- parse_ranges(Rest, Len, [{From, To}] ++ Acc).
-
-make_content_range(From, To, Len) ->
- ?l2b(io_lib:format("bytes ~B-~B/~B", [From, To, Len])).
-
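-% Determine the expected attachment MD5 for the upload: decode a Content-MD5
-% header directly; for a chunked body that declares Content-MD5 in its Trailer
-% header, return the marker atom md5_in_footer instead.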
-get_md5_header(Req) ->
- ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
- Length = couch_httpd:body_length(Req),
- Trailer = couch_httpd:header_value(Req, "Trailer"),
- case {ContentMD5, Length, Trailer} of
- _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
- base64:decode(ContentMD5);
- {_, chunked, undefined} ->
- <<>>;
- {_, chunked, _} ->
- case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
- {match, _} ->
- md5_in_footer;
- _ ->
- <<>>
- end;
- _ ->
- <<>>
- end.
-
-parse_doc_query(Req) ->
- lists:foldl(fun parse_doc_query/2, #doc_query_args{}, chttpd:qs(Req)).
-
-parse_shards_opt(Req) ->
- [
- {n, parse_shards_opt("n", Req, config:get_integer("cluster", "n", 3))},
- {q, parse_shards_opt("q", Req, config:get_integer("cluster", "q", 2))},
- {placement,
- parse_shards_opt(
- "placement", Req, config:get("cluster", "placement")
- )}
- ].
-
-parse_shards_opt("placement", Req, Default) ->
- Err = <<"The `placement` value should be in a format `zone:n`.">>,
- case chttpd:qs_value(Req, "placement", Default) of
- Default ->
- Default;
- [] ->
- throw({bad_request, Err});
- Val ->
- try
- true = lists:all(
- fun(Rule) ->
- [_, N] = string:tokens(Rule, ":"),
- couch_util:validate_positive_int(N)
- end,
- string:tokens(Val, ",")
- ),
- Val
- catch
- _:_ ->
- throw({bad_request, Err})
- end
- end;
-parse_shards_opt(Param, Req, Default) ->
- Val = chttpd:qs_value(Req, Param, Default),
- Err = ?l2b(["The `", Param, "` value should be a positive integer."]),
- case couch_util:validate_positive_int(Val) of
- true -> Val;
- false -> throw({bad_request, Err})
- end.
-
-parse_engine_opt(Req) ->
- case chttpd:qs_value(Req, "engine") of
- undefined ->
- [];
- Extension ->
- Available = couch_server:get_engine_extensions(),
- case lists:member(Extension, Available) of
- true ->
- [{engine, iolist_to_binary(Extension)}];
- false ->
- throw({bad_request, invalid_engine_extension})
- end
- end.
-
-parse_partitioned_opt(Req) ->
- case chttpd:qs_value(Req, "partitioned") of
- undefined ->
- [];
- "false" ->
- [];
- "true" ->
- ok = validate_partitioned_db_enabled(Req),
- [
- {partitioned, true},
- {hash, [couch_partition, hash, []]}
- ];
- _ ->
- throw({bad_request, <<"Invalid `partitioned` parameter">>})
- end.
-
-validate_partitioned_db_enabled(Req) ->
- case couch_flags:is_enabled(partitioned, Req) of
- true ->
- ok;
- false ->
- throw({bad_request, <<"Partitioned feature is not enabled.">>})
- end.
-
-parse_doc_query({Key, Value}, Args) ->
- case {Key, Value} of
- {"attachments", "true"} ->
- Options = [attachments | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"meta", "true"} ->
- Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"revs", "true"} ->
- Options = [revs | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"local_seq", "true"} ->
- Options = [local_seq | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"revs_info", "true"} ->
- Options = [revs_info | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"conflicts", "true"} ->
- Options = [conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"deleted", "true"} ->
- Options = [deleted | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"deleted_conflicts", "true"} ->
- Options = [deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"rev", Rev} ->
- Args#doc_query_args{rev = couch_doc:parse_rev(Rev)};
- {"open_revs", "all"} ->
- Args#doc_query_args{open_revs = all};
- {"open_revs", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{open_revs = couch_doc:parse_revs(JsonArray)};
- {"latest", "true"} ->
- Options = [latest | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"atts_since", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
- {"new_edits", "false"} ->
- Args#doc_query_args{update_type = replicated_changes};
- {"new_edits", "true"} ->
- Args#doc_query_args{update_type = interactive_edit};
- {"att_encoding_info", "true"} ->
- Options = [att_encoding_info | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"r", R} ->
- Options = [{r, R} | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"w", W} ->
- Options = [{w, W} | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- % unknown key value pair, ignore.
- _Else ->
- Args
- end.
-
-parse_changes_query(Req) ->
- erlang:erase(changes_seq_interval),
- ChangesArgs = lists:foldl(
- fun({Key, Value}, Args) ->
- case {string:to_lower(Key), Value} of
- {"feed", "live"} ->
- %% sugar for continuous
- Args#changes_args{feed = "continuous"};
- {"feed", _} ->
- Args#changes_args{feed = Value};
- {"descending", "true"} ->
- Args#changes_args{dir = rev};
- {"since", _} ->
- Args#changes_args{since = Value};
- {"last-event-id", _} ->
- Args#changes_args{since = Value};
- {"limit", _} ->
- Args#changes_args{limit = list_to_integer(Value)};
- {"style", _} ->
- Args#changes_args{style = list_to_existing_atom(Value)};
- {"heartbeat", "true"} ->
- Args#changes_args{heartbeat = true};
- {"heartbeat", _} ->
- try list_to_integer(Value) of
- HeartbeatInteger when HeartbeatInteger > 0 ->
- Args#changes_args{heartbeat = HeartbeatInteger};
- _ ->
- throw(
- {bad_request,
- <<"The heartbeat value should be a positive integer (in milliseconds).">>}
- )
- catch
- error:badarg ->
- throw(
- {bad_request,
- <<"Invalid heartbeat value. Expecting a positive integer value (in milliseconds).">>}
- )
- end;
- {"timeout", _} ->
- Args#changes_args{timeout = list_to_integer(Value)};
- {"include_docs", "true"} ->
- Args#changes_args{include_docs = true};
- {"conflicts", "true"} ->
- Args#changes_args{conflicts = true};
- {"attachments", "true"} ->
- Options = [attachments | Args#changes_args.doc_options],
- Args#changes_args{doc_options = Options};
- {"att_encoding_info", "true"} ->
- Options = [att_encoding_info | Args#changes_args.doc_options],
- Args#changes_args{doc_options = Options};
- {"filter", _} ->
- Args#changes_args{filter = Value};
- {"seq_interval", _} ->
- try list_to_integer(Value) of
- V when V > 0 ->
- erlang:put(changes_seq_interval, V),
- Args;
- _ ->
- throw({bad_request, invalid_seq_interval})
- catch
- error:badarg ->
- throw({bad_request, invalid_seq_interval})
- end;
- % unknown key value pair, ignore.
- _Else ->
- Args
- end
- end,
- #changes_args{},
- chttpd:qs(Req)
- ),
- %% If this is an EventSource request with a Last-Event-ID header, that
- %% header should override the `since` query string, since it's probably
- %% the browser reconnecting.
- case ChangesArgs#changes_args.feed of
- "eventsource" ->
- case couch_httpd:header_value(Req, "last-event-id") of
- undefined ->
- ChangesArgs;
- Value ->
- ChangesArgs#changes_args{since = Value}
- end;
- _ ->
- ChangesArgs
- end.
-
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev) ->
- extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
-extract_header_rev(Req, ExplicitRev) ->
- Etag =
- case chttpd:header_value(Req, "If-Match") of
- undefined -> undefined;
- Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
- end,
- case {ExplicitRev, Etag} of
- {undefined, undefined} -> missing_rev;
- {_, undefined} -> ExplicitRev;
- {undefined, _} -> Etag;
- _ when ExplicitRev == Etag -> Etag;
- _ -> throw({bad_request, "Document rev and etag have different values"})
- end.
-
-validate_security_can_be_edited(DbName) ->
- UserDbName = config:get("chttpd_auth", "authentication_db", "_users"),
- CanEditUserSecurityObject = config:get("couchdb", "users_db_security_editable", "false"),
- case {DbName, CanEditUserSecurityObject} of
- {UserDbName, "false"} ->
- Msg = "You can't edit the security object of the user database.",
- throw({forbidden, Msg});
- {_, _} ->
- ok
- end.
-
-validate_revs(_Doc, true) ->
- ok;
-validate_revs(#doc{revs = {0, []}}, false) ->
- throw(
- {bad_request,
- ?l2b(
- "When `new_edits: false`, " ++
- "the document needs `_rev` or `_revisions` specified"
- )}
- );
-validate_revs(_Doc, false) ->
- ok.
-
-validate_attachment_names(Doc) ->
- lists:foreach(
- fun(Att) ->
- Name = couch_att:fetch(name, Att),
- validate_attachment_name(Name)
- end,
- Doc#doc.atts
- ).
-
-validate_attachment_name(Name) when is_list(Name) ->
- validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_", Rest/binary>>) ->
- throw(
- {bad_request,
- <<"Attachment name '_", Rest/binary, "' starts with prohibited character '_'">>}
- );
-validate_attachment_name(Name) ->
- case couch_util:validate_utf8(Name) of
- true -> Name;
- false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
- end.
-
--spec monitor_attachments(couch_att:att() | [couch_att:att()]) -> [reference()].
-monitor_attachments(Atts) when is_list(Atts) ->
- lists:foldl(
- fun(Att, Monitors) ->
- case couch_att:fetch(data, Att) of
- {Fd, _} ->
- [monitor(process, Fd) | Monitors];
- stub ->
- Monitors;
- Else ->
- couch_log:error("~p from couch_att:fetch(data, ~p)", [Else, Att]),
- Monitors
- end
- end,
- [],
- Atts
- );
-monitor_attachments(Att) ->
- monitor_attachments([Att]).
-
-demonitor_refs(Refs) when is_list(Refs) ->
- [demonitor(Ref) || Ref <- Refs].
-
-set_namespace(<<"_all_docs">>, Args) ->
- set_namespace(undefined, Args);
-set_namespace(<<"_local_docs">>, Args) ->
- set_namespace(<<"_local">>, Args);
-set_namespace(<<"_design_docs">>, Args) ->
- set_namespace(<<"_design">>, Args);
-set_namespace(NS, #mrargs{} = Args) ->
- couch_mrview_util:set_extra(Args, namespace, NS).
-
-%% /db/_bulk_get helpers
-
-bulk_get_parse_doc_query(Req) ->
- lists:foldl(
- fun({Key, Value}, Args) ->
- ok = validate_query_param(Key),
- parse_doc_query({Key, Value}, Args)
- end,
- #doc_query_args{},
- chttpd:qs(Req)
- ).
-
-validate_query_param("open_revs" = Key) ->
- throw_bad_query_param(Key);
-validate_query_param("new_edits" = Key) ->
- throw_bad_query_param(Key);
-validate_query_param("w" = Key) ->
- throw_bad_query_param(Key);
-validate_query_param("rev" = Key) ->
- throw_bad_query_param(Key);
-validate_query_param("atts_since" = Key) ->
- throw_bad_query_param(Key);
-validate_query_param(_) ->
- ok.
-
-throw_bad_query_param(Key) when is_list(Key) ->
- throw_bad_query_param(?l2b(Key));
-throw_bad_query_param(Key) when is_binary(Key) ->
- Msg = <<"\"", Key/binary, "\" query parameter is not acceptable">>,
- throw({bad_request, Msg}).
-
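-% Validate and open a single _bulk_get entry in stages. The last argument is an
-% accumulator that grows as fields are parsed: {} -> {DocId} -> {DocId, Revs}
-% -> {DocId, Revs, Options}; the final clause performs the actual open_revs.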
-bulk_get_open_doc_revs(Db, {Props}, Options) ->
- bulk_get_open_doc_revs1(Db, Props, Options, {}).
-
-bulk_get_open_doc_revs1(Db, Props, Options, {}) ->
- case couch_util:get_value(<<"id">>, Props) of
- undefined ->
- Error = {null, bad_request, <<"document id missed">>},
- {null, {error, Error}, Options};
- DocId ->
- try
- couch_db:validate_docid(Db, DocId),
- bulk_get_open_doc_revs1(Db, Props, Options, {DocId})
- catch
- throw:{Error, Reason} ->
- {DocId, {error, {null, Error, Reason}}, Options}
- end
- end;
-bulk_get_open_doc_revs1(Db, Props, Options, {DocId}) ->
- RevStr = couch_util:get_value(<<"rev">>, Props),
-
- case parse_field(<<"rev">>, RevStr) of
- {error, {RevStr, Error, Reason}} ->
- {DocId, {error, {RevStr, Error, Reason}}, Options};
- {ok, undefined} ->
- bulk_get_open_doc_revs1(Db, Props, Options, {DocId, all});
- {ok, Rev} ->
- bulk_get_open_doc_revs1(Db, Props, Options, {DocId, [Rev]})
- end;
-bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs}) ->
- AttsSinceStr = couch_util:get_value(<<"atts_since">>, Props),
-
- case parse_field(<<"atts_since">>, AttsSinceStr) of
- {error, {BadAttsSinceRev, Error, Reason}} ->
- {DocId, {error, {BadAttsSinceRev, Error, Reason}}, Options};
- {ok, []} ->
- bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options});
- {ok, RevList} ->
- Options1 = [{atts_since, RevList}, attachments | Options],
- bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options1})
- end;
-bulk_get_open_doc_revs1(Db, Props, _, {DocId, Revs, Options}) ->
- case fabric:open_revs(Db, DocId, Revs, Options) of
- {ok, []} ->
- RevStr = couch_util:get_value(<<"rev">>, Props),
- Error = {RevStr, <<"not_found">>, <<"missing">>},
- {DocId, {error, Error}, Options};
- {ok, Resps} = Results ->
- chttpd_stats:incr_reads(length(Resps)),
- {DocId, Results, Options};
- Else ->
- {DocId, Else, Options}
- end.
-
-parse_field(<<"rev">>, undefined) ->
- {ok, undefined};
-parse_field(<<"rev">>, Value) ->
- try
- Rev = couch_doc:parse_rev(Value),
- {ok, Rev}
- catch
- throw:{bad_request = Error, Reason} ->
- {error, {Value, Error, Reason}}
- end;
-parse_field(<<"atts_since">>, undefined) ->
- {ok, []};
-parse_field(<<"atts_since">>, []) ->
- {ok, []};
-parse_field(<<"atts_since">>, Value) when is_list(Value) ->
- parse_atts_since(Value, []);
-parse_field(<<"atts_since">>, Value) ->
- {error, {Value, bad_request, <<"att_since value must be array of revs.">>}}.
-
-parse_atts_since([], Acc) ->
- {ok, lists:reverse(Acc)};
-parse_atts_since([RevStr | Rest], Acc) ->
- case parse_field(<<"rev">>, RevStr) of
- {ok, Rev} ->
- parse_atts_since(Rest, [Rev | Acc]);
- {error, _} = Error ->
- Error
- end.
-
-bulk_get_send_docs_json(Resp, DocId, Results, Options, Sep) ->
- Id = ?JSON_ENCODE(DocId),
- send_chunk(Resp, [Sep, <<"{\"id\": ">>, Id, <<", \"docs\": [">>]),
- bulk_get_send_docs_json1(Resp, DocId, Results, Options),
- send_chunk(Resp, <<"]}">>).
-
-bulk_get_send_docs_json1(Resp, DocId, {error, {Rev, Error, Reason}}, _) ->
- send_chunk(Resp, [bulk_get_json_error(DocId, Rev, Error, Reason)]);
-bulk_get_send_docs_json1(_Resp, _DocId, {ok, []}, _) ->
- ok;
-bulk_get_send_docs_json1(Resp, DocId, {ok, Docs}, Options) ->
- lists:foldl(
- fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, [AccSeparator, Json]);
- {{Error, Reason}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = bulk_get_json_error(DocId, RevStr, Error, Reason),
- send_chunk(Resp, [AccSeparator, Json])
- end,
- <<",">>
- end,
- <<"">>,
- Docs
- ).
-
-bulk_get_json_error(DocId, Rev, Error, Reason) ->
- ?JSON_ENCODE(
- {[
- {error,
- {[
- {<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, Error},
- {<<"reason">>, Reason}
- ]}}
- ]}
- ).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-monitor_attachments_test_() ->
- {"ignore stubs", fun() ->
- Atts = [couch_att:new([{data, stub}])],
- ?_assertEqual([], monitor_attachments(Atts))
- end}.
-
-parse_partitioned_opt_test_() ->
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_should_allow_partitioned_db(),
- t_should_throw_on_not_allowed_partitioned_db(),
- t_returns_empty_array_for_partitioned_false(),
- t_returns_empty_array_for_no_partitioned_qs()
- ]
- }.
-
-parse_shards_opt_test_() ->
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_should_allow_valid_q(),
- t_should_default_on_missing_q(),
- t_should_throw_on_invalid_q(),
- t_should_allow_valid_n(),
- t_should_default_on_missing_n(),
- t_should_throw_on_invalid_n(),
- t_should_allow_valid_placement(),
- t_should_default_on_missing_placement(),
- t_should_throw_on_invalid_placement()
- ]
- }.
-
-setup() ->
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- ok.
-
-teardown(_) ->
- meck:unload().
-
-mock_request(Url) ->
- Headers = mochiweb_headers:make([{"Host", "examples.com"}]),
- MochiReq = mochiweb_request:new(nil, 'PUT', Url, {1, 1}, Headers),
- #httpd{mochi_req = MochiReq}.
-
-t_should_allow_partitioned_db() ->
- ?_test(begin
- meck:expect(couch_flags, is_enabled, 2, true),
- Req = mock_request("/all-test21?partitioned=true"),
- [Partitioned, _] = parse_partitioned_opt(Req),
- ?assertEqual(Partitioned, {partitioned, true})
- end).
-
-t_should_throw_on_not_allowed_partitioned_db() ->
- ?_test(begin
- meck:expect(couch_flags, is_enabled, 2, false),
- Req = mock_request("/all-test21?partitioned=true"),
- Throw = {bad_request, <<"Partitioned feature is not enabled.">>},
- ?assertThrow(Throw, parse_partitioned_opt(Req))
- end).
-
-t_returns_empty_array_for_partitioned_false() ->
- ?_test(begin
- Req = mock_request("/all-test21?partitioned=false"),
- ?assertEqual(parse_partitioned_opt(Req), [])
- end).
-
-t_returns_empty_array_for_no_partitioned_qs() ->
- ?_test(begin
- Req = mock_request("/all-test21"),
- ?assertEqual(parse_partitioned_opt(Req), [])
- end).
-
-t_should_allow_valid_q() ->
- ?_test(begin
- Req = mock_request("/all-test21?q=1"),
- Opts = parse_shards_opt(Req),
- ?assertEqual("1", couch_util:get_value(q, Opts))
- end).
-
-t_should_default_on_missing_q() ->
- ?_test(begin
- Req = mock_request("/all-test21"),
- Opts = parse_shards_opt(Req),
- ?assertEqual(2, couch_util:get_value(q, Opts))
- end).
-
-t_should_throw_on_invalid_q() ->
- ?_test(begin
- Req = mock_request("/all-test21?q="),
- Err = <<"The `q` value should be a positive integer.">>,
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end).
-
-t_should_allow_valid_n() ->
- ?_test(begin
- Req = mock_request("/all-test21?n=1"),
- Opts = parse_shards_opt(Req),
- ?assertEqual("1", couch_util:get_value(n, Opts))
- end).
-
-t_should_default_on_missing_n() ->
- ?_test(begin
- Req = mock_request("/all-test21"),
- Opts = parse_shards_opt(Req),
- ?assertEqual(3, couch_util:get_value(n, Opts))
- end).
-
-t_should_throw_on_invalid_n() ->
- ?_test(begin
- Req = mock_request("/all-test21?n="),
- Err = <<"The `n` value should be a positive integer.">>,
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end).
-
-t_should_allow_valid_placement() ->
- {
- foreach,
- fun() -> ok end,
- [
- {"single zone",
- ?_test(begin
- Req = mock_request("/all-test21?placement=az:1"),
- Opts = parse_shards_opt(Req),
- ?assertEqual("az:1", couch_util:get_value(placement, Opts))
- end)},
- {"multi zone",
- ?_test(begin
- Req = mock_request("/all-test21?placement=az:1,co:3"),
- Opts = parse_shards_opt(Req),
- ?assertEqual(
- "az:1,co:3",
- couch_util:get_value(placement, Opts)
- )
- end)}
- ]
- }.
-
-t_should_default_on_missing_placement() ->
- ?_test(begin
- Req = mock_request("/all-test21"),
- Opts = parse_shards_opt(Req),
- ?assertEqual(undefined, couch_util:get_value(placement, Opts))
- end).
-
-t_should_throw_on_invalid_placement() ->
- Err = <<"The `placement` value should be in a format `zone:n`.">>,
- {
- foreach,
- fun() -> ok end,
- [
- {"empty placement",
- ?_test(begin
- Req = mock_request("/all-test21?placement="),
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end)},
- {"invalid format",
- ?_test(begin
- Req = mock_request("/all-test21?placement=moon"),
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end)},
- {"invalid n",
- ?_test(begin
- Req = mock_request("/all-test21?placement=moon:eagle"),
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end)},
- {"one invalid zone",
- ?_test(begin
- Req = mock_request("/all-test21?placement=az:1,co:moon"),
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end)}
- ]
- }.
-
--endif.
diff --git a/src/chttpd/src/chttpd_epi.erl b/src/chttpd/src/chttpd_epi.erl
deleted file mode 100644
index 5536c9e4d..000000000
--- a/src/chttpd/src/chttpd_epi.erl
+++ /dev/null
@@ -1,52 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- chttpd.
-
-providers() ->
- [
- {chttpd_handlers, chttpd_httpd_handlers}
- ].
-
-services() ->
- [
- {chttpd_auth, chttpd_auth},
- {chttpd_handlers, chttpd_handlers},
- {chttpd, chttpd_plugin}
- ].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl
deleted file mode 100644
index 352087d58..000000000
--- a/src/chttpd/src/chttpd_external.erl
+++ /dev/null
@@ -1,218 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_external).
-
--compile(tuple_calls).
-
--export([send_external_response/2]).
--export([json_req_obj_fields/0, json_req_obj/2, json_req_obj/3, json_req_obj/4]).
--export([default_or_content_type/2, parse_external_response/1]).
-
--import(chttpd, [send_error/4]).
-
--include_lib("couch/include/couch_db.hrl").
-
-json_req_obj(Req, Db) ->
- json_req_obj(Req, Db, null).
-json_req_obj(Req, Db, DocId) ->
- json_req_obj(Req, Db, DocId, all).
-json_req_obj(Req, Db, DocId, all) ->
- Fields = json_req_obj_fields(),
- json_req_obj(Req, Db, DocId, Fields);
-json_req_obj(Req, Db, DocId, Fields) when is_list(Fields) ->
- {[{Field, json_req_obj_field(Field, Req, Db, DocId)} || Field <- Fields]}.
-
-json_req_obj_fields() ->
- [
- <<"info">>,
- <<"uuid">>,
- <<"id">>,
- <<"method">>,
- <<"requested_path">>,
- <<"path">>,
- <<"raw_path">>,
- <<"query">>,
- <<"headers">>,
- <<"body">>,
- <<"peer">>,
- <<"form">>,
- <<"cookie">>,
- <<"userCtx">>,
- <<"secObj">>
- ].
-
-json_req_obj_field(<<"info">>, #httpd{}, Db, _DocId) ->
- {ok, Info} = get_db_info(Db),
- {Info};
-json_req_obj_field(<<"uuid">>, #httpd{}, _Db, _DocId) ->
- couch_uuids:new();
-json_req_obj_field(<<"id">>, #httpd{}, _Db, DocId) ->
- DocId;
-json_req_obj_field(<<"method">>, #httpd{method = Method}, _Db, _DocId) ->
- Method;
-json_req_obj_field(<<"requested_path">>, #httpd{requested_path_parts = Path}, _Db, _DocId) ->
- Path;
-json_req_obj_field(<<"path">>, #httpd{path_parts = Path}, _Db, _DocId) ->
- Path;
-json_req_obj_field(<<"raw_path">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
- ?l2b(Req:get(raw_path));
-json_req_obj_field(<<"query">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
- json_query_keys(to_json_terms(Req:parse_qs()));
-json_req_obj_field(<<"headers">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
- Headers = Req:get(headers),
- Hlist = mochiweb_headers:to_list(Headers),
- to_json_terms(Hlist);
-json_req_obj_field(<<"body">>, #httpd{req_body = undefined, mochi_req = Req}, _Db, _DocId) ->
- MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296
- ),
- try
- Req:recv_body(MaxSize)
- catch
- exit:{shutdown, _} ->
- exit({bad_request, <<"Invalid request body">>});
- exit:normal ->
- exit({bad_request, <<"Invalid request body">>})
- end;
-json_req_obj_field(<<"body">>, #httpd{req_body = Body}, _Db, _DocId) ->
- Body;
-json_req_obj_field(<<"peer">>, #httpd{peer = undefined, mochi_req = Req}, _, _) ->
- ?l2b(Req:get(peer));
-json_req_obj_field(<<"peer">>, #httpd{peer = Peer}, _Db, _DocId) ->
- ?l2b(Peer);
-json_req_obj_field(<<"form">>, #httpd{mochi_req = Req, method = Method} = HttpReq, Db, DocId) ->
- Body = json_req_obj_field(<<"body">>, HttpReq, Db, DocId),
- ParsedForm =
- case Req:get_primary_header_value("content-type") of
- "application/x-www-form-urlencoded" ++ _ when
- Method =:= 'POST' orelse Method =:= 'PUT'
- ->
- mochiweb_util:parse_qs(Body);
- _ ->
- []
- end,
- to_json_terms(ParsedForm);
-json_req_obj_field(<<"cookie">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
- to_json_terms(Req:parse_cookie());
-json_req_obj_field(<<"userCtx">>, #httpd{}, Db, _DocId) ->
- couch_util:json_user_ctx(Db);
-json_req_obj_field(<<"secObj">>, #httpd{user_ctx = UserCtx}, Db, _DocId) ->
- get_db_security(Db, UserCtx).
-
-get_db_info(Db) ->
- case couch_db:is_clustered(Db) of
- true ->
- fabric:get_db_info(Db);
- false ->
- couch_db:get_db_info(Db)
- end.
-
-get_db_security(Db, #user_ctx{}) ->
- case couch_db:is_clustered(Db) of
- true ->
- fabric:get_security(Db);
- false ->
- couch_db:get_security(Db)
- end.
-
-to_json_terms(Data) ->
- to_json_terms(Data, []).
-to_json_terms([], Acc) ->
- {lists:reverse(Acc)};
-to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
- to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
-to_json_terms([{Key, Value} | Rest], Acc) ->
- to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
-
-json_query_keys({Json}) ->
- json_query_keys(Json, []).
-json_query_keys([], Acc) ->
- {lists:reverse(Acc)};
-json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)} | Acc]);
-json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)} | Acc]);
-json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)} | Acc]);
-json_query_keys([{<<"descending">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"descending">>, ?JSON_DECODE(Value)} | Acc]);
-json_query_keys([Term | Rest], Acc) ->
- json_query_keys(Rest, [Term | Acc]).
-
-send_external_response(Req, Response) ->
- #extern_resp_args{
- code = Code,
- data = Data,
- ctype = CType,
- headers = Headers0,
- json = Json
- } = parse_external_response(Response),
- Headers1 = default_or_content_type(CType, Headers0),
- case Json of
- nil ->
- Headers2 = chttpd_util:maybe_add_csp_header("showlist", Headers1, "sandbox"),
- chttpd:send_response(Req, Code, Headers2, Data);
- Json ->
- chttpd:send_json(Req, Code, Headers1, Json)
- end.
-
-parse_external_response({Response}) ->
- lists:foldl(
- fun({Key, Value}, Args) ->
- case {Key, Value} of
- {"", _} ->
- Args;
- {<<"code">>, Value} ->
- Args#extern_resp_args{code = Value};
- {<<"stop">>, true} ->
- Args#extern_resp_args{stop = true};
- {<<"json">>, Value} ->
- Args#extern_resp_args{
- json = Value,
- ctype = "application/json"
- };
- {<<"body">>, Value} ->
- Args#extern_resp_args{data = Value, ctype = "text/html; charset=utf-8"};
- {<<"base64">>, Value} ->
- Args#extern_resp_args{
- data = base64:decode(Value),
- ctype = "application/binary"
- };
- {<<"headers">>, {Headers}} ->
- NewHeaders = lists:map(
- fun({Header, HVal}) ->
- {couch_util:to_list(Header), couch_util:to_list(HVal)}
- end,
- Headers
- ),
- Args#extern_resp_args{headers = NewHeaders};
- % unknown key
- _ ->
- Msg = lists:flatten(
- io_lib:format("Invalid data from external server: ~p", [{Key, Value}])
- ),
- throw({external_response_error, Msg})
- end
- end,
- #extern_resp_args{},
- Response
- ).
-
-default_or_content_type(DefaultContentType, Headers) ->
- IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
- case lists:any(IsContentType, Headers) of
- false ->
- [{"Content-Type", DefaultContentType} | Headers];
- true ->
- Headers
- end.
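
The recognized keys in parse_external_response/1 above (code, stop, json, body, base64 and headers) describe the response object a show, list or update function hands back to CouchDB; anything else throws external_response_error. A minimal sketch of such an object in the EJSON tuple notation used throughout this module, with hypothetical values and assuming Req is the #httpd{} record of an in-flight request:

    Response = {[
        {<<"code">>, 302},
        {<<"body">>, <<"Moved">>},
        {<<"headers">>, {[{<<"Location">>, <<"/db/other_doc">>}]}}
    ]},
    %% Replies with status 302 and the Location header; the <<"body">>
    %% clause sets Content-Type to "text/html; charset=utf-8", which
    %% default_or_content_type/2 keeps because no explicit Content-Type
    %% header was supplied.
    chttpd_external:send_external_response(Req, Response).
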
diff --git a/src/chttpd/src/chttpd_handlers.erl b/src/chttpd/src/chttpd_handlers.erl
deleted file mode 100644
index 82eee7365..000000000
--- a/src/chttpd/src/chttpd_handlers.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_handlers).
-
--export([
- url_handler/2,
- db_handler/2,
- design_handler/2
-]).
-
--define(SERVICE_ID, chttpd_handlers).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-url_handler(HandlerKey, DefaultFun) ->
- select(collect(url_handler, [HandlerKey]), DefaultFun).
-
-db_handler(HandlerKey, DefaultFun) ->
- select(collect(db_handler, [HandlerKey]), DefaultFun).
-
-design_handler(HandlerKey, DefaultFun) ->
- select(collect(design_handler, [HandlerKey]), DefaultFun).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-collect(Func, Args) ->
- Results = do_apply(Func, Args, []),
- [HandlerFun || HandlerFun <- Results, HandlerFun /= no_match].
-
-do_apply(Func, Args, Opts) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
-
-select([], Default) ->
- Default;
-select([{default, OverrideDefault}], _Default) ->
- OverrideDefault;
-select(Handlers, _Default) ->
- [Handler] = do_select(Handlers, []),
- Handler.
-
-do_select([], Acc) ->
- Acc;
-do_select([{override, Handler} | _], _Acc) ->
- [Handler];
-do_select([{default, _} | Rest], Acc) ->
- do_select(Rest, Acc);
-do_select([Handler], Acc) ->
- [Handler | Acc];
-do_select([Handler | Rest], Acc) ->
- do_select(Rest, [Handler | Acc]).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-select_override_test() ->
- ?assertEqual(selected, select([{override, selected}, foo], default)),
- ?assertEqual(selected, select([foo, {override, selected}], default)),
- ?assertEqual(selected, select([{override, selected}, {override, bar}], default)),
- ?assertError({badmatch, [bar, foo]}, select([foo, bar], default)).
-
-select_default_override_test() ->
- ?assertEqual(selected, select([{default, new_default}, selected], old_default)),
- ?assertEqual(selected, select([selected, {default, new_default}], old_default)),
- ?assertEqual(selected, select([{default, selected}], old_default)),
- ?assertEqual(selected, select([], selected)),
- ?assertEqual(
- selected,
- select([{default, new_default}, {override, selected}, bar], old_default)
- ).
-
--endif.
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
deleted file mode 100644
index 932b52e5f..000000000
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1]).
-
-url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
-url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
-url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
-url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
-url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
-url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
-url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
-url_handler(<<"_node">>) -> fun chttpd_node:handle_node_req/1;
-url_handler(<<"_reload_query_servers">>) -> fun chttpd_misc:handle_reload_query_servers_req/1;
-url_handler(<<"_replicate">>) -> fun chttpd_misc:handle_replicate_req/1;
-url_handler(<<"_uuids">>) -> fun chttpd_misc:handle_uuids_req/1;
-url_handler(<<"_session">>) -> fun chttpd_auth:handle_session_req/1;
-url_handler(<<"_up">>) -> fun chttpd_misc:handle_up_req/1;
-url_handler(_) -> no_match.
-
-db_handler(<<"_view_cleanup">>) -> fun chttpd_db:handle_view_cleanup_req/2;
-db_handler(<<"_compact">>) -> fun chttpd_db:handle_compact_req/2;
-db_handler(<<"_design">>) -> fun chttpd_db:handle_design_req/2;
-db_handler(<<"_partition">>) -> fun chttpd_db:handle_partition_req/2;
-db_handler(<<"_temp_view">>) -> fun chttpd_view:handle_temp_view_req/2;
-db_handler(<<"_changes">>) -> fun chttpd_db:handle_changes_req/2;
-db_handler(_) -> no_match.
-
-design_handler(<<"_view">>) -> fun chttpd_view:handle_view_req/3;
-design_handler(<<"_show">>) -> fun chttpd_show:handle_doc_show_req/3;
-design_handler(<<"_list">>) -> fun chttpd_show:handle_view_list_req/3;
-design_handler(<<"_update">>) -> fun chttpd_show:handle_doc_update_req/3;
-design_handler(<<"_info">>) -> fun chttpd_db:handle_design_info_req/3;
-design_handler(<<"_rewrite">>) -> fun chttpd_rewrite:handle_rewrite_req/3;
-design_handler(_) -> no_match.
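
These clauses are the handler table that chttpd_handlers:url_handler/2, db_handler/2 and design_handler/2 collect over couch_epi once chttpd_epi registers this module as a provider. A small sketch of how the _up endpoint resolves, using an anonymous fallback fun in place of the real default that chttpd passes:

    Default = fun(Req) -> chttpd:send_error(Req, not_found) end,
    Handler = chttpd_handlers:url_handler(<<"_up">>, Default),
    %% With chttpd_httpd_handlers registered, Handler is
    %% fun chttpd_misc:handle_up_req/1. For a key no provider claims,
    %% every url_handler/1 clause returns no_match and Default is used.
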
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
deleted file mode 100644
index 0dedeba4d..000000000
--- a/src/chttpd/src/chttpd_misc.erl
+++ /dev/null
@@ -1,328 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_misc).
-
--export([
- handle_all_dbs_req/1,
- handle_dbs_info_req/1,
- handle_favicon_req/1,
- handle_favicon_req/2,
- handle_replicate_req/1,
- handle_reload_query_servers_req/1,
- handle_task_status_req/1,
- handle_up_req/1,
- handle_utils_dir_req/1,
- handle_utils_dir_req/2,
- handle_uuids_req/1,
- handle_welcome_req/1,
- handle_welcome_req/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--import(
- chttpd,
- [
- send_json/2, send_json/3,
- send_method_not_allowed/2,
- send_chunk/2,
- start_chunked_response/3
- ]
-).
-
--define(MAX_DB_NUM_FOR_DBS_INFO, 100).
-
-% httpd global handlers
-
-handle_welcome_req(Req) ->
- handle_welcome_req(Req, <<"Welcome">>).
-
-handle_welcome_req(#httpd{method = 'GET'} = Req, WelcomeMessage) ->
- send_json(Req, {
- [
- {couchdb, WelcomeMessage},
- {version, list_to_binary(couch_server:get_version())},
- {git_sha, list_to_binary(couch_server:get_git_sha())},
- {uuid, couch_server:get_uuid()},
- {features, get_features()}
- ] ++
- case config:get("vendor") of
- [] ->
- [];
- Properties ->
- [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
- end
- });
-handle_welcome_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-get_features() ->
- case dreyfus:available() of
- true ->
- [search | config:features()];
- false ->
- config:features()
- end.
-
-handle_favicon_req(Req) ->
- handle_favicon_req(Req, get_docroot()).
-
-handle_favicon_req(#httpd{method = 'GET'} = Req, DocumentRoot) ->
- {DateNow, TimeNow} = calendar:universal_time(),
- DaysNow = calendar:date_to_gregorian_days(DateNow),
- DaysWhenExpires = DaysNow + 365,
- DateWhenExpires = calendar:gregorian_days_to_date(DaysWhenExpires),
- CachingHeaders = [
-        % favicon should expire a year from now
- {"Cache-Control", "public, max-age=31536000"},
- {"Expires", couch_util:rfc1123_date({DateWhenExpires, TimeNow})}
- ],
- chttpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
-handle_favicon_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_utils_dir_req(Req) ->
- handle_utils_dir_req(Req, get_docroot()).
-
-handle_utils_dir_req(#httpd{method = 'GET'} = Req, DocumentRoot) ->
- "/" ++ UrlPath = chttpd:path(Req),
- case chttpd:partition(UrlPath) of
- {_ActionKey, "/", RelativePath} ->
- % GET /_utils/path or GET /_utils/
- CachingHeaders = [{"Cache-Control", "private, must-revalidate"}],
- DefaultValues =
- "child-src 'self' data: blob:; default-src 'self'; img-src 'self' data:; font-src 'self'; "
- "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';",
- Headers = chttpd_util:maybe_add_csp_header("utils", CachingHeaders, DefaultValues),
- chttpd:serve_file(Req, RelativePath, DocumentRoot, Headers);
- {_ActionKey, "", _RelativePath} ->
- % GET /_utils
- RedirectPath = chttpd:path(Req) ++ "/",
- chttpd:send_redirect(Req, RedirectPath)
- end;
-handle_utils_dir_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_all_dbs_req(#httpd{method = 'GET'} = Req) ->
- handle_all_dbs_info_req(Req);
-handle_all_dbs_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_all_dbs_info_req(Req) ->
- Args0 = couch_mrview_http:parse_params(Req, undefined),
- Args1 = couch_mrview_util:set_extra(Args0, namespace, <<"_non_design">>),
- ShardDbName = config:get("mem3", "shards_db", "_dbs"),
- %% shard_db is not sharded but mem3:shards treats it as an edge case
-    %% so it can be pushed through fabric
- {ok, Info} = fabric:get_db_info(ShardDbName),
- Etag = couch_httpd:make_etag({Info}),
- Options = [{user_ctx, Req#httpd.user_ctx}],
- {ok, Resp} = chttpd:etag_respond(Req, Etag, fun() ->
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"ETag", Etag}]),
- VAcc = #vacc{req = Req, resp = Resp},
- fabric:all_docs(ShardDbName, Options, fun all_dbs_info_callback/2, VAcc, Args1)
- end),
- case is_record(Resp, vacc) of
- true -> {ok, Resp#vacc.resp};
- _ -> {ok, Resp}
- end.
-
-all_dbs_info_callback({meta, _Meta}, #vacc{resp = Resp0} = Acc) ->
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
- {ok, Acc#vacc{resp = Resp1}};
-all_dbs_info_callback({row, Row}, #vacc{resp = Resp0} = Acc) when
- Acc#vacc.req#httpd.path_parts =:= [<<"_all_dbs">>]
-->
- Prepend = couch_mrview_http:prepend_val(Acc),
- DbName = couch_util:get_value(id, Row),
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
- {ok, Acc#vacc{prepend = ",", resp = Resp1}};
-all_dbs_info_callback({row, Row}, #vacc{resp = Resp0} = Acc) when
- Acc#vacc.req#httpd.path_parts =:= [<<"_dbs_info">>]
-->
- Prepend = couch_mrview_http:prepend_val(Acc),
- DbName = couch_util:get_value(id, Row),
- case chttpd_util:get_db_info(DbName) of
- {ok, DbInfo} ->
- Chunk = [Prepend, ?JSON_ENCODE({[{key, DbName}, {info, {DbInfo}}]})],
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
- {ok, Acc#vacc{prepend = ",", resp = Resp1}};
- {error, database_does_not_exist} ->
- {ok, Acc#vacc{resp = Resp0}};
- {error, Reason} ->
- {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
- {stop, Acc#vacc{resp = Resp1}}
- end;
-all_dbs_info_callback(complete, #vacc{resp = Resp0} = Acc) ->
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
- {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp = Resp2}};
-all_dbs_info_callback({error, Reason}, #vacc{resp = Resp0} = Acc) ->
- {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
- {ok, Acc#vacc{resp = Resp1}}.
-
-handle_dbs_info_req(#httpd{method = 'GET'} = Req) ->
- handle_all_dbs_info_req(Req);
-handle_dbs_info_req(#httpd{method = 'POST'} = Req) ->
- chttpd:validate_ctype(Req, "application/json"),
- Props = chttpd:json_body_obj(Req),
- Keys = couch_mrview_util:get_view_keys(Props),
- case Keys of
- undefined -> throw({bad_request, "`keys` member must exist."});
- _ -> ok
- end,
- MaxNumber = config:get_integer(
- "chttpd",
- "max_db_number_for_dbs_info_req",
- ?MAX_DB_NUM_FOR_DBS_INFO
- ),
- case length(Keys) =< MaxNumber of
- true -> ok;
- false -> throw({bad_request, too_many_keys})
- end,
- {ok, Resp} = chttpd:start_json_response(Req, 200),
- send_chunk(Resp, "["),
- lists:foldl(
- fun(DbName, AccSeparator) ->
- case catch fabric:get_db_info(DbName) of
- {ok, Result} ->
- Json = ?JSON_ENCODE({[{key, DbName}, {info, {Result}}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- _ ->
- Json = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- end,
- % AccSeparator now has a comma
- ","
- end,
- "",
- Keys
- ),
- send_chunk(Resp, "]"),
- chttpd:end_json_response(Resp);
-handle_dbs_info_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD,POST").
-
-handle_task_status_req(#httpd{method = 'GET'} = Req) ->
- ok = chttpd:verify_is_server_admin(Req),
- {Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all),
- Response = lists:flatmap(
- fun({Node, Tasks}) ->
- [{[{node, Node} | Task]} || Task <- Tasks]
- end,
- Replies
- ),
- send_json(Req, lists:sort(Response));
-handle_task_status_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_replicate_req(#httpd{method = 'POST', user_ctx = Ctx, req_body = PostBody} = Req) ->
- chttpd:validate_ctype(Req, "application/json"),
- %% see HACK in chttpd.erl about replication
- case replicate(PostBody, Ctx) of
- {ok, {continuous, RepId}} ->
- send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
- {ok, {cancelled, RepId}} ->
- send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
- {ok, {JsonResults}} ->
- send_json(Req, {[{ok, true} | JsonResults]});
- {ok, stopped} ->
- send_json(Req, 200, {[{ok, stopped}]});
- {error, not_found = Error} ->
- chttpd:send_error(Req, Error);
- {error, {_, _} = Error} ->
- chttpd:send_error(Req, Error);
- {_, _} = Error ->
- chttpd:send_error(Req, Error)
- end;
-handle_replicate_req(Req) ->
- send_method_not_allowed(Req, "POST").
-
-replicate({Props} = PostBody, Ctx) ->
- case couch_util:get_value(<<"cancel">>, Props) of
- true ->
- cancel_replication(PostBody, Ctx);
- _ ->
- Node = choose_node([
- couch_util:get_value(<<"source">>, Props),
- couch_util:get_value(<<"target">>, Props)
- ]),
- case rpc:call(Node, couch_replicator, replicate, [PostBody, Ctx]) of
- {badrpc, Reason} ->
- erlang:error(Reason);
- Res ->
- Res
- end
- end.
-
-cancel_replication(PostBody, Ctx) ->
- {Res, _Bad} = rpc:multicall(couch_replicator, replicate, [PostBody, Ctx]),
- case [X || {ok, {cancelled, _}} = X <- Res] of
- [Success | _] ->
- % Report success if at least one node canceled the replication
- Success;
- [] ->
- case lists:usort(Res) of
- [UniqueReply] ->
- % Report a universally agreed-upon reply
- UniqueReply;
- [] ->
- {error, badrpc};
- Else ->
-                    % Unclear what to do here, so pick the first error,
-                    % but drop any {error, not_found} responses first
-                    % because we'll always get two of those
- hd(Else -- [{error, not_found}])
- end
- end.
-
-choose_node(Key) when is_binary(Key) ->
- Checksum = erlang:crc32(Key),
- Nodes = lists:sort([node() | erlang:nodes()]),
- lists:nth(1 + Checksum rem length(Nodes), Nodes);
-choose_node(Key) ->
- choose_node(term_to_binary(Key)).
-
-handle_reload_query_servers_req(#httpd{method = 'POST'} = Req) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_proc_manager:reload(),
- send_json(Req, 200, {[{ok, true}]});
-handle_reload_query_servers_req(Req) ->
- send_method_not_allowed(Req, "POST").
-
-handle_uuids_req(Req) ->
- couch_httpd_misc_handlers:handle_uuids_req(Req).
-
-handle_up_req(#httpd{method = 'GET'} = Req) ->
- case config:get("couchdb", "maintenance_mode") of
- "true" ->
- send_json(Req, 404, {[{status, maintenance_mode}]});
- "nolb" ->
- send_json(Req, 404, {[{status, nolb}]});
- _ ->
- {ok, {Status}} = mem3_seeds:get_status(),
- case couch_util:get_value(status, Status) of
- ok ->
- send_json(Req, 200, {Status});
- seeding ->
- send_json(Req, 404, {Status})
- end
- end;
-handle_up_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-get_docroot() ->
- % if the env var isn’t set, let’s not throw an error, but
- % assume the current working dir is what we want
- os:getenv("COUCHDB_FAUXTON_DOCROOT", "").
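
The choose_node/1 helper above pins every /_replicate request for a given source/target pair to one node: the pair is hashed with CRC32 and used as an index into the sorted node list, so repeated requests for the same replication land on the same couch_replicator instance. A worked sketch with hypothetical node names:

    Nodes = lists:sort(['a@db1.local', 'b@db2.local', 'c@db3.local']),
    Key = term_to_binary([<<"http://src/db">>, <<"http://tgt/db">>]),
    Checksum = erlang:crc32(Key),
    %% If, for example, Checksum rem 3 =:= 1, the second node is picked.
    Node = lists:nth(1 + Checksum rem length(Nodes), Nodes).
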
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
deleted file mode 100644
index cc3370a73..000000000
--- a/src/chttpd/src/chttpd_node.erl
+++ /dev/null
@@ -1,385 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_node).
--compile(tuple_calls).
-
--export([
- handle_node_req/1,
- get_stats/0,
- run_queues/0
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--import(
- chttpd,
- [
- send_json/2, send_json/3,
- send_method_not_allowed/2,
- send_chunk/2,
- start_chunked_response/3
- ]
-).
-
-% Node-specific request handler (_config and _stats)
-% Support _local as an alias for this node
-handle_node_req(#httpd{path_parts = [_, <<"_local">>]} = Req) ->
- send_json(Req, 200, {[{name, node()}]});
-handle_node_req(#httpd{path_parts = [A, <<"_local">> | Rest]} = Req) ->
- handle_node_req(Req#httpd{path_parts = [A, node()] ++ Rest});
-% GET /_node/$node/_versions
-handle_node_req(#httpd{method = 'GET', path_parts = [_, _Node, <<"_versions">>]} = Req) ->
- IcuVer = couch_ejson_compare:get_icu_version(),
- UcaVer = couch_ejson_compare:get_uca_version(),
- ColVer = couch_ejson_compare:get_collator_version(),
- send_json(Req, 200, #{
- erlang_version => ?l2b(?COUCHDB_ERLANG_VERSION),
- collation_driver => #{
- name => <<"libicu">>,
- library_version => couch_util:version_to_binary(IcuVer),
- collation_algorithm_version => couch_util:version_to_binary(UcaVer),
- collator_version => couch_util:version_to_binary(ColVer)
- },
- javascript_engine => #{
- name => <<"spidermonkey">>,
- version => couch_server:get_spidermonkey_version()
- }
- });
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_versions">>]} = Req) ->
- send_method_not_allowed(Req, "GET");
-% GET /_node/$node/_config
-handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>]} = Req) ->
- Grouped = lists:foldl(
- fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end,
- dict:new(),
- call_node(Node, config, all, [])
- ),
- KVs = dict:fold(
- fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end,
- [],
- Grouped
- ),
- send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>]} = Req) ->
- send_method_not_allowed(Req, "GET");
-% POST /_node/$node/_config/_reload - Flushes unpersisted config values from RAM
-handle_node_req(
- #httpd{method = 'POST', path_parts = [_, Node, <<"_config">>, <<"_reload">>]} = Req
-) ->
- case call_node(Node, config, reload, []) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end;
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, <<"_reload">>]} = Req) ->
- send_method_not_allowed(Req, "POST");
-% GET /_node/$node/_config/Section
-handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>, Section]} = Req) ->
- KVs = [
- {list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- call_node(Node, config, get, [Section])
- ],
- send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section]} = Req) ->
- send_method_not_allowed(Req, "GET");
-% PUT /_node/$node/_config/Section/Key
-% "value"
-handle_node_req(#httpd{method = 'PUT', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req) ->
- couch_util:check_config_blacklist(Section),
- Value = couch_util:trim(chttpd:json_body(Req)),
- Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
- OldValue = call_node(Node, config, get, [Section, Key, ""]),
- IsSensitive = Section == <<"admins">>,
- Opts = #{persist => Persist, sensitive => IsSensitive},
- case call_node(Node, config, set, [Section, Key, ?b2l(Value), Opts]) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end;
-% GET /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req) ->
- case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
- end;
-% DELETE /_node/$node/_config/Section/Key
-handle_node_req(
- #httpd{method = 'DELETE', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req
-) ->
- couch_util:check_config_blacklist(Section),
- Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
- case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- OldValue ->
- case call_node(Node, config, delete, [Section, Key, Persist]) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end
- end;
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section, _Key]} = Req) ->
- send_method_not_allowed(Req, "GET,PUT,DELETE");
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section, _Key | _]} = Req) ->
- chttpd:send_error(Req, not_found);
-% GET /_node/$node/_stats
-handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_stats">> | Path]} = Req) ->
- flush(Node, Req),
- Stats0 = call_node(Node, couch_stats, fetch, []),
- Stats = couch_stats_httpd:transform_stats(Stats0),
- Nested = couch_stats_httpd:nest(Stats),
- EJSON0 = couch_stats_httpd:to_ejson(Nested),
- EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
- chttpd:send_json(Req, EJSON1);
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_stats">>]} = Req) ->
- send_method_not_allowed(Req, "GET");
-handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_prometheus">>]} = Req) ->
- Metrics = call_node(Node, couch_prometheus_server, scrape, []),
- Version = call_node(Node, couch_prometheus_server, version, []),
- Type = "text/plain; version=" ++ Version,
- Header = [{<<"Content-Type">>, ?l2b(Type)}],
- chttpd:send_response(Req, 200, Header, Metrics);
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_prometheus">>]} = Req) ->
- send_method_not_allowed(Req, "GET");
-% GET /_node/$node/_system
-handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_system">>]} = Req) ->
- Stats = call_node(Node, chttpd_node, get_stats, []),
- EJSON = couch_stats_httpd:to_ejson(Stats),
- send_json(Req, EJSON);
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_system">>]} = Req) ->
- send_method_not_allowed(Req, "GET");
-% POST /_node/$node/_restart
-handle_node_req(#httpd{method = 'POST', path_parts = [_, Node, <<"_restart">>]} = Req) ->
- call_node(Node, init, restart, []),
- send_json(Req, 200, {[{ok, true}]});
-handle_node_req(#httpd{path_parts = [_, _Node, <<"_restart">>]} = Req) ->
- send_method_not_allowed(Req, "POST");
-handle_node_req(#httpd{
- path_parts = [_, Node | PathParts],
- mochi_req = MochiReq0
-}) ->
- % strip /_node/{node} from Req0 before descending further
- RawUri = MochiReq0:get(raw_path),
- {_, Query, Fragment} = mochiweb_util:urlsplit_path(RawUri),
- NewPath0 = "/" ++ lists:join("/", [couch_util:url_encode(P) || P <- PathParts]),
- NewRawPath = mochiweb_util:urlunsplit_path({NewPath0, Query, Fragment}),
- MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296
- ),
- NewOpts = [{body, MochiReq0:recv_body(MaxSize)} | MochiReq0:get(opts)],
- Ref = erlang:make_ref(),
- MochiReq = mochiweb_request:new(
- {remote, self(), Ref},
- NewOpts,
- MochiReq0:get(method),
- NewRawPath,
- MochiReq0:get(version),
- MochiReq0:get(headers)
- ),
- call_node(Node, couch_httpd, handle_request, [MochiReq]),
- recv_loop(Ref, MochiReq0);
-handle_node_req(#httpd{path_parts = [_]} = Req) ->
- chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
-handle_node_req(Req) ->
- chttpd:send_error(Req, not_found).
-
-recv_loop(Ref, ReqResp) ->
- receive
- {Ref, Code, Headers, _Args, start_response} ->
- recv_loop(Ref, ReqResp:start({Code, Headers}));
- {Ref, Code, Headers, Len, start_response_length} ->
- recv_loop(Ref, ReqResp:start_response_length({Code, Headers, Len}));
- {Ref, Code, Headers, chunked, respond} ->
- Resp = ReqResp:respond({Code, Headers, chunked}),
- recv_loop(Ref, Resp);
- {Ref, Code, Headers, Args, respond} ->
- Resp = ReqResp:respond({Code, Headers, Args}),
- {ok, Resp};
- {Ref, send, Data} ->
- ReqResp:send(Data),
- {ok, ReqResp};
- {Ref, chunk, <<>>} ->
- ReqResp:write_chunk(<<>>),
- {ok, ReqResp};
- {Ref, chunk, Data} ->
- ReqResp:write_chunk(Data),
- recv_loop(Ref, ReqResp);
- _Else ->
- recv_loop(Ref, ReqResp)
- end.
-
-call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
- Node1 =
- try
- list_to_existing_atom(?b2l(Node0))
- catch
- error:badarg ->
- throw({not_found, <<"no such node: ", Node0/binary>>})
- end,
- call_node(Node1, Mod, Fun, Args);
-call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
- case rpc:call(Node, Mod, Fun, Args) of
- {badrpc, nodedown} ->
- Reason = ?l2b(io_lib:format("~s is down", [Node])),
- throw({error, {nodedown, Reason}});
- Else ->
- Else
- end.
-
-flush(Node, Req) ->
- case couch_util:get_value("flush", chttpd:qs(Req)) of
- "true" ->
- call_node(Node, couch_stats_aggregator, flush, []);
- _Else ->
- ok
- end.
-
-get_stats() ->
- Other =
- erlang:memory(system) -
- lists:sum([
- X
- || {_, X} <-
- erlang:memory([atom, code, binary, ets])
- ]),
- Memory = [
- {other, Other}
- | erlang:memory([
- atom,
- atom_used,
- processes,
- processes_used,
- binary,
- code,
- ets
- ])
- ],
- {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
- {{input, Input}, {output, Output}} = statistics(io),
- {CF, CDU} = db_pid_stats(),
- MessageQueues0 = [
- {couch_file, {CF}},
- {couch_db_updater, {CDU}},
- {couch_server, couch_server:aggregate_queue_len()},
- {index_server, couch_index_server:aggregate_queue_len()}
- ],
- MessageQueues = MessageQueues0 ++ message_queues(registered()),
- {SQ, DCQ} = run_queues(),
- [
- {uptime, couch_app:uptime() div 1000},
- {memory, {Memory}},
- {run_queue, SQ},
- {run_queue_dirty_cpu, DCQ},
- {ets_table_count, length(ets:all())},
- {context_switches, element(1, statistics(context_switches))},
- {reductions, element(1, statistics(reductions))},
- {garbage_collection_count, NumberOfGCs},
- {words_reclaimed, WordsReclaimed},
- {io_input, Input},
- {io_output, Output},
- {os_proc_count, couch_proc_manager:get_proc_count()},
- {stale_proc_count, couch_proc_manager:get_stale_proc_count()},
- {process_count, erlang:system_info(process_count)},
- {process_limit, erlang:system_info(process_limit)},
- {message_queues, {MessageQueues}},
- {internal_replication_jobs, mem3_sync:get_backlog()},
- {distribution, {get_distribution_stats()}}
- ].
-
-db_pid_stats() ->
- {monitors, M} = process_info(whereis(couch_stats_process_tracker), monitors),
- Candidates = [Pid || {process, Pid} <- M],
- CouchFiles = db_pid_stats(couch_file, Candidates),
- CouchDbUpdaters = db_pid_stats(couch_db_updater, Candidates),
- {CouchFiles, CouchDbUpdaters}.
-
-db_pid_stats(Mod, Candidates) ->
- Mailboxes = lists:foldl(
- fun(Pid, Acc) ->
- case process_info(Pid, [message_queue_len, dictionary]) of
- undefined ->
- Acc;
- PI ->
- Dictionary = proplists:get_value(dictionary, PI, []),
- case proplists:get_value('$initial_call', Dictionary) of
- {Mod, init, 1} ->
- case proplists:get_value(message_queue_len, PI) of
- undefined -> Acc;
- Len -> [Len | Acc]
- end;
- _ ->
- Acc
- end
- end
- end,
- [],
- Candidates
- ),
- format_pid_stats(Mailboxes).
-
-format_pid_stats([]) ->
- [];
-format_pid_stats(Mailboxes) ->
- Sorted = lists:sort(Mailboxes),
- Count = length(Sorted),
- [
- {count, Count},
- {min, hd(Sorted)},
- {max, lists:nth(Count, Sorted)},
- {'50', lists:nth(round(Count * 0.5), Sorted)},
- {'90', lists:nth(round(Count * 0.9), Sorted)},
- {'99', lists:nth(round(Count * 0.99), Sorted)}
- ].
-
-get_distribution_stats() ->
- lists:map(
- fun({Node, Socket}) ->
- {ok, Stats} = inet:getstat(Socket),
- {Node, {Stats}}
- end,
- erlang:system_info(dist_ctrl)
- ).
-
-message_queues(Registered) ->
- lists:map(
- fun(Name) ->
- Type = message_queue_len,
- {Type, Length} = process_info(whereis(Name), Type),
- {Name, Length}
- end,
- Registered
- ).
-
-%% Workaround for https://bugs.erlang.org/browse/ERL-1355
-run_queues() ->
- case erlang:system_info(dirty_cpu_schedulers) > 0 of
- false ->
- {statistics(run_queue), 0};
- true ->
- [DCQ | SQs] = lists:reverse(statistics(run_queue_lengths)),
- {lists:sum(SQs), DCQ}
- end.
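
The run_queues/0 workaround relies on statistics(run_queue_lengths) returning one length per normal scheduler followed by a final entry for the dirty CPU schedulers, so the last element is split off as the dirty queue and the rest are summed. A sketch of the arithmetic with made-up queue lengths:

    %% Suppose statistics(run_queue_lengths) returned [0, 2, 1, 0, 3] on a
    %% node with four normal schedulers plus dirty CPU schedulers:
    [DCQ | SQs] = lists:reverse([0, 2, 1, 0, 3]),
    %% DCQ =:= 3 is reported as run_queue_dirty_cpu and
    %% lists:sum(SQs) =:= 3 as run_queue in the _system output.
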
diff --git a/src/chttpd/src/chttpd_plugin.erl b/src/chttpd/src/chttpd_plugin.erl
deleted file mode 100644
index 03d8ad6ac..000000000
--- a/src/chttpd/src/chttpd_plugin.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_plugin).
-
--export([
- before_request/1,
- after_request/2,
- handle_error/1,
- before_response/4,
- before_serve_file/5
-]).
-
--define(SERVICE_ID, chttpd).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-before_request(HttpReq) ->
- [HttpReq1] = with_pipe(before_request, [HttpReq]),
- {ok, HttpReq1}.
-
-after_request(HttpReq, Result) ->
- [_, Result1] = with_pipe(after_request, [HttpReq, Result]),
- {ok, Result1}.
-
-handle_error(Error) ->
- [Error1] = with_pipe(handle_error, [Error]),
- Error1.
-
-before_response(HttpReq0, Code0, Headers0, Value0) ->
- [HttpReq, Code, Headers, Value] =
- with_pipe(before_response, [HttpReq0, Code0, Headers0, Value0]),
- {ok, {HttpReq, Code, Headers, Value}}.
-
-before_serve_file(Req0, Code0, Headers0, RelativePath0, DocumentRoot0) ->
- [HttpReq, Code, Headers, RelativePath, DocumentRoot] =
- with_pipe(before_serve_file, [
- Req0, Code0, Headers0, RelativePath0, DocumentRoot0
- ]),
- {ok, {HttpReq, Code, Headers, RelativePath, DocumentRoot}}.
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-with_pipe(Func, Args) ->
- do_apply(Func, Args, [pipe]).
-
-do_apply(Func, Args, Opts) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
diff --git a/src/chttpd/src/chttpd_prefer_header.erl b/src/chttpd/src/chttpd_prefer_header.erl
deleted file mode 100644
index dbce54e65..000000000
--- a/src/chttpd/src/chttpd_prefer_header.erl
+++ /dev/null
@@ -1,61 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_prefer_header).
-
--compile(tuple_calls).
-
--export([
- maybe_return_minimal/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(DEFAULT_PREFER_MINIMAL,
- "Cache-Control, Content-Length, Content-Range, "
- "Content-Type, ETag, Server, Transfer-Encoding, Vary"
-).
-
-maybe_return_minimal(#httpd{mochi_req = MochiReq}, Headers) ->
- case get_prefer_header(MochiReq) of
- "return=minimal" ->
- filter_headers(Headers, get_header_list());
- _ ->
- Headers
- end.
-
-get_prefer_header(Req) ->
- case Req:get_header_value("Prefer") of
- Value when is_list(Value) ->
- string:to_lower(Value);
- undefined ->
- undefined
- end.
-
-filter_headers(Headers, IncludeList) ->
- lists:filter(
- fun({HeaderName, _}) ->
- lists:member(HeaderName, IncludeList)
- end,
- Headers
- ).
-
-get_header_list() ->
- SectionStr = config:get(
- "chttpd",
- "prefer_minimal",
- ?DEFAULT_PREFER_MINIMAL
- ),
- split_list(SectionStr).
-
-split_list(S) ->
- re:split(S, "\\s*,\\s*", [trim, {return, list}]).
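
maybe_return_minimal/2 filters only when the client sends "Prefer: return=minimal", and the surviving headers are whatever the chttpd/prefer_minimal setting lists, defaulting to the constant above. A sketch of the effect on a typical header set, assuming the default allow-list:

    Headers = [
        {"Content-Type", "application/json"},
        {"X-CouchDB-Body-Time", "0"},
        {"ETag", "\"1-967a00dff5e02add41819138abb3284d\""}
    ],
    %% With "Prefer: return=minimal", filter_headers/2 keeps only names
    %% present in the allow-list: "Content-Type" and "ETag" survive,
    %% "X-CouchDB-Body-Time" is dropped.
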
diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl
deleted file mode 100644
index 4e77597d4..000000000
--- a/src/chttpd/src/chttpd_rewrite.erl
+++ /dev/null
@@ -1,553 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on the bind method from Webmachine
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(chttpd_rewrite).
-
--compile(tuple_calls).
-
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-handle_rewrite_req(#httpd{} = Req, Db, DDoc) ->
- RewritesSoFar = erlang:get(?REWRITE_COUNT),
- MaxRewrites = chttpd_util:get_chttpd_config_integer("rewrite_limit", 100),
- case RewritesSoFar >= MaxRewrites of
- true ->
- throw({bad_request, <<"Exceeded rewrite recursion limit">>});
- false ->
- erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
- end,
- case get_rules(DDoc) of
- Rules when is_list(Rules) ->
- do_rewrite(Req, Rules);
- Rules when is_binary(Rules) ->
- case couch_query_servers:rewrite(Req, Db, DDoc) of
- undefined ->
- chttpd:send_error(
- Req,
- 404,
- <<"rewrite_error">>,
- <<"Invalid path.">>
- );
- Rewrite ->
- do_rewrite(Req, Rewrite)
- end;
- undefined ->
- chttpd:send_error(
- Req,
- 404,
- <<"rewrite_error">>,
- <<"Invalid path.">>
- )
- end.
-
-get_rules(#doc{body = {Props}}) ->
- couch_util:get_value(<<"rewrites">>, Props).
-
-do_rewrite(#httpd{mochi_req = MochiReq} = Req, {Props} = Rewrite) when is_list(Props) ->
- case couch_util:get_value(<<"code">>, Props) of
- undefined ->
- Method = rewrite_method(Req, Rewrite),
- Headers = rewrite_headers(Req, Rewrite),
- Path = ?b2l(rewrite_path(Req, Rewrite)),
- NewMochiReq = mochiweb_request:new(
- MochiReq:get(socket),
- Method,
- Path,
- MochiReq:get(version),
- Headers
- ),
- Body =
- case couch_util:get_value(<<"body">>, Props) of
- undefined -> erlang:get(mochiweb_request_body);
- B -> B
- end,
- NewMochiReq:cleanup(),
- case Body of
- undefined -> [];
- _ -> erlang:put(mochiweb_request_body, Body)
- end,
- couch_log:debug("rewrite to ~p", [Path]),
- chttpd:handle_request_int(NewMochiReq);
- Code ->
- chttpd:send_response(
- Req,
- Code,
- case couch_util:get_value(<<"headers">>, Props) of
- undefined -> [];
- {H1} -> H1
- end,
- rewrite_body(Rewrite)
- )
- end;
-do_rewrite(
- #httpd{
- method = Method,
- path_parts = [_DbName, <<"_design">>, _DesignName, _Rewrite | PathParts],
- mochi_req = MochiReq
- } = Req,
- Rules
-) when is_list(Rules) ->
- % create dispatch list from rules
- Prefix = path_prefix(Req),
- QueryList = lists:map(fun decode_query_value/1, chttpd:qs(Req)),
-
- DispatchList = [make_rule(Rule) || {Rule} <- Rules],
- Method1 = couch_util:to_binary(Method),
-
- %% get raw path by matching url to a rule.
- RawPath =
- case
- try_bind_path(
- DispatchList,
- Method1,
- PathParts,
- QueryList
- )
- of
- no_dispatch_path ->
- throw(not_found);
- {NewPathParts, Bindings} ->
- Parts = [quote_plus(X) || X <- NewPathParts],
-
-                % build the new path, re-encode query args and convert
-                % them to JSON where needed
- Bindings1 = maybe_encode_bindings(Bindings),
- Path = iolist_to_binary([
- string:join(Parts, [?SEPARATOR]),
- [["?", mochiweb_util:urlencode(Bindings1)] || Bindings1 =/= []]
- ]),
-
-                % if the path is relative, detect it and rewrite the path
- safe_relative_path(Prefix, Path)
- end,
-
-    % normalize the final path (resolve "." and ".." segments)
- RawPath1 = ?b2l(normalize_path(RawPath)),
-
- couch_log:debug("rewrite to ~p ~n", [RawPath1]),
-
- % build a new mochiweb request
- MochiReq1 = mochiweb_request:new(
- MochiReq:get(socket),
- MochiReq:get(method),
- RawPath1,
- MochiReq:get(version),
- MochiReq:get(headers)
- ),
-
-    % cleanup: it forces mochiweb to reparse the raw URI.
- MochiReq1:cleanup(),
-
- chttpd:handle_request_int(MochiReq1).
-
-rewrite_method(#httpd{method = Method}, {Props}) ->
- DefaultMethod = couch_util:to_binary(Method),
- couch_util:get_value(<<"method">>, Props, DefaultMethod).
-
-rewrite_path(#httpd{} = Req, {Props} = Rewrite) ->
- Prefix = path_prefix(Req),
- RewritePath =
- case couch_util:get_value(<<"path">>, Props) of
- undefined ->
- throw({<<"rewrite_error">>, <<"Rewrite result must produce a new path.">>});
- P ->
- P
- end,
- SafeRelativePath = safe_relative_path(Prefix, RewritePath),
- NormalizedPath = normalize_path(SafeRelativePath),
- QueryParams = rewrite_query_params(Req, Rewrite),
- case QueryParams of
- <<"">> ->
- NormalizedPath;
- QueryParams ->
- <<NormalizedPath/binary, "?", QueryParams/binary>>
- end.
-
-rewrite_query_params(#httpd{} = Req, {Props}) ->
- RequestQS = chttpd:qs(Req),
- RewriteQS =
- case couch_util:get_value(<<"query">>, Props) of
- undefined -> RequestQS;
- {V} -> V
- end,
- RewriteQSEsc = [{chttpd:quote(K), chttpd:quote(V)} || {K, V} <- RewriteQS],
- iolist_to_binary(string:join([[K, "=", V] || {K, V} <- RewriteQSEsc], "&")).
-
-rewrite_headers(#httpd{mochi_req = MochiReq}, {Props}) ->
- case couch_util:get_value(<<"headers">>, Props) of
- undefined ->
- MochiReq:get(headers);
- {H} ->
- mochiweb_headers:enter_from_list(
- lists:map(fun({Key, Val}) -> {?b2l(Key), ?b2l(Val)} end, H),
- MochiReq:get(headers)
- )
- end.
-
-rewrite_body({Props}) ->
- Body =
- case couch_util:get_value(<<"body">>, Props) of
- undefined -> erlang:get(mochiweb_request_body);
- B -> B
- end,
- case Body of
- undefined ->
- [];
- _ ->
- erlang:put(mochiweb_request_body, Body),
- Body
- end.
-
-path_prefix(#httpd{path_parts = [DbName, <<"_design">>, DesignName | _]}) ->
- EscapedDesignName = ?l2b(couch_util:url_encode(DesignName)),
- EscapedDbName = ?l2b(couch_util:url_encode(DbName)),
- DesignId = <<"_design/", EscapedDesignName/binary>>,
- <<"/", EscapedDbName/binary, "/", DesignId/binary>>.
-
-safe_relative_path(Prefix, Path) ->
- case mochiweb_util:safe_relative_path(?b2l(Path)) of
- undefined ->
- <<Prefix/binary, "/", Path/binary>>;
- V0 ->
- V1 = ?l2b(V0),
- <<Prefix/binary, "/", V1/binary>>
- end.
-
-quote_plus({bind, X}) ->
- mochiweb_util:quote_plus(X);
-quote_plus(X) ->
- mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching the current URL. If none is found,
-%% no_dispatch_path is returned and the caller throws a 404 not_found error.
-try_bind_path([], _Method, _PathParts, _QueryList) ->
- no_dispatch_path;
-try_bind_path([Dispatch | Rest], Method, PathParts, QueryList) ->
- [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
- case bind_method(Method1, Method) of
- true ->
- case bind_path(PathParts1, PathParts, []) of
- {ok, Remaining, Bindings} ->
- Bindings1 = Bindings ++ QueryList,
-                    % we parse the query args from the rule and fill
-                    % them in with binding vars where needed
- QueryArgs1 = make_query_list(
- QueryArgs,
- Bindings1,
- Formats,
- []
- ),
-                    % drop entries of Bindings1 that are already present
-                    % in QueryArgs1
- Bindings2 = lists:foldl(
- fun({K, V}, Acc) ->
- K1 = to_binding(K),
- KV =
- case couch_util:get_value(K1, QueryArgs1) of
- undefined -> [{K1, V}];
- _V1 -> []
- end,
- Acc ++ KV
- end,
- [],
- Bindings1
- ),
-
- FinalBindings = Bindings2 ++ QueryArgs1,
- NewPathParts = make_new_path(
- RedirectPath,
- FinalBindings,
- Remaining,
- []
- ),
- {NewPathParts, FinalBindings};
- fail ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end;
- false ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end.
-
-%% Dynamically rewrite the query list given as the "query" member of a
-%% rewrite rule. Each value is replaced by a binding or by an argument
-%% passed in the URL.
-make_query_list([], _Bindings, _Formats, Acc) ->
- Acc;
-make_query_list([{Key, {Value}} | Rest], Bindings, Formats, Acc) ->
- Value1 = {Value},
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
-make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_binary(Value) ->
- Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
-make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_list(Value) ->
- Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
-make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) ->
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value} | Acc]).
-
-replace_var(<<"*">> = Value, Bindings, Formats) ->
- get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
- get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
- Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
- lists:reverse(
- lists:foldl(
- fun
- (<<":", Var/binary>> = Value1, Acc) ->
- [get_var(Var, Bindings, Value1, Formats) | Acc];
- (Value1, Acc) ->
- [Value1 | Acc]
- end,
- [],
- Value
- )
- );
-replace_var(Value, _Bindings, _Formats) ->
- Value.
-
-maybe_json(Key, Value) ->
- case
- lists:member(Key, [
- <<"key">>,
- <<"startkey">>,
- <<"start_key">>,
- <<"endkey">>,
- <<"end_key">>,
- <<"keys">>
- ])
- of
- true ->
- ?JSON_ENCODE(Value);
- false ->
- Value
- end.
-
-get_var(VarName, Props, Default, Formats) ->
- VarName1 = to_binding(VarName),
- Val = couch_util:get_value(VarName1, Props, Default),
- maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
- case couch_util:get_value(VarName, Formats) of
- undefined ->
- Value;
- Format ->
- format(Format, Value)
- end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
- Value;
-format(<<"int">>, Value) when is_binary(Value) ->
- format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
- case (catch list_to_integer(Value)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Value
- end;
-format(<<"bool">>, Value) when is_binary(Value) ->
- format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
- case string:to_lower(Value) of
- "true" -> true;
- "false" -> false;
- _ -> Value
- end;
-format(_Format, Value) ->
- Value.
-
-%% @doc Build the new path from bindings. Bindings are the query args
-%% (plus the dynamic query, rewritten if needed) and the bindings found
-%% in the bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
- lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([{bind, P} | Rest], Bindings, Remaining, Acc) ->
- P2 =
- case couch_util:get_value({bind, P}, Bindings) of
- undefined -> <<"undefined">>;
- P1 -> iolist_to_binary(P1)
- end,
- make_new_path(Rest, Bindings, Remaining, [P2 | Acc]);
-make_new_path([P | Rest], Bindings, Remaining, Acc) ->
- make_new_path(Rest, Bindings, Remaining, [P | Acc]).
-
-%% @doc Check whether the request method matches the rule method. If the
-%% rule method is '*', which is the default, any
-%% request method will bind. This allows rules that
-%% depend on the HTTP method.
-bind_method(?MATCH_ALL, _Method) ->
- true;
-bind_method({bind, Method}, Method) ->
- true;
-bind_method(_, _) ->
- false.
-
-%% @doc Bind a path. Using the rule's "from" pattern we try to bind the
-%% variables given in the current URL by pattern matching.
-bind_path([], [], Bindings) ->
- {ok, [], Bindings};
-bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
- {ok, Rest, Bindings};
-bind_path(_, [], _) ->
- fail;
-bind_path([{bind, Token} | RestToken], [Match | RestMatch], Bindings) ->
- bind_path(RestToken, RestMatch, [{{bind, Token}, Match} | Bindings]);
-bind_path([Token | RestToken], [Token | RestMatch], Bindings) ->
- bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
- fail.
-
-%% normalize path.
-normalize_path(Path) when is_binary(Path) ->
- normalize_path(?b2l(Path));
-normalize_path(Path) when is_list(Path) ->
- Segments = normalize_path1(string:tokens(Path, "/"), []),
- NormalizedPath = string:join(Segments, [?SEPARATOR]),
- iolist_to_binary(["/", NormalizedPath]).
-
-normalize_path1([], Acc) ->
- lists:reverse(Acc);
-normalize_path1([".." | Rest], Acc) ->
- Acc1 =
- case Acc of
- [] -> [".." | Acc];
- [T | _] when T =:= ".." -> [".." | Acc];
- [_ | R] -> R
- end,
- normalize_path1(Rest, Acc1);
-normalize_path1(["." | Rest], Acc) ->
- normalize_path1(Rest, Acc);
-normalize_path1([Path | Rest], Acc) ->
- normalize_path1(Rest, [Path | Acc]).
-
-%% @doc transform a JSON rule into Erlang terms for pattern matching
-make_rule(Rule) ->
- Method =
- case couch_util:get_value(<<"method">>, Rule) of
- undefined -> ?MATCH_ALL;
- M -> to_binding(M)
- end,
- QueryArgs =
- case couch_util:get_value(<<"query">>, Rule) of
- undefined -> [];
- {Args} -> Args
- end,
- FromParts =
- case couch_util:get_value(<<"from">>, Rule) of
- undefined -> [?MATCH_ALL];
- From -> parse_path(From)
- end,
- ToParts =
- case couch_util:get_value(<<"to">>, Rule) of
- undefined ->
- throw({error, invalid_rewrite_target});
- To ->
- parse_path(To)
- end,
- Formats =
- case couch_util:get_value(<<"formats">>, Rule) of
- undefined -> [];
- {Fmts} -> Fmts
- end,
- [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
- {ok, SlashRE} = re:compile(<<"\\/">>),
- path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc Convert a path rule (from or to) to an Erlang list.
-%% "*" and path variables starting with ":" are converted
-%% to binding terms.
-path_to_list([], Acc, _DotDotCount) ->
- lists:reverse(Acc);
-path_to_list([<<>> | R], Acc, DotDotCount) ->
- path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">> | R], Acc, DotDotCount) ->
- path_to_list(R, [?MATCH_ALL | Acc], DotDotCount);
-path_to_list([<<"..">> | R], Acc, DotDotCount) when DotDotCount == 2 ->
- case chttpd_util:get_chttpd_config_boolean("secure_rewrites", true) of
- false ->
- path_to_list(R, [<<"..">> | Acc], DotDotCount + 1);
- true ->
- couch_log:notice(
- "insecure_rewrite_rule ~p blocked",
- [lists:reverse(Acc) ++ [<<"..">>] ++ R]
- ),
- throw({insecure_rewrite_rule, "too many ../.. segments"})
- end;
-path_to_list([<<"..">> | R], Acc, DotDotCount) ->
- path_to_list(R, [<<"..">> | Acc], DotDotCount + 1);
-path_to_list([P | R], Acc, DotDotCount) ->
- P1 =
- case P of
- <<":", Var/binary>> ->
- to_binding(Var);
- _ ->
- P
- end,
- path_to_list(R, [P1 | Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
- [];
-maybe_encode_bindings(Props) ->
- lists:foldl(
- fun
- ({{bind, <<"*">>}, _V}, Acc) ->
- Acc;
- ({{bind, K}, V}, Acc) ->
- V1 = iolist_to_binary(maybe_json(K, V)),
- [{K, V1} | Acc]
- end,
- [],
- Props
- ).
-
-decode_query_value({K, V}) ->
- case
- lists:member(K, [
- "key",
- "startkey",
- "start_key",
- "endkey",
- "end_key",
- "keys"
- ])
- of
- true ->
- {to_binding(K), ?JSON_DECODE(V)};
- false ->
- {to_binding(K), ?l2b(V)}
- end.
-
-to_binding({bind, V}) ->
- {bind, V};
-to_binding(V) when is_list(V) ->
- to_binding(?l2b(V));
-to_binding(V) ->
- {bind, V}.
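
Putting the pieces together: make_rule/1 turns each object in a design document's "rewrites" array into a dispatch entry, try_bind_path/4 matches the request path against it, and the bound variables are re-emitted both in the new path and as query parameters. A sketch of one rule and its effect, assuming a design doc named ddoc in database db:

    %% The proplist inside one entry of the "rewrites" array:
    Rule = [
        {<<"from">>, <<"/a/:foo">>},
        {<<"to">>, <<"/some/:foo">>},
        {<<"method">>, <<"GET">>}
    ],
    Dispatch = make_rule(Rule),
    %% Dispatch =:= [{[<<"a">>, {bind, <<"foo">>}], {bind, <<"GET">>}},
    %%               [<<"some">>, {bind, <<"foo">>}], [], []]
    %% so GET /db/_design/ddoc/_rewrite/a/b binds foo to <<"b">> and is
    %% redispatched internally to /db/_design/ddoc/some/b?foo=b.
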
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
deleted file mode 100644
index e798a98d6..000000000
--- a/src/chttpd/src/chttpd_show.erl
+++ /dev/null
@@ -1,331 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_show).
-
--export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-% /db/_design/foo/_show/bar/docid
-% A show converts a JSON doc to a response of any content type.
-% It looks up the doc and then passes it to the query server,
-% which produces the response that is sent back to the HTTP client.
-
-maybe_open_doc(Db, DocId, Options) ->
- case fabric:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- chttpd_stats:incr_reads(),
- Doc;
- {not_found, _} ->
- nil
- end.
-
-handle_doc_show_req(
- #httpd{
- path_parts = [_, _, _, _, ShowName, DocId]
- } = Req,
- Db,
- DDoc
-) ->
- % open the doc
- Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
- Doc = maybe_open_doc(Db, DocId, Options),
-
- % we don't handle revs here b/c they are an internal api
- % returns 404 if there is no doc with DocId
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-handle_doc_show_req(
- #httpd{
- path_parts = [_, _, _, _, ShowName, DocId | Rest]
- } = Req,
- Db,
- DDoc
-) ->
- DocParts = [DocId | Rest],
- DocId1 = ?l2b(string:join([?b2l(P) || P <- DocParts], "/")),
-
- % open the doc
- Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
- Doc = maybe_open_doc(Db, DocId1, Options),
-
- % we don't handle revs here b/c they are an internal api
- % pass 404 docs to the show function
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-handle_doc_show_req(
- #httpd{
- path_parts = [_, _, _, _, ShowName]
- } = Req,
- Db,
- DDoc
-) ->
- % with no docid the doc is nil
- handle_doc_show(Req, Db, DDoc, ShowName, nil);
-handle_doc_show_req(Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
- %% Will throw an exception if the _show handler is missing
- couch_util:get_nested_json_value(DDoc#doc.body, [<<"shows">>, ShowName]),
- % get responder for ddoc/showname
- CurrentEtag = show_etag(Req, Doc, DDoc, []),
- chttpd:etag_respond(Req, CurrentEtag, fun() ->
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- [<<"resp">>, ExternalResp] =
- couch_query_servers:ddoc_prompt(
- DDoc,
- [<<"shows">>, ShowName],
- [JsonDoc, JsonReq]
- ),
- JsonResp = apply_etag(ExternalResp, CurrentEtag),
- chttpd_external:send_external_response(Req, JsonResp)
- end).
-
-show_etag(#httpd{user_ctx = UserCtx} = Req, Doc, DDoc, More) ->
- Accept = chttpd:header_value(Req, "Accept"),
- DocPart =
- case Doc of
- nil -> nil;
- Doc -> chttpd:doc_etag(Doc)
- end,
- couch_httpd:make_etag({
- couch_httpd:doc_etag(DDoc), DocPart, Accept, UserCtx#user_ctx.roles, More
- }).
-
-% /db/_design/foo/_update/bar/docid
-% updates a doc based on a request
-% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
-% % anything but GET
-% send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
-
-handle_doc_update_req(
- #httpd{
- path_parts = [_, _, _, _, UpdateName]
- } = Req,
- Db,
- DDoc
-) ->
- send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-handle_doc_update_req(
- #httpd{
- path_parts = [_, _, _, _, UpdateName | DocIdParts]
- } = Req,
- Db,
- DDoc
-) ->
- DocId = ?l2b(string:join([?b2l(P) || P <- DocIdParts], "/")),
- Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
- Doc = maybe_open_doc(Db, DocId, Options),
- send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-handle_doc_update_req(Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
-
-send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
- %% Will throw an exception if the _update handler is missing
- couch_util:get_nested_json_value(DDoc#doc.body, [<<"updates">>, UpdateName]),
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- Cmd = [<<"updates">>, UpdateName],
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
- JsonResp =
- case UpdateResp of
- [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
- case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
- "true" ->
- Options = [full_commit, {user_ctx, Req#httpd.user_ctx}, {w, W}];
- _ ->
- Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}]
- end,
- NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
- couch_doc:validate_docid(NewDoc#doc.id),
- {UpdateResult, NewRev} = fabric:update_doc(Db, NewDoc, Options),
- chttpd_stats:incr_writes(),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- case {UpdateResult, NewRev} of
- {ok, _} ->
- Code = 201;
- {accepted, _} ->
- Code = 202
- end,
- {JsonResp1} = apply_headers(JsonResp0, [
- {<<"X-Couch-Update-NewRev">>, NewRevStr},
- {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
- ]),
- {[{<<"code">>, Code} | JsonResp1]};
- [<<"up">>, _Other, {JsonResp0}] ->
- {[{<<"code">>, 200} | JsonResp0]}
- end,
- % todo set location field
- chttpd_external:send_external_response(Req, JsonResp).
-
-% view-list request with view and list from same design doc.
-handle_view_list_req(
- #httpd{
- method = Method,
- path_parts = [_, _, DesignName, _, ListName, ViewName]
- } = Req,
- Db,
- DDoc
-) when
- Method =:= 'GET' orelse Method =:= 'OPTIONS'
-->
- Keys = chttpd:qs_json_value(Req, "keys", undefined),
- handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-% view-list request with view and list from different design docs.
-handle_view_list_req(
- #httpd{
- method = Method,
- path_parts = [_, _, _, _, ListName, DesignName, ViewName]
- } = Req,
- Db,
- DDoc
-) when
- Method =:= 'GET' orelse Method =:= 'OPTIONS'
-->
- Keys = chttpd:qs_json_value(Req, "keys", undefined),
- handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-handle_view_list_req(#httpd{method = Method} = Req, _Db, _DDoc) when
- Method =:= 'GET' orelse Method =:= 'OPTIONS'
-->
- chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-handle_view_list_req(
- #httpd{
- method = 'POST',
- path_parts = [_, _, DesignName, _, ListName, ViewName]
- } = Req,
- Db,
- DDoc
-) ->
- chttpd:validate_ctype(Req, "application/json"),
- {Props} = chttpd:json_body(Req),
- Keys = proplists:get_value(<<"keys">>, Props, undefined),
- handle_view_list(
- Req#httpd{req_body = {Props}},
- Db,
- DDoc,
- ListName,
- {DesignName, ViewName},
- Keys
- );
-handle_view_list_req(
- #httpd{
- method = 'POST',
- path_parts = [_, _, _, _, ListName, DesignName, ViewName]
- } = Req,
- Db,
- DDoc
-) ->
- chttpd:validate_ctype(Req, "application/json"),
- {Props} = chttpd:json_body(Req),
- Keys = proplists:get_value(<<"keys">>, Props, undefined),
- handle_view_list(
- Req#httpd{req_body = {Props}},
- Db,
- DDoc,
- ListName,
- {DesignName, ViewName},
- Keys
- );
-handle_view_list_req(#httpd{method = 'POST'} = Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-handle_view_list_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
- %% Will throw an exception if the _list handler is missing
- couch_util:get_nested_json_value(DDoc#doc.body, [<<"lists">>, LName]),
- DbName = couch_db:name(Db),
- {ok, VDoc} = ddoc_cache:open(DbName, <<"_design/", ViewDesignName/binary>>),
- CB = fun list_cb/2,
- QueryArgs = couch_mrview_http:parse_body_and_query(Req, Keys),
- Options = [{user_ctx, Req#httpd.user_ctx}],
- couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
- Acc = #lacc{
- lname = LName,
- req = Req,
- qserver = QServer,
- db = Db
- },
- case ViewName of
- <<"_all_docs">> ->
- fabric:all_docs(Db, Options, CB, Acc, QueryArgs);
- _ ->
- fabric:query_view(
- Db,
- Options,
- VDoc,
- ViewName,
- CB,
- Acc,
- QueryArgs
- )
- end
- end).
-
-list_cb({row, Row} = Msg, Acc) ->
- case lists:keymember(doc, 1, Row) of
- true -> chttpd_stats:incr_reads();
- false -> ok
- end,
- chttpd_stats:incr_rows(),
- couch_mrview_show:list_cb(Msg, Acc);
-list_cb(Msg, Acc) ->
- couch_mrview_show:list_cb(Msg, Acc).
-
-% Maybe this is in the proplists API
-% todo move to couch_util
-json_apply_field(H, {L}) ->
- json_apply_field(H, L, []).
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
- % drop matching keys
- json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
- % something else is next, leave it alone.
- json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
- % end of list, add ours
- {[{Key, NewValue} | Acc]}.
-
-apply_etag(JsonResp, undefined) ->
- JsonResp;
-apply_etag({ExternalResponse}, CurrentEtag) ->
- % Here we embark on the delicate task of replacing or creating the
- % headers on the JsonResponse object. We need to control the Etag and
- % Vary headers. If the external function controls the Etag, we'd have to
- % run it to check for a match, which sort of defeats the purpose.
- apply_headers(ExternalResponse, [
- {<<"ETag">>, CurrentEtag},
- {<<"Vary">>, <<"Accept">>}
- ]).
-
-apply_headers(JsonResp, []) ->
- JsonResp;
-apply_headers(JsonResp, NewHeaders) ->
- case couch_util:get_value(<<"headers">>, JsonResp) of
- undefined ->
- {[{<<"headers">>, {NewHeaders}} | JsonResp]};
- JsonHeaders ->
- Headers = apply_headers1(JsonHeaders, NewHeaders),
- NewKV = {<<"headers">>, Headers},
- {lists:keyreplace(<<"headers">>, 1, JsonResp, NewKV)}
- end.
-apply_headers1(JsonHeaders, [{Key, Value} | Rest]) ->
- NewJsonHeaders = json_apply_field({Key, Value}, JsonHeaders),
- apply_headers1(NewJsonHeaders, Rest);
-apply_headers1(JsonHeaders, []) ->
- JsonHeaders.
diff --git a/src/chttpd/src/chttpd_stats.erl b/src/chttpd/src/chttpd_stats.erl
deleted file mode 100644
index f6eb01659..000000000
--- a/src/chttpd/src/chttpd_stats.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_stats).
-
-% for the stacktrace macro only so far
--include_lib("couch/include/couch_db.hrl").
-
--export([
- init/0,
- report/2,
-
- incr_reads/0,
- incr_reads/1,
-
- incr_writes/0,
- incr_writes/1,
-
- incr_rows/0,
- incr_rows/1
-]).
-
--record(st, {
- reads = 0,
- writes = 0,
- rows = 0
-}).
-
--define(KEY, chttpd_stats).
-
-init() ->
- put(?KEY, #st{}).
-
-report(HttpReq, HttpResp) ->
- try
- case get(?KEY) of
- #st{} = St ->
- report(HttpReq, HttpResp, St);
- _ ->
- ok
- end
- catch ?STACKTRACE(T, R, S)
- Fmt = "Failed to report chttpd request stats: ~p:~p ~p",
- couch_log:error(Fmt, [T, R, S])
- end.
-
-report(HttpReq, HttpResp, St) ->
- case config:get("chttpd", "stats_reporter") of
- undefined ->
- ok;
- ModStr ->
- Mod = list_to_existing_atom(ModStr),
- #st{
- reads = Reads,
- writes = Writes,
- rows = Rows
- } = St,
- Mod:report(HttpReq, HttpResp, Reads, Writes, Rows)
- end.
-
-incr_reads() ->
- incr(#st.reads, 1).
-
-incr_reads(N) when is_integer(N), N >= 0 ->
- incr(#st.reads, N).
-
-incr_writes() ->
- incr(#st.writes, 1).
-
-incr_writes(N) when is_integer(N), N >= 0 ->
- incr(#st.writes, N).
-
-incr_rows() ->
- incr(#st.rows, 1).
-
-incr_rows(N) when is_integer(N), N >= 0 ->
- incr(#st.rows, N).
-
-incr(Idx, Count) ->
- case get(?KEY) of
- #st{} = St ->
- Total = element(Idx, St) + Count,
- NewSt = setelement(Idx, St, Total),
- put(?KEY, NewSt);
- _ ->
- ok
- end.
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
deleted file mode 100644
index ea4e62f80..000000000
--- a/src/chttpd/src/chttpd_sup.erl
+++ /dev/null
@@ -1,178 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_sup).
--behaviour(supervisor).
--vsn(1).
-
--behaviour(config_listener).
-
--export([init/1]).
-
--export([start_link/1]).
-
--export([handle_config_change/5, handle_config_terminate/3]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 100, Type, [I]}).
--define(DEFAULT_BACKLOG, 512).
--define(DEFAULT_SERVER_OPTIONS, "[{recbuf, undefined}]").
-
-start_link(Args) ->
- case supervisor:start_link({local, ?MODULE}, ?MODULE, Args) of
- {ok, _} = Resp ->
- notify_started(),
- notify_uris(),
- write_uris(),
- Resp;
- Else ->
- notify_error(Else),
- Else
- end.
-
-init([]) ->
- Children = [
- {
- config_listener_mon,
- {config_listener_mon, start_link, [?MODULE, settings()]},
- permanent,
- 5000,
- worker,
- [config_listener_mon]
- },
- ?CHILD(chttpd, worker),
- ?CHILD(chttpd_auth_cache, worker),
- {chttpd_auth_cache_lru, {ets_lru, start_link, [chttpd_auth_cache_lru, lru_opts()]},
- permanent, 5000, worker, [ets_lru]}
- ],
-
- {ok, {{one_for_one, 3, 10}, couch_epi:register_service(chttpd_epi, Children)}}.
-
-handle_config_change("chttpd", "bind_address", Value, _, Settings) ->
- maybe_replace(bind_address, Value, Settings);
-handle_config_change("chttpd", "port", Value, _, Settings) ->
- maybe_replace(port, Value, Settings);
-handle_config_change("chttpd", "backlog", Value, _, Settings) ->
- maybe_replace(backlog, Value, Settings);
-handle_config_change("chttpd", "server_options", Value, _, Settings) ->
- maybe_replace(server_options, Value, Settings);
-handle_config_change(_, _, _, _, Settings) ->
- {ok, Settings}.
-
-handle_config_terminate(_Server, _Reason, _State) ->
- ok.
-
-settings() ->
- [
- {bind_address, config:get("chttpd", "bind_address")},
- {port, config:get("chttpd", "port")},
- {backlog, config:get_integer("chttpd", "backlog", ?DEFAULT_BACKLOG)},
- {server_options,
- config:get(
- "chttpd",
- "server_options",
- ?DEFAULT_SERVER_OPTIONS
- )}
- ].
-
-maybe_replace(Key, Value, Settings) ->
- case couch_util:get_value(Key, Settings) of
- Value ->
- {ok, Settings};
- _ ->
- chttpd:stop(),
- {ok, lists:keyreplace(Key, 1, Settings, {Key, Value})}
- end.
-
-lru_opts() ->
- lists:foldl(fun append_if_set/2, [], [
- {max_objects, config:get_integer("chttpd_auth_cache", "max_objects", 0)},
- {max_size, config:get_integer("chttpd_auth_cache", "max_size", 104857600)},
- {max_lifetime, config:get_integer("chttpd_auth_cache", "max_lifetime", 600000)}
- ]).
-
-append_if_set({Key, Value}, Opts) when Value > 0 ->
- [{Key, Value} | Opts];
-append_if_set({_Key, 0}, Opts) ->
- Opts;
-append_if_set({Key, Value}, Opts) ->
- couch_log:error(
- "The value for `~s` should be string convertable "
- "to integer which is >= 0 (got `~p`)",
- [Key, Value]
- ),
- Opts.
-
-notify_started() ->
- couch_log:info("Apache CouchDB has started. Time to relax.~n", []).
-
-notify_error(Error) ->
- couch_log:error("Error starting Apache CouchDB:~n~n ~p~n~n", [Error]).
-
-notify_uris() ->
- lists:foreach(
- fun(Uri) ->
- couch_log:info("Apache CouchDB has started on ~s", [Uri])
- end,
- get_uris()
- ).
-
-write_uris() ->
- case config:get("couchdb", "uri_file", undefined) of
- undefined ->
- ok;
- UriFile ->
- Lines = [io_lib:format("~s~n", [Uri]) || Uri <- get_uris()],
- write_file(UriFile, Lines)
- end.
-
-get_uris() ->
- Ip = config:get("chttpd", "bind_address"),
- lists:flatmap(
- fun(Uri) ->
- case get_uri(Uri, Ip) of
- undefined -> [];
- Else -> [Else]
- end
- end,
- [chttpd, couch_httpd, https]
- ).
-
-get_uri(Name, Ip) ->
- case get_port(Name) of
- undefined ->
- undefined;
- Port ->
- io_lib:format("~s://~s:~w/", [get_scheme(Name), Ip, Port])
- end.
-
-get_scheme(chttpd) -> "http";
-get_scheme(couch_httpd) -> "http";
-get_scheme(https) -> "https".
-
-get_port(Name) ->
- try
- mochiweb_socket_server:get(Name, port)
- catch
- exit:{noproc, _} ->
- undefined
- end.
-
-write_file(FileName, Contents) ->
- case file:write_file(FileName, Contents) of
- ok ->
- ok;
- {error, Reason} ->
- Args = [FileName, file:format_error(Reason)],
- couch_log:error("Failed ot write ~s :: ~s", Args),
- throw({error, Reason})
- end.
diff --git a/src/chttpd/src/chttpd_test_util.erl b/src/chttpd/src/chttpd_test_util.erl
deleted file mode 100644
index 8a849acda..000000000
--- a/src/chttpd/src/chttpd_test_util.erl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_test_util).
-
--export([start_couch/0, start_couch/1, stop_couch/1]).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-start_couch() ->
- start_couch(?CONFIG_CHAIN).
-
-start_couch(IniFiles) ->
- test_util:start_couch(IniFiles, [chttpd]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
diff --git a/src/chttpd/src/chttpd_util.erl b/src/chttpd/src/chttpd_util.erl
deleted file mode 100644
index 955beca57..000000000
--- a/src/chttpd/src/chttpd_util.erl
+++ /dev/null
@@ -1,112 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_util).
-
--export([
- get_chttpd_config/1,
- get_chttpd_config/2,
- get_chttpd_config_integer/2,
- get_chttpd_config_boolean/2,
- get_chttpd_auth_config/1,
- get_chttpd_auth_config/2,
- get_chttpd_auth_config_integer/2,
- get_chttpd_auth_config_boolean/2,
- maybe_add_csp_header/3,
- get_db_info/1
-]).
-
-get_chttpd_config(Key) ->
- config:get("chttpd", Key, config:get("httpd", Key)).
-
-get_chttpd_config(Key, Default) ->
- config:get("chttpd", Key, config:get("httpd", Key, Default)).
-
-get_chttpd_config_integer(Key, Default) ->
- config:get_integer(
- "chttpd",
- Key,
- config:get_integer("httpd", Key, Default)
- ).
-
-get_chttpd_config_boolean(Key, Default) ->
- config:get_boolean(
- "chttpd",
- Key,
- config:get_boolean("httpd", Key, Default)
- ).
-
-get_chttpd_auth_config(Key) ->
- config:get("chttpd_auth", Key, config:get("couch_httpd_auth", Key)).
-
-get_chttpd_auth_config(Key, Default) ->
- config:get(
- "chttpd_auth",
- Key,
- config:get("couch_httpd_auth", Key, Default)
- ).
-
-get_chttpd_auth_config_integer(Key, Default) ->
- config:get_integer(
- "chttpd_auth",
- Key,
- config:get_integer("couch_httpd_auth", Key, Default)
- ).
-
-get_chttpd_auth_config_boolean(Key, Default) ->
- config:get_boolean(
- "chttpd_auth",
- Key,
- config:get_boolean("couch_httpd_auth", Key, Default)
- ).
-
-maybe_add_csp_header(Component, OriginalHeaders, DefaultHeaderValue) ->
- Enabled = config:get_boolean("csp", Component ++ "_enable", true),
- case Enabled of
- true ->
- HeaderValue = config:get("csp", Component ++ "_header_value", DefaultHeaderValue),
- % As per https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy#multiple_content_security_policies
-            % The topmost CSP header defines the most open policy,
- % subsequent CSP headers set by show/list functions can
- % only further restrict the policy.
- %
- % Ours goes on top and we don’t have to worry about additional
- % headers set by users.
- [{"Content-Security-Policy", HeaderValue} | OriginalHeaders];
- false ->
- % Fallback for old config vars
- case Component of
- "utils" ->
- handle_legacy_config(OriginalHeaders, DefaultHeaderValue);
- _ ->
- OriginalHeaders
- end
- end.
-
-handle_legacy_config(OriginalHeaders, DefaultHeaderValue) ->
- LegacyUtilsEnabled = config:get_boolean("csp", "enable", true),
- case LegacyUtilsEnabled of
- true ->
- LegacyUtilsHeaderValue = config:get("csp", "header_value", DefaultHeaderValue),
- [{"Content-Security-Policy", LegacyUtilsHeaderValue} | OriginalHeaders];
- false ->
- OriginalHeaders
- end.
-
-get_db_info(DbName) ->
- Timeout = fabric_util:request_timeout(),
- IsolatedFun = fun() -> fabric:get_db_info(DbName) end,
- try
- fabric_util:isolate(IsolatedFun, Timeout)
- catch
- _Tag:Error -> {error, Error}
- end.
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
deleted file mode 100644
index 1d721d189..000000000
--- a/src/chttpd/src/chttpd_view.erl
+++ /dev/null
@@ -1,215 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_view).
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([handle_view_req/3, handle_temp_view_req/2]).
-
-multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
- Args0 = couch_mrview_http:parse_params(Req, undefined),
- {ok, #mrst{views = Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
- Args1 = couch_mrview_util:set_view_type(Args0, ViewName, Views),
- ArgQueries = lists:map(
- fun({Query}) ->
- QueryArg = couch_mrview_http:parse_params(
- Query,
- undefined,
- Args1,
- [decoded]
- ),
- QueryArg1 = couch_mrview_util:set_view_type(QueryArg, ViewName, Views),
- fabric_util:validate_args(Db, DDoc, QueryArg1)
- end,
- Queries
- ),
- Options = [{user_ctx, Req#httpd.user_ctx}],
- VAcc0 = #vacc{db = Db, req = Req, prepend = "\r\n"},
- FirstChunk = "{\"results\":[",
- {ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, [], FirstChunk),
- VAcc1 = VAcc0#vacc{resp = Resp0},
- VAcc2 = lists:foldl(
- fun(Args, Acc0) ->
- {ok, Acc1} = fabric:query_view(
- Db,
- Options,
- DDoc,
- ViewName,
- fun view_cb/2,
- Acc0,
- Args
- ),
- Acc1
- end,
- VAcc1,
- ArgQueries
- ),
- {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
- chttpd:end_delayed_json_response(Resp1).
-
-design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys) ->
- Args = couch_mrview_http:parse_body_and_query(Req, Props, Keys),
- fabric_query_view(Db, Req, DDoc, ViewName, Args).
-
-design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
- Args = couch_mrview_http:parse_params(Req, Keys),
- fabric_query_view(Db, Req, DDoc, ViewName, Args).
-
-fabric_query_view(Db, Req, DDoc, ViewName, Args) ->
- Max = chttpd:chunked_response_buffer_size(),
- VAcc = #vacc{db = Db, req = Req, threshold = Max},
- Options = [{user_ctx, Req#httpd.user_ctx}],
- {ok, Resp} = fabric:query_view(
- Db,
- Options,
- DDoc,
- ViewName,
- fun view_cb/2,
- VAcc,
- Args
- ),
- {ok, Resp#vacc.resp}.
-
-view_cb({row, Row} = Msg, Acc) ->
- case lists:keymember(doc, 1, Row) of
- true -> chttpd_stats:incr_reads();
- false -> ok
- end,
- chttpd_stats:incr_rows(),
- couch_mrview_http:view_cb(Msg, Acc);
-view_cb(Msg, Acc) ->
- couch_mrview_http:view_cb(Msg, Acc).
-
-handle_view_req(
- #httpd{
- method = 'POST',
- path_parts = [_, _, _, _, ViewName, <<"queries">>]
- } = Req,
- Db,
- DDoc
-) ->
- chttpd:validate_ctype(Req, "application/json"),
- Props = couch_httpd:json_body_obj(Req),
- case couch_mrview_util:get_view_queries(Props) of
- undefined ->
- throw({bad_request, <<"POST body must include `queries` parameter.">>});
- Queries ->
- multi_query_view(Req, Db, DDoc, ViewName, Queries)
- end;
-handle_view_req(
- #httpd{path_parts = [_, _, _, _, _, <<"queries">>]} = Req,
- _Db,
- _DDoc
-) ->
- chttpd:send_method_not_allowed(Req, "POST");
-handle_view_req(
- #httpd{
- method = 'GET',
- path_parts = [_, _, _, _, ViewName]
- } = Req,
- Db,
- DDoc
-) ->
- couch_stats:increment_counter([couchdb, httpd, view_reads]),
- Keys = chttpd:qs_json_value(Req, "keys", undefined),
- design_doc_view(Req, Db, DDoc, ViewName, Keys);
-handle_view_req(
- #httpd{
- method = 'POST',
- path_parts = [_, _, _, _, ViewName]
- } = Req,
- Db,
- DDoc
-) ->
- chttpd:validate_ctype(Req, "application/json"),
- Props = couch_httpd:json_body_obj(Req),
- assert_no_queries_param(couch_mrview_util:get_view_queries(Props)),
- Keys = couch_mrview_util:get_view_keys(Props),
- couch_stats:increment_counter([couchdb, httpd, view_reads]),
- design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys);
-handle_view_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_temp_view_req(Req, _Db) ->
- Msg = <<"Temporary views are not supported in CouchDB">>,
- chttpd:send_error(Req, 410, gone, Msg).
-
-% See https://github.com/apache/couchdb/issues/2168
-assert_no_queries_param(undefined) ->
- ok;
-assert_no_queries_param(_) ->
- throw({
- bad_request,
- "The `queries` parameter is no longer supported at this endpoint"
- }).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-check_multi_query_reduce_view_overrides_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_check_include_docs_throw_validation_error(),
- t_check_user_can_override_individual_query_type()
- ]
- }
- }.
-
-t_check_include_docs_throw_validation_error() ->
- ?_test(begin
- Req = #httpd{qs = []},
- Db = test_util:fake_db([{name, <<"foo">>}]),
- Query = {[{<<"include_docs">>, true}]},
- Throw = {query_parse_error, <<"`include_docs` is invalid for reduce">>},
- ?assertThrow(Throw, multi_query_view(Req, Db, ddoc, <<"v">>, [Query]))
- end).
-
-t_check_user_can_override_individual_query_type() ->
- ?_test(begin
- Req = #httpd{qs = []},
- Db = test_util:fake_db([{name, <<"foo">>}]),
- Query = {[{<<"include_docs">>, true}, {<<"reduce">>, false}]},
- multi_query_view(Req, Db, ddoc, <<"v">>, [Query]),
- ?assertEqual(1, meck:num_calls(chttpd, start_delayed_json_response, '_'))
- end).
-
-setup_all() ->
- Views = [#mrview{reduce_funs = [{<<"v">>, <<"_count">>}]}],
- meck:expect(couch_mrview_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
- meck:expect(chttpd, start_delayed_json_response, 4, {ok, resp}),
- meck:expect(fabric, query_view, 7, {ok, #vacc{}}),
- meck:expect(chttpd, send_delayed_chunk, 2, {ok, resp}),
- meck:expect(chttpd, end_delayed_json_response, 1, ok).
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([
- chttpd,
- couch_mrview_util,
- fabric
- ]).
-
-teardown(_) ->
- ok.
-
--endif.
diff --git a/src/chttpd/src/chttpd_xframe_options.erl b/src/chttpd/src/chttpd_xframe_options.erl
deleted file mode 100644
index 15865057b..000000000
--- a/src/chttpd/src/chttpd_xframe_options.erl
+++ /dev/null
@@ -1,90 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_xframe_options).
-
--export([
- header/2,
- header/3
-]).
-
--define(DENY, "DENY").
--define(SAMEORIGIN, "SAMEORIGIN").
--define(ALLOWFROM, "ALLOW-FROM ").
-
--include_lib("couch/include/couch_db.hrl").
-
-% X-Frame-Options protects against clickjacking by limiting whether a response can be used in a
-% <frame>, <iframe> or <object>.
-
-header(Req, Headers) ->
- header(Req, Headers, get_xframe_config(Req)).
-
-header(Req, Headers, Config) ->
- case lists:keyfind(enabled, 1, Config) of
- {enabled, true} ->
- generate_xframe_header(Req, Headers, Config);
- _ ->
- Headers
- end.
-
-generate_xframe_header(Req, Headers, Config) ->
- XframeOption =
- case lists:keyfind(same_origin, 1, Config) of
- {same_origin, true} ->
- ?SAMEORIGIN;
- _ ->
- check_host(Req, Config)
- end,
- [{"X-Frame-Options", XframeOption} | Headers].
-
-check_host(#httpd{mochi_req = MochiReq} = Req, Config) ->
- Host = couch_httpd_vhost:host(MochiReq),
- case Host of
- [] ->
- ?DENY;
- Host ->
- FullHost = chttpd:absolute_uri(Req, ""),
- AcceptedHosts = get_accepted_hosts(Config),
- AcceptAll = ["*"] =:= AcceptedHosts,
- case AcceptAll orelse lists:member(FullHost, AcceptedHosts) of
- true -> ?ALLOWFROM ++ FullHost;
- false -> ?DENY
- end
- end.
-
-get_xframe_config(#httpd{xframe_config = undefined}) ->
- EnableXFrame = chttpd_util:get_chttpd_config_boolean(
- "enable_xframe_options", false
- ),
- SameOrigin = config:get("x_frame_options", "same_origin", "false") =:= "true",
- AcceptedHosts =
- case config:get("x_frame_options", "hosts") of
- undefined -> [];
- Hosts -> split_list(Hosts)
- end,
- [
- {enabled, EnableXFrame},
- {same_origin, SameOrigin},
- {hosts, AcceptedHosts}
- ];
-get_xframe_config(#httpd{xframe_config = Config}) ->
- Config.
-
-get_accepted_hosts(Config) ->
- case lists:keyfind(hosts, 1, Config) of
- false -> [];
- {hosts, AcceptedHosts} -> AcceptedHosts
- end.
-
-split_list(S) ->
- re:split(S, "\\s*,\\s*", [trim, {return, list}]).
diff --git a/src/chttpd/test/eunit/chttpd_auth_tests.erl b/src/chttpd/test/eunit/chttpd_auth_tests.erl
deleted file mode 100644
index 7beda9bc7..000000000
--- a/src/chttpd/test/eunit/chttpd_auth_tests.erl
+++ /dev/null
@@ -1,127 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_auth_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup() ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- BaseUrl = lists:concat(["http://", Addr, ":", Port]),
- BaseUrl.
-
-teardown(_Url) ->
- ok.
-
-require_valid_user_exception_test_() ->
- {
- "_up",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_handle_require_valid_user_except_up_on_up_route/1,
- fun should_handle_require_valid_user_except_up_on_non_up_routes/1
- ]
- }
- }
- }.
-
-set_require_user_false() ->
- ok = config:set("chttpd", "require_valid_user", "false", _Persist = false).
-
-set_require_user_true() ->
- ok = config:set("chttpd", "require_valid_user", "true", _Persist = false).
-
-set_require_user_except_for_up_false() ->
- ok = config:set("chttpd", "require_valid_user_except_for_up", "false", _Persist = false).
-
-set_require_user_except_for_up_true() ->
- ok = config:set("chttpd", "require_valid_user_except_for_up", "true", _Persist = false).
-
-should_handle_require_valid_user_except_up_on_up_route(_Url) ->
- ?_test(begin
- % require_valid_user | require_valid_user_except_up | up needs auth
- % 1 F | F | F
- % 2 F | T | F
- % 3 T | F | T
- % 4 T | T | F
-
- UpRequest = #httpd{path_parts = [<<"_up">>]},
- % we use ?ADMIN_USER here because these tests run under admin party
- % so this is equivalent to an unauthenticated request
- ExpectAuth = {unauthorized, <<"Authentication required.">>},
- ExpectNoAuth = #httpd{user_ctx = ?ADMIN_USER, path_parts = [<<"_up">>]},
-
- % 1
- set_require_user_false(),
- set_require_user_except_for_up_false(),
- Result1 = chttpd_auth:party_mode_handler(UpRequest),
- ?assertEqual(ExpectNoAuth, Result1),
-
- % 2
- set_require_user_false(),
- set_require_user_except_for_up_true(),
- Result2 = chttpd_auth:party_mode_handler(UpRequest),
- ?assertEqual(ExpectNoAuth, Result2),
-
- % 3
- set_require_user_true(),
- set_require_user_except_for_up_false(),
- ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(UpRequest)),
-
- % 4
- set_require_user_true(),
- set_require_user_except_for_up_true(),
- Result4 = chttpd_auth:party_mode_handler(UpRequest),
- ?assertEqual(ExpectNoAuth, Result4)
- end).
-
-should_handle_require_valid_user_except_up_on_non_up_routes(_Url) ->
- ?_test(begin
- % require_valid_user | require_valid_user_except_up | everything not _up requires auth
- % 5 F | F | F
- % 6 F | T | T
- % 7 T | F | T
- % 8 T | T | T
-
- NonUpRequest = #httpd{path_parts = [<<"/">>]},
- ExpectAuth = {unauthorized, <<"Authentication required.">>},
- ExpectNoAuth = #httpd{user_ctx = ?ADMIN_USER, path_parts = [<<"/">>]},
- % 5
- set_require_user_false(),
- set_require_user_except_for_up_false(),
- Result5 = chttpd_auth:party_mode_handler(NonUpRequest),
- ?assertEqual(ExpectNoAuth, Result5),
-
- % 6
- set_require_user_false(),
- set_require_user_except_for_up_true(),
- ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest)),
-
- % 7
- set_require_user_true(),
- set_require_user_except_for_up_false(),
- ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest)),
-
- % 8
- set_require_user_true(),
- set_require_user_except_for_up_true(),
- ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest))
- end).
diff --git a/src/chttpd/test/eunit/chttpd_cors_test.erl b/src/chttpd/test/eunit/chttpd_cors_test.erl
deleted file mode 100644
index 93b080fc6..000000000
--- a/src/chttpd/test/eunit/chttpd_cors_test.erl
+++ /dev/null
@@ -1,582 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_cors_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include_lib("chttpd/include/chttpd_cors.hrl").
-
--define(DEFAULT_ORIGIN, "http://example.com").
--define(DEFAULT_ORIGIN_HTTPS, "https://example.com").
--define(EXPOSED_HEADERS,
- "content-type, accept-ranges, etag, server, x-couch-request-id, " ++
- "x-couch-update-newrev, x-couchdb-body-time"
-).
-
--define(CUSTOM_SUPPORTED_METHODS, ?SUPPORTED_METHODS -- ["CONNECT"]).
--define(CUSTOM_SUPPORTED_HEADERS, ["extra" | ?SUPPORTED_HEADERS -- ["pragma"]]).
--define(CUSTOM_EXPOSED_HEADERS, ["expose" | ?COUCH_HEADERS]).
-
--define(CUSTOM_MAX_AGE, round(?CORS_DEFAULT_MAX_AGE / 2)).
-
-%% Test helpers
-
-empty_cors_config() ->
- [].
-
-minimal_cors_config() ->
- [
- {<<"enable_cors">>, true},
- {<<"origins">>, {[]}}
- ].
-
-simple_cors_config() ->
- [
- {<<"enable_cors">>, true},
- {<<"origins">>,
- {[
- {list_to_binary(?DEFAULT_ORIGIN), {[]}}
- ]}}
- ].
-
-wildcard_cors_config() ->
- [
- {<<"enable_cors">>, true},
- {<<"origins">>,
- {[
- {<<"*">>, {[]}}
- ]}}
- ].
-
-custom_cors_config() ->
- [
- {<<"enable_cors">>, true},
- {<<"allow_methods">>, ?CUSTOM_SUPPORTED_METHODS},
- {<<"allow_headers">>, ?CUSTOM_SUPPORTED_HEADERS},
- {<<"exposed_headers">>, ?CUSTOM_EXPOSED_HEADERS},
- {<<"max_age">>, ?CUSTOM_MAX_AGE},
- {<<"origins">>,
- {[
- {<<"*">>, {[]}}
- ]}}
- ].
-
-access_control_cors_config(AllowCredentials) ->
- [
- {<<"enable_cors">>, true},
- {<<"allow_credentials">>, AllowCredentials},
- {<<"origins">>,
- {[
- {list_to_binary(?DEFAULT_ORIGIN), {[]}}
- ]}}
- ].
-
-multiple_cors_config() ->
- [
- {<<"enable_cors">>, true},
- {<<"origins">>,
- {[
- {list_to_binary(?DEFAULT_ORIGIN), {[]}},
- {<<"https://example.com">>, {[]}},
- {<<"http://example.com:5984">>, {[]}},
- {<<"https://example.com:5984">>, {[]}}
- ]}}
- ].
-
-mock_request(Method, Path, Headers0) ->
- HeaderKey = "Access-Control-Request-Method",
- Headers =
- case proplists:get_value(HeaderKey, Headers0, undefined) of
- nil ->
- proplists:delete(HeaderKey, Headers0);
- undefined ->
- case Method of
- 'OPTIONS' ->
- [{HeaderKey, atom_to_list(Method)} | Headers0];
- _ ->
- Headers0
- end;
- _ ->
- Headers0
- end,
- Headers1 = mochiweb_headers:make(Headers),
- MochiReq = mochiweb_request:new(nil, Method, Path, {1, 1}, Headers1),
- PathParts = [
- list_to_binary(chttpd:unquote(Part))
- || Part <- string:tokens(Path, "/")
- ],
- #httpd{method = Method, mochi_req = MochiReq, path_parts = PathParts}.
-
-header(#httpd{} = Req, Key) ->
- chttpd:header_value(Req, Key);
-header({mochiweb_response, [_, _, Headers]}, Key) ->
- %% header(Headers, Key);
- mochiweb_headers:get_value(Key, Headers);
-header(Headers, Key) ->
- couch_util:get_value(Key, Headers, undefined).
-
-string_headers(H) ->
- string:join(H, ", ").
-
-assert_not_preflight_(Val) ->
- ?_assertEqual(not_preflight, Val).
-
-%% CORS disabled tests
-
-cors_disabled_test_() ->
- {"CORS disabled tests", [
- {"Empty user",
- {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
- {foreach, fun empty_cors_config/0, [
- fun test_no_access_control_method_preflight_request_/1,
- fun test_no_headers_/1,
- fun test_no_headers_server_/1,
- fun test_no_headers_db_/1
- ]}}}
- ]}.
-
-%% CORS enabled tests
-
-cors_enabled_minimal_config_test_() ->
- {"Minimal CORS enabled, no Origins",
- {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
- {foreach, fun minimal_cors_config/0, [
- fun test_no_access_control_method_preflight_request_/1,
- fun test_incorrect_origin_simple_request_/1,
- fun test_incorrect_origin_preflight_request_/1
- ]}}}.
-
-cors_enabled_simple_config_test_() ->
- {"Simple CORS config",
- {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
- {foreach, fun simple_cors_config/0, [
- fun test_no_access_control_method_preflight_request_/1,
- fun test_preflight_request_/1,
- fun test_bad_headers_preflight_request_/1,
- fun test_good_headers_preflight_request_/1,
- fun test_db_request_/1,
- fun test_db_preflight_request_/1,
- fun test_db_host_origin_request_/1,
- fun test_preflight_with_port_no_origin_/1,
- fun test_preflight_with_scheme_no_origin_/1,
- fun test_preflight_with_scheme_port_no_origin_/1,
- fun test_case_sensitive_mismatch_of_allowed_origins_/1
- ]}}}.
-
-cors_enabled_custom_config_test_() ->
- {"Simple CORS config with custom allow_methods/allow_headers/exposed_headers",
- {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
- {foreach, fun custom_cors_config/0, [
- fun test_good_headers_preflight_request_with_custom_config_/1,
- fun test_db_request_with_custom_config_/1
- ]}}}.
-
-cors_enabled_multiple_config_test_() ->
- {"Multiple options CORS config",
- {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
- {foreach, fun multiple_cors_config/0, [
- fun test_no_access_control_method_preflight_request_/1,
- fun test_preflight_request_/1,
- fun test_db_request_/1,
- fun test_db_preflight_request_/1,
- fun test_db_host_origin_request_/1,
- fun test_preflight_with_port_with_origin_/1,
- fun test_preflight_with_scheme_with_origin_/1,
- fun test_preflight_with_scheme_port_with_origin_/1
- ]}}}.
-
-%% Access-Control-Allow-Credentials tests
-
-%% http://www.w3.org/TR/cors/#supports-credentials
-%% 6.1.3
-%% If the resource supports credentials add a single
-%% Access-Control-Allow-Origin header, with the value
-%% of the Origin header as value, and add a single
-%% Access-Control-Allow-Credentials header with the
-%% case-sensitive string "true" as value.
-%% Otherwise, add a single Access-Control-Allow-Origin
-%% header, with either the value of the Origin header
-%% or the string "*" as value.
-%% Note: The string "*" cannot be used for a resource
-%% that supports credentials.
-
-db_request_credentials_header_off_test_() ->
- {"Allow credentials disabled",
- {setup,
- fun() ->
- access_control_cors_config(false)
- end,
- fun test_db_request_credentials_header_off_/1}}.
-
-db_request_credentials_header_on_test_() ->
- {"Allow credentials enabled",
- {setup,
- fun() ->
- access_control_cors_config(true)
- end,
- fun test_db_request_credentials_header_on_/1}}.
-
-%% CORS wildcard tests
-
-cors_enabled_wildcard_test_() ->
- {"Wildcard CORS config",
- {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
- {foreach, fun wildcard_cors_config/0, [
- fun test_no_access_control_method_preflight_request_/1,
- fun test_preflight_request_/1,
- fun test_preflight_request_no_allow_credentials_/1,
- fun test_preflight_request_empty_request_headers_/1,
- fun test_db_request_/1,
- fun test_db_preflight_request_/1,
- fun test_db_host_origin_request_/1,
- fun test_preflight_with_port_with_origin_/1,
- fun test_preflight_with_scheme_with_origin_/1,
- fun test_preflight_with_scheme_port_with_origin_/1,
- fun test_case_sensitive_mismatch_of_allowed_origins_/1
- ]}}}.
-
-%% Test generators
-
-test_no_headers_(OwnerConfig) ->
- Req = mock_request('GET', "/", []),
- assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)).
-
-test_no_headers_server_(OwnerConfig) ->
- Req = mock_request('GET', "/", [{"Origin", "http://127.0.0.1"}]),
- assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)).
-
-test_no_headers_db_(OwnerConfig) ->
- Headers = [{"Origin", "http://127.0.0.1"}],
- Req = mock_request('GET', "/my_db", Headers),
- assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)).
-
-test_incorrect_origin_simple_request_(OwnerConfig) ->
- Req = mock_request('GET', "/", [{"Origin", "http://127.0.0.1"}]),
- [
- ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)),
- assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig))
- ].
-
-test_incorrect_origin_preflight_request_(OwnerConfig) ->
- Headers = [
- {"Origin", "http://127.0.0.1"},
- {"Access-Control-Request-Method", "GET"}
- ],
- Req = mock_request('GET', "/", Headers),
- [
- ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)),
- assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig))
- ].
-
-test_bad_headers_preflight_request_(OwnerConfig) ->
- Headers = [
- {"Origin", ?DEFAULT_ORIGIN},
- {"Access-Control-Request-Method", "GET"},
- {"Access-Control-Request-Headers", "X-Not-An-Allowed-Headers"}
- ],
- Req = mock_request('OPTIONS', "/", Headers),
- [
- ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)),
- assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig))
- ].
-
-test_good_headers_preflight_request_(OwnerConfig) ->
- Headers = [
- {"Origin", ?DEFAULT_ORIGIN},
- {"Access-Control-Request-Method", "GET"},
- {"Access-Control-Request-Headers", "accept-language"}
- ],
- Req = mock_request('OPTIONS', "/", Headers),
- ?assert(chttpd_cors:is_cors_enabled(OwnerConfig)),
- {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods")
- ),
- ?_assertEqual(
- string_headers(["accept-language"]),
- header(Headers1, "Access-Control-Allow-Headers")
- )
- ].
-
-test_good_headers_preflight_request_with_custom_config_(OwnerConfig) ->
- Headers = [
- {"Origin", ?DEFAULT_ORIGIN},
- {"Access-Control-Request-Method", "GET"},
- {"Access-Control-Request-Headers", "accept-language, extra"},
- {"Access-Control-Max-Age", ?CORS_DEFAULT_MAX_AGE}
- ],
- Req = mock_request('OPTIONS', "/", Headers),
- ?assert(chttpd_cors:is_cors_enabled(OwnerConfig)),
- AllowMethods = couch_util:get_value(
- <<"allow_methods">>, OwnerConfig, ?SUPPORTED_METHODS
- ),
- MaxAge = couch_util:get_value(
- <<"max_age">>, OwnerConfig, ?CORS_DEFAULT_MAX_AGE
- ),
- {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- string_headers(AllowMethods),
- header(Headers1, "Access-Control-Allow-Methods")
- ),
- ?_assertEqual(
- string_headers(["accept-language", "extra"]),
- header(Headers1, "Access-Control-Allow-Headers")
- ),
- ?_assertEqual(
- MaxAge,
- header(Headers1, "Access-Control-Max-Age")
- )
- ].
-
-test_preflight_request_(OwnerConfig) ->
- Headers = [
- {"Origin", ?DEFAULT_ORIGIN},
- {"Access-Control-Request-Method", "GET"}
- ],
- Req = mock_request('OPTIONS', "/", Headers),
- {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods")
- )
- ].
-
-test_no_access_control_method_preflight_request_(OwnerConfig) ->
- Headers = [
- {"Origin", ?DEFAULT_ORIGIN},
- {"Access-Control-Request-Method", notnil}
- ],
- Req = mock_request('OPTIONS', "/", Headers),
- assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)).
-
-test_preflight_request_no_allow_credentials_(OwnerConfig) ->
- Headers = [
- {"Origin", ?DEFAULT_ORIGIN},
- {"Access-Control-Request-Method", "GET"}
- ],
- Req = mock_request('OPTIONS', "/", Headers),
- {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods")
- ),
- ?_assertEqual(
- undefined,
- header(Headers1, "Access-Control-Allow-Credentials")
- )
- ].
-
-test_preflight_request_empty_request_headers_(OwnerConfig) ->
- Headers = [
- {"Origin", ?DEFAULT_ORIGIN},
- {"Access-Control-Request-Method", "POST"},
- {"Access-Control-Request-Headers", ""}
- ],
- Req = mock_request('OPTIONS', "/", Headers),
- {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods")
- ),
- ?_assertEqual(
- "",
- header(Headers1, "Access-Control-Allow-Headers")
- )
- ].
-
-test_db_request_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN,
- Headers = [{"Origin", Origin}],
- Req = mock_request('GET', "/my_db", Headers),
- Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- ?EXPOSED_HEADERS,
- header(Headers1, "Access-Control-Expose-Headers")
- )
- ].
-
-test_db_request_with_custom_config_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN,
- Headers = [{"Origin", Origin}, {"extra", "EXTRA"}],
- Req = mock_request('GET', "/my_db", Headers),
- Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
- ExposedHeaders = couch_util:get_value(
- <<"exposed_headers">>, OwnerConfig, ?COUCH_HEADERS
- ),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- lists:sort(["content-type" | ExposedHeaders]),
- lists:sort(
- split_list(header(Headers1, "Access-Control-Expose-Headers"))
- )
- )
- ].
-
-test_db_preflight_request_(OwnerConfig) ->
- Headers = [
- {"Origin", ?DEFAULT_ORIGIN}
- ],
- Req = mock_request('OPTIONS', "/my_db", Headers),
- {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods")
- )
- ].
-
-test_db_host_origin_request_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN,
- Headers = [
- {"Origin", Origin},
- {"Host", "example.com"}
- ],
- Req = mock_request('GET', "/my_db", Headers),
- Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- ?EXPOSED_HEADERS,
- header(Headers1, "Access-Control-Expose-Headers")
- )
- ].
-
-test_preflight_origin_helper_(OwnerConfig, Origin, ExpectedOrigin) ->
- Headers = [
- {"Origin", Origin},
- {"Access-Control-Request-Method", "GET"}
- ],
- Req = mock_request('OPTIONS', "/", Headers),
- Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
- [
- ?_assertEqual(
- ExpectedOrigin,
- header(Headers1, "Access-Control-Allow-Origin")
- )
- ].
-
-test_preflight_with_port_no_origin_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN ++ ":5984",
- test_preflight_origin_helper_(OwnerConfig, Origin, undefined).
-
-test_preflight_with_port_with_origin_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN ++ ":5984",
- test_preflight_origin_helper_(OwnerConfig, Origin, Origin).
-
-test_preflight_with_scheme_no_origin_(OwnerConfig) ->
- test_preflight_origin_helper_(OwnerConfig, ?DEFAULT_ORIGIN_HTTPS, undefined).
-
-test_preflight_with_scheme_with_origin_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN_HTTPS,
- test_preflight_origin_helper_(OwnerConfig, Origin, Origin).
-
-test_preflight_with_scheme_port_no_origin_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN_HTTPS ++ ":5984",
- test_preflight_origin_helper_(OwnerConfig, Origin, undefined).
-
-test_preflight_with_scheme_port_with_origin_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN_HTTPS ++ ":5984",
- test_preflight_origin_helper_(OwnerConfig, Origin, Origin).
-
-test_case_sensitive_mismatch_of_allowed_origins_(OwnerConfig) ->
- Origin = "http://EXAMPLE.COM",
- Headers = [{"Origin", Origin}],
- Req = mock_request('GET', "/", Headers),
- Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- ?EXPOSED_HEADERS,
- header(Headers1, "Access-Control-Expose-Headers")
- )
- ].
-
-test_db_request_credentials_header_off_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN,
- Headers = [{"Origin", Origin}],
- Req = mock_request('GET', "/", Headers),
- Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- undefined,
- header(Headers1, "Access-Control-Allow-Credentials")
- )
- ].
-
-test_db_request_credentials_header_on_(OwnerConfig) ->
- Origin = ?DEFAULT_ORIGIN,
- Headers = [{"Origin", Origin}],
- Req = mock_request('GET', "/", Headers),
- Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
- [
- ?_assertEqual(
- ?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")
- ),
- ?_assertEqual(
- "true",
- header(Headers1, "Access-Control-Allow-Credentials")
- )
- ].
-
-split_list(S) ->
- re:split(S, "\\s*,\\s*", [trim, {return, list}]).
diff --git a/src/chttpd/test/eunit/chttpd_csp_tests.erl b/src/chttpd/test/eunit/chttpd_csp_tests.erl
deleted file mode 100644
index 4c77c5ab0..000000000
--- a/src/chttpd/test/eunit/chttpd_csp_tests.erl
+++ /dev/null
@@ -1,281 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_csp_tests).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
--define(ADM_USER, "adm_user").
--define(ADM_PASS, "adm_pass").
--define(ADM, {?ADM_USER, ?ADM_PASS}).
--define(ACC_USER, "acc").
--define(ACC_PASS, "pass").
--define(ACC, {?ACC_USER, ?ACC_PASS}).
--define(DOC1, "doc1").
--define(DDOC1, "_design/ddoc1").
--define(DDOC1_PATH_ENC, "_design%2Fddoc1").
--define(LDOC1, "_local/ldoc1").
--define(LDOC1_PATH_ENC, "_local%2Fldoc1").
--define(ATT1, "att1").
--define(VIEW1, "view1").
--define(SHOW1, "show1").
--define(LIST1, "list1").
--define(SALT, <<"01234567890123456789012345678901">>).
--define(TDEF(Name), {atom_to_list(Name), fun Name/1}).
--define(TDEF(Name, Timeout), {atom_to_list(Name), Timeout, fun Name/1}).
--define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
--define(TDEF_FE(Name, Timeout), fun(Arg) ->
- {atom_to_list(Name), {timeout, Timeout, ?_test(Name(Arg))}}
-end).
-
-csp_test_() ->
- {
- "CSP Tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun cleanup/1,
- [
- ?TDEF_FE(plain_docs_not_sandboxed),
- ?TDEF_FE(plain_ddocs_not_sandboxed),
- ?TDEF_FE(local_docs_not_sandboxed),
- ?TDEF_FE(sandbox_doc_attachments),
- ?TDEF_FE(sandbox_ddoc_attachments),
- ?TDEF_FE(sandbox_shows),
- ?TDEF_FE(sandbox_lists),
- fun should_not_return_any_csp_headers_when_disabled/1,
- fun should_apply_default_policy_with_legacy_config/1,
- fun should_apply_default_policy/1,
- fun should_return_custom_policy/1
- ]
- }
- }
- }.
-
-plain_docs_not_sandboxed(DbName) ->
- DbUrl = base_url() ++ "/" ++ DbName,
- Url = DbUrl ++ "/" ++ ?DOC1,
- ?assertEqual({200, false}, req(get, ?ACC, Url)),
- config:set("csp", "attachments_enable", "false", false),
- ?assertEqual({200, false}, req(get, ?ACC, Url)).
-
-plain_ddocs_not_sandboxed(DbName) ->
- DbUrl = base_url() ++ "/" ++ DbName,
- Url = DbUrl ++ "/" ++ ?DDOC1,
- ?assertEqual({200, false}, req(get, ?ACC, Url)),
- config:set("csp", "attachments_enable", "false", false),
- ?assertEqual({200, false}, req(get, ?ACC, Url)).
-
-local_docs_not_sandboxed(DbName) ->
- DbUrl = base_url() ++ "/" ++ DbName,
- Url = DbUrl ++ "/" ++ ?LDOC1,
- ?assertEqual({200, false}, req(get, ?ACC, Url)),
- config:set("csp", "attachments_enable", "false", false),
- ?assertEqual({200, false}, req(get, ?ACC, Url)).
-
-sandbox_doc_attachments(DbName) ->
- DbUrl = base_url() ++ "/" ++ DbName,
- Url = DbUrl ++ "/" ++ ?DOC1 ++ "/" ++ ?ATT1,
- ?assertEqual({200, true}, req(get, ?ACC, Url)),
- config:set("csp", "attachments_enable", "false", false),
- ?assertEqual({200, false}, req(get, ?ACC, Url)).
-
-sandbox_ddoc_attachments(DbName) ->
- DbUrl = base_url() ++ "/" ++ DbName,
- Url = DbUrl ++ "/" ++ ?DDOC1 ++ "/" ++ ?ATT1,
- ?assertEqual({200, true}, req(get, ?ACC, Url)),
- config:set("csp", "attachments_enable", "false", false),
- ?assertEqual({200, false}, req(get, ?ACC, Url)).
-
-sandbox_shows(DbName) ->
- DbUrl = base_url() ++ "/" ++ DbName,
- DDocUrl = DbUrl ++ "/" ++ ?DDOC1,
- Url = DDocUrl ++ "/_show/" ++ ?SHOW1 ++ "/" ++ ?DOC1,
- ?assertEqual({200, true}, req(get, ?ACC, Url)),
- config:set("csp", "showlist_enable", "false", false),
- ?assertEqual({200, false}, req(get, ?ACC, Url)).
-
-sandbox_lists(DbName) ->
- DbUrl = base_url() ++ "/" ++ DbName,
- DDocUrl = DbUrl ++ "/" ++ ?DDOC1,
- Url = DDocUrl ++ "/_list/" ++ ?LIST1 ++ "/" ++ ?VIEW1,
- ?assertEqual({200, true}, req(get, ?ACC, Url)),
- config:set("csp", "showlist_enable", "false", false),
- ?assertEqual({200, false}, req(get, ?ACC, Url)).
-
-should_not_return_any_csp_headers_when_disabled(_DbName) ->
- ?_assertEqual(
- undefined,
- begin
- ok = config:set("csp", "utils_enable", "false", false),
- ok = config:set("csp", "enable", "false", false),
- {ok, _, Headers, _} = test_request:get(base_url() ++ "/_utils/"),
- proplists:get_value("Content-Security-Policy", Headers)
- end
- ).
-
-should_apply_default_policy(_DbName) ->
- ?_assertEqual(
- "child-src 'self' data: blob:; default-src 'self'; img-src 'self' data:; font-src 'self'; "
- "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';",
- begin
- {ok, _, Headers, _} = test_request:get(base_url() ++ "/_utils/"),
- proplists:get_value("Content-Security-Policy", Headers)
- end
- ).
-
-should_apply_default_policy_with_legacy_config(_DbName) ->
- ?_assertEqual(
- "child-src 'self' data: blob:; default-src 'self'; img-src 'self' data:; font-src 'self'; "
- "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';",
- begin
- ok = config:set("csp", "utils_enable", "false", false),
- ok = config:set("csp", "enable", "true", false),
- {ok, _, Headers, _} = test_request:get(base_url() ++ "/_utils/"),
- proplists:get_value("Content-Security-Policy", Headers)
- end
- ).
-
-should_return_custom_policy(_DbName) ->
- ?_assertEqual(
- "default-src 'http://example.com';",
- begin
- ok = config:set(
- "csp",
- "utils_header_value",
- "default-src 'http://example.com';",
- false
- ),
- {ok, _, Headers, _} = test_request:get(base_url() ++ "/_utils/"),
- proplists:get_value("Content-Security-Policy", Headers)
- end
- ).
-
-% Utility functions
-
-setup_all() ->
- Ctx = test_util:start_couch([chttpd]),
- Hashed = couch_passwords:hash_admin_password(?ADM_PASS),
- config:set("admins", ?ADM_USER, ?b2l(Hashed), false),
- config:set("log", "level", "debug", false),
- Ctx.
-
-teardown_all(Ctx) ->
- test_util:stop_couch(Ctx).
-
-setup() ->
- UsersDb = ?b2l(?tempdb()),
- config:set("chttpd_auth", "authentication_db", UsersDb, false),
- UsersDbUrl = base_url() ++ "/" ++ UsersDb,
- {201, _} = req(put, ?ADM, UsersDbUrl),
- % Since we're dealing with the auth cache and ets_lru, it's best to just
- % restart the whole application.
- application:stop(chttpd),
- ok = application:start(chttpd, permanent),
- ok = create_user(UsersDb, <<?ACC_USER>>, <<?ACC_PASS>>, []),
- DbName = ?b2l(?tempdb()),
- DbUrl = base_url() ++ "/" ++ DbName,
- {201, _} = req(put, ?ADM, DbUrl),
- ok = create_doc(?ACC, DbName, #{
- <<"_id">> => <<?DOC1>>,
- <<"_attachments">> => #{
- <<?ATT1>> => #{
- <<"data">> => base64:encode(<<"att1_data">>)
- }
- }
- }),
- ok = create_doc(?ADM, DbName, #{
- <<"_id">> => <<?DDOC1>>,
- <<"_attachments">> => #{
- <<?ATT1>> => #{
- <<"data">> => base64:encode(<<"att1_data">>)
- }
- },
- <<"views">> => #{
- <<?VIEW1>> => #{
- <<"map">> => <<"function(doc) {emit(doc._id, doc._rev)}">>
- }
- },
- <<"shows">> => #{
- <<?SHOW1>> => <<"function(doc, req) {return '<h1>show1!</h1>';}">>
- },
- <<"lists">> => #{
- <<?LIST1>> =>
- <<"function(head, req) {", "var row;", "while(row = getRow()){ send(row.key); };",
- "}">>
- }
- }),
- ok = create_doc(?ACC, DbName, #{<<"_id">> => <<?LDOC1>>}),
- DbName.
-
-cleanup(DbName) ->
- config:delete("csp", "utils_enable", _Persist = false),
- config:delete("csp", "attachments_enable", _Persist = false),
- config:delete("csp", "showlist_enable", _Persist = false),
- DbUrl = base_url() ++ "/" ++ DbName,
- {200, _} = req(delete, ?ADM, DbUrl),
- UsersDb = config:get("chttpd_auth", "authentication_db"),
- config:delete("chttpd_auth", "authentication_db", false),
- UsersDbUrl = base_url() ++ "/" ++ UsersDb,
- {200, _} = req(delete, ?ADM, UsersDbUrl).
-
-base_url() ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
-create_user(UsersDb, Name, Pass, Roles) when
- is_list(UsersDb),
- is_binary(Name),
- is_binary(Pass),
- is_list(Roles)
-->
- Body = #{
- <<"name">> => Name,
- <<"type">> => <<"user">>,
- <<"roles">> => Roles,
- <<"password_sha">> => hash_password(Pass),
- <<"salt">> => ?SALT
- },
- Url = base_url() ++ "/" ++ UsersDb ++ "/" ++ "org.couchdb.user:" ++ ?b2l(Name),
- {201, _} = req(put, ?ADM, Url, Body),
- ok.
-
-hash_password(Password) when is_binary(Password) ->
- couch_passwords:simple(Password, ?SALT).
-
-create_doc(Auth, DbName, Body) ->
- Url = base_url() ++ "/" ++ DbName,
- {201, _} = req(post, Auth, Url, Body),
- ok.
-
-req(Method, {_, _} = Auth, Url) ->
- Hdrs = [{basic_auth, Auth}],
- {ok, Code, RespHdrs, _} = test_request:request(Method, Url, Hdrs),
- {Code, is_sandboxed(RespHdrs)}.
-
-req(Method, {_, _} = Auth, Url, #{} = Body) ->
- req(Method, {_, _} = Auth, Url, "application/json", #{} = Body).
-
-req(Method, {_, _} = Auth, Url, ContentType, #{} = Body) ->
- Hdrs = [{basic_auth, Auth}, {"Content-Type", ContentType}],
- Body1 = jiffy:encode(Body),
- {ok, Code, RespHdrs, _} = test_request:request(Method, Url, Hdrs, Body1),
- {Code, is_sandboxed(RespHdrs)}.
-
-is_sandboxed(Headers) ->
- lists:member({"Content-Security-Policy", "sandbox"}, Headers).
diff --git a/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
deleted file mode 100644
index e3975bb6e..000000000
--- a/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
+++ /dev/null
@@ -1,203 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_db_attachment_size_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_att_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(CONTENT_MULTI_RELATED, {"Content-Type", "multipart/related;boundary=\"bound\""}).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- ok = config:set("couchdb", "max_attachment_size", "50", _Persist = false),
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- Url = "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(TmpDb),
- create_db(Url),
- add_doc(Url, "doc1"),
- Url.
-
-teardown(Url) ->
- delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist = false),
- ok = config:delete("couchdb", "max_attachment_size").
-
-attachment_size_test_() ->
- {
- "chttpd max_attachment_size tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun put_inline/1,
- fun put_simple/1,
- fun put_simple_chunked/1,
- fun put_mp_related/1,
- fun put_chunked_mp_related/1
- ]
- }
- }
- }.
-
-put_inline(Url) ->
- ?_test(begin
- Status = put_inline(Url, "doc2", 50),
- ?assert(Status =:= 201 orelse Status =:= 202),
- ?assertEqual(413, put_inline(Url, "doc3", 51))
- end).
-
-put_simple(Url) ->
- ?_test(begin
- Headers = [{"Content-Type", "app/binary"}],
- Rev1 = doc_rev(Url, "doc1"),
- Data1 = data(50),
- Status1 = put_req(Url ++ "/doc1/att2?rev=" ++ Rev1, Headers, Data1),
- ?assert(Status1 =:= 201 orelse Status1 =:= 202),
- Data2 = data(51),
- Rev2 = doc_rev(Url, "doc1"),
- Status2 = put_req(Url ++ "/doc1/att3?rev=" ++ Rev2, Headers, Data2),
- ?assertEqual(413, Status2)
- end).
-
-put_simple_chunked(Url) ->
- ?_test(begin
- Headers = [{"Content-Type", "app/binary"}],
- Rev1 = doc_rev(Url, "doc1"),
- DataFun1 = data_stream_fun(50),
- Status1 = put_req_chunked(Url ++ "/doc1/att2?rev=" ++ Rev1, Headers, DataFun1),
- ?assert(Status1 =:= 201 orelse Status1 =:= 202),
- DataFun2 = data_stream_fun(51),
- Rev2 = doc_rev(Url, "doc1"),
- Status2 = put_req_chunked(Url ++ "/doc1/att3?rev=" ++ Rev2, Headers, DataFun2),
- ?assertEqual(413, Status2)
- end).
-
-put_mp_related(Url) ->
- ?_test(begin
- Headers = [?CONTENT_MULTI_RELATED],
- Body1 = mp_body(50),
- Status1 = put_req(Url ++ "/doc2", Headers, Body1),
- ?assert(Status1 =:= 201 orelse Status1 =:= 202),
- Body2 = mp_body(51),
- Status2 = put_req(Url ++ "/doc3", Headers, Body2),
- ?assertEqual(413, Status2)
- end).
-
-put_chunked_mp_related(Url) ->
- ?_test(begin
- Headers = [?CONTENT_MULTI_RELATED],
- Body = mp_body(50),
- Status = put_req_chunked(Url ++ "/doc4", Headers, Body),
- ?assert(Status =:= 201 orelse Status =:= 202)
- end).
-
-% Helper functions
-
-create_db(Url) ->
- Status = put_req(Url, "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-add_doc(Url, DocId) ->
- Status = put_req(Url ++ "/" ++ DocId, "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-put_inline(Url, DocId, Size) ->
- Doc =
- "{\"_attachments\": {\"att1\":{"
- "\"content_type\": \"app/binary\", "
- "\"data\": \"" ++ data_b64(Size) ++
- "\""
- "}}}",
- put_req(Url ++ "/" ++ DocId, Doc).
-
-mp_body(AttSize) ->
- AttData = data(AttSize),
- SizeStr = integer_to_list(AttSize),
- string:join(
- [
- "--bound",
-
- "Content-Type: application/json",
-
- "",
-
- "{\"_id\":\"doc2\", \"_attachments\":{\"att\":"
- "{\"content_type\":\"app/binary\", \"length\":" ++ SizeStr ++
- ","
- "\"follows\":true}}}",
-
- "--bound",
-
- "Content-Disposition: attachment; filename=\"att\"",
-
- "Content-Type: app/binary",
-
- "",
-
- AttData,
-
- "--bound--"
- ],
- "\r\n"
- ).
-
-doc_rev(Url, DocId) ->
- {200, ResultProps} = get_req(Url ++ "/" ++ DocId),
- {<<"_rev">>, BinRev} = lists:keyfind(<<"_rev">>, 1, ResultProps),
- binary_to_list(BinRev).
-
-put_req(Url, Body) ->
- put_req(Url, [], Body).
-
-put_req(Url, Headers, Body) ->
- {ok, Status, _, _} = test_request:put(Url, Headers ++ [?AUTH], Body),
- Status.
-
-put_req_chunked(Url, Headers, Body) ->
- Opts = [{transfer_encoding, {chunked, 1}}],
- {ok, Status, _, _} = test_request:put(Url, Headers ++ [?AUTH], Body, Opts),
- Status.
-
-get_req(Url) ->
- {ok, Status, _, ResultBody} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]),
- {[_ | _] = ResultProps} = ?JSON_DECODE(ResultBody),
- {Status, ResultProps}.
-
-% Data streaming generator for the ibrowse client. ibrowse repeatedly calls this
-% function with State; it should return {ok, Data, NewState}, or eof when done.
-data_stream_fun(Size) ->
- Fun = fun
- (0) -> eof;
- (BytesLeft) -> {ok, <<"x">>, BytesLeft - 1}
- end,
- {Fun, Size}.
-
-data(Size) ->
- string:copies("x", Size).
-
-data_b64(Size) ->
- base64:encode_to_string(data(Size)).
diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
deleted file mode 100644
index 91a3eaf19..000000000
--- a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
+++ /dev/null
@@ -1,366 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
--module(chttpd_db_bulk_get_multipart_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 3000).
-
-setup_all() ->
- mock(config),
- mock(chttpd),
- mock(couch_epi),
- mock(couch_httpd),
- mock(couch_stats),
- mock(fabric),
- mock(mochireq).
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([
- config,
- chttpd,
- couch_epi,
- couch_httpd,
- couch_stats,
- fabric,
- mochireq
- ]),
- spawn_accumulator().
-
-teardown(Pid) ->
- ok = stop_accumulator(Pid).
-
-bulk_get_test_() ->
- {
- "/db/_bulk_get tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_require_docs_field/1,
- fun should_not_accept_specific_query_params/1,
- fun should_return_empty_results_on_no_docs/1,
- fun should_get_doc_with_all_revs/1,
- fun should_validate_doc_with_bad_id/1,
- fun should_validate_doc_with_bad_rev/1,
- fun should_validate_missing_doc/1,
- fun should_validate_bad_atts_since/1,
- fun should_include_attachments_when_atts_since_specified/1
- ]
- }
- }
- }.
-
-should_require_docs_field(_) ->
- Req = fake_request({[{}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- ?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, Db)).
-
-should_not_accept_specific_query_params(_) ->
- Req = fake_request({[{<<"docs">>, []}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- lists:map(
- fun(Param) ->
- {Param,
- ?_assertThrow({bad_request, _}, begin
- BadReq = Req#httpd{qs = [{Param, ""}]},
- chttpd_db:db_req(BadReq, Db)
- end)}
- end,
- ["rev", "open_revs", "atts_since", "w", "new_edits"]
- ).
-
-should_return_empty_results_on_no_docs(Pid) ->
- Req = fake_request({[{<<"docs">>, []}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- chttpd_db:db_req(Req, Db),
- Results = get_results_from_response(Pid),
- ?_assertEqual([], Results).
-
-should_get_doc_with_all_revs(Pid) ->
- DocId = <<"docudoc">>,
- Req = fake_request(DocId),
- Db = test_util:fake_db([{name, <<"foo">>}]),
-
- DocRevA = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-ABC">>}]}},
- DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}},
-
- mock_open_revs(all, {ok, [{ok, DocRevA}, {ok, DocRevB}]}),
- chttpd_db:db_req(Req, Db),
-
- Result = get_results_from_response(Pid),
- ?_assertEqual(DocId, couch_util:get_value(<<"_id">>, Result)).
-
-should_validate_doc_with_bad_id(Pid) ->
- DocId = <<"_docudoc">>,
-
- Req = fake_request(DocId),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- chttpd_db:db_req(Req, Db),
-
- Result = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- ?_assertMatch(
- [
- {<<"id">>, DocId},
- {<<"rev">>, null},
- {<<"error">>, <<"illegal_docid">>},
- {<<"reason">>, _}
- ],
- Result
- ).
-
-should_validate_doc_with_bad_rev(Pid) ->
- DocId = <<"docudoc">>,
- Rev = <<"revorev">>,
-
- Req = fake_request(DocId, Rev),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- chttpd_db:db_req(Req, Db),
-
- Result = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- ?_assertMatch(
- [
- {<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, _}
- ],
- Result
- ).
-
-should_validate_missing_doc(Pid) ->
- DocId = <<"docudoc">>,
- Rev = <<"1-revorev">>,
-
- Req = fake_request(DocId, Rev),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- mock_open_revs([{1, <<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, Db),
-
- Result = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- ?_assertMatch(
- [
- {<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, _}
- ],
- Result
- ).
-
-should_validate_bad_atts_since(Pid) ->
- DocId = <<"docudoc">>,
- Rev = <<"1-revorev">>,
-
- Req = fake_request(DocId, Rev, <<"badattsince">>),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- mock_open_revs([{1, <<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, Db),
-
- Result = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- ?_assertMatch(
- [
- {<<"id">>, DocId},
- {<<"rev">>, <<"badattsince">>},
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, _}
- ],
- Result
- ).
-
-should_include_attachments_when_atts_since_specified(_) ->
- DocId = <<"docudoc">>,
- Rev = <<"1-revorev">>,
-
- Req = fake_request(DocId, Rev, [<<"1-abc">>]),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- mock_open_revs([{1, <<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, Db),
-
- ?_assert(
- meck:called(
- fabric,
- open_revs,
- [
- '_',
- DocId,
- [{1, <<"revorev">>}],
- [
- {atts_since, [{1, <<"abc">>}]},
- attachments,
- {user_ctx, undefined}
- ]
- ]
- )
- ).
-
-%% helpers
-
-fake_request(Payload) when is_tuple(Payload) ->
- #httpd{
- method = 'POST',
- path_parts = [<<"db">>, <<"_bulk_get">>],
- mochi_req = mochireq,
- req_body = Payload
- };
-fake_request(DocId) when is_binary(DocId) ->
- fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}]}]}]}).
-
-fake_request(DocId, Rev) ->
- fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, {<<"rev">>, Rev}]}]}]}).
-
-fake_request(DocId, Rev, AttsSince) ->
- fake_request(
- {[
- {<<"docs">>, [
- {[
- {<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"atts_since">>, AttsSince}
- ]}
- ]}
- ]}
- ).
-
-mock_open_revs(RevsReq0, RevsResp) ->
- ok = meck:expect(
- fabric,
- open_revs,
- fun(_, _, RevsReq1, _) ->
- ?assertEqual(RevsReq0, RevsReq1),
- RevsResp
- end
- ).
-
-mock(mochireq) ->
- ok = meck:new(mochireq, [non_strict]),
- ok = meck:expect(mochireq, parse_qs, fun() -> [] end),
- ok = meck:expect(mochireq, accepts_content_type, fun
- ("multipart/mixed") -> true;
- ("multipart/related") -> true;
- (_) -> false
- end),
- ok;
-mock(couch_httpd) ->
- ok = meck:new(couch_httpd, [passthrough]),
- ok = meck:expect(couch_httpd, validate_ctype, fun(_, _) -> ok end),
- ok = meck:expect(couch_httpd, last_chunk, fun(_) -> {ok, nil} end),
- ok = meck:expect(couch_httpd, send_chunk, fun send_chunk/2),
- ok;
-mock(chttpd) ->
- ok = meck:new(chttpd, [passthrough]),
- ok = meck:expect(chttpd, start_json_response, fun(_, _) -> {ok, nil} end),
- ok = meck:expect(chttpd, start_chunked_response, fun(_, _, _) -> {ok, nil} end),
- ok = meck:expect(chttpd, end_json_response, fun(_) -> ok end),
- ok = meck:expect(chttpd, send_chunk, fun send_chunk/2),
- ok = meck:expect(chttpd, json_body_obj, fun(#httpd{req_body = Body}) -> Body end),
- ok;
-mock(couch_epi) ->
- ok = meck:new(couch_epi, [passthrough]),
- ok = meck:expect(couch_epi, any, fun(_, _, _, _, _) -> false end),
- ok;
-mock(couch_stats) ->
- ok = meck:new(couch_stats, [passthrough]),
- ok = meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
- ok = meck:expect(couch_stats, increment_counter, fun(_, _) -> ok end),
- ok = meck:expect(couch_stats, decrement_counter, fun(_) -> ok end),
- ok = meck:expect(couch_stats, decrement_counter, fun(_, _) -> ok end),
- ok = meck:expect(couch_stats, update_histogram, fun(_, _) -> ok end),
- ok = meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end),
- ok;
-mock(fabric) ->
- ok = meck:new(fabric, [passthrough]),
- ok;
-mock(config) ->
- ok = meck:new(config, [passthrough]),
- ok = meck:expect(config, get, fun(_, _, Default) -> Default end),
- ok.
-
-spawn_accumulator() ->
- Parent = self(),
- Pid = spawn(fun() -> accumulator_loop(Parent, []) end),
- erlang:put(chunks_gather, Pid),
- Pid.
-
-accumulator_loop(Parent, Acc) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref};
- {get, Ref} ->
- Parent ! {ok, Ref, Acc},
- accumulator_loop(Parent, Acc);
- {put, Ref, Chunk} ->
- Parent ! {ok, Ref},
- accumulator_loop(Parent, [Chunk | Acc])
- end.
-
-stop_accumulator(Pid) ->
- Ref = make_ref(),
- Pid ! {stop, Ref},
- receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- throw({timeout, <<"process stop timeout">>})
- end.
-
-send_chunk(_, []) ->
- {ok, nil};
-send_chunk(_Req, [H | T] = Chunk) when is_list(Chunk) ->
- send_chunk(_Req, H),
- send_chunk(_Req, T);
-send_chunk(_, Chunk) ->
- Worker = erlang:get(chunks_gather),
- Ref = make_ref(),
- Worker ! {put, Ref, Chunk},
- receive
- {ok, Ref} -> {ok, nil}
- after ?TIMEOUT ->
- throw({timeout, <<"send chunk timeout">>})
- end.
-
-get_response(Pid) ->
- Ref = make_ref(),
- Pid ! {get, Ref},
- receive
- {ok, Ref, Acc} ->
- Acc
- after ?TIMEOUT ->
- throw({timeout, <<"get response timeout">>})
- end.
-
-get_results_from_response(Pid) ->
- case get_response(Pid) of
- [] ->
- [];
- Result ->
- {Result1} = ?JSON_DECODE(lists:nth(2, Result)),
- Result1
- end.
diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
deleted file mode 100644
index 81dfe098b..000000000
--- a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
+++ /dev/null
@@ -1,372 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
--module(chttpd_db_bulk_get_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 3000).
-
-setup_all() ->
- mock(config),
- mock(chttpd),
- mock(couch_epi),
- mock(couch_httpd),
- mock(couch_stats),
- mock(fabric),
- mock(mochireq).
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- spawn_accumulator().
-
-teardown(Pid) ->
- ok = stop_accumulator(Pid).
-
-bulk_get_test_() ->
- {
- "/db/_bulk_get tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_require_docs_field/1,
- fun should_not_accept_specific_query_params/1,
- fun should_return_empty_results_on_no_docs/1,
- fun should_get_doc_with_all_revs/1,
- fun should_validate_doc_with_bad_id/1,
- fun should_validate_doc_with_bad_rev/1,
- fun should_validate_missing_doc/1,
- fun should_validate_bad_atts_since/1,
- fun should_include_attachments_when_atts_since_specified/1
- ]
- }
- }
- }.
-
-should_require_docs_field(_) ->
- Req = fake_request({[{}]}),
- ?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, nil)).
-
-should_not_accept_specific_query_params(_) ->
- Req = fake_request({[{<<"docs">>, []}]}),
- lists:map(
- fun(Param) ->
- {Param,
- ?_assertThrow({bad_request, _}, begin
- BadReq = Req#httpd{qs = [{Param, ""}]},
- chttpd_db:db_req(BadReq, nil)
- end)}
- end,
- ["rev", "open_revs", "atts_since", "w", "new_edits"]
- ).
-
-should_return_empty_results_on_no_docs(Pid) ->
- Req = fake_request({[{<<"docs">>, []}]}),
- chttpd_db:db_req(Req, nil),
- Results = get_results_from_response(Pid),
- ?_assertEqual([], Results).
-
-should_get_doc_with_all_revs(Pid) ->
- DocId = <<"docudoc">>,
- Req = fake_request(DocId),
-
- RevA = {[{<<"_id">>, DocId}, {<<"_rev">>, <<"1-ABC">>}]},
- RevB = {[{<<"_id">>, DocId}, {<<"_rev">>, <<"1-CDE">>}]},
- DocRevA = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-ABC">>}]}},
- DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}},
-
- mock_open_revs(all, {ok, [{ok, DocRevA}, {ok, DocRevB}]}),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
-
- [{Result}] = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- Docs = couch_util:get_value(<<"docs">>, Result),
- ?assertEqual(2, length(Docs)),
-
- [{DocA0}, {DocB0}] = Docs,
-
- DocA = couch_util:get_value(<<"ok">>, DocA0),
- DocB = couch_util:get_value(<<"ok">>, DocB0),
-
- ?_assertEqual([RevA, RevB], [DocA, DocB]).
-
-should_validate_doc_with_bad_id(Pid) ->
- DocId = <<"_docudoc">>,
-
- Req = fake_request(DocId),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
-
- [{Result}] = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- Docs = couch_util:get_value(<<"docs">>, Result),
- ?assertEqual(1, length(Docs)),
- [{DocResult}] = Docs,
-
- Doc = couch_util:get_value(<<"error">>, DocResult),
-
- ?_assertMatch(
- {[
- {<<"id">>, DocId},
- {<<"rev">>, null},
- {<<"error">>, <<"illegal_docid">>},
- {<<"reason">>, _}
- ]},
- Doc
- ).
-
-should_validate_doc_with_bad_rev(Pid) ->
- DocId = <<"docudoc">>,
- Rev = <<"revorev">>,
-
- Req = fake_request(DocId, Rev),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
-
- [{Result}] = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- Docs = couch_util:get_value(<<"docs">>, Result),
- ?assertEqual(1, length(Docs)),
- [{DocResult}] = Docs,
-
- Doc = couch_util:get_value(<<"error">>, DocResult),
-
- ?_assertMatch(
- {[
- {<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, _}
- ]},
- Doc
- ).
-
-should_validate_missing_doc(Pid) ->
- DocId = <<"docudoc">>,
- Rev = <<"1-revorev">>,
-
- Req = fake_request(DocId, Rev),
- mock_open_revs([{1, <<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
-
- [{Result}] = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- Docs = couch_util:get_value(<<"docs">>, Result),
- ?assertEqual(1, length(Docs)),
- [{DocResult}] = Docs,
-
- Doc = couch_util:get_value(<<"error">>, DocResult),
-
- ?_assertMatch(
- {[
- {<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, _}
- ]},
- Doc
- ).
-
-should_validate_bad_atts_since(Pid) ->
- DocId = <<"docudoc">>,
- Rev = <<"1-revorev">>,
-
- Req = fake_request(DocId, Rev, <<"badattsince">>),
- mock_open_revs([{1, <<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
-
- [{Result}] = get_results_from_response(Pid),
- ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
-
- Docs = couch_util:get_value(<<"docs">>, Result),
- ?assertEqual(1, length(Docs)),
- [{DocResult}] = Docs,
-
- Doc = couch_util:get_value(<<"error">>, DocResult),
-
- ?_assertMatch(
- {[
- {<<"id">>, DocId},
- {<<"rev">>, <<"badattsince">>},
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, _}
- ]},
- Doc
- ).
-
-should_include_attachments_when_atts_since_specified(_) ->
- DocId = <<"docudoc">>,
- Rev = <<"1-revorev">>,
-
- Req = fake_request(DocId, Rev, [<<"1-abc">>]),
- mock_open_revs([{1, <<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
-
- ?_assert(
- meck:called(
- fabric,
- open_revs,
- [
- '_',
- DocId,
- [{1, <<"revorev">>}],
- [
- {atts_since, [{1, <<"abc">>}]},
- attachments,
- {user_ctx, undefined}
- ]
- ]
- )
- ).
-
-%% helpers
-
-fake_request(Payload) when is_tuple(Payload) ->
- #httpd{
- method = 'POST',
- path_parts = [<<"db">>, <<"_bulk_get">>],
- mochi_req = mochireq,
- req_body = Payload
- };
-fake_request(DocId) when is_binary(DocId) ->
- fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}]}]}]}).
-
-fake_request(DocId, Rev) ->
- fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, {<<"rev">>, Rev}]}]}]}).
-
-fake_request(DocId, Rev, AttsSince) ->
- fake_request(
- {[
- {<<"docs">>, [
- {[
- {<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"atts_since">>, AttsSince}
- ]}
- ]}
- ]}
- ).
-
-mock_open_revs(RevsReq0, RevsResp) ->
- ok = meck:expect(
- fabric,
- open_revs,
- fun(_, _, RevsReq1, _) ->
- ?assertEqual(RevsReq0, RevsReq1),
- RevsResp
- end
- ).
-
-mock(mochireq) ->
- ok = meck:new(mochireq, [non_strict]),
- ok = meck:expect(mochireq, parse_qs, fun() -> [] end),
- ok = meck:expect(mochireq, accepts_content_type, fun(_) -> false end),
- ok;
-mock(couch_httpd) ->
- ok = meck:new(couch_httpd, [passthrough]),
- ok = meck:expect(couch_httpd, validate_ctype, fun(_, _) -> ok end),
- ok;
-mock(chttpd) ->
- ok = meck:new(chttpd, [passthrough]),
- ok = meck:expect(chttpd, start_json_response, fun(_, _) -> {ok, nil} end),
- ok = meck:expect(chttpd, end_json_response, fun(_) -> ok end),
- ok = meck:expect(chttpd, send_chunk, fun send_chunk/2),
- ok = meck:expect(chttpd, json_body_obj, fun(#httpd{req_body = Body}) -> Body end),
- ok;
-mock(couch_epi) ->
- ok = meck:new(couch_epi, [passthrough]),
- ok = meck:expect(couch_epi, any, fun(_, _, _, _, _) -> false end),
- ok;
-mock(couch_stats) ->
- ok = meck:new(couch_stats, [passthrough]),
- ok = meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
- ok = meck:expect(couch_stats, increment_counter, fun(_, _) -> ok end),
- ok = meck:expect(couch_stats, decrement_counter, fun(_) -> ok end),
- ok = meck:expect(couch_stats, decrement_counter, fun(_, _) -> ok end),
- ok = meck:expect(couch_stats, update_histogram, fun(_, _) -> ok end),
- ok = meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end),
- ok;
-mock(fabric) ->
- ok = meck:new(fabric, [passthrough]),
- ok;
-mock(config) ->
- ok = meck:new(config, [passthrough]),
- ok = meck:expect(config, get, fun(_, _, Default) -> Default end),
- ok.
-
-spawn_accumulator() ->
- Parent = self(),
- Pid = spawn(fun() -> accumulator_loop(Parent, []) end),
- erlang:put(chunks_gather, Pid),
- Pid.
-
-accumulator_loop(Parent, Acc) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref};
- {get, Ref} ->
- Parent ! {ok, Ref, Acc},
- accumulator_loop(Parent, Acc);
- {put, Ref, Chunk} ->
- Parent ! {ok, Ref},
- accumulator_loop(Parent, [Chunk | Acc])
- end.
-
-stop_accumulator(Pid) ->
- Ref = make_ref(),
- Pid ! {stop, Ref},
- receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- throw({timeout, <<"process stop timeout">>})
- end.
-
-send_chunk(_, []) ->
- {ok, nil};
-send_chunk(_Req, [H | T] = Chunk) when is_list(Chunk) ->
- send_chunk(_Req, H),
- send_chunk(_Req, T);
-send_chunk(_, Chunk) ->
- Worker = erlang:get(chunks_gather),
- Ref = make_ref(),
- Worker ! {put, Ref, Chunk},
- receive
- {ok, Ref} -> {ok, nil}
- after ?TIMEOUT ->
- throw({timeout, <<"send chunk timeout">>})
- end.
-
-get_response(Pid) ->
- Ref = make_ref(),
- Pid ! {get, Ref},
- receive
- {ok, Ref, Acc} ->
- ?JSON_DECODE(iolist_to_binary(lists:reverse(Acc)))
- after ?TIMEOUT ->
- throw({timeout, <<"get response timeout">>})
- end.
-
-get_results_from_response(Pid) ->
- {Resp} = get_response(Pid),
- couch_util:get_value(<<"results">>, Resp).
diff --git a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
deleted file mode 100644
index 01ef16f23..000000000
--- a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
+++ /dev/null
@@ -1,225 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_db_doc_size_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(CONTENT_MULTI_RELATED, {"Content-Type", "multipart/related;boundary=\"bound\""}).
--define(CONTENT_MULTI_FORM, {"Content-Type", "multipart/form-data;boundary=\"bound\""}).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- ok = config:set("couchdb", "max_document_size", "50"),
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- create_db(Url),
- Url.
-
-teardown(Url) ->
- delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist = false),
- ok = config:delete("couchdb", "max_document_size").
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- case Status of
- 201 -> ok;
- 202 -> ok;
- _ -> io:format(user, "~n HTTP Status Code: ~p~n", [Status])
- end,
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-all_test_() ->
- {
- "chttpd db max_document_size tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun post_single_doc/1,
- fun put_single_doc/1,
- fun bulk_doc/1,
- fun put_post_doc_attach_inline/1,
- fun put_multi_part_related/1,
- fun post_multi_part_form/1
- ]
- }
- }
- }.
-
-post_single_doc(Url) ->
- NewDoc =
- "{\"post_single_doc\": \"some_doc\",\n"
- " \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}",
- {ok, _, _, ResultBody} = test_request:post(
- Url,
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
- {[ErrorMsg | _]} = ?JSON_DECODE(ResultBody),
- ?_assertEqual({<<"error">>, <<"document_too_large">>}, ErrorMsg).
-
-put_single_doc(Url) ->
- NewDoc =
- "{\"post_single_doc\": \"some_doc\",\n"
- " \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}",
- {ok, _, _, ResultBody} = test_request:put(
- Url ++ "/" ++ "testid",
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
- {[ErrorMsg | _]} = ?JSON_DECODE(ResultBody),
- ?_assertEqual({<<"error">>, <<"document_too_large">>}, ErrorMsg).
-
-bulk_doc(Url) ->
- NewDoc =
- "{\"docs\": [{\"doc1\": 1}, {\"errordoc\":\n"
- " \"this_should_be_the_too_large_error_document\"}]}",
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- Expect = {[{<<"error">>, <<"document_too_large">>}, {<<"reason">>, <<>>}]},
- ?_assertEqual(Expect, ResultJson).
-
-put_post_doc_attach_inline(Url) ->
- Body1 = "{\"body\":\"This is a body.\",",
- Body2 = lists:concat([
- "{\"body\":\"This is a body it should fail",
- "because there are too many characters.\","
- ]),
- DocRest = lists:concat([
- "\"_attachments\":{\"foo.txt\":{",
- "\"content_type\":\"text/plain\",",
- "\"data\": \"VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=\"}}}"
- ]),
- Doc1 = lists:concat([Body1, DocRest]),
- Doc2 = lists:concat([Body2, DocRest]),
-
- {ok, _, _, ResultBody} = test_request:post(
- Url,
- [?CONTENT_JSON, ?AUTH],
- Doc1
- ),
- {[Msg | _]} = ?JSON_DECODE(ResultBody),
- {ok, _, _, ResultBody1} = test_request:post(
- Url,
- [?CONTENT_JSON, ?AUTH],
- Doc2
- ),
- {[Msg1 | _]} = ?JSON_DECODE(ResultBody1),
-
- {ok, _, _, ResultBody2} = test_request:put(
- Url ++ "/" ++ "accept",
- [?CONTENT_JSON, ?AUTH],
- Doc1
- ),
- {[Msg2 | _]} = ?JSON_DECODE(ResultBody2),
- {ok, _, _, ResultBody3} = test_request:put(
- Url ++ "/" ++ "fail",
- [?CONTENT_JSON, ?AUTH],
- Doc2
- ),
- {[Msg3 | _]} = ?JSON_DECODE(ResultBody3),
- [
- ?_assertEqual({<<"ok">>, true}, Msg),
- ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1),
- ?_assertEqual({<<"ok">>, true}, Msg2),
- ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg3)
- ].
-
-put_multi_part_related(Url) ->
- Body1 = "{\"body\":\"This is a body.\",",
- Body2 = lists:concat([
- "{\"body\":\"This is a body it should fail",
- "because there are too many characters.\","
- ]),
- DocBeg = "--bound\r\nContent-Type: application/json\r\n\r\n",
- DocRest = lists:concat([
- "\"_attachments\":{\"foo.txt\":{\"follows\":true,",
- "\"content_type\":\"text/plain\",\"length\":21},\"bar.txt\":",
- "{\"follows\":true,\"content_type\":\"text/plain\",",
- "\"length\":20}}}\r\n--bound\r\n\r\nthis is 21 chars long",
- "\r\n--bound\r\n\r\nthis is 20 chars lon\r\n--bound--epilogue"
- ]),
- Doc1 = lists:concat([DocBeg, Body1, DocRest]),
- Doc2 = lists:concat([DocBeg, Body2, DocRest]),
- {ok, _, _, ResultBody} = test_request:put(
- Url ++ "/" ++ "accept",
- [?CONTENT_MULTI_RELATED, ?AUTH],
- Doc1
- ),
- {[Msg | _]} = ?JSON_DECODE(ResultBody),
- {ok, _, _, ResultBody1} = test_request:put(
- Url ++ "/" ++ "faildoc",
- [?CONTENT_MULTI_RELATED, ?AUTH],
- Doc2
- ),
- {[Msg1 | _]} = ?JSON_DECODE(ResultBody1),
- [
- ?_assertEqual({<<"ok">>, true}, Msg),
- ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1)
- ].
-
-post_multi_part_form(Url) ->
- Port = mochiweb_socket_server:get(chttpd, port),
- Host = lists:concat(["http://127.0.0.1:", Port]),
- Referer = {"Referer", Host},
- Body1 = "{\"body\":\"This is a body.\"}",
- Body2 = lists:concat([
- "{\"body\":\"This is a body it should fail",
- "because there are too many characters.\"}"
- ]),
- DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n",
- DocRest = lists:concat([
- "\r\n--bound\r\nContent-Disposition:",
- "form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n",
- "Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n",
- "--bound--"
- ]),
- Doc1 = lists:concat([DocBeg, Body1, DocRest]),
- Doc2 = lists:concat([DocBeg, Body2, DocRest]),
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/" ++ "accept",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer],
- Doc1
- ),
- {[Msg | _]} = ?JSON_DECODE(ResultBody),
- {ok, _, _, ResultBody1} = test_request:post(
- Url ++ "/" ++ "fail",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer],
- Doc2
- ),
- {[Msg1 | _]} = ?JSON_DECODE(ResultBody1),
- [
- ?_assertEqual({<<"ok">>, true}, Msg),
- ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1)
- ].
diff --git a/src/chttpd/test/eunit/chttpd_db_test.erl b/src/chttpd/test/eunit/chttpd_db_test.erl
deleted file mode 100644
index c76b31581..000000000
--- a/src/chttpd/test/eunit/chttpd_db_test.erl
+++ /dev/null
@@ -1,618 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_db_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(DESTHEADER1, {"Destination", "foo%E5%95%8Abar"}).
--define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}).
--define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
--define(i2l(I), integer_to_list(I)).
-% seconds
--define(TIMEOUT, 60).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- create_db(Url),
- Url.
-
-teardown(Url) ->
- delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-create_doc(Url, Id) ->
- test_request:put(
- Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH],
- "{\"mr\": \"rockoartischocko\"}"
- ).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-all_test_() ->
- {
- "chttpd db tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_ok_true_on_bulk_update/1,
- fun should_return_201_new_edits_false_with_revs_on_bulk_update/1,
- fun should_return_400_new_edits_false_no_revs_on_bulk_update/1,
- fun should_return_ok_true_on_ensure_full_commit/1,
- fun should_return_404_for_ensure_full_commit_on_no_db/1,
- fun should_accept_live_as_an_alias_for_continuous/1,
- fun should_return_headers_after_starting_continious/1,
- fun should_return_404_for_delete_att_on_notadoc/1,
- fun should_return_409_for_del_att_without_rev/1,
- fun should_return_200_for_del_att_with_rev/1,
- fun should_return_409_for_put_att_nonexistent_rev/1,
- fun should_return_update_seq_when_set_on_all_docs/1,
- fun should_not_return_update_seq_when_unset_on_all_docs/1,
- fun should_return_correct_id_on_doc_copy/1,
- fun should_return_only_one_ok_on_doc_copy/1,
- fun should_return_400_for_bad_engine/1,
- fun should_not_change_db_proper_after_rewriting_shardmap/1,
- fun should_succeed_on_all_docs_with_queries_keys/1,
- fun should_succeed_on_all_docs_with_queries_limit_skip/1,
- fun should_succeed_on_all_docs_with_multiple_queries/1,
- fun should_succeed_on_design_docs_with_queries_keys/1,
- fun should_succeed_on_design_docs_with_queries_limit_skip/1,
- fun should_succeed_on_design_docs_with_multiple_queries/1,
- fun should_succeed_on_local_docs_with_queries_keys/1,
- fun should_succeed_on_local_docs_with_queries_limit_skip/1,
- fun should_succeed_on_local_docs_with_multiple_queries/1
- ]
- }
- }
- }.
-
-should_return_ok_true_on_bulk_update(Url) ->
- {timeout, ?TIMEOUT,
- ?_assertEqual(
- true,
- begin
- {ok, _, _, Body} = create_doc(Url, "testdoc"),
- {Json} = ?JSON_DECODE(Body),
- Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- NewDoc = "{\"docs\": [{\"_rev\": \"" ++ ?b2l(Ref) ++ "\", \"_id\": \"testdoc\"}]}",
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = lists:nth(1, ResultJson),
- couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end
- )}.
-
-should_return_201_new_edits_false_with_revs_on_bulk_update(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(
- begin
- {ok, _, _, Body} = create_doc(Url, "dochasrev"),
- {Json} = ?JSON_DECODE(Body),
- Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- NewDoc =
- "{\"docs\": [{\"_rev\": \"" ++ ?b2l(Ref) ++
- "\", \"_id\": \"dochasrev\"}], \"new_edits\": false}",
- {ok, Status, _, ResultBody} = test_request:post(
- Url ++
- "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
- ?assertEqual(201, Status),
- ?assertEqual([], ?JSON_DECODE(ResultBody))
- end
- )}.
-
-should_return_400_new_edits_false_no_revs_on_bulk_update(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(
- begin
- {ok, _, _, _} = create_doc(Url, "docnorev"),
- NewDoc =
- "{\"docs\": [{\"_id\": \"docnorev\"}], " ++
- "\"new_edits\": false}",
- {ok, Status, _, ResultBody} = test_request:post(
- Url ++
- "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
- {ResultJson} = ?JSON_DECODE(ResultBody),
- ?assertEqual(400, Status),
- ?assertEqual(
- <<"bad_request">>,
- couch_util:get_value(<<"error">>, ResultJson)
- )
- end
- )}.
-
-should_return_ok_true_on_ensure_full_commit(Url0) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Url = Url0 ++ "/_ensure_full_commit",
- {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []),
- {Json} = ?JSON_DECODE(Body),
- ?assertEqual(201, RC),
- ?assert(couch_util:get_value(<<"ok">>, Json))
- end)}.
-
-should_return_404_for_ensure_full_commit_on_no_db(Url0) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Url = Url0 ++ "-missing-db" ++ "/_ensure_full_commit",
- {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []),
- {Json} = ?JSON_DECODE(Body),
- ?assertEqual(404, RC),
- ?assertEqual(<<"not_found">>, couch_util:get_value(<<"error">>, Json))
- end)}.
-
-should_accept_live_as_an_alias_for_continuous(Url) ->
- GetLastSeq = fun(Chunks) ->
- LastSeqBin = lists:last(Chunks),
- {Result} =
- try ?JSON_DECODE(LastSeqBin) of
- Data -> Data
- catch
- _:_ ->
- % should not happen, abort
- ?assert(false)
- end,
- couch_util:get_value(<<"last_seq">>, Result, undefined)
- end,
- {timeout, ?TIMEOUT,
- ?_test(begin
- LastSeq1 = GetLastSeq(wait_non_empty_chunk(Url)),
-
- {ok, _, _, _} = create_doc(Url, "testdoc2"),
-
- LastSeq2 = GetLastSeq(wait_non_empty_chunk(Url)),
-
- ?assertNotEqual(LastSeq1, LastSeq2)
- end)}.
-
-should_return_404_for_delete_att_on_notadoc(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {ok, RC, _, RespBody} = test_request:delete(
- Url ++ "/notadoc/att.pdf",
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?assertEqual(404, RC),
- ?assertEqual(
- {[
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, <<"missing">>}
- ]},
- jiffy:decode(RespBody)
- ),
- {ok, RC1, _, _} = test_request:get(
- Url ++ "/notadoc",
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?assertEqual(404, RC1)
- end)}.
-
-should_return_409_for_del_att_without_rev(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {ok, RC, _, _} = test_request:put(
- Url ++ "/testdoc3",
- [?CONTENT_JSON, ?AUTH],
- jiffy:encode(attachment_doc())
- ),
- ?assertEqual(201, RC),
-
- {ok, RC1, _, _} = test_request:delete(
- Url ++ "/testdoc3/file.erl",
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?assertEqual(409, RC1)
- end)}.
-
-should_return_200_for_del_att_with_rev(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {ok, RC, _Headers, RespBody} = test_request:put(
- Url ++ "/testdoc4",
- [?CONTENT_JSON, ?AUTH],
- jiffy:encode(attachment_doc())
- ),
- ?assertEqual(201, RC),
-
- {ResultJson} = ?JSON_DECODE(RespBody),
- Rev = couch_util:get_value(<<"rev">>, ResultJson, undefined),
-
- {ok, RC1, _, _} = test_request:delete(
- Url ++ "/testdoc4/file.erl?rev=" ++ Rev,
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?assertEqual(200, RC1)
- end)}.
-
-should_return_409_for_put_att_nonexistent_rev(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {ok, RC, _Headers, RespBody} = test_request:put(
- Url ++ "/should_return_404/file.erl?rev=1-000",
- [?CONTENT_JSON, ?AUTH],
- jiffy:encode(attachment_doc())
- ),
- ?assertEqual(409, RC),
- ?assertMatch(
- {[
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, <<"missing_rev">>}
- ]},
- ?JSON_DECODE(RespBody)
- )
- end)}.
-
-should_return_update_seq_when_set_on_all_docs(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
- {ok, RC, _, RespBody} = test_request:get(
- Url ++ "/_all_docs/" ++
- "?update_seq=true&keys=[\"testdoc1\"]",
- [?CONTENT_JSON, ?AUTH]
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ?assertNotEqual(
- undefined,
- couch_util:get_value(<<"update_seq">>, ResultJson)
- ),
- ?assertNotEqual(
- undefined,
- couch_util:get_value(<<"offset">>, ResultJson)
- )
- end)}.
-
-should_not_return_update_seq_when_unset_on_all_docs(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
- {ok, RC, _, RespBody} = test_request:get(
- Url ++ "/_all_docs/" ++
- "?update_seq=false&keys=[\"testdoc1\"]",
- [?CONTENT_JSON, ?AUTH]
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ?assertEqual(
- undefined,
- couch_util:get_value(<<"update_seq">>, ResultJson)
- ),
- ?assertNotEqual(
- undefined,
- couch_util:get_value(<<"offset">>, ResultJson)
- )
- end)}.
-
-should_return_correct_id_on_doc_copy(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {ok, _, _, _} = create_doc(Url, "testdoc"),
- {_, _, _, ResultBody1} = test_request:copy(
- Url ++ "/testdoc/",
- [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]
- ),
- {ResultJson1} = ?JSON_DECODE(ResultBody1),
- Id1 = couch_util:get_value(<<"id">>, ResultJson1),
-
- {_, _, _, ResultBody2} = test_request:copy(
- Url ++ "/testdoc/",
- [?CONTENT_JSON, ?AUTH, ?DESTHEADER2]
- ),
- {ResultJson2} = ?JSON_DECODE(ResultBody2),
- Id2 = couch_util:get_value(<<"id">>, ResultJson2),
- [
- ?assertEqual(<<102, 111, 111, 229, 149, 138, 98, 97, 114>>, Id1),
- ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2)
- ]
- end)}.
-
-should_return_only_one_ok_on_doc_copy(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {ok, _, _, _} = create_doc(Url, "testdoc"),
- {_, _, _, ResultBody} = test_request:copy(
- Url ++ "/testdoc",
- [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]
- ),
- {ResultJson} = jiffy:decode(ResultBody),
- NumOks = length(lists:filter(fun({Key, _Value}) -> Key == <<"ok">> end, ResultJson)),
- [
- ?assertEqual(1, NumOks)
- ]
- end)}.
-
-attachment_doc() ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- {[
- {<<"_attachments">>,
- {[
- {<<"file.erl">>,
- {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"data">>, base64:encode(Data)}
- ]}}
- ]}}
- ]}.
-
-should_return_400_for_bad_engine(_) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- Url = BaseUrl ++ "?engine=cowabunga",
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assertEqual(400, Status)
- end)}.
-
-should_not_change_db_proper_after_rewriting_shardmap(_) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
-
- BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- Url = BaseUrl ++ "?partitioned=true&q=1",
- {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
-
- ShardDbName = ?l2b(config:get("mem3", "shards_db", "_dbs")),
- {ok, ShardDb} = mem3_util:ensure_exists(ShardDbName),
- {ok, #doc{body = {Props}}} = couch_db:open_doc(
- ShardDb, TmpDb, [ejson_body]
- ),
- Shards = mem3_util:build_shards(TmpDb, Props),
-
- {Prop2} = ?JSON_DECODE(?JSON_ENCODE({Props})),
- Shards2 = mem3_util:build_shards(TmpDb, Prop2),
- ?assertEqual(Shards2, Shards),
- {ok, 200, _, _} = test_request:delete(BaseUrl, [?AUTH])
- end)}.
-
-should_succeed_on_all_docs_with_queries_keys(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++ "/_all_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
-should_succeed_on_all_docs_with_queries_limit_skip(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++ "/_all_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
-should_succeed_on_all_docs_with_multiple_queries(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc =
- "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]},\n"
- " {\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++ "/_all_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson1} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
- {InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end)}.
-
-should_succeed_on_design_docs_with_queries_keys(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc =
- "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",\n"
- " \"_design/ddoc8\"]}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++
- "/_design_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
-should_succeed_on_design_docs_with_queries_limit_skip(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++
- "/_design_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
-should_succeed_on_design_docs_with_multiple_queries(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc =
- "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",\n"
- " \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++
- "/_design_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson1} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
- {InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end)}.
-
-should_succeed_on_local_docs_with_queries_keys(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc =
- "{\"queries\": [{\"keys\":\n"
- " [ \"_local/doc3\", \"_local/doc8\"]}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++ "/_local_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
-should_succeed_on_local_docs_with_queries_limit_skip(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++
- "/_local_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
-should_succeed_on_local_docs_with_multiple_queries(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc =
- "{\"queries\": [{\"keys\": [ \"_local/doc3\",\n"
- " \"_local/doc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++
- "/_local_docs/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson1} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
- {InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end)}.
-
-should_return_headers_after_starting_continious(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {ok, _, _, Bin} =
- test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]),
-
- Parts = binary:split(Bin, <<"\n">>, [global]),
- %% We should receive at least one part even when timeout=1
- ?assertNotEqual([], Parts)
- end)}.
-
-wait_non_empty_chunk(Url) ->
- test_util:wait(fun() ->
- {ok, _, _, Bin} =
- test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]),
-
- Parts = binary:split(Bin, <<"\n">>, [global]),
-
- case [P || P <- Parts, size(P) > 0] of
- [] -> wait;
- Chunks -> Chunks
- end
- end).
diff --git a/src/chttpd/test/eunit/chttpd_dbs_info_test.erl b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
deleted file mode 100644
index cb386d63f..000000000
--- a/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
+++ /dev/null
@@ -1,334 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_dbs_info_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("chttpd_test.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
-start() ->
- Ctx = chttpd_test_util:start_couch(),
- DbDir = config:get("couchdb", "database_dir"),
- Suffix = ?b2l(couch_uuids:random()),
- test_util:with_couch_server_restart(fun() ->
- config:set("couchdb", "database_dir", DbDir ++ "/" ++ Suffix, false)
- end),
- mock([fabric_util, chttpd_util]),
- Ctx.
-
-stop(Ctx) ->
- config:delete("couchdb", "database_dir", false),
- chttpd_test_util:stop_couch(Ctx).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- Suffix = ?b2l(couch_uuids:random()),
- Db1 = testdb("db1", Suffix),
- Db2 = testdb("db2", Suffix),
- create_db(base_url(Db1)),
- create_db(base_url(Db2)),
- {Suffix, Db1, Db2}.
-
-teardown({_, Db1, Db2}) ->
- meck:unload(),
- delete_db(base_url(Db1)),
- delete_db(base_url(Db2)),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-setup_with_shards_db_ddoc() ->
- {Suffix, Db1, Db2} = setup(),
- {Suffix, Db1, Db2, create_shards_db_ddoc(Suffix)}.
-
-teardown_with_shards_db_ddoc({Suffix, Db1, Db2, UrlDDoc}) ->
- ok = delete_shards_db_ddoc(UrlDDoc),
- teardown({Suffix, Db1, Db2}).
-
-dbs_info_test_() ->
- {
- "chttpd dbs info tests",
- {
- setup,
- fun start/0,
- fun stop/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF_FE(get_db_info_should_return_db_info),
- ?TDEF_FE(get_db_info_should_return_error_when_db_not_exist),
- ?TDEF_FE(get_db_info_should_return_error_when_time_out),
- ?TDEF_FE(should_return_error_for_put_dbs_info),
- ?TDEF_FE(should_return_dbs_info_for_get_dbs_info),
- ?TDEF_FE(should_return_nothing_when_db_not_exist_for_get_dbs_info),
- ?TDEF_FE(should_return_500_time_out_when_time_is_not_enough_for_get_dbs_info),
- ?TDEF_FE(should_return_db2_for_get_dbs_info_with_descending),
- ?TDEF_FE(should_return_db1_for_get_dbs_info_with_limit_1),
- ?TDEF_FE(should_return_db2_for_get_dbs_info_with_skip_1),
- ?TDEF_FE(should_return_dbs_info_with_correct_start_end_key),
- ?TDEF_FE(should_return_empty_list_with_wrong_start_end_key),
- ?TDEF_FE(should_return_dbs_info_for_single_db),
- ?TDEF_FE(should_return_dbs_info_for_multiple_dbs),
- ?TDEF_FE(should_return_error_for_exceeded_keys),
- ?TDEF_FE(should_return_error_for_missing_keys),
- ?TDEF_FE(should_return_dbs_info_for_dbs_with_mixed_state)
- ]
- }
- }
- }.
-
-skip_limit_test_() ->
- {
- "chttpd skip limit tests",
- {
- setup,
- fun start/0,
- fun stop/1,
- {
- foreach,
- fun setup_with_shards_db_ddoc/0,
- fun teardown_with_shards_db_ddoc/1,
- [
- ?TDEF_FE(t_dbs_info_when_shards_db_design_doc_exist),
- ?TDEF_FE(t_all_dbs_when_shards_db_design_doc_exist)
- ]
- }
- }
- }.
-
-get_db_info_should_return_db_info({_, Db1, _}) ->
- DbInfo = fabric:get_db_info(Db1),
- ?assertEqual(DbInfo, chttpd_util:get_db_info(Db1)).
-
-get_db_info_should_return_error_when_db_not_exist(_) ->
- ?assertEqual(
- {error, database_does_not_exist},
- chttpd_util:get_db_info("db_not_exist")
- ).
-
-get_db_info_should_return_error_when_time_out({_, Db1, _}) ->
- mock_timeout(),
- ?assertEqual({error, timeout}, chttpd_util:get_db_info(Db1)).
-
-should_return_error_for_put_dbs_info(_) ->
- {ok, Code, _, ResultBody} = test_request:put(
- dbs_info_url(), [?CONTENT_JSON, ?AUTH], ""
- ),
- {Body} = jiffy:decode(ResultBody),
- ?assertEqual(
- <<"method_not_allowed">>,
- couch_util:get_value(<<"error">>, Body)
- ),
- ?assertEqual(405, Code).
-
-should_return_dbs_info_for_get_dbs_info({Suffix, Db1, Db2}) ->
- {ok, _, _, ResultBody} = test_request:get(
- dbs_info_url(), [?CONTENT_JSON, ?AUTH]
- ),
- FilteredDbs = filter_dbs(Suffix, ResultBody),
- ?assertEqual([Db1, Db2], FilteredDbs).
-
-should_return_nothing_when_db_not_exist_for_get_dbs_info(_) ->
- mock_db_not_exist(),
- {ok, Code, _, ResultBody} = test_request:get(
- dbs_info_url(), [?CONTENT_JSON, ?AUTH]
- ),
- Info = jiffy:decode(ResultBody),
- ?assertEqual([], Info),
- ?assertEqual(200, Code).
-
-should_return_500_time_out_when_time_is_not_enough_for_get_dbs_info(_) ->
- mock_timeout(),
- {ok, Code, _, ResultBody} = test_request:get(
- dbs_info_url("buffer_response=true"), [?CONTENT_JSON, ?AUTH]
- ),
- {Body} = jiffy:decode(ResultBody),
- ?assertEqual(<<"timeout">>, couch_util:get_value(<<"error">>, Body)),
- ?assertEqual(500, Code).
-
-should_return_db2_for_get_dbs_info_with_descending({Suffix, Db1, Db2}) ->
- {ok, _, _, ResultBody} = test_request:get(
- dbs_info_url("descending=true"), [?CONTENT_JSON, ?AUTH]
- ),
- FilteredDbs = filter_dbs(Suffix, ResultBody),
- ?assertEqual([Db2, Db1], FilteredDbs).
-
-should_return_db1_for_get_dbs_info_with_limit_1({Suffix, Db1, _}) ->
- {ok, _, _, ResultBody} = test_request:get(
- dbs_info_url("limit=1"), [?CONTENT_JSON, ?AUTH]
- ),
- FilteredDbs = filter_dbs(Suffix, ResultBody),
- ?assertEqual([Db1], FilteredDbs).
-
-should_return_db2_for_get_dbs_info_with_skip_1({Suffix, _, Db2}) ->
- {ok, _, _, ResultBody} = test_request:get(
- dbs_info_url("skip=1"), [?CONTENT_JSON, ?AUTH]
- ),
- FilteredDbs = filter_dbs(Suffix, ResultBody),
- ?assertEqual([Db2], FilteredDbs).
-
-should_return_dbs_info_with_correct_start_end_key({Suffix, Db1, _}) ->
- {ok, _, _, ResultBody} = test_request:get(
- dbs_info_url("startkey=\"db1\"&endkey=\"db2\""), [?CONTENT_JSON, ?AUTH]
- ),
- FilteredDbs = filter_dbs(Suffix, ResultBody),
- ?assertEqual([Db1], FilteredDbs).
-
-should_return_empty_list_with_wrong_start_end_key(_) ->
- {ok, _, _, ResultBody} = test_request:get(
- dbs_info_url("startkey=\"db3\"&endkey=\"db4\""), [?CONTENT_JSON, ?AUTH]
- ),
- ?assertEqual([], jiffy:decode(ResultBody)).
-
-should_return_dbs_info_for_single_db({_, Db1, _}) ->
- NewDoc = "{\"keys\": [\"" ++ Db1 ++ "\"]}",
- {ok, _, _, ResultBody} = test_request:post(
- dbs_info_url(), [?CONTENT_JSON, ?AUTH], NewDoc
- ),
- BodyJson = jiffy:decode(ResultBody),
- {Db1Data} = lists:nth(1, BodyJson),
- ?assertEqual(?l2b(Db1), couch_util:get_value(<<"key">>, Db1Data)),
- ?assertNotEqual(undefined, couch_util:get_value(<<"info">>, Db1Data)).
-
-should_return_dbs_info_for_multiple_dbs({_, Db1, Db2}) ->
- NewDoc = "{\"keys\": [\"" ++ Db1 ++ "\", \"" ++ Db2 ++ "\"]}",
- {ok, _, _, ResultBody} = test_request:post(
- dbs_info_url(), [?CONTENT_JSON, ?AUTH], NewDoc
- ),
- BodyJson = jiffy:decode(ResultBody),
- {Db1Data} = lists:nth(1, BodyJson),
- {Db2Data} = lists:nth(2, BodyJson),
- ?assertEqual(?l2b(Db1), couch_util:get_value(<<"key">>, Db1Data)),
- ?assertNotEqual(undefined, couch_util:get_value(<<"info">>, Db1Data)),
- ?assertEqual(?l2b(Db2), couch_util:get_value(<<"key">>, Db2Data)),
- ?assertNotEqual(undefined, couch_util:get_value(<<"info">>, Db2Data)).
-
-should_return_error_for_exceeded_keys({_, Db1, Db2}) ->
- NewDoc = "{\"keys\": [\"" ++ Db1 ++ "\", \"" ++ Db2 ++ "\"]}",
- ok = config:set("chttpd", "max_db_number_for_dbs_info_req", "1"),
- {ok, Code, _, ResultBody} = test_request:post(
- dbs_info_url(), [?CONTENT_JSON, ?AUTH], NewDoc
- ),
- {Body} = jiffy:decode(ResultBody),
- ok = config:delete("chttpd", "max_db_number_for_dbs_info_req"),
- ?assertEqual(<<"bad_request">>, couch_util:get_value(<<"error">>, Body)),
- ?assertEqual(400, Code).
-
-should_return_error_for_missing_keys({_, Db1, Db2}) ->
- NewDoc = "{\"missingkeys\": [\"" ++ Db1 ++ "\", \"" ++ Db2 ++ "\"]}",
- {ok, Code, _, ResultBody} = test_request:post(
- dbs_info_url(), [?CONTENT_JSON, ?AUTH], NewDoc
- ),
- {Body} = jiffy:decode(ResultBody),
- ?assertEqual(<<"bad_request">>, couch_util:get_value(<<"error">>, Body)),
- ?assertEqual(400, Code).
-
-should_return_dbs_info_for_dbs_with_mixed_state({_, Db1, _}) ->
- NewDoc = "{\"keys\": [\"" ++ Db1 ++ "\", \"noexisteddb\"]}",
- {ok, _, _, ResultBody} = test_request:post(
- dbs_info_url(), [?CONTENT_JSON, ?AUTH], NewDoc
- ),
- Json = jiffy:decode(ResultBody),
- {Db1Data} = lists:nth(1, Json),
- {Db2Data} = lists:nth(2, Json),
- ?assertEqual(?l2b(Db1), couch_util:get_value(<<"key">>, Db1Data)),
- ?assertNotEqual(undefined, couch_util:get_value(<<"info">>, Db1Data)),
- ?assertEqual(<<"noexisteddb">>, couch_util:get_value(<<"key">>, Db2Data)),
- ?assertEqual(undefined, couch_util:get_value(<<"info">>, Db2Data)).
-
-t_dbs_info_when_shards_db_design_doc_exist({Suffix, _, Db2, _}) ->
- {ok, _, _, ResultBody} = test_request:get(
- dbs_info_url("limit=1&skip=1"), [?CONTENT_JSON, ?AUTH]
- ),
- FilteredDbs = filter_dbs(Suffix, ResultBody),
- ?assertEqual([Db2], FilteredDbs).
-
-t_all_dbs_when_shards_db_design_doc_exist({_, _, Db2, _}) ->
- {ok, _, _, ResultBody} = test_request:get(
- base_url("_all_dbs?limit=1&skip=1"), [?CONTENT_JSON, ?AUTH]
- ),
- ?assertEqual([?l2b(Db2)], jiffy:decode(ResultBody)).
-
-%% Utility functions
-testdb(Name, Suffix) ->
- Name ++ "-" ++ Suffix.
-
-base_url() ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- lists:concat(["http://", Addr, ":", Port, "/"]).
-
-base_url(Path) ->
- base_url() ++ Path.
-
-dbs_info_url() ->
- base_url() ++ "_dbs_info".
-
-dbs_info_url(Option) ->
- dbs_info_url() ++ "?" ++ Option.
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-mock(Modules) ->
- lists:foreach(fun(Mod) -> meck:new(Mod, [passthrough]) end, Modules).
-
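-%% Force fabric requests to time out immediately by making request_timeout/0 return zero.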
-mock_timeout() ->
- meck:expect(fabric_util, request_timeout, fun() -> 0 end).
-
-mock_db_not_exist() ->
- meck:expect(
- chttpd_util,
- get_db_info,
- fun(_) -> {error, database_does_not_exist} end
- ).
-
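-%% Create a design doc directly in the node-local shards db and return its URL (with rev) so it can be deleted in teardown.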
-create_shards_db_ddoc(Suffix) ->
- DDocId = ?l2b("_design/ddoc-" ++ Suffix),
- DDoc = #{<<"_id">> => DDocId},
- ShardsDb = "_node/_local/" ++ ?b2l(mem3_sync:shards_db()),
- {ok, Code, _, Resp} = test_request:post(
- base_url(ShardsDb), [?CONTENT_JSON, ?AUTH], jiffy:encode(DDoc)
- ),
- RespBody = jiffy:decode(Resp, [return_maps]),
- #{<<"rev">> := Rev} = RespBody,
- UrlDDoc = base_url(ShardsDb) ++ "/" ++ ?b2l(DDocId) ++ "?rev=" ++ ?b2l(Rev),
- ?assert(lists:member(Code, [200, 201])),
- UrlDDoc.
-
-delete_shards_db_ddoc(UrlDDoc) ->
- {ok, Code, _, _} = test_request:delete(UrlDDoc, [?AUTH]),
-    ?assertEqual(200, Code),
- ok.
-
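-%% Decode the dbs info response and keep only databases whose names end with the per-test suffix.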
-filter_dbs(Suffix, ResultBody) ->
- Dbs = jiffy:decode(ResultBody, [return_maps]),
- SuffixBin = ?l2b(Suffix),
- SuffixSize = size(SuffixBin),
- FilterFun =
- fun(Db) ->
- Name = maps:get(<<"key">>, Db),
- size(Name) > SuffixSize andalso
- binary:part(Name, size(Name), -SuffixSize) =:= SuffixBin
- end,
- [?b2l(maps:get(<<"key">>, Db)) || Db <- Dbs, FilterFun(Db)].
diff --git a/src/chttpd/test/eunit/chttpd_delayed_test.erl b/src/chttpd/test/eunit/chttpd_delayed_test.erl
deleted file mode 100644
index 4b0fbd55b..000000000
--- a/src/chttpd/test/eunit/chttpd_delayed_test.erl
+++ /dev/null
@@ -1,72 +0,0 @@
--module(chttpd_delayed_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_view_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(DDOC,
- "{\"_id\": \"_design/bar\", \"views\": {\"baz\":\n"
- " {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}"
-).
-
--define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
--define(i2l(I), integer_to_list(I)).
-% timeout in seconds
--define(TIMEOUT, 60).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- ok = config:set("chttpd", "buffer_response", "true", _Persist = false),
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- create_db(Url),
- Url.
-
-teardown(Url) ->
- delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-all_test_() ->
- {
- "chttpd delay tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun test_buffer_response_all_docs/1,
- fun test_buffer_response_changes/1
- ]
- }
- }
- }.
-
-test_buffer_response_all_docs(Url) ->
- assert_successful_response(Url ++ "/_all_docs").
-
-test_buffer_response_changes(Url) ->
- assert_successful_response(Url ++ "/_changes").
-
-assert_successful_response(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {ok, Code, _Headers, _Body} = test_request:get(Url, [?AUTH]),
- ?assertEqual(200, Code)
- end)}.
diff --git a/src/chttpd/test/eunit/chttpd_error_info_tests.erl b/src/chttpd/test/eunit/chttpd_error_info_tests.erl
deleted file mode 100644
index aefb3bdc5..000000000
--- a/src/chttpd/test/eunit/chttpd_error_info_tests.erl
+++ /dev/null
@@ -1,171 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_error_info_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-error_info_test() ->
- Error = <<"error">>,
- Reason = <<"reason">>,
- ArgResult = [
- {
- bad_request,
- {400, <<"bad_request">>, <<>>}
- },
- {
- {bad_request, Reason},
- {400, <<"bad_request">>, Reason}
- },
- {
- {bad_request, "error", "reason"},
- {400, Error, Reason}
- },
- {
- {query_parse_error, Reason},
- {400, <<"query_parse_error">>, Reason}
- },
- {
- database_does_not_exist,
- {404, <<"not_found">>, <<"Database does not exist.">>}
- },
- {
- not_found,
- {404, <<"not_found">>, <<"missing">>}
- },
- {
- {not_found, Reason},
- {404, <<"not_found">>, Reason}
- },
- {
- {not_acceptable, Reason},
- {406, <<"not_acceptable">>, Reason}
- },
- {
- conflict,
- {409, <<"conflict">>, <<"Document update conflict.">>}
- },
- {
- {conflict, Reason},
- %% yes, the reason is ignored
- {409, <<"conflict">>, <<"Document update conflict.">>}
- },
- {
- {forbidden, Reason},
- {403, <<"forbidden">>, Reason}
- },
- {
- {forbidden, Error, Reason},
- {403, Error, Reason}
- },
- {
- {unauthorized, Reason},
- {401, <<"unauthorized">>, Reason}
- },
- {
- file_exists,
- {412, <<"file_exists">>,
- <<"The database could not be created, the file already exists.">>}
- },
- {
- {error, {nodedown, Reason}}, {412, <<"nodedown">>, Reason}
- },
- {
- {maintenance_mode, Reason},
- {412, <<"nodedown">>, Reason}
- },
- {
- {maintenance_mode, nil, Reason},
- {412, <<"nodedown">>, Reason}
- },
- {
- {w_quorum_not_met, Reason},
- {500, <<"write_quorum_not_met">>, Reason}
- },
- {
- request_uri_too_long,
- {414, <<"too_long">>, <<"the request uri is too long">>}
- },
- {
- {bad_ctype, Reason},
- {415, <<"bad_content_type">>, Reason}
- },
- {
- requested_range_not_satisfiable,
- {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>}
- },
- {
- {error, {illegal_database_name, <<"foo">>}},
- {400, <<"illegal_database_name">>, <<
- "Name: 'foo'. Only lowercase characters (a-z), digits (0-9), and any of"
- " the characters _, $, (, ), +, -, and / are allowed."
- " Must begin with a letter."
- >>}
- },
- {
- {Error, {illegal_docid, 1}},
- {400, <<"illegal_docid">>, 1}
- },
- {
- {missing_stub, Reason},
- {412, <<"missing_stub">>, Reason}
- },
- {
- request_entity_too_large,
- {413, <<"too_large">>, <<"the request entity is too large">>}
- },
- {
- not_implemented,
- {501, <<"not_implemented">>, <<"this feature is not yet implemented">>}
- },
- {
- timeout,
- {500, <<"timeout">>, <<
- "The request could not be processed in a reasonable"
- " amount of time."
- >>}
- },
- {
- {timeout, Error},
- {500, <<"timeout">>, <<
- "The request could not be processed in a reasonable"
- " amount of time."
- >>}
- },
- {
- {Error, null},
- {500, <<"unknown_error">>, Error}
- },
- {
- {Error, Reason},
- {500, Error, Reason}
- },
- {
- {Error, nil, [{}]},
- {500, <<"unknown_error">>, Error}
- },
- {
- {Error, Reason, [{}]},
- {500, Error, Reason}
- },
- {
- Error,
- {500, <<"unknown_error">>, Error}
- }
- ],
-
- lists:foreach(
- fun({Arg, Result}) ->
- ?assertEqual(Result, chttpd:error_info(Arg))
- end,
- ArgResult
- ).
diff --git a/src/chttpd/test/eunit/chttpd_external_test.erl b/src/chttpd/test/eunit/chttpd_external_test.erl
deleted file mode 100644
index cd691fbaa..000000000
--- a/src/chttpd/test/eunit/chttpd_external_test.erl
+++ /dev/null
@@ -1,122 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_external_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup_mock() ->
- ok = meck:new([config, couch], [passthrough]),
- ok = meck:expect(couch_db, is_clustered, 1, false),
- ok = meck:expect(couch_db, get_db_info, 1, {ok, [{name, <<"fake">>}]}),
- ok = meck:expect(couch_db, name, 1, <<"fake">>),
- ok = meck:expect(couch_db, get_user_ctx, 1, #user_ctx{}),
- ok = meck:expect(couch_db, get_security, 1, []),
- ok = meck:expect(couch_uuids, new, 0, <<"4">>),
- ok = meck:expect(config, get_integer, fun(_, _, N) -> N end).
-
-teardown_mock(_) ->
- meck:unload().
-
-setup_local_httpd_req() ->
- ok = meck:new(mochiweb, [passthrough]),
- ok = meck:expect(mochiweb_socket, peername, fun(_) ->
- {ok, {{127, 0, 0, 1}, 5984}}
- end),
- ok = meck:expect(mochiweb_request, recv_body, 2, {[{<<"a">>, 42}]}),
- Headers = mochiweb_headers:make([{"host", "example.com"}]),
- MochiReq = mochiweb_request:new(nil, 'GET', "/", {1, 1}, Headers),
- #httpd{
- mochi_req = MochiReq,
- method = 'GET',
- path_parts = [<<"/">>],
- requested_path_parts = [<<"/">>],
- user_ctx = #user_ctx{}
- }.
-
-setup_remote_httpd_req() ->
- Headers = mochiweb_headers:make([{"host", "example.com"}]),
- MochiReq = mochiweb_request:new(nil, 'GET', "/", {1, 1}, Headers),
- #httpd{
- mochi_req = MochiReq,
- method = 'GET',
- path_parts = [<<"/">>],
- requested_path_parts = [<<"/">>],
- peer = "127.0.0.1",
- req_body = {[{<<"a">>, 42}]},
- user_ctx = #user_ctx{}
- }.
-
-json_req_obj_local_httpd_req_test_() ->
- {
- "chttpd external local httpd_req tests",
- {
- setup,
- fun setup_mock/0,
- fun teardown_mock/1,
- {
- setup,
- fun setup_local_httpd_req/0,
- fun should_convert_req_to_json_obj/1
- }
- }
- }.
-
-json_req_obj_remote_httpd_req_test_() ->
- {
- "chttpd external remote httpd_req tests",
- {
- setup,
- fun setup_mock/0,
- fun teardown_mock/1,
- {
- setup,
- fun setup_remote_httpd_req/0,
- fun should_convert_req_to_json_obj/1
- }
- }
- }.
-
-should_convert_req_to_json_obj(HttpdReq) ->
- Expect = expect(),
- {Result} = chttpd_external:json_req_obj(HttpdReq, <<"fake">>),
- lists:map(
- fun({K, V}) ->
- {K, ?_assertEqual(couch_util:get_value(K, Expect), V)}
- end,
- Result
- ).
-
-expect() ->
- [
- {<<"info">>, {[{name, <<"fake">>}]}},
- {<<"uuid">>, <<"4">>},
- {<<"id">>, null},
- {<<"method">>, 'GET'},
- {<<"requested_path">>, [<<"/">>]},
- {<<"path">>, [<<"/">>]},
- {<<"raw_path">>, <<"/">>},
- {<<"query">>, {[]}},
- {<<"headers">>, {[{<<"host">>, <<"example.com">>}]}},
- {<<"body">>, {[{<<"a">>, 42}]}},
- {<<"peer">>, <<"127.0.0.1">>},
- {<<"form">>, {[]}},
- {<<"cookie">>, {[]}},
- {<<"userCtx">>,
- {[
- {<<"db">>, <<"fake">>},
- {<<"name">>, null},
- {<<"roles">>, []}
- ]}},
- {<<"secObj">>, []}
- ].
diff --git a/src/chttpd/test/eunit/chttpd_handlers_tests.erl b/src/chttpd/test/eunit/chttpd_handlers_tests.erl
deleted file mode 100644
index 7cca6659d..000000000
--- a/src/chttpd/test/eunit/chttpd_handlers_tests.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_handlers_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup() ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- BaseUrl = lists:concat(["http://", Addr, ":", Port]),
- BaseUrl.
-
-teardown(_Url) ->
- ok.
-
-replicate_test_() ->
- {
- "_replicate",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_escape_dbname_on_replicate/1
- ]
- }
- }
- }.
-
-should_escape_dbname_on_replicate(Url) ->
- ?_test(
- begin
- UrlBin = ?l2b(Url),
- Request = couch_util:json_encode(
- {[
- {<<"source">>, <<UrlBin/binary, "/foo%2Fbar">>},
- {<<"target">>, <<"bar/baz">>},
- {<<"create_target">>, true}
- ]}
- ),
- {ok, 200, _, Body} = request_replicate(Url ++ "/_replicate", Request),
- JSON = couch_util:json_decode(Body),
-
- Source = json_value(JSON, [<<"source">>]),
- Target = json_value(JSON, [<<"target">>, <<"url">>]),
- ?assertEqual(<<UrlBin/binary, "/foo%2Fbar">>, Source),
- ?assertEqual(<<UrlBin/binary, "/bar%2Fbaz">>, Target)
- end
- ).
-
-json_value(JSON, Keys) ->
- couch_util:get_nested_json_value(JSON, Keys).
-
-request_replicate(Url, Body) ->
- Headers = [{"Content-Type", "application/json"}],
- Handler = {chttpd_misc, handle_replicate_req},
- request(post, Url, Headers, Body, Handler, fun(Req) ->
- chttpd:send_json(Req, 200, Req#httpd.req_body)
- end).
-
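-%% Temporarily mock M:F with MockFun, issue the request, assert the mock validated, and always unload it.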
-request(Method, Url, Headers, Body, {M, F}, MockFun) ->
- meck:new(M, [passthrough, non_strict]),
- try
- meck:expect(M, F, MockFun),
- Result = test_request:Method(Url, Headers, Body),
- ?assert(meck:validate(M)),
- Result
- catch
- Kind:Reason ->
- {Kind, Reason}
- after
- meck:unload(M)
- end.
diff --git a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
deleted file mode 100644
index 3eda08ae0..000000000
--- a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
+++ /dev/null
@@ -1,124 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_open_revs_error_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(CONTENT_MULTI_FORM, {"Content-Type", "multipart/form-data;boundary=\"bound\""}).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- mock(fabric),
- create_db(Url),
- Url.
-
-teardown(Url) ->
- delete_db(Url),
- (catch meck:unload(fabric)),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-create_doc(Url, Id) ->
- test_request:put(
- Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH],
- "{\"mr\": \"rockoartischocko\"}"
- ).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-open_revs_error_test_() ->
- {
- "open revs error tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_503_error_for_open_revs_get/1,
- fun should_return_503_error_for_open_revs_post_form/1
- ]
- }
- }
- }.
-
-should_return_503_error_for_open_revs_get(Url) ->
- {ok, _, _, Body} = create_doc(Url, "testdoc"),
- {Json} = ?JSON_DECODE(Body),
- Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- mock_open_revs({error, all_workers_died}),
- {ok, Code, _, _} = test_request:get(
- Url ++
- "/testdoc?rev=" ++ ?b2l(Ref),
- [?AUTH]
- ),
- ?_assertEqual(503, Code).
-
-should_return_503_error_for_open_revs_post_form(Url) ->
- Port = mochiweb_socket_server:get(chttpd, port),
- Host = lists:concat(["http://127.0.0.1:", Port]),
- Referer = {"Referer", Host},
- Body1 = "{\"body\":\"This is a body.\"}",
- DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n",
- DocRev = "--bound\r\nContent-Disposition: form-data; name=\"_rev\"\r\n\r\n",
- DocRest =
- "\r\n--bound\r\nContent-Disposition:"
- "form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n"
- "Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n"
- "--bound--",
- Doc1 = lists:concat([DocBeg, Body1, DocRest]),
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/" ++ "RevDoc",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer],
- Doc1
- ),
- {Json} = ?JSON_DECODE(ResultBody),
- Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- Doc2 = lists:concat([DocRev, ?b2l(Ref), DocRest]),
-
- mock_open_revs({error, all_workers_died}),
- {ok, Code, _, ResultBody1} = test_request:post(
- Url ++ "/" ++ "RevDoc",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer],
- Doc2
- ),
- {Json1} = ?JSON_DECODE(ResultBody1),
- ErrorMessage = couch_util:get_value(<<"error">>, Json1),
- [
- ?_assertEqual(503, Code),
- ?_assertEqual(<<"service unvailable">>, ErrorMessage)
- ].
-
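-%% Stub fabric:open_revs/4 to return the given response for every call.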
-mock_open_revs(RevsResp) ->
- ok = meck:expect(fabric, open_revs, fun(_, _, _, _) -> RevsResp end).
-
-mock(fabric) ->
- ok = meck:new(fabric, [passthrough]).
diff --git a/src/chttpd/test/eunit/chttpd_plugin_tests.erl b/src/chttpd/test/eunit/chttpd_plugin_tests.erl
deleted file mode 100644
index effef589a..000000000
--- a/src/chttpd/test/eunit/chttpd_plugin_tests.erl
+++ /dev/null
@@ -1,200 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_plugin_tests).
-
--export([
- before_request/1,
- after_request/2,
- handle_error/1,
- before_response/4,
- before_serve_file/5
-]).
-
-%% couch_epi_plugin behaviour
--export([
- app/0,
- providers/0,
- services/0,
- data_providers/0,
- data_subscriptions/0,
- processes/0,
- notify/3
-]).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% couch_epi_plugin behaviour
-
-app() -> test_app.
-providers() -> [{chttpd, ?MODULE}].
-services() -> [].
-data_providers() -> [].
-data_subscriptions() -> [].
-processes() -> [].
-notify(_, _, _) -> ok.
-
-setup() ->
- couch_tests:setup([
- couch_epi_dispatch:dispatch(chttpd, ?MODULE)
- ]).
-
-teardown(Ctx) ->
- couch_tests:teardown(Ctx).
-
-before_request({true, Id}) -> [{true, [{before_request, Id}]}];
-before_request({false, Id}) -> [{false, Id}];
-before_request({fail, Id}) -> throw({before_request, Id}).
-
-after_request({true, Id}, A) -> [{true, [{after_request, Id}]}, A];
-after_request({false, Id}, A) -> [{false, Id}, A];
-after_request({fail, Id}, _A) -> throw({after_request, Id}).
-
-handle_error({true, Id}) -> [{true, [{handle_error, Id}]}];
-handle_error({false, Id}) -> [{false, Id}];
-handle_error({fail, Id}) -> throw({handle_error, Id}).
-
-before_response({true, Id}, A, B, C) ->
- [{true, [{before_response, Id}]}, A, B, C];
-before_response({false, Id}, A, B, C) ->
- [{false, Id}, A, B, C];
-before_response({fail, Id}, _A, _B, _C) ->
- throw({before_response, Id}).
-
-before_serve_file({true, Id}, A, B, C, D) ->
- [{true, [{before_serve_file, Id}]}, A, B, C, D];
-before_serve_file({false, Id}, A, B, C, D) ->
- [{false, Id}, A, B, C, D];
-before_serve_file({fail, _Id}, _A, _B, _C, _D) ->
- throw(before_serve_file).
-
-callback_test_() ->
- {
- "callback tests",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- fun before_request_match/0,
- fun before_request_no_match/0,
- fun before_request_throw/0,
-
- fun after_request_match/0,
- fun after_request_no_match/0,
- fun after_request_throw/0,
-
- fun handle_error_match/0,
- fun handle_error_no_match/0,
- fun handle_error_throw/0,
-
- fun before_response_match/0,
- fun before_response_no_match/0,
- fun before_response_throw/0,
-
- fun before_serve_file_match/0,
- fun before_serve_file_no_match/0,
- fun before_serve_file_throw/0
- ]
- }
- }.
-
-before_request_match() ->
- ?assertEqual(
- {ok, {true, [{before_request, foo}]}},
- chttpd_plugin:before_request({true, foo})
- ).
-
-before_request_no_match() ->
- ?assertEqual(
- {ok, {false, foo}},
- chttpd_plugin:before_request({false, foo})
- ).
-
-before_request_throw() ->
- ?assertThrow(
- {before_request, foo},
- chttpd_plugin:before_request({fail, foo})
- ).
-
-after_request_match() ->
- ?assertEqual(
- {ok, bar},
- chttpd_plugin:after_request({true, foo}, bar)
- ).
-
-after_request_no_match() ->
- ?assertEqual(
- {ok, bar},
- chttpd_plugin:after_request({false, foo}, bar)
- ).
-
-after_request_throw() ->
- ?assertThrow(
- {after_request, foo},
- chttpd_plugin:after_request({fail, foo}, bar)
- ).
-
-handle_error_match() ->
- ?assertEqual(
- {true, [{handle_error, foo}]},
- chttpd_plugin:handle_error({true, foo})
- ).
-
-handle_error_no_match() ->
- ?assertEqual(
- {false, foo},
- chttpd_plugin:handle_error({false, foo})
- ).
-
-handle_error_throw() ->
- ?assertThrow(
- {handle_error, foo},
- chttpd_plugin:handle_error({fail, foo})
- ).
-
-before_response_match() ->
- ?assertEqual(
- {ok, {{true, [{before_response, foo}]}, 1, 2, 3}},
- chttpd_plugin:before_response({true, foo}, 1, 2, 3)
- ).
-
-before_response_no_match() ->
- ?assertEqual(
- {ok, {{false, foo}, 1, 2, 3}},
- chttpd_plugin:before_response({false, foo}, 1, 2, 3)
- ).
-
-before_response_throw() ->
- ?assertThrow(
- {before_response, foo},
- chttpd_plugin:before_response({fail, foo}, 1, 2, 3)
- ).
-
-before_serve_file_match() ->
- ?assertEqual(
- {ok, {{true, [{before_serve_file, foo}]}, 1, 2, 3, 4}},
- chttpd_plugin:before_serve_file({true, foo}, 1, 2, 3, 4)
- ).
-
-before_serve_file_no_match() ->
- ?assertEqual(
- {ok, {{false, foo}, 1, 2, 3, 4}},
- chttpd_plugin:before_serve_file({false, foo}, 1, 2, 3, 4)
- ).
-
-before_serve_file_throw() ->
- ?assertThrow(
- before_serve_file,
- chttpd_plugin:before_serve_file({fail, foo}, 1, 2, 3, 4)
- ).
diff --git a/src/chttpd/test/eunit/chttpd_prefer_header_test.erl b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
deleted file mode 100644
index 55c9f350e..000000000
--- a/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
+++ /dev/null
@@ -1,114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_prefer_header_test).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-mock_request(ExcludeHeader) ->
- Headers = mochiweb_headers:make(ExcludeHeader),
- MochiReq = mochiweb_request:new(nil, 'GET', "/", {1, 1}, Headers),
- MochiReq:cleanup(),
- #httpd{mochi_req = MochiReq}.
-
-default_headers() ->
- [
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", "application/json"},
- {"Content-Length", "100"},
- {"ETag", "\"12343\""},
- {"X-Couch-Request-ID", "7bd1adab86"},
- {"X-CouchDB-Body-Time", "0"},
- {"Vary", "Accept-Encoding"},
- {"Server", "CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"}
- ].
-
-minimal_options_headers() ->
- [
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", "application/json"},
- {"Content-Length", "100"},
- {"ETag", "\"12343\""},
- {"Vary", "Accept-Encoding"},
- {"Server", "CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"}
- ].
-
-default_no_exclude_header_test() ->
- Headers = chttpd_prefer_header:maybe_return_minimal(
- mock_request([]),
- default_headers()
- ),
- ?assertEqual(default_headers(), Headers).
-
-unsupported_exclude_header_test() ->
- Req = mock_request([{"prefer", "Wrong"}]),
- Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
- ?assertEqual(default_headers(), Headers).
-
-empty_header_test() ->
- Req = mock_request([{"prefer", ""}]),
- Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
- ?assertEqual(default_headers(), Headers).
-
-setup_all() ->
- ok = meck:new(config),
- ok = meck:expect(config, get, fun("chttpd", "prefer_minimal", _) ->
- "Cache-Control, Content-Length, Content-Type, ETag, Server, Vary"
- end),
- ok.
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([config]).
-
-teardown(_) ->
- ok.
-
-exclude_headers_test_() ->
- {
- "Test Prefer headers",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun minimal_options/1,
- fun minimal_options_check_header_case/1,
- fun minimal_options_check_header_value_case/1
- ]
- }
- }
- }.
-
-minimal_options(_) ->
- Req = mock_request([{"Prefer", "return=minimal"}]),
- Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
- ?_assertEqual(minimal_options_headers(), Headers).
-
-minimal_options_check_header_case(_) ->
- Req = mock_request([{"prefer", "return=minimal"}]),
- Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
- ?_assertEqual(minimal_options_headers(), Headers).
-
-minimal_options_check_header_value_case(_) ->
- Req = mock_request([{"prefer", "RETURN=MINIMAL"}]),
- Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
- ?_assertEqual(minimal_options_headers(), Headers).
diff --git a/src/chttpd/test/eunit/chttpd_purge_tests.erl b/src/chttpd/test/eunit/chttpd_purge_tests.erl
deleted file mode 100644
index a8e1a955d..000000000
--- a/src/chttpd/test/eunit/chttpd_purge_tests.erl
+++ /dev/null
@@ -1,491 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_purge_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- create_db(Url),
- Url.
-
-teardown(Url) ->
- delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-create_doc(Url, Id) ->
- test_request:put(
- Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH],
- "{\"mr\": \"rockoartischocko\"}"
- ).
-
-create_doc(Url, Id, Content) ->
- test_request:put(
- Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH],
- "{\"mr\": \"" ++ Content ++ "\"}"
- ).
-
-create_docs(Url, Docs) ->
- test_request:post(
- Url ++ "/_bulk_docs",
- [?CONTENT_JSON, ?AUTH],
- ?JSON_ENCODE({[{docs, Docs}]})
- ).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-purge_test_() ->
- {
- "chttpd db tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun test_empty_purge_request/1,
- fun test_ok_purge_request/1,
- fun test_ok_purge_request_with_101_docid/1,
- fun test_accepted_purge_request/1,
- fun test_partial_purge_request/1,
- fun test_mixed_purge_request/1,
- fun test_overmany_ids_or_revs_purge_request/1,
- fun test_exceed_limits_on_purge_infos/1,
- fun should_error_set_purged_docs_limit_to0/1,
- fun test_timeout_set_purged_infos_limit/1
- ]
- }
- }
- }.
-
-test_empty_purge_request(Url) ->
- ?_test(begin
- IdsRevs = "{}",
- {ok, Status, _, ResultBody} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- ?assert(Status =:= 201 orelse Status =:= 202),
- ?assertEqual(
- {[
- {<<"purge_seq">>, null},
- {<<"purged">>, {[]}}
- ]},
- ResultJson
- )
- end).
-
-test_ok_purge_request(Url) ->
- ?_test(begin
- {ok, _, _, Body} = create_doc(Url, "doc1"),
- {Json} = ?JSON_DECODE(Body),
- Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
- {ok, _, _, Body2} = create_doc(Url, "doc2"),
- {Json2} = ?JSON_DECODE(Body2),
- Rev2 = couch_util:get_value(<<"rev">>, Json2, undefined),
- {ok, _, _, Body3} = create_doc(Url, "doc3"),
- {Json3} = ?JSON_DECODE(Body3),
- Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined),
-
- IdsRevsEJson =
- {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, [Rev2]},
- {<<"doc3">>, [Rev3]}
- ]},
- IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
-
- {ok, Status, _, ResultBody} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- ?assert(Status =:= 201 orelse Status =:= 202),
- ?assertEqual(
- {[
- {<<"purge_seq">>, null},
- {<<"purged">>,
- {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, [Rev2]},
- {<<"doc3">>, [Rev3]}
- ]}}
- ]},
- ResultJson
- )
- end).
-
-test_ok_purge_request_with_101_docid(Url) ->
- ?_test(begin
- PurgedDocsNum = 101,
- Docs = lists:foldl(
- fun(I, Acc) ->
- Id = list_to_binary(integer_to_list(I)),
- Doc = {[{<<"_id">>, Id}, {value, I}]},
- [Doc | Acc]
- end,
- [],
- lists:seq(1, PurgedDocsNum)
- ),
-
- {ok, _, _, Body} = create_docs(Url, Docs),
- BodyJson = ?JSON_DECODE(Body),
-
- PurgeBody = lists:map(
- fun({DocResp}) ->
- Id = couch_util:get_value(<<"id">>, DocResp, undefined),
- Rev = couch_util:get_value(<<"rev">>, DocResp, undefined),
- {Id, [Rev]}
- end,
- BodyJson
- ),
-
- ok = config:set("purge", "max_document_id_number", "101"),
- try
- {ok, Status, _, _} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- ?JSON_ENCODE({PurgeBody})
- ),
- ?assert(Status =:= 201 orelse Status =:= 202)
- after
- ok = config:delete("purge", "max_document_id_number")
- end
- end).
-
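-%% fabric:purge_docs/3 is mocked to return 'accepted', so the endpoint should reply with a 202.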
-test_accepted_purge_request(Url) ->
- ?_test(begin
- {ok, _, _, Body} = create_doc(Url, "doc1"),
- {Json} = ?JSON_DECODE(Body),
- Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
- IdsRevsEJson =
- {[
- {<<"doc1">>, [Rev1]}
- ]},
- IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
- meck:new(fabric, [passthrough]),
- meck:expect(
- fabric,
- purge_docs,
- fun(_, _, _) ->
- {accepted, [
- {accepted, [
- {1,
- <<57, 27, 64, 134, 152, 18, 73, 243, 40, 1, 141, 214, 135, 104, 79,
- 188>>}
- ]}
- ]}
- end
- ),
- {ok, Status, _, ResultBody} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- meck:unload(fabric),
- ?assert(Status =:= 202),
- ?assertEqual(
- {[
- {<<"purge_seq">>, null},
- {<<"purged">>,
- {[
- {<<"doc1">>, [Rev1]}
- ]}}
- ]},
- ResultJson
- )
- end).
-
-test_partial_purge_request(Url) ->
- ?_test(begin
- {ok, _, _, Body} = create_doc(Url, "doc1"),
- {Json} = ?JSON_DECODE(Body),
- Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
-
- NewDoc =
- "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",\n"
- " \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},\n"
- " \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
- {ok, _, _, _} = test_request:post(
- Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
-
- IdsRevsEJson = {[{<<"doc1">>, [Rev1]}]},
- IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
- {ok, Status, _, ResultBody} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- ?assert(Status =:= 201 orelse Status =:= 202),
- ?assertEqual(
- {[
- {<<"purge_seq">>, null},
- {<<"purged">>,
- {[
- {<<"doc1">>, [Rev1]}
- ]}}
- ]},
- ResultJson
- ),
- {ok, Status2, _, ResultBody2} = test_request:get(
- Url ++
- "/doc1/",
- [?AUTH]
- ),
- {Json2} = ?JSON_DECODE(ResultBody2),
- Content = couch_util:get_value(<<"content">>, Json2, undefined),
- ?assertEqual(<<"updated">>, Content),
- ?assert(Status2 =:= 200)
- end).
-
-test_mixed_purge_request(Url) ->
- ?_test(begin
- {ok, _, _, Body} = create_doc(Url, "doc1"),
- {Json} = ?JSON_DECODE(Body),
- Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
-
- NewDoc =
- "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",\n"
- " \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},\n"
- " \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
- {ok, _, _, _} = test_request:post(
- Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
-
- {ok, _, _, _Body2} = create_doc(Url, "doc2", "content2"),
- {ok, _, _, Body3} = create_doc(Url, "doc3", "content3"),
- {Json3} = ?JSON_DECODE(Body3),
- Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined),
-
- IdsRevsEJson =
- {[
- % partial purge
- {<<"doc1">>, [Rev1]},
- % correct format, but invalid rev
- {<<"doc2">>, [Rev3, Rev1]},
- % correct format and rev
- {<<"doc3">>, [Rev3]}
- ]},
- IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
- {ok, Status, _, Body4} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
- ResultJson = ?JSON_DECODE(Body4),
- ?assert(Status =:= 201 orelse Status =:= 202),
- ?assertEqual(
- {[
- {<<"purge_seq">>, null},
- {<<"purged">>,
- {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, []},
- {<<"doc3">>, [Rev3]}
- ]}}
- ]},
- ResultJson
- ),
- {ok, Status2, _, Body5} = test_request:get(
- Url ++
- "/doc1/",
- [?AUTH]
- ),
- {Json5} = ?JSON_DECODE(Body5),
- Content = couch_util:get_value(<<"content">>, Json5, undefined),
- ?assertEqual(<<"updated">>, Content),
- ?assert(Status2 =:= 200)
- end).
-
-test_overmany_ids_or_revs_purge_request(Url) ->
- ?_test(begin
- {ok, _, _, Body} = create_doc(Url, "doc1"),
- {Json} = ?JSON_DECODE(Body),
- Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
-
- NewDoc =
- "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",\n"
- " \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},\n"
- " \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
- {ok, _, _, _} = test_request:post(
- Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH],
- NewDoc
- ),
-
- {ok, _, _, _Body2} = create_doc(Url, "doc2", "content2"),
- {ok, _, _, Body3} = create_doc(Url, "doc3", "content3"),
- {Json3} = ?JSON_DECODE(Body3),
- Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined),
-
- IdsRevsEJson =
- {[
- % partial purge
- {<<"doc1">>, [Rev1]},
- % correct format, but invalid rev
- {<<"doc2">>, [Rev3, Rev1]},
- % correct format and rev
- {<<"doc3">>, [Rev3]}
- ]},
- IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
-
- % Ids larger than expected
- config:set("purge", "max_document_id_number", "1"),
- {ok, Status, _, Body4} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
- config:delete("purge", "max_document_id_number"),
- ResultJson = ?JSON_DECODE(Body4),
- ?assertEqual(400, Status),
- ?assertMatch(
- {[
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, <<"Exceeded maximum number of documents.">>}
- ]},
- ResultJson
- ),
-
- % Revs larger than expected
- config:set("purge", "max_revisions_number", "1"),
- {ok, Status2, _, Body5} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
- config:delete("purge", "max_revisions_number"),
- ResultJson2 = ?JSON_DECODE(Body5),
- ?assertEqual(400, Status2),
- ?assertMatch(
- {[
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, <<"Exceeded maximum number of revisions.">>}
- ]},
- ResultJson2
- )
- end).
-
-test_exceed_limits_on_purge_infos(Url) ->
- ?_test(begin
- {ok, Status1, _, _} = test_request:put(
- Url ++ "/_purged_infos_limit/",
- [?CONTENT_JSON, ?AUTH],
- "2"
- ),
- ?assert(Status1 =:= 200),
-
- {ok, _, _, Body} = create_doc(Url, "doc1"),
- {Json} = ?JSON_DECODE(Body),
- Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
- {ok, _, _, Body2} = create_doc(Url, "doc2"),
- {Json2} = ?JSON_DECODE(Body2),
- Rev2 = couch_util:get_value(<<"rev">>, Json2, undefined),
- {ok, _, _, Body3} = create_doc(Url, "doc3"),
- {Json3} = ?JSON_DECODE(Body3),
- Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined),
-
- IdsRevsEJson =
- {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, [Rev2]},
- {<<"doc3">>, [Rev3]}
- ]},
- IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
-
- {ok, Status2, _, ResultBody} = test_request:post(
- Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
-
- ResultJson = ?JSON_DECODE(ResultBody),
- ?assert(Status2 =:= 201 orelse Status2 =:= 202),
- ?assertEqual(
- {[
- {<<"purge_seq">>, null},
- {<<"purged">>,
- {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, [Rev2]},
- {<<"doc3">>, [Rev3]}
- ]}}
- ]},
- ResultJson
- )
- end).
-
-should_error_set_purged_docs_limit_to0(Url) ->
- ?_test(begin
- {ok, Status, _, _} = test_request:put(
- Url ++ "/_purged_infos_limit/",
- [?CONTENT_JSON, ?AUTH],
- "0"
- ),
- ?assert(Status =:= 400)
- end).
-
-test_timeout_set_purged_infos_limit(Url) ->
- ?_test(begin
- meck:new(fabric, [passthrough]),
- meck:expect(fabric, set_purge_infos_limit, fun(_, _, _) ->
- {error, timeout}
- end),
- {ok, Status, _, ResultBody} = test_request:put(
- Url ++
- "/_purged_infos_limit/",
- [?CONTENT_JSON, ?AUTH],
- "2"
- ),
- meck:unload(fabric),
- ResultJson = ?JSON_DECODE(ResultBody),
- ?assert(Status =:= 500),
- ?assertMatch(
- {[
- {<<"error">>, <<"error">>},
- {<<"reason">>, <<"timeout">>}
- ]},
- ResultJson
- )
- end).
diff --git a/src/chttpd/test/eunit/chttpd_revs_diff_tests.erl b/src/chttpd/test/eunit/chttpd_revs_diff_tests.erl
deleted file mode 100644
index 9a9bd25b7..000000000
--- a/src/chttpd/test/eunit/chttpd_revs_diff_tests.erl
+++ /dev/null
@@ -1,238 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_revs_diff_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
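-%% Wraps a named test function so the foreach instantiator labels it and passes in the setup result.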
--define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
-
--define(USER, "chttpd_revs_diff_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(JSON, {"Content-Type", "application/json"}).
-
--define(DOC1, <<"doc1">>).
--define(DOC2, <<"doc2">>).
--define(REVA, <<"reva">>).
--define(REVB, <<"revb">>).
--define(REVC, <<"revc">>).
--define(REVD, <<"revd">>).
-
-test_docs() ->
- [
- {?DOC1, [?REVB, ?REVA]},
- {?DOC1, [?REVC, ?REVA]},
- {?DOC2, [?REVD]}
- ].
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Db = binary_to_list(?tempdb()),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/"]),
- ok = create_db(Url, Db),
- ok = create_docs(Url, Db, test_docs()),
- {Url, Db}.
-
-teardown({Url, Db}) ->
- delete_db(Url, Db),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-start_couch() ->
- test_util:start_couch([chttpd]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-chttpd_revs_diff_test_() ->
- {
- "chttpd _revs_diff tests",
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF_FE(t_empty_revs_diff),
- ?TDEF_FE(t_revs_diff_no_revs),
- ?TDEF_FE(t_revs_diff_non_existent_doc),
- ?TDEF_FE(t_revs_diff_all_revs),
- ?TDEF_FE(t_revs_diff_some_missing_some_not),
- ?TDEF_FE(t_empty_missing_revs),
- ?TDEF_FE(t_missing_revs_no_revs),
- ?TDEF_FE(t_missing_revs_non_existent_doc),
- ?TDEF_FE(t_missing_revs_all_revs),
- ?TDEF_FE(t_missing_revs_some_missing_some_not)
- ]
- }
- }
- }.
-
-t_empty_revs_diff({Top, Db}) ->
- {Code, Res} = req(post, Top ++ Db ++ "/_revs_diff", #{}),
- ?assertEqual(200, Code),
- ?assertEqual(#{}, Res).
-
-t_revs_diff_no_revs({Top, Db}) ->
- Body = #{?DOC1 => [], <<"non_existent_doc">> => []},
- {Code, Res} = req(post, Top ++ Db ++ "/_revs_diff", Body),
- ?assertEqual(200, Code),
- ?assertEqual(#{}, Res).
-
-t_revs_diff_non_existent_doc({Top, Db}) ->
- Body = #{<<"non_existent_doc">> => [<<"1-rev">>]},
- {Code, Res} = req(post, Top ++ Db ++ "/_revs_diff", Body),
- ?assertEqual(200, Code),
- ?assertEqual(
- #{
- <<"non_existent_doc">> => #{
- <<"missing">> => [<<"1-rev">>]
- }
- },
- Res
- ).
-
-t_revs_diff_all_revs({Top, Db}) ->
- Body = #{
- ?DOC1 => [<<"2-", ?REVB/binary>>, <<"2-", ?REVC/binary>>],
- ?DOC2 => [<<"1-", ?REVD/binary>>]
- },
- {Code, Res} = req(post, Top ++ Db ++ "/_revs_diff", Body),
- ?assertEqual(200, Code),
- ?assertEqual(#{}, Res).
-
-t_revs_diff_some_missing_some_not({Top, Db}) ->
- Body = #{
- ?DOC1 => [<<"2-", ?REVB/binary>>, <<"1-xyz">>, <<"2-def">>, <<"3-klm">>],
- ?DOC2 => [<<"1-pqr">>]
- },
- {Code, Res} = req(post, Top ++ Db ++ "/_revs_diff", Body),
- ?assertEqual(200, Code),
- ?assertEqual(
- #{
- ?DOC1 => #{
- <<"missing">> => [<<"1-xyz">>, <<"2-def">>, <<"3-klm">>],
- <<"possible_ancestors">> => [<<"2-revb">>, <<"2-revc">>]
- },
- ?DOC2 => #{
- <<"missing">> => [<<"1-pqr">>]
- }
- },
- Res
- ).
-
-t_empty_missing_revs({Top, Db}) ->
- {Code, Res} = req(post, Top ++ Db ++ "/_missing_revs", #{}),
- ?assertEqual(200, Code),
- ?assertEqual(#{<<"missing_revs">> => #{}}, Res).
-
-t_missing_revs_no_revs({Top, Db}) ->
- Body = #{?DOC1 => [], <<"non_existent_doc">> => []},
- {Code, Res} = req(post, Top ++ Db ++ "/_missing_revs", Body),
- ?assertEqual(200, Code),
- ?assertEqual(#{<<"missing_revs">> => #{}}, Res).
-
-t_missing_revs_non_existent_doc({Top, Db}) ->
- Body = #{<<"non_existent_doc">> => [<<"1-rev">>]},
- {Code, Res} = req(post, Top ++ Db ++ "/_missing_revs", Body),
- ?assertEqual(200, Code),
- ?assertEqual(
- #{
- <<"missing_revs">> => #{
- <<"non_existent_doc">> => [<<"1-rev">>]
- }
- },
- Res
- ).
-
-t_missing_revs_all_revs({Top, Db}) ->
- Body = #{
- ?DOC1 => [<<"2-", ?REVB/binary>>, <<"2-", ?REVC/binary>>],
- ?DOC2 => [<<"1-", ?REVD/binary>>]
- },
- {Code, Res} = req(post, Top ++ Db ++ "/_missing_revs", Body),
- ?assertEqual(200, Code),
- ?assertEqual(#{<<"missing_revs">> => #{}}, Res).
-
-t_missing_revs_some_missing_some_not({Top, Db}) ->
- Body = #{
- ?DOC1 => [<<"2-", ?REVB/binary>>, <<"1-xyz">>, <<"2-def">>, <<"3-klm">>],
- ?DOC2 => [<<"1-pqr">>]
- },
- {Code, Res} = req(post, Top ++ Db ++ "/_missing_revs", Body),
- ?assertEqual(200, Code),
- ?assertEqual(
- #{
- <<"missing_revs">> => #{
- ?DOC1 => [<<"1-xyz">>, <<"2-def">>, <<"3-klm">>],
- ?DOC2 => [<<"1-pqr">>]
- }
- },
- Res
- ).
-
-create_db(Top, Db) ->
- case req(put, Top ++ Db) of
- {201, #{}} ->
- ok;
- Error ->
- error({failed_to_create_test_db, Db, Error})
- end.
-
-delete_db(Top, Db) ->
- case req(delete, Top ++ Db) of
- {200, #{}} ->
- ok;
- Error ->
- error({failed_to_delete_test_db, Db, Error})
- end.
-
-create_docs(Top, Db, DocRevs) ->
- Docs = lists:map(
- fun({Id, Revs}) ->
- #{
- <<"_id">> => Id,
- <<"_revisions">> => #{
- <<"ids">> => Revs,
- <<"start">> => length(Revs)
- }
- }
- end,
- DocRevs
- ),
- Body = #{
- <<"docs">> => Docs,
- <<"new_edits">> => false
- },
- {Code, Res} = req(post, Top ++ Db ++ "/_bulk_docs", Body),
- ?assertEqual(201, Code),
- ?assertEqual([], Res),
- ok.
-
-req(Method, Url) ->
- Headers = [?JSON, ?AUTH],
- {ok, Code, _, Res} = test_request:request(Method, Url, Headers),
- {Code, jiffy:decode(Res, [return_maps])}.
-
-req(Method, Url, #{} = Body) ->
- req(Method, Url, jiffy:encode(Body));
-req(Method, Url, Body) ->
- Headers = [?JSON, ?AUTH],
- {ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body),
- {Code, jiffy:decode(Res, [return_maps])}.
diff --git a/src/chttpd/test/eunit/chttpd_security_tests.erl b/src/chttpd/test/eunit/chttpd_security_tests.erl
deleted file mode 100644
index d8a39ffc8..000000000
--- a/src/chttpd/test/eunit/chttpd_security_tests.erl
+++ /dev/null
@@ -1,521 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_security_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
--define(TEST_MEMBER, "test_member").
--define(TEST_MEMBER_PASS, "test_member_pass").
--define(TEST_MEMBER_AUTH, {basic_auth, {?TEST_MEMBER, ?TEST_MEMBER_PASS}}).
-
--define(TEST_ADMIN, "test_admin").
--define(TEST_ADMIN_PASS, "test_admin_pass").
--define(TEST_ADMIN_AUTH, {basic_auth, {?TEST_ADMIN, ?TEST_ADMIN_PASS}}).
-
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- Persist = false,
- ok = config:set("admins", ?USER, ?b2l(Hashed), Persist),
- UserDb = ?tempdb(),
- TmpDb = ?tempdb(),
- ok = config:set("chttpd_auth", "authentication_db", ?b2l(UserDb), Persist),
-
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- BaseUrl = lists:concat(["http://", Addr, ":", Port, "/"]),
- Url = lists:concat([BaseUrl, ?b2l(TmpDb)]),
- UsersUrl = lists:concat([BaseUrl, ?b2l(UserDb)]),
- create_db(UsersUrl),
- create_db(Url),
- create_design_doc(Url),
- create_user(UsersUrl, ?TEST_MEMBER, ?TEST_MEMBER_PASS, [<<?TEST_MEMBER>>]),
- create_user(UsersUrl, ?TEST_ADMIN, ?TEST_ADMIN_PASS, [<<?TEST_ADMIN>>]),
- set_security(Url),
- [Url, UsersUrl].
-
-teardown([Url, UsersUrl]) ->
- delete_db(Url),
- delete_db(UsersUrl),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-create_design_doc(Url) ->
- {ok, Status, _, _} = test_request:put(
- lists:concat([Url, '/_design/test']),
- [?CONTENT_JSON, ?AUTH],
- "{\"id\":\"_design/test\"}"
- ),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-set_security(Url) ->
- SecurityUrl = lists:concat([Url, "/_security"]),
- SecurityProperties = [
- {<<"admins">>, {[{<<"roles">>, [<<?TEST_ADMIN>>]}]}},
- {<<"members">>, {[{<<"roles">>, [<<?TEST_MEMBER>>]}]}}
- ],
-
- Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, _} = test_request:put(SecurityUrl, [?CONTENT_JSON, ?AUTH], Body),
- ?assert(Status =:= 200).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-create_user(UsersUrl, Name, Password, Roles) ->
- Body =
- "{\"name\":\"" ++ Name ++
- "\",\"type\":\"user\",\"roles\":" ++ erlang:binary_to_list(jiffy:encode(Roles)) ++
- ",\"password\":\"" ++ Password ++ "\"}",
-
- Url = lists:concat([
- UsersUrl, "/org.couchdb.user:", Name
- ]),
- {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], Body).
-
-all_test_() ->
- {
- "chttpd security tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_allow_admin_db_compaction/1,
- fun should_allow_valid_password_to_create_user/1,
- fun should_disallow_invalid_password_to_create_user/1,
- fun should_disallow_anonymous_db_compaction/1,
- fun should_disallow_db_member_db_compaction/1,
- fun should_allow_db_admin_db_compaction/1,
- fun should_allow_admin_view_compaction/1,
- fun should_disallow_anonymous_view_compaction/1,
- fun should_allow_admin_db_view_cleanup/1,
- fun should_disallow_anonymous_db_view_cleanup/1,
- fun should_allow_admin_purge/1,
- fun should_disallow_anonymous_purge/1,
- fun should_disallow_db_member_purge/1,
- fun should_allow_admin_purged_infos_limit/1,
- fun should_disallow_anonymous_purged_infos_limit/1,
- fun should_disallow_db_member_purged_infos_limit/1
- ]
- }
- }
- }.
-
-security_object_validate_test_() ->
- {
- "chttpd security object validate tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_ok_for_sec_obj_with_roles/1,
- fun should_return_ok_for_sec_obj_with_names/1,
- fun should_return_ok_for_sec_obj_with_roles_and_names/1,
- fun should_return_error_for_sec_obj_with_incorrect_roles_and_names/1,
- fun should_return_error_for_sec_obj_with_incorrect_roles/1,
- fun should_return_error_for_sec_obj_with_incorrect_names/1,
- fun should_return_error_for_sec_obj_in_user_db/1
- ]
- }
- }
- }.
-
-should_allow_admin_db_compaction([Url, _UsersUrl]) ->
- ?_assertEqual(
- true,
- begin
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_compact",
- [?CONTENT_JSON, ?AUTH],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end
- ).
-
-should_allow_valid_password_to_create_user([_Url, UsersUrl]) ->
- UserDoc =
- "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",\n"
- " \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}",
- {ok, _, _, ResultBody} = test_request:post(
- UsersUrl,
- [?CONTENT_JSON, ?AUTH],
- UserDoc
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ?_assertEqual(true, couch_util:get_value(<<"ok">>, InnerJson)).
-
-should_disallow_invalid_password_to_create_user([_Url, UsersUrl]) ->
- UserDoc =
- "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",\n"
- " \"type\": \"user\", \"roles\": [], \"password\": 123}",
- {ok, _, _, ResultBody} = test_request:post(
- UsersUrl,
- [?CONTENT_JSON, ?AUTH],
- UserDoc
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"forbidden">>, ErrType).
-
-should_disallow_anonymous_db_compaction([Url, _UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_compact",
- [?CONTENT_JSON],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_disallow_db_member_db_compaction([Url, _UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_compact",
- [?CONTENT_JSON, ?TEST_MEMBER_AUTH],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_allow_db_admin_db_compaction([Url, _UsersUrl]) ->
- ?_assertEqual(
- true,
- begin
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_compact",
- [?CONTENT_JSON, ?TEST_ADMIN_AUTH],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end
- ).
-
-should_allow_admin_view_compaction([Url, _UsersUrl]) ->
- ?_assertEqual(
- true,
- begin
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_compact/test",
- [?CONTENT_JSON, ?AUTH],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end
- ).
-
-should_disallow_anonymous_view_compaction([Url, _UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_compact/test",
- [?CONTENT_JSON],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_allow_admin_db_view_cleanup([Url, _UsersUrl]) ->
- ?_assertEqual(
- true,
- begin
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_view_cleanup",
- [?CONTENT_JSON, ?AUTH],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end
- ).
-
-should_disallow_anonymous_db_view_cleanup([Url, _UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_view_cleanup",
- [?CONTENT_JSON],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_allow_admin_purge([Url, _UsersUrl]) ->
- ?_assertEqual(
- null,
- begin
- IdsRevs = "{}",
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_purge",
- [?CONTENT_JSON, ?AUTH],
- IdsRevs
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- couch_util:get_value(<<"purge_seq">>, InnerJson, undefined)
- end
- ).
-
-should_disallow_anonymous_purge([Url, _UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_purge",
- [?CONTENT_JSON],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_disallow_db_member_purge([Url, _UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(
- Url ++ "/_purge",
- [?CONTENT_JSON, ?TEST_MEMBER_AUTH],
- ""
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_allow_admin_purged_infos_limit([Url, _UsersUrl]) ->
- ?_assertEqual(
- true,
- begin
- {ok, _, _, ResultBody} = test_request:put(
- Url ++
- "/_purged_infos_limit/",
- [?CONTENT_JSON, ?AUTH],
- "2"
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end
- ).
-
-should_disallow_anonymous_purged_infos_limit([Url, _UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:put(
- Url ++ "/_purged_infos_limit/",
-        [?CONTENT_JSON],
- "2"
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_disallow_db_member_purged_infos_limit([Url, _UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:put(
- Url ++ "/_purged_infos_limit/",
- [?CONTENT_JSON, ?TEST_MEMBER_AUTH],
- "2"
- ),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = ResultJson,
- ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_return_ok_for_sec_obj_with_roles([Url, _UsersUrl]) ->
- SecurityUrl = lists:concat([Url, "/_security"]),
- SecurityProperties = [
- {<<"admins">>, {[{<<"roles">>, [<<?TEST_ADMIN>>]}]}},
- {<<"members">>, {[{<<"roles">>, [<<?TEST_MEMBER>>]}]}}
- ],
-
- Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, _} = test_request:put(
- SecurityUrl,
- [?CONTENT_JSON, ?AUTH],
- Body
- ),
- ?_assertEqual(200, Status).
-
-should_return_ok_for_sec_obj_with_names([Url, _UsersUrl]) ->
- SecurityUrl = lists:concat([Url, "/_security"]),
- SecurityProperties = [
- {<<"admins">>, {[{<<"names">>, [<<?TEST_ADMIN>>]}]}},
- {<<"members">>, {[{<<"names">>, [<<?TEST_MEMBER>>]}]}}
- ],
-
- Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, _} = test_request:put(
- SecurityUrl,
- [?CONTENT_JSON, ?AUTH],
- Body
- ),
- ?_assertEqual(200, Status).
-
-should_return_ok_for_sec_obj_with_roles_and_names([Url, _UsersUrl]) ->
- SecurityUrl = lists:concat([Url, "/_security"]),
- SecurityProperties = [
- {<<"admins">>,
- {[
- {<<"names">>, [<<?TEST_ADMIN>>]},
- {<<"roles">>, [<<?TEST_ADMIN>>]}
- ]}},
- {<<"members">>,
- {[
- {<<"names">>, [<<?TEST_MEMBER>>]},
- {<<"roles">>, [<<?TEST_MEMBER>>]}
- ]}}
- ],
-
- Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, _} = test_request:put(
- SecurityUrl,
- [?CONTENT_JSON, ?AUTH],
- Body
- ),
- ?_assertEqual(200, Status).
-
-should_return_error_for_sec_obj_with_incorrect_roles_and_names(
- [Url, _UsersUrl]
-) ->
- SecurityUrl = lists:concat([Url, "/_security"]),
- SecurityProperties = [
- {<<"admins">>, {[{<<"names">>, [123]}]}},
- {<<"members">>, {[{<<"roles">>, ["foo"]}]}}
- ],
-
- Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, RespBody} = test_request:put(
- SecurityUrl,
- [?CONTENT_JSON, ?AUTH],
- Body
- ),
- ResultJson = ?JSON_DECODE(RespBody),
- [
- ?_assertEqual(500, Status),
- ?_assertEqual(
- {[
- {<<"error">>, <<"error">>},
- {<<"reason">>, <<"no_majority">>}
- ]},
- ResultJson
- )
- ].
-
-should_return_error_for_sec_obj_with_incorrect_roles([Url, _UsersUrl]) ->
- SecurityUrl = lists:concat([Url, "/_security"]),
- SecurityProperties = [
- {<<"admins">>, {[{<<"roles">>, [?TEST_ADMIN]}]}},
- {<<"members">>, {[{<<"roles">>, [<<?TEST_MEMBER>>]}]}}
- ],
-
- Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, RespBody} = test_request:put(
- SecurityUrl,
- [?CONTENT_JSON, ?AUTH],
- Body
- ),
- ResultJson = ?JSON_DECODE(RespBody),
- [
- ?_assertEqual(500, Status),
- ?_assertEqual(
- {[
- {<<"error">>, <<"error">>},
- {<<"reason">>, <<"no_majority">>}
- ]},
- ResultJson
- )
- ].
-
-should_return_error_for_sec_obj_with_incorrect_names([Url, _UsersUrl]) ->
- SecurityUrl = lists:concat([Url, "/_security"]),
- SecurityProperties = [
- {<<"admins">>, {[{<<"names">>, [<<?TEST_ADMIN>>]}]}},
- {<<"members">>, {[{<<"names">>, [?TEST_MEMBER]}]}}
- ],
-
- Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, RespBody} = test_request:put(
- SecurityUrl,
- [?CONTENT_JSON, ?AUTH],
- Body
- ),
- ResultJson = ?JSON_DECODE(RespBody),
- [
- ?_assertEqual(500, Status),
- ?_assertEqual(
- {[
- {<<"error">>, <<"error">>},
- {<<"reason">>, <<"no_majority">>}
- ]},
- ResultJson
- )
- ].
-
-should_return_error_for_sec_obj_in_user_db([_, _UsersUrl]) ->
- SecurityUrl = lists:concat([_UsersUrl, "/_security"]),
- SecurityProperties = [
- {<<"admins">>,
- {[
- {<<"names">>, [<<?TEST_ADMIN>>]},
- {<<"roles">>, [<<?TEST_ADMIN>>]}
- ]}},
- {<<"members">>,
- {[
- {<<"names">>, [<<?TEST_MEMBER>>]},
- {<<"roles">>, [<<?TEST_MEMBER>>]}
- ]}}
- ],
-
- Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, RespBody} = test_request:put(
- SecurityUrl,
- [?CONTENT_JSON, ?AUTH],
- Body
- ),
- ResultJson = ?JSON_DECODE(RespBody),
- [
- ?_assertEqual(403, Status),
- ?_assertEqual(
- {[
- {<<"error">>, <<"forbidden">>},
- {<<"reason">>, <<"You can't edit the security object of the user database.">>}
- ]},
- ResultJson
- )
- ].
diff --git a/src/chttpd/test/eunit/chttpd_session_tests.erl b/src/chttpd/test/eunit/chttpd_session_tests.erl
deleted file mode 100644
index 3d99e3b10..000000000
--- a/src/chttpd/test/eunit/chttpd_session_tests.erl
+++ /dev/null
@@ -1,81 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_session_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include("chttpd_test.hrl").
-
--define(USER, "chttpd_test_admin").
--define(PASS, "pass").
-
-setup() ->
- ok = config:delete("chttpd_auth", "authentication_db", _Persist = false),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, binary_to_list(Hashed), _Persist = false),
- root_url() ++ "/_session".
-
-cleanup(_) ->
- ok = config:delete("chttpd_auth", "authentication_db", _Persist = false),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-session_test_() ->
- {
- "Session tests",
- {
- setup,
- fun() -> test_util:start_couch([fabric, chttpd]) end,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun cleanup/1,
- [
- ?TDEF_FE(session_authentication_db_absent),
- ?TDEF_FE(session_authentication_db_present),
- ?TDEF_FE(session_authentication_gzip_request)
- ]
- }
- }
- }.
-
-session_authentication_db_absent(Url) ->
- ok = config:delete("chttpd_auth", "authentication_db", _Persist = false),
- ?assertThrow({not_found, _}, session_authentication_db(Url)).
-
-session_authentication_db_present(Url) ->
- Name = "_users",
- ok = config:set("chttpd_auth", "authentication_db", Name, false),
- ?assertEqual(list_to_binary(Name), session_authentication_db(Url)).
-
-session_authentication_gzip_request(Url) ->
- {ok, 200, _, Body} = test_request:request(
- post,
- Url,
- [{"Content-Type", "application/json"}, {"Content-Encoding", "gzip"}],
- zlib:gzip(
- jiffy:encode({[{username, list_to_binary(?USER)}, {password, list_to_binary(?PASS)}]})
- )
- ),
- {BodyJson} = jiffy:decode(Body),
- ?assert(lists:member({<<"name">>, list_to_binary(?USER)}, BodyJson)).
-
-session_authentication_db(Url) ->
- {ok, 200, _, Body} = test_request:get(Url, [{basic_auth, {?USER, ?PASS}}]),
- couch_util:get_nested_json_value(
- jiffy:decode(Body), [<<"info">>, <<"authentication_db">>]
- ).
-
-root_url() ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- lists:concat(["http://", Addr, ":", Port]).
diff --git a/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl b/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
deleted file mode 100644
index bde2c8512..000000000
--- a/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
+++ /dev/null
@@ -1,116 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_socket_buffer_size_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_socket_buffer_size_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
-setup(SocketOpts) ->
- StartCtx = start_couch_with_cfg(SocketOpts),
- Db = ?tempdb(),
- create_db(url(Db)),
- {StartCtx, Db}.
-
-teardown(_, {StartCtx, Db}) ->
- delete_db(url(Db)),
- ok = config:delete("admins", ?USER, _Persist = false),
- test_util:stop_couch(StartCtx).
-
-socket_buffer_size_test_() ->
- {
- "chttpd socket_buffer_size_test",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {"[{recbuf, undefined}]", fun default_buffer/2},
- {"[{recbuf, 1024}]", fun small_recbuf/2},
- {"[{buffer, 1024}]", fun small_buffer/2}
- ]
- }
- }.
-
-small_recbuf(_, {_, Db}) ->
- {timeout, 30,
- ?_test(begin
- Id = data(2048),
- Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
- ?assert(Response =:= 400 orelse Response =:= request_failed)
- end)}.
-
-small_buffer(_, {_, Db}) ->
- {timeout, 30,
- ?_test(begin
- Id = data(2048),
- Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
- ?assert(Response =:= 400 orelse Response =:= request_failed)
- end)}.
-
-default_buffer(_, {_, Db}) ->
- {timeout, 30,
- ?_test(begin
- Id = data(7000),
- Headers = [{"Blah", data(7000)}],
- Status = put_req(url(Db) ++ "/" ++ Id, Headers, "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202)
- end)}.
-
-% Helper functions
-
-url() ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
-url(Db) ->
- url() ++ "/" ++ ?b2l(Db).
-
-create_db(Url) ->
- Status = put_req(Url ++ "?q=1&n=1", "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-put_req(Url, Body) ->
- put_req(Url, [], Body).
-
-put_req(Url, Headers, Body) ->
- AllHeaders = Headers ++ [?CONTENT_JSON, ?AUTH],
- case test_request:put(Url, AllHeaders, Body) of
- {ok, Status, _, _} -> Status;
- {error, Error} -> Error
- end.
-
-data(Size) ->
- string:copies("x", Size).
-
-append_to_cfg_chain(Cfg) ->
- CfgDir = filename:dirname(lists:last(?CONFIG_CHAIN)),
- CfgFile = filename:join([CfgDir, "chttpd_socket_buffer_extra_cfg.ini"]),
- CfgSect = io_lib:format("[chttpd]~nserver_options = ~s~n", [Cfg]),
- ok = file:write_file(CfgFile, CfgSect),
- ?CONFIG_CHAIN ++ [CfgFile].
-
-start_couch_with_cfg(Cfg) ->
- CfgChain = append_to_cfg_chain(Cfg),
- StartCtx = test_util:start_couch(CfgChain, [chttpd]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- StartCtx.
diff --git a/src/chttpd/test/eunit/chttpd_test.hrl b/src/chttpd/test/eunit/chttpd_test.hrl
deleted file mode 100644
index 6db97ec2b..000000000
--- a/src/chttpd/test/eunit/chttpd_test.hrl
+++ /dev/null
@@ -1,35 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-% Borrowed from fabric2_test.hrl
-
-% Some test modules do not use with, so squash the unused fun compiler warning
--compile([{nowarn_unused_function, [{with, 1}]}]).
-
-
--define(TDEF(Name), {atom_to_list(Name), fun Name/1}).
--define(TDEF(Name, Timeout), {atom_to_list(Name), Timeout, fun Name/1}).
-
--define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
--define(TDEF_FE(Name, Timeout), fun(Arg) -> {atom_to_list(Name), {timeout, Timeout, ?_test(Name(Arg))}} end).
-
-
-with(Tests) ->
- fun(ArgsTuple) ->
- lists:map(fun
- ({Name, Fun}) ->
- {Name, ?_test(Fun(ArgsTuple))};
- ({Name, Timeout, Fun}) ->
- {Name, {timeout, Timeout, ?_test(Fun(ArgsTuple))}}
- end, Tests)
- end.
diff --git a/src/chttpd/test/eunit/chttpd_util_test.erl b/src/chttpd/test/eunit/chttpd_util_test.erl
deleted file mode 100644
index 4ad2b8b83..000000000
--- a/src/chttpd/test/eunit/chttpd_util_test.erl
+++ /dev/null
@@ -1,114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_util_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include("chttpd_test.hrl").
-
-setup() ->
- ok = lists:foreach(
- fun(Section) ->
- ok = config_delete_all_keys(Section)
- end,
- ["httpd", "chttpd", "couch_httpd_auth", "chttpd_auth"]
- ),
-
- ok = config:set(
- "httpd",
- "authentication_handlers",
- "{couch_httpd_auth, cookie_authentication_handler}, "
- "{couch_httpd_auth, default_authentication_handler}",
- _Persist = false
- ),
- ok = config:set("httpd", "backlog", "512", _Persist = false),
- ok = config:set("chttpd", "require_valid_user", "false", _Persist = false),
- ok = config:set("httpd", "both_exist", "get_in_httpd", _Persist = false),
- ok = config:set("chttpd", "both_exist", "get_in_chttpd", _Persist = false),
- ok = config:set("httpd", "httpd_only", "true", _Persist = false),
- ok = config:set("chttpd", "chttpd_only", "1", _Persist = false),
- ok = config:set("couch_httpd_auth", "both_exist", "cha", _Persist = false),
- ok = config:set("chttpd_auth", "both_exist", "ca", _Persist = false),
- ok = config:set("couch_httpd_auth", "cha_only", "true", _Persist = false),
- ok = config:set("chttpd_auth", "ca_only", "1", _Persist = false).
-
-teardown(_) ->
- ok = config:delete("httpd", "authentication_handlers", _Persist = false),
- ok = config:delete("httpd", "backlog", _Persist = false),
- ok = config:delete("chttpd", "require_valid_user", _Persist = false),
- ok = config:delete("httpd", "both_exist", _Persist = false),
- ok = config:delete("chttpd", "both_exist", _Persist = false),
- ok = config:delete("httpd", "httpd_only", _Persist = false),
- ok = config:delete("chttpd", "chttpd_only", _Persist = false),
- ok = config:delete("couch_httpd_auth", "both_exist", _Persist = false),
- ok = config:delete("chttpd_auth", "both_exist", _Persist = false),
- ok = config:delete("couch_httpd_auth", "cha_only", _Persist = false),
- ok = config:delete("chttpd_auth", "ca_only", _Persist = false).
-
-config_delete_all_keys(Section) ->
- lists:foreach(
- fun({Key, _Val}) ->
- ok = config:delete(Section, Key, _Persist = false)
- end,
- config:get(Section)
- ).
-
-chttpd_util_config_test_() ->
- {
- "chttpd util config tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF_FE(test_chttpd_behavior),
- ?TDEF_FE(test_with_undefined_option),
- ?TDEF_FE(test_auth_behavior),
- ?TDEF_FE(test_auth_with_undefined_option)
- ]
- }
- }
- }.
-
-test_chttpd_behavior(_) ->
- ?assertEqual("get_in_chttpd", chttpd_util:get_chttpd_config("both_exist")),
- ?assertEqual(1, chttpd_util:get_chttpd_config_integer("chttpd_only", 0)),
- ?assert(chttpd_util:get_chttpd_config_boolean("httpd_only", false)).
-
-test_with_undefined_option(_) ->
- ?assertEqual(undefined, chttpd_util:get_chttpd_config("undefined_option")),
- ?assertEqual(abc, chttpd_util:get_chttpd_config("undefined_option", abc)),
- ?assertEqual(123, chttpd_util:get_chttpd_config("undefined_option", 123)),
- ?assertEqual(0.2, chttpd_util:get_chttpd_config("undefined_option", 0.2)),
- ?assertEqual("a", chttpd_util:get_chttpd_config("undefined_option", "a")),
- ?assertEqual("", chttpd_util:get_chttpd_config("undefined_option", "")),
- ?assert(chttpd_util:get_chttpd_config("undefined_option", true)),
- ?assertNot(chttpd_util:get_chttpd_config("undefined_option", false)).
-
-test_auth_behavior(_) ->
- ?assertEqual("ca", chttpd_util:get_chttpd_auth_config("both_exist")),
- ?assertEqual(1, chttpd_util:get_chttpd_auth_config_integer("ca_only", 0)),
- ?assert(chttpd_util:get_chttpd_auth_config_boolean("cha_only", false)).
-
-test_auth_with_undefined_option(_) ->
- ?assertEqual(undefined, chttpd_util:get_chttpd_auth_config("undefine")),
- ?assertEqual(abc, chttpd_util:get_chttpd_auth_config("undefine", abc)),
- ?assertEqual(123, chttpd_util:get_chttpd_auth_config("undefine", 123)),
- ?assertEqual(0.2, chttpd_util:get_chttpd_auth_config("undefine", 0.2)),
- ?assertEqual("a", chttpd_util:get_chttpd_auth_config("undefine", "a")),
- ?assertEqual("", chttpd_util:get_chttpd_auth_config("undefine", "")),
- ?assert(chttpd_util:get_chttpd_auth_config("undefine", true)),
- ?assertNot(chttpd_util:get_chttpd_auth_config("undefine", false)).
diff --git a/src/chttpd/test/eunit/chttpd_view_test.erl b/src/chttpd/test/eunit/chttpd_view_test.erl
deleted file mode 100644
index ceff2a902..000000000
--- a/src/chttpd/test/eunit/chttpd_view_test.erl
+++ /dev/null
@@ -1,154 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_view_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_view_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(DDOC,
- "{\"_id\": \"_design/bar\", \"views\": {\"baz\":\n"
- " {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}"
-).
-
--define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
--define(i2l(I), integer_to_list(I)).
-% seconds
--define(TIMEOUT, 60).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- create_db(Url),
- Url.
-
-teardown(Url) ->
- delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-create_doc(Url, Id) ->
- test_request:put(
- Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH],
- "{\"mr\": \"rockoartischocko\"}"
- ).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-all_view_test_() ->
- {
- "chttpd view tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_succeed_on_view_with_queries_keys/1,
- fun should_succeed_on_view_with_queries_limit_skip/1,
- fun should_succeed_on_view_with_multiple_queries/1
- ]
- }
- }
- }.
-
-should_succeed_on_view_with_queries_keys(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- {ok, _, _, _} = test_request:put(
- Url ++ "/_design/bar",
- [?CONTENT_JSON, ?AUTH],
- ?DDOC
- ),
- QueryDoc =
- "{\"queries\": [{\"keys\": [ \"testdoc3\",\n"
- " \"testdoc8\"]}]}",
- {ok, _, _, RespBody} = test_request:post(
- Url ++ "/_design/bar/" ++
- "_view/baz/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
-should_succeed_on_view_with_queries_limit_skip(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- {ok, _, _, _} = test_request:put(
- Url ++ "/_design/bar",
- [?CONTENT_JSON, ?AUTH],
- ?DDOC
- ),
- QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++ "/_design/bar/" ++
- "_view/baz/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
-should_succeed_on_view_with_multiple_queries(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- {ok, _, _, _} = test_request:put(
- Url ++ "/_design/bar",
- [?CONTENT_JSON, ?AUTH],
- ?DDOC
- ),
- QueryDoc =
- "{\"queries\": [{\"keys\": [ \"testdoc3\",\n"
- " \"testdoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(
- Url ++ "/_design/bar/" ++
- "_view/baz/queries/",
- [?CONTENT_JSON, ?AUTH],
- QueryDoc
- ),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson1} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
- {InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end)}.
diff --git a/src/chttpd/test/eunit/chttpd_welcome_test.erl b/src/chttpd/test/eunit/chttpd_welcome_test.erl
deleted file mode 100644
index 7a24efb71..000000000
--- a/src/chttpd/test/eunit/chttpd_welcome_test.erl
+++ /dev/null
@@ -1,101 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_welcome_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/"]),
- Url.
-
-teardown(_Url) ->
- ok = config:delete("admins", ?USER, _Persist = false).
-
-welcome_test_() ->
- {
- "chttpd welcome endpoint tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_have_version/1,
- fun should_have_features/1,
- fun should_have_uuid/1
- ]
- }
- }
- }.
-
-should_have_uuid(Url) ->
- ?_test(begin
- {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]),
- ?assertEqual(200, Status),
- {Json} = ?JSON_DECODE(Body),
- CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined),
- Uuid = couch_util:get_value(<<"uuid">>, Json, undefined),
- Features = couch_util:get_value(<<"features">>, Json, undefined),
- Sha = couch_util:get_value(<<"git_sha">>, Json, undefined),
- ?assertNotEqual(Sha, undefined),
- ?assertEqual(<<"Welcome">>, CouchDB),
- RealUuid = couch_server:get_uuid(),
-
- ?assertEqual(RealUuid, Uuid),
- ?assert(is_list(Features))
- end).
-
-should_have_version(Url) ->
- ?_test(begin
- {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]),
- ?assertEqual(200, Status),
- {Json} = ?JSON_DECODE(Body),
- Version = couch_util:get_value(<<"version">>, Json, undefined),
- CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined),
- Features = couch_util:get_value(<<"features">>, Json, undefined),
- Sha = couch_util:get_value(<<"git_sha">>, Json, undefined),
- ?assertNotEqual(Sha, undefined),
- ?assertEqual(<<"Welcome">>, CouchDB),
- RealVersion = list_to_binary(couch_server:get_version()),
- ?assertEqual(RealVersion, Version),
- ?assert(is_list(Features))
- end).
-
-should_have_features(Url) ->
- ?_test(begin
- config:enable_feature(snek),
- {ok, 200, _, Body1} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]),
- {Json1} = ?JSON_DECODE(Body1),
- Features1 = couch_util:get_value(<<"features">>, Json1, undefined),
- ?assert(is_list(Features1)),
- ?assert(lists:member(<<"snek">>, Features1)),
- config:disable_feature(snek),
- {ok, 200, _, Body2} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]),
- {Json2} = ?JSON_DECODE(Body2),
- Features2 = couch_util:get_value(<<"features">>, Json2, undefined),
- ?assert(is_list(Features2)),
- ?assertNot(lists:member(<<"snek">>, Features2))
- end).
diff --git a/src/chttpd/test/eunit/chttpd_xframe_test.erl b/src/chttpd/test/eunit/chttpd_xframe_test.erl
deleted file mode 100644
index ee2a0996b..000000000
--- a/src/chttpd/test/eunit/chttpd_xframe_test.erl
+++ /dev/null
@@ -1,95 +0,0 @@
--module(chttpd_xframe_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- ok = meck:new(config),
- ok = meck:expect(config, get, fun(_, _, _) -> "X-Forwarded-Host" end),
- ok.
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([config]).
-
-teardown(_) ->
- ok.
-
-mock_request() ->
- Headers = mochiweb_headers:make([{"Host", "examples.com"}]),
- MochiReq = mochiweb_request:new(nil, 'GET', '/', {1, 1}, Headers),
- #httpd{mochi_req = MochiReq}.
-
-config_disabled() ->
- [
- {enabled, false}
- ].
-
-config_sameorigin() ->
- [
- {enabled, true},
- {same_origin, true}
- ].
-
-config_wildcard() ->
- [
- {enabled, true},
- {same_origin, false},
- {hosts, ["*"]}
- ].
-
-config_specific_hosts() ->
- [
- {enabled, true},
- {same_origin, false},
- {hosts, ["http://couchdb.org", "http://examples.com"]}
- ].
-
-config_different_specific_hosts() ->
- [
- {enabled, true},
- {same_origin, false},
- {hosts, ["http://couchdb.org"]}
- ].
-
-no_header_if_xframe_disabled_test() ->
- Headers = chttpd_xframe_options:header(mock_request(), [], config_disabled()),
- ?assertEqual(Headers, []).
-
-enabled_with_same_origin_test() ->
- Headers = chttpd_xframe_options:header(mock_request(), [], config_sameorigin()),
- ?assertEqual(Headers, [{"X-Frame-Options", "SAMEORIGIN"}]).
-
-xframe_host_test_() ->
- {
- "xframe host tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun allow_with_wildcard_host/1,
- fun allow_with_specific_host/1,
- fun deny_with_different_host/1
- ]
- }
- }
- }.
-
-allow_with_wildcard_host(_) ->
- Headers = chttpd_xframe_options:header(mock_request(), [], config_wildcard()),
- ?_assertEqual([{"X-Frame-Options", "ALLOW-FROM http://examples.com"}], Headers).
-
-allow_with_specific_host(_) ->
- Headers = chttpd_xframe_options:header(mock_request(), [], config_specific_hosts()),
- ?_assertEqual([{"X-Frame-Options", "ALLOW-FROM http://examples.com"}], Headers).
-
-deny_with_different_host(_) ->
-    Headers = chttpd_xframe_options:header(mock_request(), [], config_different_specific_hosts()),
- ?_assertEqual([{"X-Frame-Options", "DENY"}], Headers).
diff --git a/src/couch/.gitignore b/src/couch/.gitignore
deleted file mode 100644
index 861974adb..000000000
--- a/src/couch/.gitignore
+++ /dev/null
@@ -1,23 +0,0 @@
-*.o
-*.so
-ebin/
-
-priv/couch_js/config.h
-priv/couchjs
-priv/couchspawnkillable
-priv/*.exp
-priv/*.lib
-priv/*.dll
-priv/*.exe
-vc120.pdb
-compile_commands.json
-
-test/engines/coverage/
-test/engines/data/
-test/engines/etc/
-test/engines/log/
-
-.rebar/
-.eunit
-
-rebar.config
diff --git a/src/couch/LICENSE b/src/couch/LICENSE
deleted file mode 100644
index 3ddd66426..000000000
--- a/src/couch/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl
deleted file mode 100644
index 019c205ab..000000000
--- a/src/couch/include/couch_db.hrl
+++ /dev/null
@@ -1,244 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(LOCAL_DOC_PREFIX, "_local/").
--define(DESIGN_DOC_PREFIX0, "_design").
--define(DESIGN_DOC_PREFIX, "_design/").
--define(DEFAULT_COMPRESSION, snappy).
-
--define(MIN_STR, <<"">>).
--define(MAX_STR, <<255>>). % illegal utf string
-
--define(REWRITE_COUNT, couch_rewrite_count).
-
--define(JSON_ENCODE(V), couch_util:json_encode(V)).
--define(JSON_DECODE(V), couch_util:json_decode(V)).
-
--define(IS_OLD_RECORD(V, R), (tuple_size(V) /= tuple_size(R))).
-
--define(b2l(V), binary_to_list(V)).
--define(l2b(V), list_to_binary(V)).
--define(i2b(V), couch_util:integer_to_boolean(V)).
--define(b2i(V), couch_util:boolean_to_integer(V)).
--define(term_to_bin(T), term_to_binary(T, [{minor_version, 1}])).
--define(term_size(T), erlang:external_size(T, [{minor_version, 1}])).
-
--define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>).
-
--define(ADMIN_USER, #user_ctx{roles=[<<"_admin">>]}).
--define(ADMIN_CTX, {user_ctx, ?ADMIN_USER}).
-
--define(SYSTEM_DATABASES, [
- <<"_dbs">>,
- <<"_global_changes">>,
- <<"_metadata">>,
- <<"_nodes">>,
- <<"_replicator">>,
- <<"_users">>
-]).
-
-
--type branch() :: {Key::term(), Value::term(), Tree::term()}.
--type path() :: {Start::pos_integer(), branch()}.
--type update_type() :: replicated_changes | interactive_edit.
-
--record(rev_info, {
- rev,
- seq = 0,
- deleted = false,
- body_sp = nil % stream pointer
-}).
-
--record(doc_info, {
- id = <<"">>,
- high_seq = 0,
- revs = [] % rev_info
-}).
-
--record(size_info, {
- active = 0,
- external = 0
-}).
-
--record(full_doc_info, {
- id = <<"">>,
- update_seq = 0,
- deleted = false,
- rev_tree = [],
- sizes = #size_info{}
-}).
-
--record(httpd, {
- mochi_req,
- peer,
- method,
- requested_path_parts,
- path_parts,
- db_url_handlers,
- user_ctx,
- req_body = undefined,
- design_url_handlers,
- auth,
- default_fun,
- url_handlers,
- authentication_handlers = [],
- absolute_uri,
- auth_module,
- begin_ts,
- original_method,
- nonce,
- cors_config,
- xframe_config,
- qs
-}).
-
-
--record(doc, {
- id = <<"">>,
- revs = {0, []},
-
- % the json body object.
- body = {[]},
-
- % Atts can be a binary when a storage engine
- % returns attachment info blob in compressed
- % form.
- atts = [] :: [couch_att:att()] | binary(), % attachments
-
- deleted = false,
-
- % key/value tuple of meta information, provided when using special options:
- % couch_db:open_doc(Db, Id, Options).
- meta = []
-}).
-
-
--record(user_ctx, {
- name=null,
- roles=[],
- handler
-}).
-
--record(view_fold_helper_funs, {
- reduce_count,
- passed_end,
- start_response,
- send_row
-}).
-
--record(reduce_fold_helper_funs, {
- start_response,
- send_row
-}).
-
--record(extern_resp_args, {
- code = 200,
- stop = false,
- data = <<>>,
- ctype = "application/json",
- headers = [],
- json = nil
-}).
-
--record(index_header, {
- seq=0,
- purge_seq=0,
- id_btree_state=nil,
- view_states=nil
-}).
-
-% small value used in revision trees to indicate the revision isn't stored
--define(REV_MISSING, []).
-
--record(changes_args, {
- feed = "normal",
- dir = fwd,
- since = 0,
- limit = 1000000000000000,
- style = main_only,
- heartbeat,
- timeout,
- filter = "",
- filter_fun,
- filter_args = [],
- include_docs = false,
- doc_options = [],
- conflicts = false,
- db_open_options = []
-}).
-
--record(btree, {
- fd,
- root,
- extract_kv,
- assemble_kv,
- less,
- reduce = nil,
- compression = ?DEFAULT_COMPRESSION
-}).
-
--record(proc, {
- pid,
- lang,
- client = nil,
- ddoc_keys = [],
- prompt_fun,
- set_timeout_fun,
- stop_fun
-}).
-
--record(leaf, {
- deleted,
- ptr,
- seq,
- sizes = #size_info{},
- atts = []
-}).
-
--record (fabric_changes_acc, {
- db,
- seq,
- args,
- options,
- pending,
- epochs
-}).
-
--type doc() :: #doc{}.
--type ddoc() :: #doc{}.
--type user_ctx() :: #user_ctx{}.
--type sec_props() :: [tuple()].
--type sec_obj() :: {sec_props()}.
-
-%% Erlang/OTP 21 deprecates and 23 removes get_stacktrace(), so
-%% we have to monkey around until we can drop support < 21.
-%% h/t https://github.com/erlang/otp/pull/1783#issuecomment-386190970
-
-%% use like so:
-% try function1(Arg1)
-% catch
-% ?STACKTRACE(exit, badarg, ErrorStackTrace)
-% % do stuff with ErrorStackTrace
-% % ...
-% end,
-
-% Get the stacktrace in a way that is backwards compatible
-% OTP_VERSION is only available in OTP 21 and later, so we don’t need
-% to do any other version magic here.
--ifdef(OTP_RELEASE).
--define(STACKTRACE(ErrorType, Error, Stack),
- ErrorType:Error:Stack ->).
--else.
--define(STACKTRACE(ErrorType, Error, Stack),
- ErrorType:Error ->
- Stack = erlang:get_stacktrace(),).
--endif.
diff --git a/src/couch/include/couch_eunit.hrl b/src/couch/include/couch_eunit.hrl
deleted file mode 100644
index 188524893..000000000
--- a/src/couch/include/couch_eunit.hrl
+++ /dev/null
@@ -1,77 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include_lib("eunit/include/eunit.hrl").
-
--define(BUILDDIR,
- fun() ->
- case os:getenv("BUILDDIR") of
- false ->
- throw("BUILDDIR environment variable must be set");
- Dir ->
- Dir
- end
- end).
--define(CONFIG_DEFAULT,
- filename:join([?BUILDDIR(), "tmp", "etc", "default_eunit.ini"])).
--define(CONFIG_CHAIN, [
- ?CONFIG_DEFAULT,
- filename:join([?BUILDDIR(), "tmp", "etc", "local_eunit.ini"]),
- filename:join([?BUILDDIR(), "tmp", "etc", "eunit.ini"])]).
--define(FIXTURESDIR,
- filename:join([?BUILDDIR(), "src", "couch", "test", "eunit", "fixtures"])).
--define(TEMPDIR,
- filename:join([?BUILDDIR(), "tmp", "tmp_data"])).
-
--define(APPDIR, filename:dirname(element(2, file:get_cwd()))).
-%% Account for the fact that source files are in src/<app>/.eunit/<module>.erl
-%% when run from eunit
--define(ABS_PATH(File), %% src/<app>/.eunit/<module>.erl
- filename:join([?APPDIR, File])).
-
--define(tempfile,
- fun() ->
- Suffix = couch_uuids:random(),
- FileName = io_lib:format("~p-~s", [node(), Suffix]),
- filename:join([?TEMPDIR, FileName])
- end).
--define(tempdb,
- fun() ->
- Suffix = couch_uuids:random(),
- iolist_to_binary(["eunit-test-db-", Suffix])
- end).
--define(tempshard,
- fun() ->
- Suffix = couch_uuids:random(),
- iolist_to_binary(["shards/80000000-ffffffff/eunit-test-db-", Suffix])
- end).
--define(docid,
- fun() ->
- integer_to_list(couch_util:unique_monotonic_integer())
- end).
-
-%% Like assertEqual, but using == instead of =:=
--ifndef(assertEquiv).
--define(assertEquiv(Expect, Expr),
- ((fun (__X) ->
- case (Expr) of
- __V when __V == __X -> ok;
- __Y -> erlang:error({assertEquiv_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {expression, (??Expr)},
- {expected, __X},
- {value, __Y}]})
- end
- end)(Expect))).
--endif.
--define(_assertEquiv(Expect, Expr), ?_test(?assertEquiv(Expect, Expr))).
diff --git a/src/couch/include/couch_eunit_proper.hrl b/src/couch/include/couch_eunit_proper.hrl
deleted file mode 100644
index dcf07701a..000000000
--- a/src/couch/include/couch_eunit_proper.hrl
+++ /dev/null
@@ -1,33 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include_lib("proper/include/proper.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(EUNIT_QUICKCHECK(QuickcheckTimeout, NumTests),
- [
- {
- atom_to_list(F),
- {timeout, QuickcheckTimeout,
- ?_assert(proper:quickcheck(?MODULE:F(), [
- {to_file, user},
- {start_size, 2},
- {numtests, NumTests},
- long_result
- ]))}
- }
- || {F, 0} <- ?MODULE:module_info(exports), F > 'prop_', F < 'prop`'
- ]).
-
--define(EUNIT_QUICKCHECK(QuickcheckTimeout),
- ?EUNIT_QUICKCHECK(QuickcheckTimeout, 100)
- ).
diff --git a/src/couch/include/couch_js_functions.hrl b/src/couch/include/couch_js_functions.hrl
deleted file mode 100644
index 994382b8b..000000000
--- a/src/couch/include/couch_js_functions.hrl
+++ /dev/null
@@ -1,163 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(AUTH_DB_DOC_VALIDATE_FUNCTION, <<"
- function(newDoc, oldDoc, userCtx, secObj) {
- if (newDoc._deleted === true) {
- // allow deletes by admins and matching users
- // without checking the other fields
- if ((userCtx.roles.indexOf('_admin') !== -1) ||
- (userCtx.name == oldDoc.name)) {
- return;
- } else {
- throw({forbidden: 'Only admins may delete other user docs.'});
- }
- }
-
- if (newDoc.type !== 'user') {
- throw({forbidden : 'doc.type must be user'});
- } // we only allow user docs for now
-
- if (!newDoc.name) {
- throw({forbidden: 'doc.name is required'});
- }
-
- if (!newDoc.roles) {
- throw({forbidden: 'doc.roles must exist'});
- }
-
- if (!isArray(newDoc.roles)) {
- throw({forbidden: 'doc.roles must be an array'});
- }
-
- for (var idx = 0; idx < newDoc.roles.length; idx++) {
- if (typeof newDoc.roles[idx] !== 'string') {
- throw({forbidden: 'doc.roles can only contain strings'});
- }
- }
-
- if (newDoc._id !== ('org.couchdb.user:' + newDoc.name)) {
- throw({
- forbidden: 'Doc ID must be of the form org.couchdb.user:name'
- });
- }
-
- if (oldDoc) { // validate all updates
- if (oldDoc.name !== newDoc.name) {
- throw({forbidden: 'Usernames can not be changed.'});
- }
- }
-
- if (newDoc.password_sha && !newDoc.salt) {
- throw({
- forbidden: 'Users with password_sha must have a salt.' +
- 'See /_utils/script/couch.js for example code.'
- });
- }
-
- var available_schemes = [\"simple\", \"pbkdf2\", \"bcrypt\"];
- if (newDoc.password_scheme
- && available_schemes.indexOf(newDoc.password_scheme) == -1) {
- throw({
- forbidden: 'Password scheme `' + newDoc.password_scheme
- + '` not supported.'
- });
- }
-
- if (newDoc.password_scheme === \"pbkdf2\") {
- if (typeof(newDoc.iterations) !== \"number\") {
- throw({forbidden: \"iterations must be a number.\"});
- }
- if (typeof(newDoc.derived_key) !== \"string\") {
- throw({forbidden: \"derived_key must be a string.\"});
- }
- }
-
- var is_server_or_database_admin = function(userCtx, secObj) {
- // see if the user is a server admin
- if(userCtx.roles.indexOf('_admin') !== -1) {
- return true; // a server admin
- }
-
- // see if the user is a database admin specified by name
- if(secObj && secObj.admins && secObj.admins.names) {
- if(secObj.admins.names.indexOf(userCtx.name) !== -1) {
- return true; // database admin
- }
- }
-
- // see if the user is a database admin specified by role
- if(secObj && secObj.admins && secObj.admins.roles) {
- var db_roles = secObj.admins.roles;
- for(var idx = 0; idx < userCtx.roles.length; idx++) {
- var user_role = userCtx.roles[idx];
- if(db_roles.indexOf(user_role) !== -1) {
- return true; // role matches!
- }
- }
- }
-
- return false; // default to no admin
- }
-
- if (!is_server_or_database_admin(userCtx, secObj)) {
- if (oldDoc) { // validate non-admin updates
- if (userCtx.name !== newDoc.name) {
- throw({
- forbidden: 'You may only update your own user document.'
- });
- }
- // validate role updates
- var oldRoles = (oldDoc.roles || []).sort();
- var newRoles = newDoc.roles.sort();
-
- if (oldRoles.length !== newRoles.length) {
- throw({forbidden: 'Only _admin may edit roles'});
- }
-
- for (var i = 0; i < oldRoles.length; i++) {
- if (oldRoles[i] !== newRoles[i]) {
- throw({forbidden: 'Only _admin may edit roles'});
- }
- }
- } else if (newDoc.roles.length > 0) {
- throw({forbidden: 'Only _admin may set roles'});
- }
- }
-
- // no system roles in users db
- for (var i = 0; i < newDoc.roles.length; i++) {
- if (newDoc.roles[i] !== '_metrics') {
- if (newDoc.roles[i][0] === '_') {
- throw({
- forbidden:
- 'No system roles (starting with underscore) in users db.'
- });
- }
- }
- }
-
- // no system names as names
- if (newDoc.name[0] === '_') {
- throw({forbidden: 'Username may not start with underscore.'});
- }
-
- var badUserNameChars = [':'];
-
- for (var i = 0; i < badUserNameChars.length; i++) {
- if (newDoc.name.indexOf(badUserNameChars[i]) >= 0) {
- throw({forbidden: 'Character `' + badUserNameChars[i] +
- '` is not allowed in usernames.'});
- }
- }
- }
-">>).
diff --git a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c b/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
deleted file mode 100644
index a4e9d1cfa..000000000
--- a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include "erl_nif.h"
-#include "unicode/ucol.h"
-#include "unicode/ucasemap.h"
-
-#define MAX_DEPTH 10
-
-#define NO_ERROR 0
-#define BAD_ARG_ERROR 1
-#define MAX_DEPTH_ERROR 2
-
-#if (ERL_NIF_MAJOR_VERSION > 2) || \
- (ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION >= 3)
-/* OTP R15B or higher */
-#define term_is_number(env, t) enif_is_number(env, t)
-#else
-#define term_is_number(env, t) \
- (!enif_is_binary(env, t) && \
- !enif_is_list(env, t) && \
- !enif_is_tuple(env, t))
-#endif
-
-#ifdef _MSC_VER
-#define threadlocal __declspec(thread)
-#else
-#define threadlocal __thread
-#endif
-
-static ERL_NIF_TERM ATOM_TRUE;
-static ERL_NIF_TERM ATOM_FALSE;
-static ERL_NIF_TERM ATOM_NULL;
-static ERL_NIF_TERM ATOM_MAX_DEPTH_ERROR;
-
-typedef struct {
- ErlNifEnv* env;
- int error;
- UCollator* coll;
-} ctx_t;
-
-static threadlocal UCollator* collator = NULL;
-static threadlocal int64_t threadEpoch = 0;
-static UCollator** collators = NULL;
-static int numCollators = 0;
-static int numSchedulers = 0;
-static int64_t loadEpoch = 0;
-static ErlNifMutex* collMutex = NULL;
-
-static ERL_NIF_TERM less_json_nif(ErlNifEnv*, int, const ERL_NIF_TERM []);
-static ERL_NIF_TERM compare_strings_nif(ErlNifEnv*, int, const ERL_NIF_TERM []);
-static ERL_NIF_TERM get_icu_version(ErlNifEnv*, int, const ERL_NIF_TERM []);
-static ERL_NIF_TERM get_uca_version(ErlNifEnv*, int, const ERL_NIF_TERM []);
-static ERL_NIF_TERM get_collator_version(ErlNifEnv*, int, const ERL_NIF_TERM []);
-static int on_load(ErlNifEnv*, void**, ERL_NIF_TERM);
-static void on_unload(ErlNifEnv*, void*);
-static __inline int less_json(int, ctx_t*, ERL_NIF_TERM, ERL_NIF_TERM);
-static __inline int atom_sort_order(ErlNifEnv*, ERL_NIF_TERM);
-static __inline int compare_strings(ctx_t*, ErlNifBinary, ErlNifBinary);
-static __inline int compare_lists(int, ctx_t*, ERL_NIF_TERM, ERL_NIF_TERM);
-static __inline int compare_props(int, ctx_t*, ERL_NIF_TERM, ERL_NIF_TERM);
-static __inline int is_max_utf8_marker(ErlNifBinary);
-static __inline UCollator* get_collator(void);
-
-/* Should match the <<255,255,255,255>> in:
- * - src/mango/src/mango_idx_view.hrl#L13
- * - src/couch_mrview/src/couch_mrview_util.erl#L40 */
-static const unsigned char max_utf8_marker[] = {255, 255, 255, 255};
-
-
-UCollator*
-get_collator(void)
-{
- UErrorCode status = U_ZERO_ERROR;
-
- if(collator != NULL && threadEpoch == loadEpoch) {
- return collator;
- }
-
- collator = ucol_open("", &status);
-
- if (U_FAILURE(status)) {
- ucol_close(collator);
- return NULL;
- }
-
- enif_mutex_lock(collMutex);
- collators[numCollators] = collator;
- numCollators++;
- enif_mutex_unlock(collMutex);
-
- assert(numCollators <= numSchedulers && "Number of schedulers shrank.");
-
- threadEpoch = loadEpoch;
-
- return collator;
-}
-
-ERL_NIF_TERM
-less_json_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- ctx_t ctx;
- int result;
-
- ctx.env = env;
- ctx.error = NO_ERROR;
- ctx.coll = get_collator();
-
- result = less_json(1, &ctx, argv[0], argv[1]);
-
- /*
- * There are 2 possible failure reasons:
- *
- * 1) We got an invalid EJSON operand;
- * 2) The EJSON structures are too deep - to avoid allocating too
- * many C stack frames (because less_json is a recursive function),
- * and running out of memory, we throw a badarg exception to Erlang
- * and do the comparison in Erlang land. In practice, view keys are
- * EJSON structures with very little nesting.
- */
- if (ctx.error == NO_ERROR) {
- return enif_make_int(env, result);
- } else if (ctx.error == MAX_DEPTH_ERROR) {
- return enif_raise_exception(env, ATOM_MAX_DEPTH_ERROR);
- } else {
- return enif_make_badarg(env);
- }
-}
-
-
-ERL_NIF_TERM
-compare_strings_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- ctx_t ctx;
- int result;
- ErlNifBinary binA, binB;
-
- if (!enif_inspect_binary(env, argv[0], &binA)) {
- return enif_make_badarg(env);
- }
-
- if (!enif_inspect_binary(env, argv[1], &binB)) {
- return enif_make_badarg(env);
- }
-
- ctx.env = env;
- ctx.error = NO_ERROR;
- ctx.coll = get_collator();
-
- result = compare_strings(&ctx, binA, binB);
-
- if (ctx.error == NO_ERROR){
- return enif_make_int(env, result);
- } else {
- return enif_make_badarg(env);
- }
-}
-
-
-ERL_NIF_TERM
-get_icu_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- UVersionInfo ver = {0};
- ERL_NIF_TERM tup[U_MAX_VERSION_LENGTH] = {0};
- int i;
-
- u_getVersion(ver);
-
- for (i = 0; i < U_MAX_VERSION_LENGTH; i++) {
- tup[i] = enif_make_int(env, ver[i]);
- }
-
- return enif_make_tuple_from_array(env, tup, U_MAX_VERSION_LENGTH);
-}
-
-
-ERL_NIF_TERM
-get_uca_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- UVersionInfo ver = {0};
- ERL_NIF_TERM tup[U_MAX_VERSION_LENGTH] = {0};
- int i;
-
- ucol_getUCAVersion(get_collator(), ver);
-
- for (i = 0; i < U_MAX_VERSION_LENGTH; i++) {
- tup[i] = enif_make_int(env, ver[i]);
- }
-
- return enif_make_tuple_from_array(env, tup, U_MAX_VERSION_LENGTH);
-}
-
-ERL_NIF_TERM
-get_collator_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- UVersionInfo ver = {0};
- ERL_NIF_TERM tup[U_MAX_VERSION_LENGTH] = {0};
- int i;
-
- ucol_getVersion(get_collator(), ver);
-
- for (i = 0; i < U_MAX_VERSION_LENGTH; i++) {
- tup[i] = enif_make_int(env, ver[i]);
- }
-
- return enif_make_tuple_from_array(env, tup, U_MAX_VERSION_LENGTH);
-}
-
-int
-less_json(int depth, ctx_t* ctx, ERL_NIF_TERM a, ERL_NIF_TERM b)
-{
- int aIsAtom, bIsAtom;
- int aIsBin, bIsBin;
- int aIsNumber, bIsNumber;
- int aIsList, bIsList;
- int aArity, bArity;
- int aIsProps, bIsProps;
- const ERL_NIF_TERM *aProps, *bProps;
-
- /*
- * Avoid too much recursion. Normally there isn't more than a few levels
- * of recursion, as in practice view keys do not go beyond 1 to 3 levels
- * of nesting. In case of too much recursion, signal it to the Erlang land
- * via an exception and do the EJSON comparison in Erlang land.
- */
- if (depth > MAX_DEPTH) {
- ctx->error = MAX_DEPTH_ERROR;
- return 0;
- }
-
- aIsAtom = enif_is_atom(ctx->env, a);
- bIsAtom = enif_is_atom(ctx->env, b);
-
- if (aIsAtom) {
- if (bIsAtom) {
- int aSortOrd, bSortOrd;
-
- if ((aSortOrd = atom_sort_order(ctx->env, a)) == -1) {
- ctx->error = BAD_ARG_ERROR;
- return 0;
- }
-
- if ((bSortOrd = atom_sort_order(ctx->env, b)) == -1) {
- ctx->error = BAD_ARG_ERROR;
- return 0;
- }
-
- return aSortOrd - bSortOrd;
- }
-
- return -1;
- }
-
- if (bIsAtom) {
- return 1;
- }
-
- aIsNumber = term_is_number(ctx->env, a);
- bIsNumber = term_is_number(ctx->env, b);
-
- if (aIsNumber) {
- if (bIsNumber) {
- return enif_compare(a, b);
- }
-
- return -1;
- }
-
- if (bIsNumber) {
- return 1;
- }
-
- aIsBin = enif_is_binary(ctx->env, a);
- bIsBin = enif_is_binary(ctx->env, b);
-
- if (aIsBin) {
- if (bIsBin) {
- ErlNifBinary binA, binB;
-
- enif_inspect_binary(ctx->env, a, &binA);
- enif_inspect_binary(ctx->env, b, &binB);
-
- return compare_strings(ctx, binA, binB);
- }
-
- return -1;
- }
-
- if (bIsBin) {
- return 1;
- }
-
- aIsList = enif_is_list(ctx->env, a);
- bIsList = enif_is_list(ctx->env, b);
-
- if (aIsList) {
- if (bIsList) {
- return compare_lists(depth, ctx, a, b);
- }
-
- return -1;
- }
-
- if (bIsList) {
- return 1;
- }
-
-
- aIsProps = 0;
- if (enif_get_tuple(ctx->env, a, &aArity, &aProps)) {
- if (aArity == 1 && enif_is_list(ctx->env, aProps[0])) {
- aIsProps = 1;
- }
- }
-
- bIsProps = 0;
- if (enif_get_tuple(ctx->env, b, &bArity, &bProps)) {
- if (bArity == 1 && enif_is_list(ctx->env, bProps[0])) {
- bIsProps = 1;
- }
- }
-
- if (aIsProps) {
- if (bIsProps) {
- return compare_props(depth, ctx, aProps[0], bProps[0]);
- }
- return -1;
- }
-
- if (bIsProps) {
- return 1;
- }
-
- /*
- * Both arguments are unsupported data types. Return a badarg error
- */
- ctx->error = BAD_ARG_ERROR;
- return 0;
-}
-
-
-int
-atom_sort_order(ErlNifEnv* env, ERL_NIF_TERM a)
-{
- if (enif_compare(a, ATOM_NULL) == 0) {
- return 1;
- } else if (enif_compare(a, ATOM_FALSE) == 0) {
- return 2;
- } else if (enif_compare(a, ATOM_TRUE) == 0) {
- return 3;
- }
-
- return -1;
-}
-
-
-int
-compare_lists(int depth, ctx_t* ctx, ERL_NIF_TERM a, ERL_NIF_TERM b)
-{
- ERL_NIF_TERM headA, tailA;
- ERL_NIF_TERM headB, tailB;
- int aIsEmpty, bIsEmpty;
- int result;
-
- while (1) {
- aIsEmpty = !enif_get_list_cell(ctx->env, a, &headA, &tailA);
- bIsEmpty = !enif_get_list_cell(ctx->env, b, &headB, &tailB);
-
- if (aIsEmpty) {
- if (bIsEmpty) {
- return 0;
- }
- return -1;
- }
-
- if (bIsEmpty) {
- return 1;
- }
-
- result = less_json(depth + 1, ctx, headA, headB);
-
- if (ctx->error || result != 0) {
- return result;
- }
-
- a = tailA;
- b = tailB;
- }
-
- return result;
-}
-
-
-int
-compare_props(int depth, ctx_t* ctx, ERL_NIF_TERM a, ERL_NIF_TERM b)
-{
- ERL_NIF_TERM headA, tailA;
- ERL_NIF_TERM headB, tailB;
- int aArity, bArity;
- const ERL_NIF_TERM *aKV, *bKV;
- ErlNifBinary keyA, keyB;
- int aIsEmpty, bIsEmpty;
- int keyCompResult, valueCompResult;
-
- while (1) {
- aIsEmpty = !enif_get_list_cell(ctx->env, a, &headA, &tailA);
- bIsEmpty = !enif_get_list_cell(ctx->env, b, &headB, &tailB);
-
- if (aIsEmpty) {
- if (bIsEmpty) {
- return 0;
- }
- return -1;
- }
-
- if (bIsEmpty) {
- return 1;
- }
-
- if (!enif_get_tuple(ctx->env, headA, &aArity, &aKV)) {
- ctx->error = BAD_ARG_ERROR;
- return 0;
- }
- if ((aArity != 2) || !enif_inspect_binary(ctx->env, aKV[0], &keyA)) {
- ctx->error = BAD_ARG_ERROR;
- return 0;
- }
-
- if (!enif_get_tuple(ctx->env, headB, &bArity, &bKV)) {
- ctx->error = BAD_ARG_ERROR;
- return 0;
- }
- if ((bArity != 2) || !enif_inspect_binary(ctx->env, bKV[0], &keyB)) {
- ctx->error = BAD_ARG_ERROR;
- return 0;
- }
-
- keyCompResult = compare_strings(ctx, keyA, keyB);
-
- if (ctx->error || keyCompResult != 0) {
- return keyCompResult;
- }
-
- valueCompResult = less_json(depth + 1, ctx, aKV[1], bKV[1]);
-
- if (ctx->error || valueCompResult != 0) {
- return valueCompResult;
- }
-
- a = tailA;
- b = tailB;
- }
-
- return 0;
-}
-
-
-int
-is_max_utf8_marker(ErlNifBinary bin)
-{
- if (bin.size == sizeof(max_utf8_marker)) {
- if(memcmp(bin.data, max_utf8_marker, sizeof(max_utf8_marker)) == 0) {
- return 1;
- }
- return 0;
- }
- return 0;
-}
-
-
-int
-compare_strings(ctx_t* ctx, ErlNifBinary a, ErlNifBinary b)
-{
- UErrorCode status = U_ZERO_ERROR;
- UCharIterator iterA, iterB;
- int result;
-
- /* libicu versions earlier than 59 (at least) don't consider the
- * {255,255,255,255} to be the highest sortable string as CouchDB expects.
- * While we are still shipping CentOS 7 packages with libicu 50, we should
- * explicitly check for the marker; later on we can remove the max
- * logic */
-
- int a_is_max = is_max_utf8_marker(a);
- int b_is_max = is_max_utf8_marker(b);
-
- if(a_is_max && b_is_max) {
- return 0;
- }
-
- if(a_is_max) {
- return 1;
- }
-
- if(b_is_max) {
- return -1;
- }
-
- uiter_setUTF8(&iterA, (const char *) a.data, (uint32_t) a.size);
- uiter_setUTF8(&iterB, (const char *) b.data, (uint32_t) b.size);
-
- result = ucol_strcollIter(ctx->coll, &iterA, &iterB, &status);
-
- if (U_FAILURE(status)) {
- ctx->error = BAD_ARG_ERROR;
- return 0;
- }
-
- /* ucol_strcollIter returns 0, -1 or 1
- * (see type UCollationResult in unicode/ucol.h) */
-
- return result;
-}
-
-
-int
-on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
-{
- if (!enif_get_int(env, info, &numSchedulers)) {
- return 1;
- }
-
- if (numSchedulers < 1) {
- return 2;
- }
-
- loadEpoch += 1;
-
- collMutex = enif_mutex_create("coll_mutex");
-
- if (collMutex == NULL) {
- return 3;
- }
-
- collators = enif_alloc(sizeof(UCollator*) * numSchedulers);
-
- if (collators == NULL) {
- enif_mutex_destroy(collMutex);
- return 4;
- }
-
- ATOM_TRUE = enif_make_atom(env, "true");
- ATOM_FALSE = enif_make_atom(env, "false");
- ATOM_NULL = enif_make_atom(env, "null");
- ATOM_MAX_DEPTH_ERROR = enif_make_atom(env, "max_depth_error");
-
- return 0;
-}
-
-
-void
-on_unload(ErlNifEnv* env, void* priv_data)
-{
- if (collators != NULL) {
- int i;
-
- for (i = 0; i < numCollators; i++) {
- ucol_close(collators[i]);
- }
-
- enif_free(collators);
- }
-
- numCollators = 0;
-
- if (collMutex != NULL) {
- enif_mutex_destroy(collMutex);
- }
-}
-
-
-static ErlNifFunc nif_functions[] = {
- {"less_nif", 2, less_json_nif},
- {"compare_strings_nif", 2, compare_strings_nif},
- {"get_icu_version", 0, get_icu_version},
- {"get_uca_version", 0, get_uca_version},
- {"get_collator_version", 0, get_collator_version}
-};
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-ERL_NIF_INIT(couch_ejson_compare, nif_functions, &on_load, NULL, NULL, &on_unload);
-
-#ifdef __cplusplus
-}
-#endif
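
Note on the NIF above: compare_strings() never converts the binaries to UTF-16; it wraps each UTF-8 buffer in a UCharIterator and lets ICU collate it directly with ucol_strcollIter(). The following standalone sketch (not part of this patch; it assumes the libicu development headers are installed and uses a hypothetical file name demo_collate.c) shows the same call pattern against the root-locale collator:

/* Illustrative sketch only.
 * Build (typical flags, assumption): cc demo_collate.c -licuuc -licui18n */
#include <stdio.h>
#include <string.h>
#include "unicode/ucol.h"
#include "unicode/uiter.h"

int main(void)
{
    UErrorCode status = U_ZERO_ERROR;
    UCollator* coll = ucol_open("", &status);   /* root locale, as in get_collator() */
    UCharIterator iterA, iterB;
    const char* a = "apple";
    const char* b = "Apple";
    int result;

    if (U_FAILURE(status)) {
        fprintf(stderr, "ucol_open failed: %s\n", u_errorName(status));
        return 1;
    }

    /* Iterate over the UTF-8 bytes directly, no UTF-16 conversion needed. */
    uiter_setUTF8(&iterA, a, (int32_t) strlen(a));
    uiter_setUTF8(&iterB, b, (int32_t) strlen(b));

    /* Returns UCOL_LESS (-1), UCOL_EQUAL (0) or UCOL_GREATER (1). */
    result = ucol_strcollIter(coll, &iterA, &iterB, &status);
    printf("collation result: %d\n", result);

    ucol_close(coll);
    return U_FAILURE(status) ? 1 : 0;
}

Opening one collator per scheduler thread, as the deleted file does, avoids taking a lock around each comparison; the sketch above opens a single collator only because it is single-threaded.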
diff --git a/src/couch/priv/couch_js/1.8.5/help.h b/src/couch/priv/couch_js/1.8.5/help.h
deleted file mode 100644
index 3a19901f0..000000000
--- a/src/couch/priv/couch_js/1.8.5/help.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_HELP_H
-#define COUCHJS_HELP_H
-
-#include "config.h"
-
-static const char VERSION_TEMPLATE[] =
- "%s - %s (SpiderMonkey 1.8.5)\n"
- "\n"
- "Licensed under the Apache License, Version 2.0 (the \"License\"); you may "
- "not use\n"
- "this file except in compliance with the License. You may obtain a copy of "
- "the\n"
- "License at\n"
- "\n"
- " http://www.apache.org/licenses/LICENSE-2.0\n"
- "\n"
- "Unless required by applicable law or agreed to in writing, software "
- "distributed\n"
- "under the License is distributed on an \"AS IS\" BASIS, WITHOUT "
- "WARRANTIES OR\n"
- "CONDITIONS OF ANY KIND, either express or implied. See the License "
- "for the\n"
- "specific language governing permissions and limitations under the "
- "License.\n";
-
-static const char USAGE_TEMPLATE[] =
- "Usage: %s [FILE]\n"
- "\n"
- "The %s command runs the %s JavaScript interpreter.\n"
- "\n"
- "The exit status is 0 for success or 1 for failure.\n"
- "\n"
- "Options:\n"
- "\n"
- " -h display a short help message and exit\n"
- " -V display version information and exit\n"
- " -S SIZE specify that the runtime should allow at\n"
- " most SIZE bytes of memory to be allocated\n"
- " default is 64 MiB\n"
- " --eval Enable runtime code evaluation (dangerous!)\n"
- "\n"
- "Report bugs at <%s>.\n";
-
-#define BASENAME COUCHJS_NAME
-
-#define couch_version(basename) \
- fprintf( \
- stdout, \
- VERSION_TEMPLATE, \
- basename, \
- PACKAGE_STRING)
-
-#define DISPLAY_VERSION couch_version(BASENAME)
-
-
-#define couch_usage(basename) \
- fprintf( \
- stdout, \
- USAGE_TEMPLATE, \
- basename, \
- basename, \
- PACKAGE_NAME, \
- PACKAGE_BUGREPORT)
-
-#define DISPLAY_USAGE couch_usage(BASENAME)
-
-#endif // Included help.h
diff --git a/src/couch/priv/couch_js/1.8.5/main.c b/src/couch/priv/couch_js/1.8.5/main.c
deleted file mode 100644
index c8e385cc9..000000000
--- a/src/couch/priv/couch_js/1.8.5/main.c
+++ /dev/null
@@ -1,307 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#ifdef XP_WIN
-#include <windows.h>
-#else
-#include <unistd.h>
-#endif
-
-#include <jsapi.h>
-#include "config.h"
-#include "utf8.h"
-#include "util.h"
-
-
-#define SETUP_REQUEST(cx) \
- JS_SetContextThread(cx); \
- JS_BeginRequest(cx);
-#define FINISH_REQUEST(cx) \
- JS_EndRequest(cx); \
- JS_ClearContextThread(cx);
-
-
-static JSClass global_class = {
- "GlobalClass",
- JSCLASS_GLOBAL_FLAGS,
- JS_PropertyStub,
- JS_PropertyStub,
- JS_PropertyStub,
- JS_StrictPropertyStub,
- JS_EnumerateStub,
- JS_ResolveStub,
- JS_ConvertStub,
- JS_FinalizeStub,
- JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-static JSBool
-evalcx(JSContext *cx, uintN argc, jsval* vp)
-{
- jsval* argv = JS_ARGV(cx, vp);
- JSString* str;
- JSObject* sandbox;
- JSObject* global;
- JSContext* subcx;
- JSCrossCompartmentCall* call = NULL;
- const jschar* src;
- size_t srclen;
- jsval rval;
- JSBool ret = JS_FALSE;
- char *name = NULL;
-
- sandbox = NULL;
- if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) {
- return JS_FALSE;
- }
-
- subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L);
- if(!subcx) {
- JS_ReportOutOfMemory(cx);
- return JS_FALSE;
- }
-
- SETUP_REQUEST(subcx);
-
- src = JS_GetStringCharsAndLength(cx, str, &srclen);
-
- // Re-use the compartment associated with the main context,
- // rather than creating a new compartment
- global = JS_GetGlobalObject(cx);
- if(global == NULL) goto done;
- call = JS_EnterCrossCompartmentCall(subcx, global);
-
- if(!sandbox) {
- sandbox = JS_NewGlobalObject(subcx, &global_class);
- if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) {
- goto done;
- }
- }
-
- if(argc > 2) {
- name = enc_string(cx, argv[2], NULL);
- }
-
- if(srclen == 0) {
- JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(sandbox));
- } else {
- JS_EvaluateUCScript(subcx, sandbox, src, srclen, name, 1, &rval);
- JS_SET_RVAL(cx, vp, rval);
- }
-
- ret = JS_TRUE;
-
-done:
- if(name) JS_free(cx, name);
- JS_LeaveCrossCompartmentCall(call);
- FINISH_REQUEST(subcx);
- JS_DestroyContext(subcx);
- return ret;
-}
-
-
-static JSBool
-gc(JSContext* cx, uintN argc, jsval* vp)
-{
- JS_GC(cx);
- JS_SET_RVAL(cx, vp, JSVAL_VOID);
- return JS_TRUE;
-}
-
-
-static JSBool
-print(JSContext* cx, uintN argc, jsval* vp)
-{
- jsval* argv = JS_ARGV(cx, vp);
- couch_print(cx, argc, argv);
- JS_SET_RVAL(cx, vp, JSVAL_VOID);
- return JS_TRUE;
-}
-
-
-static JSBool
-quit(JSContext* cx, uintN argc, jsval* vp)
-{
- jsval* argv = JS_ARGV(cx, vp);
- int exit_code = 0;
- JS_ConvertArguments(cx, argc, argv, "/i", &exit_code);
- exit(exit_code);
-}
-
-
-static JSBool
-readline(JSContext* cx, uintN argc, jsval* vp)
-{
- JSString* line;
-
- /* GC Occasionally */
- JS_MaybeGC(cx);
-
- line = couch_readline(cx, stdin);
- if(line == NULL) return JS_FALSE;
-
- JS_SET_RVAL(cx, vp, STRING_TO_JSVAL(line));
- return JS_TRUE;
-}
-
-
-static JSBool
-seal(JSContext* cx, uintN argc, jsval* vp)
-{
- jsval* argv = JS_ARGV(cx, vp);
- JSObject *target;
- JSBool deep = JS_FALSE;
- JSBool ret;
-
- if(!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
- return JS_FALSE;
-
- if(!target) {
- JS_SET_RVAL(cx, vp, JSVAL_VOID);
- return JS_TRUE;
- }
-
-
- ret = deep ? JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target);
- JS_SET_RVAL(cx, vp, JSVAL_VOID);
- return ret;
-}
-
-
-static JSFunctionSpec global_functions[] = {
- JS_FS("evalcx", evalcx, 0, 0),
- JS_FS("gc", gc, 0, 0),
- JS_FS("print", print, 0, 0),
- JS_FS("quit", quit, 0, 0),
- JS_FS("readline", readline, 0, 0),
- JS_FS("seal", seal, 0, 0),
- JS_FS_END
-};
-
-
-static JSBool
-csp_allows(JSContext* cx)
-{
- couch_args *args = (couch_args*)JS_GetContextPrivate(cx);
- if(args->eval) {
- return JS_TRUE;
- } else {
- return JS_FALSE;
- }
-}
-
-
-static JSSecurityCallbacks security_callbacks = {
- NULL,
- NULL,
- NULL,
- csp_allows
-};
-
-
-int
-main(int argc, const char* argv[])
-{
- JSRuntime* rt = NULL;
- JSContext* cx = NULL;
- JSObject* global = NULL;
- JSCrossCompartmentCall *call = NULL;
- JSSCRIPT_TYPE script;
- JSString* scriptsrc;
- const jschar* schars;
- size_t slen;
- jsval sroot;
- jsval result;
- int i;
-
- couch_args* args = couch_parse_args(argc, argv);
-
- rt = JS_NewRuntime(args->stack_size);
- if(rt == NULL)
- return 1;
-
- cx = JS_NewContext(rt, 8L * 1024L);
- if(cx == NULL)
- return 1;
-
- JS_SetErrorReporter(cx, couch_error);
- JS_ToggleOptions(cx, JSOPTION_XML);
- JS_SetOptions(cx, JSOPTION_METHODJIT);
-#ifdef JSOPTION_TYPE_INFERENCE
- JS_SetOptions(cx, JSOPTION_TYPE_INFERENCE);
-#endif
- JS_SetContextPrivate(cx, args);
- JS_SetRuntimeSecurityCallbacks(rt, &security_callbacks);
-
- SETUP_REQUEST(cx);
-
- global = JS_NewCompartmentAndGlobalObject(cx, &global_class, NULL);
- if(global == NULL)
- return 1;
-
- call = JS_EnterCrossCompartmentCall(cx, global);
-
- JS_SetGlobalObject(cx, global);
-
- if(!JS_InitStandardClasses(cx, global))
- return 1;
-
- if(couch_load_funcs(cx, global, global_functions) != JS_TRUE)
- return 1;
-
- for(i = 0 ; args->scripts[i] ; i++) {
- // Convert script source to jschars.
- scriptsrc = couch_readfile(cx, args->scripts[i]);
- if(!scriptsrc)
- return 1;
-
- schars = JS_GetStringCharsAndLength(cx, scriptsrc, &slen);
-
- // Root it so GC doesn't collect it.
- sroot = STRING_TO_JSVAL(scriptsrc);
- if(JS_AddValueRoot(cx, &sroot) != JS_TRUE) {
- fprintf(stderr, "Internal root error.\n");
- return 1;
- }
-
- // Compile and run
- script = JS_CompileUCScript(cx, global, schars, slen,
- args->scripts[i], 1);
- if(!script) {
- fprintf(stderr, "Failed to compile script.\n");
- return 1;
- }
-
- if(JS_ExecuteScript(cx, global, script, &result) != JS_TRUE) {
- fprintf(stderr, "Failed to execute script.\n");
- return 1;
- }
-
- // Warning message if we don't remove it.
- JS_RemoveValueRoot(cx, &sroot);
-
- // Give the GC a chance to run.
- JS_MaybeGC(cx);
- }
-
- JS_LeaveCrossCompartmentCall(call);
- FINISH_REQUEST(cx);
- JS_DestroyContext(cx);
- JS_DestroyRuntime(rt);
- JS_ShutDown();
-
- return 0;
-}
diff --git a/src/couch/priv/couch_js/1.8.5/utf8.c b/src/couch/priv/couch_js/1.8.5/utf8.c
deleted file mode 100644
index 4cdb9c21f..000000000
--- a/src/couch/priv/couch_js/1.8.5/utf8.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <jsapi.h>
-#include "config.h"
-
-static int
-enc_char(uint8 *utf8Buffer, uint32 ucs4Char)
-{
- int utf8Length = 1;
-
- if (ucs4Char < 0x80)
- {
- *utf8Buffer = (uint8)ucs4Char;
- }
- else
- {
- int i;
- uint32 a = ucs4Char >> 11;
- utf8Length = 2;
- while(a)
- {
- a >>= 5;
- utf8Length++;
- }
- i = utf8Length;
- while(--i)
- {
- utf8Buffer[i] = (uint8)((ucs4Char & 0x3F) | 0x80);
- ucs4Char >>= 6;
- }
- *utf8Buffer = (uint8)(0x100 - (1 << (8-utf8Length)) + ucs4Char);
- }
-
- return utf8Length;
-}
-
-static JSBool
-enc_charbuf(const jschar* src, size_t srclen, char* dst, size_t* dstlenp)
-{
- size_t i;
- size_t utf8Len;
- size_t dstlen = *dstlenp;
- size_t origDstlen = dstlen;
- jschar c;
- jschar c2;
- uint32 v;
- uint8 utf8buf[6];
-
- if(!dst)
- {
- dstlen = origDstlen = (size_t) -1;
- }
-
- while(srclen)
- {
- c = *src++;
- srclen--;
-
- if(c <= 0xD7FF || c >= 0xE000)
- {
- v = (uint32) c;
- }
- else if(c >= 0xD800 && c <= 0xDBFF)
- {
- if(srclen < 1) goto buffer_too_small;
- c2 = *src++;
- srclen--;
- if(c2 >= 0xDC00 && c2 <= 0xDFFF)
- {
- v = (uint32) (((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000);
- }
- else
- {
- // Invalid second half of surrogate pair
- v = (uint32) 0xFFFD;
- // Undo our character advancement
- src--;
- srclen++;
- }
- }
- else
- {
- // Invalid first half of surrogate pair
- v = (uint32) 0xFFFD;
- }
-
- if(v < 0x0080)
- {
- /* no encoding necessary - performance hack */
- if(!dstlen) goto buffer_too_small;
- if(dst) *dst++ = (char) v;
- utf8Len = 1;
- }
- else
- {
- utf8Len = enc_char(utf8buf, v);
- if(utf8Len > dstlen) goto buffer_too_small;
- if(dst)
- {
- for (i = 0; i < utf8Len; i++)
- {
- *dst++ = (char) utf8buf[i];
- }
- }
- }
- dstlen -= utf8Len;
- }
-
- *dstlenp = (origDstlen - dstlen);
- return JS_TRUE;
-
-buffer_too_small:
- *dstlenp = (origDstlen - dstlen);
- return JS_FALSE;
-}
-
-char*
-enc_string(JSContext* cx, jsval arg, size_t* buflen)
-{
- JSString* str = NULL;
- const jschar* src = NULL;
- char* bytes = NULL;
- size_t srclen = 0;
- size_t byteslen = 0;
-
- str = JS_ValueToString(cx, arg);
- if(!str) goto error;
-
-#ifdef HAVE_JS_GET_STRING_CHARS_AND_LENGTH
- src = JS_GetStringCharsAndLength(cx, str, &srclen);
-#else
- src = JS_GetStringChars(str);
- srclen = JS_GetStringLength(str);
-#endif
-
- if(!enc_charbuf(src, srclen, NULL, &byteslen)) goto error;
-
- bytes = JS_malloc(cx, (byteslen) + 1);
- bytes[byteslen] = 0;
-
- if(!enc_charbuf(src, srclen, bytes, &byteslen)) goto error;
-
- if(buflen) *buflen = byteslen;
- goto success;
-
-error:
- if(bytes != NULL) JS_free(cx, bytes);
- bytes = NULL;
-
-success:
- return bytes;
-}
-
-static uint32
-dec_char(const uint8 *utf8Buffer, int utf8Length)
-{
- uint32 ucs4Char;
- uint32 minucs4Char;
-
- /* from Unicode 3.1, non-shortest form is illegal */
- static const uint32 minucs4Table[] = {
- 0x00000080, 0x00000800, 0x0001000, 0x0020000, 0x0400000
- };
-
- if (utf8Length == 1)
- {
- ucs4Char = *utf8Buffer;
- }
- else
- {
- ucs4Char = *utf8Buffer++ & ((1<<(7-utf8Length))-1);
- minucs4Char = minucs4Table[utf8Length-2];
- while(--utf8Length)
- {
- ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
- }
- if(ucs4Char < minucs4Char || ucs4Char == 0xFFFE || ucs4Char == 0xFFFF)
- {
- ucs4Char = 0xFFFD;
- }
- }
-
- return ucs4Char;
-}
-
-static JSBool
-dec_charbuf(const char *src, size_t srclen, jschar *dst, size_t *dstlenp)
-{
- uint32 v;
- size_t offset = 0;
- size_t j;
- size_t n;
- size_t dstlen = *dstlenp;
- size_t origDstlen = dstlen;
-
- if(!dst) dstlen = origDstlen = (size_t) -1;
-
- while(srclen)
- {
- v = (uint8) *src;
- n = 1;
-
- if(v & 0x80)
- {
- while(v & (0x80 >> n))
- {
- n++;
- }
-
- if(n > srclen) goto buffer_too_small;
- if(n == 1 || n > 6) goto bad_character;
-
- for(j = 1; j < n; j++)
- {
- if((src[j] & 0xC0) != 0x80) goto bad_character;
- }
-
- v = dec_char((const uint8 *) src, n);
- if(v >= 0x10000)
- {
- v -= 0x10000;
-
- if(v > 0xFFFFF || dstlen < 2)
- {
- *dstlenp = (origDstlen - dstlen);
- return JS_FALSE;
- }
-
- if(dstlen < 2) goto buffer_too_small;
-
- if(dst)
- {
- *dst++ = (jschar)((v >> 10) + 0xD800);
- v = (jschar)((v & 0x3FF) + 0xDC00);
- }
- dstlen--;
- }
- }
-
- if(!dstlen) goto buffer_too_small;
- if(dst) *dst++ = (jschar) v;
-
- dstlen--;
- offset += n;
- src += n;
- srclen -= n;
- }
-
- *dstlenp = (origDstlen - dstlen);
- return JS_TRUE;
-
-bad_character:
- *dstlenp = (origDstlen - dstlen);
- return JS_FALSE;
-
-buffer_too_small:
- *dstlenp = (origDstlen - dstlen);
- return JS_FALSE;
-}
-
-JSString*
-dec_string(JSContext* cx, const char* bytes, size_t byteslen)
-{
- JSString* str = NULL;
- jschar* chars = NULL;
- size_t charslen;
-
- if(!dec_charbuf(bytes, byteslen, NULL, &charslen)) goto error;
-
- chars = JS_malloc(cx, (charslen + 1) * sizeof(jschar));
- if(!chars) return NULL;
- chars[charslen] = 0;
-
- if(!dec_charbuf(bytes, byteslen, chars, &charslen)) goto error;
-
- str = JS_NewUCString(cx, chars, charslen - 1);
- if(!str) goto error;
-
- goto success;
-
-error:
- if(chars != NULL) JS_free(cx, chars);
- str = NULL;
-
-success:
- return str;
-}
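
The codec above does two things by hand: it folds a UTF-16 surrogate pair into a single code point (the ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000 arithmetic) and then emits the UTF-8 bytes for that code point. A minimal standalone sketch of both steps, using only the standard library (illustration only, not code from this patch):

#include <stdio.h>
#include <stdint.h>

/* Encode one Unicode code point as UTF-8; returns the number of bytes. */
static int encode_utf8(uint32_t cp, unsigned char out[4])
{
    if (cp < 0x80) {
        out[0] = (unsigned char) cp;
        return 1;
    }
    if (cp < 0x800) {
        out[0] = (unsigned char) (0xC0 | (cp >> 6));
        out[1] = (unsigned char) (0x80 | (cp & 0x3F));
        return 2;
    }
    if (cp < 0x10000) {
        out[0] = (unsigned char) (0xE0 | (cp >> 12));
        out[1] = (unsigned char) (0x80 | ((cp >> 6) & 0x3F));
        out[2] = (unsigned char) (0x80 | (cp & 0x3F));
        return 3;
    }
    out[0] = (unsigned char) (0xF0 | (cp >> 18));
    out[1] = (unsigned char) (0x80 | ((cp >> 12) & 0x3F));
    out[2] = (unsigned char) (0x80 | ((cp >> 6) & 0x3F));
    out[3] = (unsigned char) (0x80 | (cp & 0x3F));
    return 4;
}

int main(void)
{
    /* Same arithmetic as enc_charbuf(): fold a surrogate pair into a code point. */
    uint32_t hi = 0xD83D, lo = 0xDE00;              /* U+1F600 as UTF-16 */
    uint32_t cp = ((hi - 0xD800) << 10) + (lo - 0xDC00) + 0x10000;

    unsigned char buf[4];
    int n = encode_utf8(cp, buf);
    int i;

    printf("U+%04X ->", (unsigned) cp);
    for (i = 0; i < n; i++) {
        printf(" %02X", buf[i]);
    }
    printf("\n");                                   /* expected: F0 9F 98 80 */
    return 0;
}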
diff --git a/src/couch/priv/couch_js/1.8.5/utf8.h b/src/couch/priv/couch_js/1.8.5/utf8.h
deleted file mode 100644
index c5cb86c46..000000000
--- a/src/couch/priv/couch_js/1.8.5/utf8.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCH_JS_UTF_8_H
-#define COUCH_JS_UTF_8_H
-
-char* enc_string(JSContext* cx, jsval arg, size_t* buflen);
-JSString* dec_string(JSContext* cx, const char* buf, size_t buflen);
-
-#endif
diff --git a/src/couch/priv/couch_js/1.8.5/util.c b/src/couch/priv/couch_js/1.8.5/util.c
deleted file mode 100644
index 5cf94b63a..000000000
--- a/src/couch/priv/couch_js/1.8.5/util.c
+++ /dev/null
@@ -1,296 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <jsapi.h>
-
-#include "help.h"
-#include "util.h"
-#include "utf8.h"
-
-
-size_t
-slurp_file(const char* file, char** outbuf_p)
-{
- FILE* fp;
- char fbuf[16384];
- char *buf = NULL;
- char* tmp;
- size_t nread = 0;
- size_t buflen = 0;
-
- if(strcmp(file, "-") == 0) {
- fp = stdin;
- } else {
- fp = fopen(file, "r");
- if(fp == NULL) {
- fprintf(stderr, "Failed to read file: %s\n", file);
- exit(3);
- }
- }
-
- while((nread = fread(fbuf, 1, 16384, fp)) > 0) {
- if(buf == NULL) {
- buf = (char*) malloc(nread + 1);
- if(buf == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(3);
- }
- memcpy(buf, fbuf, nread);
- } else {
- tmp = (char*) malloc(buflen + nread + 1);
- if(tmp == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(3);
- }
- memcpy(tmp, buf, buflen);
- memcpy(tmp+buflen, fbuf, nread);
- free(buf);
- buf = tmp;
- }
- buflen += nread;
- buf[buflen] = '\0';
- }
- *outbuf_p = buf;
- return buflen + 1;
-}
-
-couch_args*
-couch_parse_args(int argc, const char* argv[])
-{
- couch_args* args;
- int i = 1;
-
- args = (couch_args*) malloc(sizeof(couch_args));
- if(args == NULL)
- return NULL;
-
- memset(args, '\0', sizeof(couch_args));
- args->stack_size = 64L * 1024L * 1024L;
-
- while(i < argc) {
- if(strcmp("-h", argv[i]) == 0) {
- DISPLAY_USAGE;
- exit(0);
- } else if(strcmp("-V", argv[i]) == 0) {
- DISPLAY_VERSION;
- exit(0);
- } else if(strcmp("-H", argv[i]) == 0) {
- args->use_http = 1;
- } else if(strcmp("-T", argv[i]) == 0) {
- args->use_test_funs = 1;
- } else if(strcmp("-S", argv[i]) == 0) {
- args->stack_size = atoi(argv[++i]);
- if(args->stack_size <= 0) {
- fprintf(stderr, "Invalid stack size.\n");
- exit(2);
- }
- } else if(strcmp("--eval", argv[i]) == 0) {
- args->eval = 1;
- } else if(strcmp("--", argv[i]) == 0) {
- i++;
- break;
- } else {
- break;
- }
- i++;
- }
-
- if(i >= argc) {
- DISPLAY_USAGE;
- exit(3);
- }
- args->scripts = argv + i;
-
- return args;
-}
-
-
-int
-couch_fgets(char* buf, int size, FILE* fp)
-{
- int n, i, c;
-
- if(size <= 0) return -1;
- n = size - 1;
-
- for(i = 0; i < n && (c = getc(fp)) != EOF; i++) {
- buf[i] = c;
- if(c == '\n') {
- i++;
- break;
- }
- }
-
- buf[i] = '\0';
- return i;
-}
-
-
-JSString*
-couch_readline(JSContext* cx, FILE* fp)
-{
- JSString* str;
- char* bytes = NULL;
- char* tmp = NULL;
- size_t used = 0;
- size_t byteslen = 256;
- size_t readlen = 0;
-
- bytes = JS_malloc(cx, byteslen);
- if(bytes == NULL) return NULL;
-
- while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) {
- used += readlen;
-
- if(bytes[used-1] == '\n') {
- bytes[used-1] = '\0';
- break;
- }
-
- // Double our buffer and read more.
- byteslen *= 2;
- tmp = JS_realloc(cx, bytes, byteslen);
- if(!tmp) {
- JS_free(cx, bytes);
- return NULL;
- }
-
- bytes = tmp;
- }
-
- // Treat empty strings specially
- if(used == 0) {
- JS_free(cx, bytes);
- return JSVAL_TO_STRING(JS_GetEmptyStringValue(cx));
- }
-
- // Shrink the buffer to the actual data size
- tmp = JS_realloc(cx, bytes, used);
- if(!tmp) {
- JS_free(cx, bytes);
- return NULL;
- }
- bytes = tmp;
- byteslen = used;
-
- str = dec_string(cx, bytes, byteslen);
- JS_free(cx, bytes);
- return str;
-}
-
-
-JSString*
-couch_readfile(JSContext* cx, const char* filename)
-{
- JSString *string;
- size_t byteslen;
- char *bytes;
-
- if((byteslen = slurp_file(filename, &bytes))) {
- string = dec_string(cx, bytes, byteslen);
-
- free(bytes);
- return string;
- }
- return NULL;
-}
-
-
-void
-couch_print(JSContext* cx, uintN argc, jsval* argv)
-{
- char *bytes = NULL;
- FILE *stream = stdout;
-
- if (argc) {
- if (argc > 1 && argv[1] == JSVAL_TRUE) {
- stream = stderr;
- }
- bytes = enc_string(cx, argv[0], NULL);
- if(!bytes) return;
- fprintf(stream, "%s", bytes);
- JS_free(cx, bytes);
- }
-
- fputc('\n', stream);
- fflush(stream);
-}
-
-
-void
-couch_error(JSContext* cx, const char* mesg, JSErrorReport* report)
-{
- jsval v, replace;
- char* bytes;
- JSObject* regexp, *stack;
- jsval re_args[2];
-
- if(!report || !JSREPORT_IS_WARNING(report->flags))
- {
- fprintf(stderr, "%s\n", mesg);
-
- // Print a stack trace, if available.
- if (JSREPORT_IS_EXCEPTION(report->flags) &&
- JS_GetPendingException(cx, &v))
- {
- // Clear the exception before calling any JS methods, or the result is
- // infinite, recursive error report generation.
- JS_ClearPendingException(cx);
-
- // Use JS regexp to indent the stack trace.
- // If the regexp can't be created, don't JS_ReportError since it is
- // probably not productive to wind up here again.
-#ifdef SM185
- if(JS_GetProperty(cx, JSVAL_TO_OBJECT(v), "stack", &v) &&
- (regexp = JS_NewRegExpObjectNoStatics(
- cx, "^(?=.)", 6, JSREG_GLOB | JSREG_MULTILINE)))
-#else
- if(JS_GetProperty(cx, JSVAL_TO_OBJECT(v), "stack", &v) &&
- (regexp = JS_NewRegExpObject(
- cx, "^(?=.)", 6, JSREG_GLOB | JSREG_MULTILINE)))
-#endif
- {
- // Set up the arguments to ``String.replace()``
- re_args[0] = OBJECT_TO_JSVAL(regexp);
- re_args[1] = STRING_TO_JSVAL(JS_InternString(cx, "\t"));
-
- // Perform the replacement
- if(JS_ValueToObject(cx, v, &stack) &&
- JS_GetProperty(cx, stack, "replace", &replace) &&
- JS_CallFunctionValue(cx, stack, replace, 2, re_args, &v))
- {
- // Print the result
- bytes = enc_string(cx, v, NULL);
- fprintf(stderr, "Stacktrace:\n%s", bytes);
- JS_free(cx, bytes);
- }
- }
- }
- }
-}
-
-
-JSBool
-couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs)
-{
- JSFunctionSpec* f;
- for(f = funcs; f->name != NULL; f++) {
- if(!JS_DefineFunction(cx, obj, f->name, f->call, f->nargs, f->flags)) {
- fprintf(stderr, "Failed to create function: %s\n", f->name);
- return JS_FALSE;
- }
- }
- return JS_TRUE;
-}
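
couch_readline() above reads a line into a buffer that starts at 256 bytes and doubles whenever a read fills it without hitting a newline, then shrinks the buffer to the bytes actually used. A rough stdlib-only sketch of that grow-by-doubling strategy (illustrative; it uses plain malloc/realloc instead of JS_malloc/JS_realloc and skips the final shrink step):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char* read_line(FILE* fp)
{
    size_t cap = 256, used = 0;
    char* buf = malloc(cap);
    if (buf == NULL) return NULL;

    while (fgets(buf + used, (int)(cap - used), fp) != NULL) {
        used += strlen(buf + used);
        if (used > 0 && buf[used - 1] == '\n') {    /* got a full line */
            buf[used - 1] = '\0';
            return buf;
        }
        cap *= 2;                                   /* double and read more */
        char* tmp = realloc(buf, cap);
        if (tmp == NULL) { free(buf); return NULL; }
        buf = tmp;
    }

    if (used == 0) { free(buf); return NULL; }      /* EOF before any data */
    return buf;                                     /* last line had no newline */
}

int main(void)
{
    char* line;
    while ((line = read_line(stdin)) != NULL) {
        printf("got: %s\n", line);
        free(line);
    }
    return 0;
}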
diff --git a/src/couch/priv/couch_js/1.8.5/util.h b/src/couch/priv/couch_js/1.8.5/util.h
deleted file mode 100644
index 9dd290a4c..000000000
--- a/src/couch/priv/couch_js/1.8.5/util.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_UTIL_H
-#define COUCHJS_UTIL_H
-
-#include <jsapi.h>
-
-typedef struct {
- int eval;
- int use_http;
- int use_test_funs;
- int stack_size;
- const char** scripts;
-} couch_args;
-
-couch_args* couch_parse_args(int argc, const char* argv[]);
-int couch_fgets(char* buf, int size, FILE* fp);
-JSString* couch_readline(JSContext* cx, FILE* fp);
-JSString* couch_readfile(JSContext* cx, const char* filename);
-void couch_print(JSContext* cx, uintN argc, jsval* argv);
-void couch_error(JSContext* cx, const char* mesg, JSErrorReport* report);
-JSBool couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs);
-
-
-#endif // Included util.h
diff --git a/src/couch/priv/couch_js/60/help.h b/src/couch/priv/couch_js/60/help.h
deleted file mode 100644
index 826babbba..000000000
--- a/src/couch/priv/couch_js/60/help.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_HELP_H
-#define COUCHJS_HELP_H
-
-#include "config.h"
-
-static const char VERSION_TEMPLATE[] =
- "%s - %s (SpiderMonkey 60)\n"
- "\n"
- "Licensed under the Apache License, Version 2.0 (the \"License\"); you may "
- "not use\n"
- "this file except in compliance with the License. You may obtain a copy of "
- "the\n"
- "License at\n"
- "\n"
- " http://www.apache.org/licenses/LICENSE-2.0\n"
- "\n"
- "Unless required by applicable law or agreed to in writing, software "
- "distributed\n"
- "under the License is distributed on an \"AS IS\" BASIS, WITHOUT "
- "WARRANTIES OR\n"
- "CONDITIONS OF ANY KIND, either express or implied. See the License "
- "for the\n"
- "specific language governing permissions and limitations under the "
- "License.\n";
-
-static const char USAGE_TEMPLATE[] =
- "Usage: %s [FILE]\n"
- "\n"
- "The %s command runs the %s JavaScript interpreter.\n"
- "\n"
- "The exit status is 0 for success or 1 for failure.\n"
- "\n"
- "Options:\n"
- "\n"
- " -h display a short help message and exit\n"
- " -V display version information and exit\n"
- " -S SIZE specify that the runtime should allow at\n"
- " most SIZE bytes of memory to be allocated\n"
- " default is 64 MiB\n"
- " --eval Enable runtime code evaluation (dangerous!)\n"
- "\n"
- "Report bugs at <%s>.\n";
-
-#define BASENAME COUCHJS_NAME
-
-#define couch_version(basename) \
- fprintf( \
- stdout, \
- VERSION_TEMPLATE, \
- basename, \
- PACKAGE_STRING)
-
-#define DISPLAY_VERSION couch_version(BASENAME)
-
-
-#define couch_usage(basename) \
- fprintf( \
- stdout, \
- USAGE_TEMPLATE, \
- basename, \
- basename, \
- PACKAGE_NAME, \
- PACKAGE_BUGREPORT)
-
-#define DISPLAY_USAGE couch_usage(BASENAME)
-
-#endif // Included help.h
diff --git a/src/couch/priv/couch_js/60/main.cpp b/src/couch/priv/couch_js/60/main.cpp
deleted file mode 100644
index 5169b05d7..000000000
--- a/src/couch/priv/couch_js/60/main.cpp
+++ /dev/null
@@ -1,336 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#ifdef XP_WIN
-#define NOMINMAX
-#include <windows.h>
-#else
-#include <unistd.h>
-#endif
-
-#include <jsapi.h>
-#include <js/Initialization.h>
-#include <js/Conversions.h>
-#include <js/Wrapper.h>
-
-#include "config.h"
-#include "util.h"
-
-static bool enableSharedMemory = true;
-
-static JSClassOps global_ops = {
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- JS_GlobalObjectTraceHook
-};
-
-/* The class of the global object. */
-static JSClass global_class = {
- "global",
- JSCLASS_GLOBAL_FLAGS,
- &global_ops
-};
-
-static void
-SetStandardCompartmentOptions(JS::CompartmentOptions& options)
-{
- options.creationOptions().setSharedMemoryAndAtomicsEnabled(enableSharedMemory);
-}
-
-static JSObject*
-NewSandbox(JSContext* cx, bool lazy)
-{
- JS::CompartmentOptions options;
- SetStandardCompartmentOptions(options);
- JS::RootedObject obj(cx, JS_NewGlobalObject(cx, &global_class, nullptr,
- JS::DontFireOnNewGlobalHook, options));
- if (!obj)
- return nullptr;
-
- {
- JSAutoCompartment ac(cx, obj);
- if (!lazy && !JS_InitStandardClasses(cx, obj))
- return nullptr;
-
- JS::RootedValue value(cx, JS::BooleanValue(lazy));
- if (!JS_DefineProperty(cx, obj, "lazy", value, JSPROP_PERMANENT | JSPROP_READONLY))
- return nullptr;
-
- JS_FireOnNewGlobalObject(cx, obj);
- }
-
- if (!JS_WrapObject(cx, &obj))
- return nullptr;
- return obj;
-}
-
-static bool
-evalcx(JSContext *cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- bool ret = false;
-
- JS::RootedString str(cx, JS::ToString(cx, args[0]));
- if (!str)
- return false;
-
- JS::RootedObject sandbox(cx);
- if (args.hasDefined(1)) {
- sandbox = JS::ToObject(cx, args[1]);
- if (!sandbox)
- return false;
- }
-
- JSAutoRequest ar(cx);
-
- if (!sandbox) {
- sandbox = NewSandbox(cx, false);
- if (!sandbox)
- return false;
- }
-
- js::AutoStableStringChars strChars(cx);
- if (!strChars.initTwoByte(cx, str))
- return false;
-
- mozilla::Range<const char16_t> chars = strChars.twoByteRange();
- size_t srclen = chars.length();
- const char16_t* src = chars.begin().get();
-
- if(srclen == 0) {
- args.rval().setObject(*sandbox);
- } else {
- mozilla::Maybe<JSAutoCompartment> ac;
- unsigned flags;
- JSObject* unwrapped = UncheckedUnwrap(sandbox, true, &flags);
- if (flags & js::Wrapper::CROSS_COMPARTMENT) {
- sandbox = unwrapped;
- ac.emplace(cx, sandbox);
- }
-
- JS::CompileOptions opts(cx);
- JS::RootedValue rval(cx);
- opts.setFileAndLine("<unknown>", 1);
- if (!JS::Evaluate(cx, opts, src, srclen, args.rval())) {
- return false;
- }
- }
- ret = true;
- if (!JS_WrapValue(cx, args.rval()))
- return false;
-
- return ret;
-}
-
-
-static bool
-gc(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- JS_GC(cx);
- args.rval().setUndefined();
- return true;
-}
-
-
-static bool
-print(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- bool use_stderr = false;
- if(argc > 1 && args[1].isTrue()) {
- use_stderr = true;
- }
-
- if(!args[0].isString()) {
- JS_ReportErrorUTF8(cx, "Unable to print non-string value.");
- return false;
- }
-
- couch_print(cx, args[0], use_stderr);
-
- args.rval().setUndefined();
- return true;
-}
-
-
-static bool
-quit(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- int exit_code = args[0].toInt32();
- exit(exit_code);
-}
-
-
-static bool
-readline(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JSString* line;
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- /* GC Occasionally */
- JS_MaybeGC(cx);
-
- line = couch_readline(cx, stdin);
- if(line == NULL) return false;
-
- // return with JSString* instead of JSValue in the past
- args.rval().setString(line);
- return true;
-}
-
-
-static bool
-seal(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- JS::RootedObject target(cx);
- target = JS::ToObject(cx, args[0]);
- if (!target) {
- args.rval().setUndefined();
- return true;
- }
- bool deep = false;
- deep = args[1].toBoolean();
- bool ret = deep ? JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target);
- args.rval().setUndefined();
- return ret;
-}
-
-
-static JSFunctionSpec global_functions[] = {
- JS_FN("evalcx", evalcx, 0, 0),
- JS_FN("gc", gc, 0, 0),
- JS_FN("print", print, 0, 0),
- JS_FN("quit", quit, 0, 0),
- JS_FN("readline", readline, 0, 0),
- JS_FN("seal", seal, 0, 0),
- JS_FS_END
-};
-
-
-static bool
-csp_allows(JSContext* cx)
-{
- couch_args* args = static_cast<couch_args*>(JS_GetContextPrivate(cx));
- if(args->eval) {
- return true;
- } else {
- return false;
- }
-}
-
-
-static JSSecurityCallbacks security_callbacks = {
- csp_allows,
- nullptr
-};
-
-
-int
-main(int argc, const char* argv[])
-{
- JSContext* cx = NULL;
- char* scriptsrc;
- size_t slen;
- int i;
-
- couch_args* args = couch_parse_args(argc, argv);
-
- JS_Init();
- cx = JS_NewContext(args->stack_size, 8L * 1024L);
- if(cx == NULL)
- return 1;
-
- JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_BASELINE_ENABLE, 0);
- JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_ION_ENABLE, 0);
-
- if (!JS::InitSelfHostedCode(cx))
- return 1;
-
- JS::SetWarningReporter(cx, couch_error);
- JS::SetOutOfMemoryCallback(cx, couch_oom, NULL);
- JS_SetContextPrivate(cx, args);
- JS_SetSecurityCallbacks(cx, &security_callbacks);
-
- JSAutoRequest ar(cx);
- JS::CompartmentOptions options;
- JS::RootedObject global(cx, JS_NewGlobalObject(cx, &global_class, nullptr,
- JS::FireOnNewGlobalHook, options));
- if (!global)
- return 1;
-
- JSAutoCompartment ac(cx, global);
-
- if(!JS_InitStandardClasses(cx, global))
- return 1;
-
- if(couch_load_funcs(cx, global, global_functions) != true)
- return 1;
-
- for(i = 0 ; args->scripts[i] ; i++) {
- slen = couch_readfile(args->scripts[i], &scriptsrc);
-
- // Compile and run
- JS::CompileOptions options(cx);
- options.setFileAndLine(args->scripts[i], 1);
- options.setUTF8(true);
- JS::RootedScript script(cx);
-
- if(!JS_CompileScript(cx, scriptsrc, slen, options, &script)) {
- JS::RootedValue exc(cx);
- if(!JS_GetPendingException(cx, &exc)) {
- fprintf(stderr, "Failed to compile script.\n");
- } else {
- JS::RootedObject exc_obj(cx, &exc.toObject());
- JSErrorReport* report = JS_ErrorFromException(cx, exc_obj);
- couch_error(cx, report);
- }
- return 1;
- }
-
- free(scriptsrc);
-
- JS::RootedValue result(cx);
- if(JS_ExecuteScript(cx, script, &result) != true) {
- JS::RootedValue exc(cx);
- if(!JS_GetPendingException(cx, &exc)) {
- fprintf(stderr, "Failed to execute script.\n");
- } else {
- JS::RootedObject exc_obj(cx, &exc.toObject());
- JSErrorReport* report = JS_ErrorFromException(cx, exc_obj);
- couch_error(cx, report);
- }
- return 1;
- }
-
- // Give the GC a chance to run.
- JS_MaybeGC(cx);
- }
-
- return 0;
-}
diff --git a/src/couch/priv/couch_js/60/util.cpp b/src/couch/priv/couch_js/60/util.cpp
deleted file mode 100644
index 3bc58a921..000000000
--- a/src/couch/priv/couch_js/60/util.cpp
+++ /dev/null
@@ -1,355 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <sstream>
-
-#include <jsapi.h>
-#include <js/Initialization.h>
-#include <js/CharacterEncoding.h>
-#include <js/Conversions.h>
-#include <mozilla/Unused.h>
-
-#include "help.h"
-#include "util.h"
-
-std::string
-js_to_string(JSContext* cx, JS::HandleValue val)
-{
- JS::AutoSaveExceptionState exc_state(cx);
- JS::RootedString sval(cx);
- sval = val.toString();
-
- JS::UniqueChars chars(JS_EncodeStringToUTF8(cx, sval));
- if(!chars) {
- JS_ClearPendingException(cx);
- return std::string();
- }
-
- return chars.get();
-}
-
-bool
-js_to_string(JSContext* cx, JS::HandleValue val, std::string& str)
-{
- if(!val.isString()) {
- return false;
- }
-
- if(JS_GetStringLength(val.toString()) == 0) {
- str = "";
- return true;
- }
-
- std::string conv = js_to_string(cx, val);
- if(!conv.size()) {
- return false;
- }
-
- str = conv;
- return true;
-}
-
-JSString*
-string_to_js(JSContext* cx, const std::string& raw)
-{
- JS::UTF8Chars utf8(raw.c_str(), raw.size());
- JS::UniqueTwoByteChars utf16;
- size_t len;
-
- utf16.reset(JS::UTF8CharsToNewTwoByteCharsZ(cx, utf8, &len).get());
- if(!utf16) {
- return nullptr;
- }
-
- JSString* ret = JS_NewUCString(cx, utf16.get(), len);
-
- if(ret) {
- // JS_NewUCString took ownership on success. We shift
- // the resulting pointer into Unused to silence the
- // compiler warning.
- mozilla::Unused << utf16.release();
- }
-
- return ret;
-}
-
-size_t
-couch_readfile(const char* file, char** outbuf_p)
-{
- FILE* fp;
- char fbuf[16384];
- char *buf = NULL;
- char* tmp;
- size_t nread = 0;
- size_t buflen = 0;
-
- if(strcmp(file, "-") == 0) {
- fp = stdin;
- } else {
- fp = fopen(file, "r");
- if(fp == NULL) {
- fprintf(stderr, "Failed to read file: %s\n", file);
- exit(3);
- }
- }
-
- while((nread = fread(fbuf, 1, 16384, fp)) > 0) {
- if(buf == NULL) {
- buf = new char[nread + 1];
- if(buf == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(3);
- }
- memcpy(buf, fbuf, nread);
- } else {
- tmp = new char[buflen + nread + 1];
- if(tmp == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(3);
- }
- memcpy(tmp, buf, buflen);
- memcpy(tmp+buflen, fbuf, nread);
- delete [] buf;
- buf = tmp;
- }
- buflen += nread;
- buf[buflen] = '\0';
- }
- *outbuf_p = buf;
- return buflen ;
-}
-
-couch_args*
-couch_parse_args(int argc, const char* argv[])
-{
- couch_args* args;
- int i = 1;
-
- args = new couch_args();
- if(args == NULL)
- return NULL;
-
- args->eval = 0;
- args->stack_size = 64L * 1024L * 1024L;
- args->scripts = nullptr;
-
- while(i < argc) {
- if(strcmp("-h", argv[i]) == 0) {
- DISPLAY_USAGE;
- exit(0);
- } else if(strcmp("-V", argv[i]) == 0) {
- DISPLAY_VERSION;
- exit(0);
- } else if(strcmp("-S", argv[i]) == 0) {
- args->stack_size = atoi(argv[++i]);
- if(args->stack_size <= 0) {
- fprintf(stderr, "Invalid stack size.\n");
- exit(2);
- }
- } else if(strcmp("--eval", argv[i]) == 0) {
- args->eval = 1;
- } else if(strcmp("--", argv[i]) == 0) {
- i++;
- break;
- } else {
- break;
- }
- i++;
- }
-
- if(i >= argc) {
- DISPLAY_USAGE;
- exit(3);
- }
- args->scripts = argv + i;
-
- return args;
-}
-
-
-int
-couch_fgets(char* buf, int size, FILE* fp)
-{
- int n, i, c;
-
- if(size <= 0) return -1;
- n = size - 1;
-
- for(i = 0; i < n && (c = getc(fp)) != EOF; i++) {
- buf[i] = c;
- if(c == '\n') {
- i++;
- break;
- }
- }
-
- buf[i] = '\0';
- return i;
-}
-
-
-JSString*
-couch_readline(JSContext* cx, FILE* fp)
-{
- JSString* str;
- char* bytes = NULL;
- char* tmp = NULL;
- size_t used = 0;
- size_t byteslen = 256;
- size_t oldbyteslen = 256;
- size_t readlen = 0;
-
- bytes = static_cast<char*>(JS_malloc(cx, byteslen));
- if(bytes == NULL) return NULL;
-
- while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) {
- used += readlen;
-
- if(bytes[used-1] == '\n') {
- bytes[used-1] = '\0';
- break;
- }
-
- // Double our buffer and read more.
- oldbyteslen = byteslen;
- byteslen *= 2;
- tmp = static_cast<char*>(JS_realloc(cx, bytes, oldbyteslen, byteslen));
- if(!tmp) {
- JS_free(cx, bytes);
- return NULL;
- }
-
- bytes = tmp;
- }
-
- // Treat empty strings specially
- if(used == 0) {
- JS_free(cx, bytes);
- return JS_NewStringCopyZ(cx, nullptr);
- }
-
- // Shrink the buffer to the actual data size
- tmp = static_cast<char*>(JS_realloc(cx, bytes, byteslen, used));
- if(!tmp) {
- JS_free(cx, bytes);
- return NULL;
- }
- bytes = tmp;
- byteslen = used;
-
- str = string_to_js(cx, std::string(tmp));
- JS_free(cx, bytes);
- return str;
-}
-
-
-void
-couch_print(JSContext* cx, JS::HandleValue obj, bool use_stderr)
-{
- FILE* stream = stdout;
-
- if(use_stderr) {
- stream = stderr;
- }
-
- std::string val = js_to_string(cx, obj);
- fprintf(stream, "%s\n", val.c_str());
- fflush(stream);
-}
-
-
-void
-couch_error(JSContext* cx, JSErrorReport* report)
-{
- if(!report) {
- return;
- }
-
- if(JSREPORT_IS_WARNING(report->flags)) {
- return;
- }
-
- std::ostringstream msg;
- msg << "error: " << report->message().c_str();
-
- mozilla::Maybe<JSAutoCompartment> ac;
- JS::RootedValue exc(cx);
- JS::RootedObject exc_obj(cx);
- JS::RootedObject stack_obj(cx);
- JS::RootedString stack_str(cx);
- JS::RootedValue stack_val(cx);
-
- if(!JS_GetPendingException(cx, &exc)) {
- goto done;
- }
-
- // Clear the exception before making any further JS calls, or the result
- // is infinite, recursive error report generation.
- JS_ClearPendingException(cx);
-
- exc_obj.set(exc.toObjectOrNull());
- stack_obj.set(JS::ExceptionStackOrNull(exc_obj));
-
- if(!stack_obj) {
- // Compilation errors don't have a stack
-
- msg << " at ";
-
- if(report->filename) {
- msg << report->filename;
- } else {
- msg << "<unknown>";
- }
-
- if(report->lineno) {
- msg << ':' << report->lineno << ':' << report->column;
- }
-
- goto done;
- }
-
- if(!JS::BuildStackString(cx, stack_obj, &stack_str, 2)) {
- goto done;
- }
-
- stack_val.set(JS::StringValue(stack_str));
- msg << std::endl << std::endl << js_to_string(cx, stack_val).c_str();
-
-done:
- msg << std::endl;
- fprintf(stderr, "%s", msg.str().c_str());
-}
-
-
-void
-couch_oom(JSContext* cx, void* data)
-{
- fprintf(stderr, "out of memory\n");
- exit(1);
-}
-
-
-bool
-couch_load_funcs(JSContext* cx, JS::HandleObject obj, JSFunctionSpec* funcs)
-{
- JSFunctionSpec* f;
- for(f = funcs; f->name != NULL; f++) {
- if(!JS_DefineFunction(cx, obj, f->name, f->call.op, f->nargs, f->flags)) {
- fprintf(stderr, "Failed to create function: %s\n", f->name);
- return false;
- }
- }
- return true;
-}
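
For orientation, a minimal sketch of driving couch_readfile() as implemented above; the file name is hypothetical, and the caller owns the new[]-allocated buffer:

    // Sketch only: read a script file and release the buffer couch_readfile() allocates.
    char* source = NULL;
    size_t len = couch_readfile("map.js", &source);  // exits the process on I/O failure
    if (len > 0) {
        // hand source to the JS engine here ...
        delete [] source;                            // buffer was allocated with new[]
    }
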
diff --git a/src/couch/priv/couch_js/60/util.h b/src/couch/priv/couch_js/60/util.h
deleted file mode 100644
index 35882a614..000000000
--- a/src/couch/priv/couch_js/60/util.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_UTIL_H
-#define COUCHJS_UTIL_H
-
-#include <jsapi.h>
-
-typedef struct {
- int eval;
- int stack_size;
- const char** scripts;
-} couch_args;
-
-std::string js_to_string(JSContext* cx, JS::HandleValue val);
-bool js_to_string(JSContext* cx, JS::HandleValue val, std::string& str);
-JSString* string_to_js(JSContext* cx, const std::string& s);
-
-couch_args* couch_parse_args(int argc, const char* argv[]);
-int couch_fgets(char* buf, int size, FILE* fp);
-JSString* couch_readline(JSContext* cx, FILE* fp);
-size_t couch_readfile(const char* file, char** outbuf_p);
-void couch_print(JSContext* cx, JS::HandleValue str, bool use_stderr);
-void couch_error(JSContext* cx, JSErrorReport* report);
-void couch_oom(JSContext* cx, void* data);
-bool couch_load_funcs(JSContext* cx, JS::HandleObject obj, JSFunctionSpec* funcs);
-
-
-#endif // Included util.h
diff --git a/src/couch/priv/couch_js/68/help.h b/src/couch/priv/couch_js/68/help.h
deleted file mode 100644
index 7c7550cc2..000000000
--- a/src/couch/priv/couch_js/68/help.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_HELP_H
-#define COUCHJS_HELP_H
-
-#include "config.h"
-
-static const char VERSION_TEMPLATE[] =
- "%s - %s (SpiderMonkey 68)\n"
- "\n"
- "Licensed under the Apache License, Version 2.0 (the \"License\"); you may "
- "not use\n"
- "this file except in compliance with the License. You may obtain a copy of"
- "the\n"
- "License at\n"
- "\n"
- " http://www.apache.org/licenses/LICENSE-2.0\n"
- "\n"
- "Unless required by applicable law or agreed to in writing, software "
- "distributed\n"
- "under the License is distributed on an \"AS IS\" BASIS, WITHOUT "
- "WARRANTIES OR\n"
- "CONDITIONS OF ANY KIND, either express or implied. See the License "
- "for the\n"
- "specific language governing permissions and limitations under the "
- "License.\n";
-
-static const char USAGE_TEMPLATE[] =
- "Usage: %s [FILE]\n"
- "\n"
- "The %s command runs the %s JavaScript interpreter.\n"
- "\n"
- "The exit status is 0 for success or 1 for failure.\n"
- "\n"
- "Options:\n"
- "\n"
- " -h display a short help message and exit\n"
- " -V display version information and exit\n"
- " -S SIZE specify that the runtime should allow at\n"
- " most SIZE bytes of memory to be allocated\n"
- " default is 64 MiB\n"
- " --eval Enable runtime code evaluation (dangerous!)\n"
- "\n"
- "Report bugs at <%s>.\n";
-
-#define BASENAME COUCHJS_NAME
-
-#define couch_version(basename) \
- fprintf( \
- stdout, \
- VERSION_TEMPLATE, \
- basename, \
- PACKAGE_STRING)
-
-#define DISPLAY_VERSION couch_version(BASENAME)
-
-
-#define couch_usage(basename) \
- fprintf( \
- stdout, \
- USAGE_TEMPLATE, \
- basename, \
- basename, \
- PACKAGE_NAME, \
- PACKAGE_BUGREPORT)
-
-#define DISPLAY_USAGE couch_usage(BASENAME)
-
-#endif // Included help.h
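
For orientation, a minimal sketch of how couch_parse_args() (declared in util.h) consumes the options documented above, with made-up argument values:

    // Sketch only: a hypothetical couchjs invocation fed to couch_parse_args().
    const char* argv[] = {"couchjs", "-S", "134217728", "--eval", "main.js", NULL};
    couch_args* args = couch_parse_args(5, argv);
    // Result: args->stack_size == 134217728, args->eval == 1,
    // and args->scripts points at {"main.js", NULL}.
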
diff --git a/src/couch/priv/couch_js/68/main.cpp b/src/couch/priv/couch_js/68/main.cpp
deleted file mode 100644
index bb62d16ca..000000000
--- a/src/couch/priv/couch_js/68/main.cpp
+++ /dev/null
@@ -1,337 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#ifdef XP_WIN
-#define NOMINMAX
-#include <windows.h>
-#else
-#include <unistd.h>
-#endif
-
-#include <jsapi.h>
-#include <js/CompilationAndEvaluation.h>
-#include <js/Conversions.h>
-#include <js/Initialization.h>
-#include <js/SourceText.h>
-#include <js/Warnings.h>
-#include <js/Wrapper.h>
-
-#include "config.h"
-#include "util.h"
-
-static bool enableSharedMemory = true;
-
-static JSClassOps global_ops = {
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- JS_GlobalObjectTraceHook
-};
-
-/* The class of the global object. */
-static JSClass global_class = {
- "global",
- JSCLASS_GLOBAL_FLAGS,
- &global_ops
-};
-
-static JSObject*
-NewSandbox(JSContext* cx, bool lazy)
-{
- JS::RealmOptions options;
- options.creationOptions().setSharedMemoryAndAtomicsEnabled(enableSharedMemory);
- options.creationOptions().setNewCompartmentAndZone();
- JS::RootedObject obj(cx, JS_NewGlobalObject(cx, &global_class, nullptr,
- JS::DontFireOnNewGlobalHook, options));
- if (!obj)
- return nullptr;
-
- {
- JSAutoRealm ac(cx, obj);
- if (!lazy && !JS::InitRealmStandardClasses(cx))
- return nullptr;
-
- JS::RootedValue value(cx, JS::BooleanValue(lazy));
- if (!JS_DefineProperty(cx, obj, "lazy", value, JSPROP_PERMANENT | JSPROP_READONLY))
- return nullptr;
-
- JS_FireOnNewGlobalObject(cx, obj);
- }
-
- if (!JS_WrapObject(cx, &obj))
- return nullptr;
- return obj;
-}
-
-static bool
-evalcx(JSContext *cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- bool ret = false;
-
- JS::RootedString str(cx, args[0].toString());
- if (!str)
- return false;
-
- JS::RootedObject sandbox(cx);
- if (args.hasDefined(1)) {
- sandbox = JS::ToObject(cx, args[1]);
- if (!sandbox)
- return false;
- }
-
- if (!sandbox) {
- sandbox = NewSandbox(cx, false);
- if (!sandbox)
- return false;
- }
-
- JS::AutoStableStringChars strChars(cx);
- if (!strChars.initTwoByte(cx, str))
- return false;
-
- mozilla::Range<const char16_t> chars = strChars.twoByteRange();
- JS::SourceText<char16_t> srcBuf;
- if (!srcBuf.init(cx, chars.begin().get(), chars.length(),
- JS::SourceOwnership::Borrowed)) {
- return false;
- }
-
- if(srcBuf.length() == 0) {
- args.rval().setObject(*sandbox);
- } else {
- mozilla::Maybe<JSAutoRealm> ar;
- unsigned flags;
- JSObject* unwrapped = UncheckedUnwrap(sandbox, true, &flags);
- if (flags & js::Wrapper::CROSS_COMPARTMENT) {
- sandbox = unwrapped;
- ar.emplace(cx, sandbox);
- }
-
- JS::CompileOptions opts(cx);
- JS::RootedValue rval(cx);
- opts.setFileAndLine("<unknown>", 1);
-
- if (!JS::Evaluate(cx, opts, srcBuf, args.rval())) {
- return false;
- }
- }
- ret = true;
- if (!JS_WrapValue(cx, args.rval()))
- return false;
-
- return ret;
-}
-
-
-static bool
-gc(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- JS_GC(cx);
- args.rval().setUndefined();
- return true;
-}
-
-
-static bool
-print(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- bool use_stderr = false;
- if(argc > 1 && args[1].isTrue()) {
- use_stderr = true;
- }
-
- if(!args[0].isString()) {
- JS_ReportErrorUTF8(cx, "Unable to print non-string value.");
- return false;
- }
-
- couch_print(cx, args[0], use_stderr);
-
- args.rval().setUndefined();
- return true;
-}
-
-
-static bool
-quit(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- int exit_code = args[0].toInt32();
- exit(exit_code);
-}
-
-
-static bool
-readline(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JSString* line;
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- /* GC Occasionally */
- JS_MaybeGC(cx);
-
- line = couch_readline(cx, stdin);
- if(line == NULL) return false;
-
- // Return the JSString* directly; in the past this returned a JSValue.
- args.rval().setString(line);
- return true;
-}
-
-
-static bool
-seal(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- JS::RootedObject target(cx);
- target = JS::ToObject(cx, args[0]);
- if (!target) {
- args.rval().setUndefined();
- return true;
- }
- bool deep = false;
- deep = args[1].toBoolean();
- bool ret = deep ? JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target);
- args.rval().setUndefined();
- return ret;
-}
-
-
-static JSFunctionSpec global_functions[] = {
- JS_FN("evalcx", evalcx, 0, 0),
- JS_FN("gc", gc, 0, 0),
- JS_FN("print", print, 0, 0),
- JS_FN("quit", quit, 0, 0),
- JS_FN("readline", readline, 0, 0),
- JS_FN("seal", seal, 0, 0),
- JS_FS_END
-};
-
-
-static bool
-csp_allows(JSContext* cx, JS::HandleValue code)
-{
- couch_args* args = static_cast<couch_args*>(JS_GetContextPrivate(cx));
- if(args->eval) {
- return true;
- } else {
- return false;
- }
-}
-
-
-static JSSecurityCallbacks security_callbacks = {
- csp_allows,
- nullptr
-};
-
-
-int
-main(int argc, const char* argv[])
-{
- JSContext* cx = NULL;
- int i;
-
- couch_args* args = couch_parse_args(argc, argv);
-
- JS_Init();
- cx = JS_NewContext(args->stack_size, 8L * 1024L);
- if(cx == NULL)
- return 1;
-
- JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_BASELINE_ENABLE, 0);
- JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_ION_ENABLE, 0);
-
- if (!JS::InitSelfHostedCode(cx))
- return 1;
-
- JS::SetWarningReporter(cx, couch_error);
- JS::SetOutOfMemoryCallback(cx, couch_oom, NULL);
- JS_SetContextPrivate(cx, args);
- JS_SetSecurityCallbacks(cx, &security_callbacks);
-
- JS::RealmOptions options;
- JS::RootedObject global(cx, JS_NewGlobalObject(cx, &global_class, nullptr,
- JS::FireOnNewGlobalHook, options));
- if (!global)
- return 1;
-
- JSAutoRealm ar(cx, global);
-
- if(!JS::InitRealmStandardClasses(cx))
- return 1;
-
- if(couch_load_funcs(cx, global, global_functions) != true)
- return 1;
-
- for(i = 0 ; args->scripts[i] ; i++) {
- const char* filename = args->scripts[i];
-
- // Compile and run
- JS::CompileOptions options(cx);
- options.setFileAndLine(filename, 1);
- JS::RootedScript script(cx);
- FILE* fp;
-
- fp = fopen(args->scripts[i], "r");
- if(fp == NULL) {
- fprintf(stderr, "Failed to read file: %s\n", filename);
- return 3;
- }
- script = JS::CompileUtf8File(cx, options, fp);
- fclose(fp);
- if (!script) {
- JS::RootedValue exc(cx);
- if(!JS_GetPendingException(cx, &exc)) {
- fprintf(stderr, "Failed to compile file: %s\n", filename);
- } else {
- JS::RootedObject exc_obj(cx, &exc.toObject());
- JSErrorReport* report = JS_ErrorFromException(cx, exc_obj);
- couch_error(cx, report);
- }
- return 1;
- }
-
- JS::RootedValue result(cx);
- if(JS_ExecuteScript(cx, script, &result) != true) {
- JS::RootedValue exc(cx);
- if(!JS_GetPendingException(cx, &exc)) {
- fprintf(stderr, "Failed to execute script.\n");
- } else {
- JS::RootedObject exc_obj(cx, &exc.toObject());
- JSErrorReport* report = JS_ErrorFromException(cx, exc_obj);
- couch_error(cx, report);
- }
- }
-
- // Give the GC a chance to run.
- JS_MaybeGC(cx);
- }
-
- return 0;
-}
diff --git a/src/couch/priv/couch_js/68/util.cpp b/src/couch/priv/couch_js/68/util.cpp
deleted file mode 100644
index 6e6105df5..000000000
--- a/src/couch/priv/couch_js/68/util.cpp
+++ /dev/null
@@ -1,348 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <sstream>
-
-#include <jsapi.h>
-#include <jsfriendapi.h>
-#include <js/CharacterEncoding.h>
-#include <js/Conversions.h>
-#include <js/Initialization.h>
-#include <js/MemoryFunctions.h>
-#include <js/RegExp.h>
-
-#include "help.h"
-#include "util.h"
-
-std::string
-js_to_string(JSContext* cx, JS::HandleValue val)
-{
- JS::AutoSaveExceptionState exc_state(cx);
- JS::RootedString sval(cx);
- sval = val.toString();
-
- JS::UniqueChars chars(JS_EncodeStringToUTF8(cx, sval));
- if(!chars) {
- JS_ClearPendingException(cx);
- return std::string();
- }
-
- return chars.get();
-}
-
-bool
-js_to_string(JSContext* cx, JS::HandleValue val, std::string& str)
-{
- if(!val.isString()) {
- return false;
- }
-
- if(JS_GetStringLength(val.toString()) == 0) {
- str = "";
- return true;
- }
-
- std::string conv = js_to_string(cx, val);
- if(!conv.size()) {
- return false;
- }
-
- str = conv;
- return true;
-}
-
-JSString*
-string_to_js(JSContext* cx, const std::string& raw)
-{
- JS::UTF8Chars utf8(raw.c_str(), raw.size());
- JS::UniqueTwoByteChars utf16;
- size_t len;
-
- utf16.reset(JS::UTF8CharsToNewTwoByteCharsZ(cx, utf8, &len, js::MallocArena).get());
- if(!utf16) {
- return nullptr;
- }
-
- return JS_NewUCString(cx, std::move(utf16), len);
-}
-
-size_t
-couch_readfile(const char* file, char** outbuf_p)
-{
- FILE* fp;
- char fbuf[16384];
- char *buf = NULL;
- char* tmp;
- size_t nread = 0;
- size_t buflen = 0;
-
- if(strcmp(file, "-") == 0) {
- fp = stdin;
- } else {
- fp = fopen(file, "r");
- if(fp == NULL) {
- fprintf(stderr, "Failed to read file: %s\n", file);
- exit(3);
- }
- }
-
- while((nread = fread(fbuf, 1, 16384, fp)) > 0) {
- if(buf == NULL) {
- buf = new char[nread + 1];
- if(buf == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(3);
- }
- memcpy(buf, fbuf, nread);
- } else {
- tmp = new char[buflen + nread + 1];
- if(tmp == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(3);
- }
- memcpy(tmp, buf, buflen);
- memcpy(tmp+buflen, fbuf, nread);
- delete [] buf;
- buf = tmp;
- }
- buflen += nread;
- buf[buflen] = '\0';
- }
- *outbuf_p = buf;
- return buflen;
-}
-
-couch_args*
-couch_parse_args(int argc, const char* argv[])
-{
- couch_args* args;
- int i = 1;
-
- args = new couch_args();
- if(args == NULL)
- return NULL;
-
- args->eval = 0;
- args->stack_size = 64L * 1024L * 1024L;
- args->scripts = nullptr;
-
- while(i < argc) {
- if(strcmp("-h", argv[i]) == 0) {
- DISPLAY_USAGE;
- exit(0);
- } else if(strcmp("-V", argv[i]) == 0) {
- DISPLAY_VERSION;
- exit(0);
- } else if(strcmp("-S", argv[i]) == 0) {
- args->stack_size = atoi(argv[++i]);
- if(args->stack_size <= 0) {
- fprintf(stderr, "Invalid stack size.\n");
- exit(2);
- }
- } else if(strcmp("--eval", argv[i]) == 0) {
- args->eval = 1;
- } else if(strcmp("--", argv[i]) == 0) {
- i++;
- break;
- } else {
- break;
- }
- i++;
- }
-
- if(i >= argc) {
- DISPLAY_USAGE;
- exit(3);
- }
- args->scripts = argv + i;
-
- return args;
-}
-
-
-int
-couch_fgets(char* buf, int size, FILE* fp)
-{
- int n, i, c;
-
- if(size <= 0) return -1;
- n = size - 1;
-
- for(i = 0; i < n && (c = getc(fp)) != EOF; i++) {
- buf[i] = c;
- if(c == '\n') {
- i++;
- break;
- }
- }
-
- buf[i] = '\0';
- return i;
-}
-
-
-JSString*
-couch_readline(JSContext* cx, FILE* fp)
-{
- JSString* str;
- char* bytes = NULL;
- char* tmp = NULL;
- size_t used = 0;
- size_t byteslen = 256;
- size_t oldbyteslen = 256;
- size_t readlen = 0;
-
- bytes = static_cast<char*>(JS_malloc(cx, byteslen));
- if(bytes == NULL) return NULL;
-
- while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) {
- used += readlen;
-
- if(bytes[used-1] == '\n') {
- bytes[used-1] = '\0';
- break;
- }
-
- // Double our buffer and read more.
- oldbyteslen = byteslen;
- byteslen *= 2;
- tmp = static_cast<char*>(JS_realloc(cx, bytes, oldbyteslen, byteslen));
- if(!tmp) {
- JS_free(cx, bytes);
- return NULL;
- }
-
- bytes = tmp;
- }
-
- // Treat empty strings specially
- if(used == 0) {
- JS_free(cx, bytes);
- return JS_NewStringCopyZ(cx, nullptr);
- }
-
- // Shrink the buffer to the actual data size
- tmp = static_cast<char*>(JS_realloc(cx, bytes, byteslen, used));
- if(!tmp) {
- JS_free(cx, bytes);
- return NULL;
- }
- bytes = tmp;
- byteslen = used;
-
- str = string_to_js(cx, std::string(tmp));
- JS_free(cx, bytes);
- return str;
-}
-
-
-void
-couch_print(JSContext* cx, JS::HandleValue obj, bool use_stderr)
-{
- FILE *stream = stdout;
-
- if (use_stderr) {
- stream = stderr;
- }
- std::string val = js_to_string(cx, obj);
- fprintf(stream, "%s\n", val.c_str());
- fflush(stream);
-}
-
-
-void
-couch_error(JSContext* cx, JSErrorReport* report)
-{
- if(!report) {
- return;
- }
-
- if(JSREPORT_IS_WARNING(report->flags)) {
- return;
- }
-
- std::ostringstream msg;
- msg << "error: " << report->message().c_str();
-
- mozilla::Maybe<JSAutoRealm> ar;
- JS::RootedValue exc(cx);
- JS::RootedObject exc_obj(cx);
- JS::RootedObject stack_obj(cx);
- JS::RootedString stack_str(cx);
- JS::RootedValue stack_val(cx);
- JSPrincipals* principals = GetRealmPrincipals(js::GetContextRealm(cx));
-
- if(!JS_GetPendingException(cx, &exc)) {
- goto done;
- }
-
- // Clear the exception before making any further JS calls, or the result
- // is infinite, recursive error report generation.
- JS_ClearPendingException(cx);
-
- exc_obj.set(exc.toObjectOrNull());
- stack_obj.set(JS::ExceptionStackOrNull(exc_obj));
-
- if(!stack_obj) {
- // Compilation errors don't have a stack
-
- msg << " at ";
-
- if(report->filename) {
- msg << report->filename;
- } else {
- msg << "<unknown>";
- }
-
- if(report->lineno) {
- msg << ':' << report->lineno << ':' << report->column;
- }
-
- goto done;
- }
-
- if(!JS::BuildStackString(cx, principals, stack_obj, &stack_str, 2)) {
- goto done;
- }
-
- stack_val.set(JS::StringValue(stack_str));
- msg << std::endl << std::endl << js_to_string(cx, stack_val).c_str();
-
-done:
- msg << std::endl;
- fprintf(stderr, "%s", msg.str().c_str());
-}
-
-
-void
-couch_oom(JSContext* cx, void* data)
-{
- fprintf(stderr, "out of memory\n");
- exit(1);
-}
-
-
-bool
-couch_load_funcs(JSContext* cx, JS::HandleObject obj, JSFunctionSpec* funcs)
-{
- JSFunctionSpec* f;
- for(f = funcs; f->name; f++) {
- if(!JS_DefineFunction(cx, obj, f->name.string(), f->call.op, f->nargs, f->flags)) {
- fprintf(stderr, "Failed to create function: %s\n", f->name.string());
- return false;
- }
- }
- return true;
-}
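
For orientation, a minimal sketch of a round trip through the conversion helpers above, assuming a live JSContext* cx; the function name is hypothetical:

    // Sketch only: echo a JS string value via the UTF-8 helpers above.
    static bool
    echo_value(JSContext* cx, JS::HandleValue val)
    {
        std::string utf8;
        if(!js_to_string(cx, val, utf8))                     // rejects non-string values
            return false;
        JS::RootedString again(cx, string_to_js(cx, utf8));  // UTF-8 back to a JSString
        if(!again)
            return false;
        fprintf(stdout, "%s\n", utf8.c_str());
        return true;
    }
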
diff --git a/src/couch/priv/couch_js/68/util.h b/src/couch/priv/couch_js/68/util.h
deleted file mode 100644
index bd7843eb9..000000000
--- a/src/couch/priv/couch_js/68/util.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_UTIL_H
-#define COUCHJS_UTIL_H
-
-#include <jsapi.h>
-
-typedef struct {
- int eval;
- int use_http;
- int use_test_funs;
- int stack_size;
- const char** scripts;
- const char* uri_file;
- JSString* uri;
-} couch_args;
-
-std::string js_to_string(JSContext* cx, JS::HandleValue val);
-bool js_to_string(JSContext* cx, JS::HandleValue val, std::string& str);
-JSString* string_to_js(JSContext* cx, const std::string& s);
-
-couch_args* couch_parse_args(int argc, const char* argv[]);
-int couch_fgets(char* buf, int size, FILE* fp);
-JSString* couch_readline(JSContext* cx, FILE* fp);
-size_t couch_readfile(const char* file, char** outbuf_p);
-void couch_print(JSContext* cx, JS::HandleValue str, bool use_stderr);
-void couch_error(JSContext* cx, JSErrorReport* report);
-void couch_oom(JSContext* cx, void* data);
-bool couch_load_funcs(JSContext* cx, JS::HandleObject obj, JSFunctionSpec* funcs);
-
-#endif // Included util.h
diff --git a/src/couch/priv/couch_js/86/help.h b/src/couch/priv/couch_js/86/help.h
deleted file mode 100644
index 6a23172af..000000000
--- a/src/couch/priv/couch_js/86/help.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_HELP_H
-#define COUCHJS_HELP_H
-
-#include "config.h"
-
-static const char VERSION_TEMPLATE[] =
- "%s - %s (SpiderMonkey 86)\n"
- "\n"
- "Licensed under the Apache License, Version 2.0 (the \"License\"); you may "
- "not use\n"
- "this file except in compliance with the License. You may obtain a copy of"
- "the\n"
- "License at\n"
- "\n"
- " http://www.apache.org/licenses/LICENSE-2.0\n"
- "\n"
- "Unless required by applicable law or agreed to in writing, software "
- "distributed\n"
- "under the License is distributed on an \"AS IS\" BASIS, WITHOUT "
- "WARRANTIES OR\n"
- "CONDITIONS OF ANY KIND, either express or implied. See the License "
- "for the\n"
- "specific language governing permissions and limitations under the "
- "License.\n";
-
-static const char USAGE_TEMPLATE[] =
- "Usage: %s [FILE]\n"
- "\n"
- "The %s command runs the %s JavaScript interpreter.\n"
- "\n"
- "The exit status is 0 for success or 1 for failure.\n"
- "\n"
- "Options:\n"
- "\n"
- " -h display a short help message and exit\n"
- " -V display version information and exit\n"
- " -S SIZE specify that the runtime should allow at\n"
- " most SIZE bytes of memory to be allocated\n"
- " default is 64 MiB\n"
- " --eval Enable runtime code evaluation (dangerous!)\n"
- "\n"
- "Report bugs at <%s>.\n";
-
-#define BASENAME COUCHJS_NAME
-
-#define couch_version(basename) \
- fprintf( \
- stdout, \
- VERSION_TEMPLATE, \
- basename, \
- PACKAGE_STRING)
-
-#define DISPLAY_VERSION couch_version(BASENAME)
-
-
-#define couch_usage(basename) \
- fprintf( \
- stdout, \
- USAGE_TEMPLATE, \
- basename, \
- basename, \
- PACKAGE_NAME, \
- PACKAGE_BUGREPORT)
-
-#define DISPLAY_USAGE couch_usage(BASENAME)
-
-#endif // Included help.h
diff --git a/src/couch/priv/couch_js/86/main.cpp b/src/couch/priv/couch_js/86/main.cpp
deleted file mode 100644
index 3cb4b82c4..000000000
--- a/src/couch/priv/couch_js/86/main.cpp
+++ /dev/null
@@ -1,344 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#ifdef XP_WIN
-#define NOMINMAX
-#include <windows.h>
-#else
-#include <unistd.h>
-#endif
-
-#include <jsapi.h>
-#include <js/CompilationAndEvaluation.h>
-#include <js/Conversions.h>
-#include <js/Initialization.h>
-#include <js/SourceText.h>
-#include <js/StableStringChars.h>
-#include <js/Warnings.h>
-#include <js/Wrapper.h>
-
-#include "config.h"
-#include "util.h"
-
-static bool enableSharedMemory = true;
-static bool enableToSource = true;
-
-static JSClassOps global_ops = {
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- JS_GlobalObjectTraceHook
-};
-
-/* The class of the global object. */
-static JSClass global_class = {
- "global",
- JSCLASS_GLOBAL_FLAGS,
- &global_ops
-};
-
-static JSObject*
-NewSandbox(JSContext* cx, bool lazy)
-{
- JS::RealmOptions options;
- options.creationOptions().setSharedMemoryAndAtomicsEnabled(enableSharedMemory);
- options.creationOptions().setNewCompartmentAndZone();
- // we need this in the query server error handling
- options.creationOptions().setToSourceEnabled(enableToSource);
- JS::RootedObject obj(cx, JS_NewGlobalObject(cx, &global_class, nullptr,
- JS::DontFireOnNewGlobalHook, options));
- if (!obj)
- return nullptr;
-
- {
- JSAutoRealm ac(cx, obj);
- if (!lazy && !JS::InitRealmStandardClasses(cx))
- return nullptr;
-
- JS::RootedValue value(cx, JS::BooleanValue(lazy));
- if (!JS_DefineProperty(cx, obj, "lazy", value, JSPROP_PERMANENT | JSPROP_READONLY))
- return nullptr;
-
- JS_FireOnNewGlobalObject(cx, obj);
- }
-
- if (!JS_WrapObject(cx, &obj))
- return nullptr;
- return obj;
-}
-
-static bool
-evalcx(JSContext *cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- bool ret = false;
-
- JS::RootedString str(cx, args[0].toString());
- if (!str)
- return false;
-
- JS::RootedObject sandbox(cx);
- if (args.hasDefined(1)) {
- sandbox = JS::ToObject(cx, args[1]);
- if (!sandbox)
- return false;
- }
-
- if (!sandbox) {
- sandbox = NewSandbox(cx, false);
- if (!sandbox)
- return false;
- }
-
- JS::AutoStableStringChars strChars(cx);
- if (!strChars.initTwoByte(cx, str))
- return false;
-
- mozilla::Range<const char16_t> chars = strChars.twoByteRange();
- JS::SourceText<char16_t> srcBuf;
- if (!srcBuf.init(cx, chars.begin().get(), chars.length(),
- JS::SourceOwnership::Borrowed)) {
- return false;
- }
-
- if(srcBuf.length() == 0) {
- args.rval().setObject(*sandbox);
- } else {
- mozilla::Maybe<JSAutoRealm> ar;
- unsigned flags;
- JSObject* unwrapped = UncheckedUnwrap(sandbox, true, &flags);
- if (flags & js::Wrapper::CROSS_COMPARTMENT) {
- sandbox = unwrapped;
- ar.emplace(cx, sandbox);
- }
-
- JS::CompileOptions opts(cx);
- JS::RootedValue rval(cx);
- opts.setFileAndLine("<unknown>", 1);
-
- if (!JS::Evaluate(cx, opts, srcBuf, args.rval())) {
- return false;
- }
- }
- ret = true;
- if (!JS_WrapValue(cx, args.rval()))
- return false;
-
- return ret;
-}
-
-
-static bool
-gc(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- JS_GC(cx);
- args.rval().setUndefined();
- return true;
-}
-
-
-static bool
-print(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- bool use_stderr = false;
- if(argc > 1 && args[1].isTrue()) {
- use_stderr = true;
- }
-
- if(!args[0].isString()) {
- JS_ReportErrorUTF8(cx, "Unable to print non-string value.");
- return false;
- }
-
- couch_print(cx, args[0], use_stderr);
-
- args.rval().setUndefined();
- return true;
-}
-
-
-static bool
-quit(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- int exit_code = args[0].toInt32();
- JS_DestroyContext(cx);
- JS_ShutDown();
- exit(exit_code);
-}
-
-
-static bool
-readline(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JSString* line;
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
-
- /* GC Occasionally */
- JS_MaybeGC(cx);
-
- line = couch_readline(cx, stdin);
- if(line == NULL) return false;
-
- // Return the JSString* directly; in the past this returned a JSValue.
- args.rval().setString(line);
- return true;
-}
-
-
-static bool
-seal(JSContext* cx, unsigned int argc, JS::Value* vp)
-{
- JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
- JS::RootedObject target(cx);
- target = JS::ToObject(cx, args[0]);
- if (!target) {
- args.rval().setUndefined();
- return true;
- }
- bool deep = false;
- deep = args[1].toBoolean();
- bool ret = deep ? JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target);
- args.rval().setUndefined();
- return ret;
-}
-
-
-static JSFunctionSpec global_functions[] = {
- JS_FN("evalcx", evalcx, 0, 0),
- JS_FN("gc", gc, 0, 0),
- JS_FN("print", print, 0, 0),
- JS_FN("quit", quit, 0, 0),
- JS_FN("readline", readline, 0, 0),
- JS_FN("seal", seal, 0, 0),
- JS_FS_END
-};
-
-
-static bool
-csp_allows(JSContext* cx, JS::HandleString code)
-{
- couch_args* args = static_cast<couch_args*>(JS_GetContextPrivate(cx));
- if(args->eval) {
- return true;
- } else {
- return false;
- }
-}
-
-
-static JSSecurityCallbacks security_callbacks = {
- csp_allows,
- nullptr
-};
-
-int runWithContext(JSContext* cx, couch_args* args) {
- JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_BASELINE_ENABLE, 0);
- JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_ION_ENABLE, 0);
-
- if (!JS::InitSelfHostedCode(cx))
- return 1;
-
- JS::SetWarningReporter(cx, couch_error);
- JS::SetOutOfMemoryCallback(cx, couch_oom, NULL);
- JS_SetContextPrivate(cx, args);
- JS_SetSecurityCallbacks(cx, &security_callbacks);
-
- JS::RealmOptions options;
- // we need this in the query server error handling
- options.creationOptions().setToSourceEnabled(enableToSource);
- JS::RootedObject global(cx, JS_NewGlobalObject(cx, &global_class, nullptr,
- JS::FireOnNewGlobalHook, options));
- if (!global)
- return 1;
-
- JSAutoRealm ar(cx, global);
-
- if(!JS::InitRealmStandardClasses(cx))
- return 1;
-
- if(couch_load_funcs(cx, global, global_functions) != true)
- return 1;
-
- for(int i = 0 ; args->scripts[i] ; i++) {
- const char* filename = args->scripts[i];
-
- // Compile and run
- JS::CompileOptions options(cx);
- JS::RootedScript script(cx);
-
- script = JS::CompileUtf8Path(cx, options, filename);
- if (!script) {
- JS::RootedValue exc(cx);
- if(!JS_GetPendingException(cx, &exc)) {
- fprintf(stderr, "Failed to compile file: %s\n", filename);
- } else {
- JS::RootedObject exc_obj(cx, &exc.toObject());
- JSErrorReport* report = JS_ErrorFromException(cx, exc_obj);
- couch_error(cx, report);
- }
- return 1;
- }
-
- JS::RootedValue result(cx);
- if(JS_ExecuteScript(cx, script, &result) != true) {
- JS::RootedValue exc(cx);
- if(!JS_GetPendingException(cx, &exc)) {
- fprintf(stderr, "Failed to execute script.\n");
- } else {
- JS::RootedObject exc_obj(cx, &exc.toObject());
- JSErrorReport* report = JS_ErrorFromException(cx, exc_obj);
- couch_error(cx, report);
- }
- }
-
- // Give the GC a chance to run.
- JS_MaybeGC(cx);
- }
- return 0;
-}
-
-int
-main(int argc, const char* argv[])
-{
- JSContext* cx = NULL;
- int ret;
-
- couch_args* args = couch_parse_args(argc, argv);
-
- JS_Init();
- cx = JS_NewContext(args->stack_size);
- if(cx == NULL) {
- JS_ShutDown();
- return 1;
- }
- ret = runWithContext(cx, args);
- JS_DestroyContext(cx);
- JS_ShutDown();
-
- return ret;
-}
diff --git a/src/couch/priv/couch_js/86/util.cpp b/src/couch/priv/couch_js/86/util.cpp
deleted file mode 100644
index b61c76ad2..000000000
--- a/src/couch/priv/couch_js/86/util.cpp
+++ /dev/null
@@ -1,348 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <sstream>
-
-#include <jsapi.h>
-#include <jsfriendapi.h>
-#include <js/CharacterEncoding.h>
-#include <js/Conversions.h>
-#include <js/Initialization.h>
-#include <js/MemoryFunctions.h>
-#include <js/RegExp.h>
-
-#include "help.h"
-#include "util.h"
-
-std::string
-js_to_string(JSContext* cx, JS::HandleValue val)
-{
- JS::AutoSaveExceptionState exc_state(cx);
- JS::RootedString sval(cx);
- sval = val.toString();
-
- JS::UniqueChars chars(JS_EncodeStringToUTF8(cx, sval));
- if(!chars) {
- JS_ClearPendingException(cx);
- return std::string();
- }
-
- return chars.get();
-}
-
-bool
-js_to_string(JSContext* cx, JS::HandleValue val, std::string& str)
-{
- if(!val.isString()) {
- return false;
- }
-
- if(JS_GetStringLength(val.toString()) == 0) {
- str = "";
- return true;
- }
-
- std::string conv = js_to_string(cx, val);
- if(!conv.size()) {
- return false;
- }
-
- str = conv;
- return true;
-}
-
-JSString*
-string_to_js(JSContext* cx, const std::string& raw)
-{
- JS::UTF8Chars utf8(raw.c_str(), raw.size());
- JS::UniqueTwoByteChars utf16;
- size_t len;
-
- utf16.reset(JS::UTF8CharsToNewTwoByteCharsZ(cx, utf8, &len, js::MallocArena).get());
- if(!utf16) {
- return nullptr;
- }
-
- return JS_NewUCString(cx, std::move(utf16), len);
-}
-
-size_t
-couch_readfile(const char* file, char** outbuf_p)
-{
- FILE* fp;
- char fbuf[16384];
- char *buf = NULL;
- char* tmp;
- size_t nread = 0;
- size_t buflen = 0;
-
- if(strcmp(file, "-") == 0) {
- fp = stdin;
- } else {
- fp = fopen(file, "r");
- if(fp == NULL) {
- fprintf(stderr, "Failed to read file: %s\n", file);
- exit(3);
- }
- }
-
- while((nread = fread(fbuf, 1, 16384, fp)) > 0) {
- if(buf == NULL) {
- buf = new char[nread + 1];
- if(buf == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(3);
- }
- memcpy(buf, fbuf, nread);
- } else {
- tmp = new char[buflen + nread + 1];
- if(tmp == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(3);
- }
- memcpy(tmp, buf, buflen);
- memcpy(tmp+buflen, fbuf, nread);
- delete [] buf;
- buf = tmp;
- }
- buflen += nread;
- buf[buflen] = '\0';
- }
- *outbuf_p = buf;
- return buflen;
-}
-
-couch_args*
-couch_parse_args(int argc, const char* argv[])
-{
- couch_args* args;
- int i = 1;
-
- args = new couch_args();
- if(args == NULL)
- return NULL;
-
- args->eval = 0;
- args->stack_size = 64L * 1024L * 1024L;
- args->scripts = nullptr;
-
- while(i < argc) {
- if(strcmp("-h", argv[i]) == 0) {
- DISPLAY_USAGE;
- exit(0);
- } else if(strcmp("-V", argv[i]) == 0) {
- DISPLAY_VERSION;
- exit(0);
- } else if(strcmp("-S", argv[i]) == 0) {
- args->stack_size = atoi(argv[++i]);
- if(args->stack_size <= 0) {
- fprintf(stderr, "Invalid stack size.\n");
- exit(2);
- }
- } else if(strcmp("--eval", argv[i]) == 0) {
- args->eval = 1;
- } else if(strcmp("--", argv[i]) == 0) {
- i++;
- break;
- } else {
- break;
- }
- i++;
- }
-
- if(i >= argc) {
- DISPLAY_USAGE;
- exit(3);
- }
- args->scripts = argv + i;
-
- return args;
-}
-
-
-int
-couch_fgets(char* buf, int size, FILE* fp)
-{
- int n, i, c;
-
- if(size <= 0) return -1;
- n = size - 1;
-
- for(i = 0; i < n && (c = getc(fp)) != EOF; i++) {
- buf[i] = c;
- if(c == '\n') {
- i++;
- break;
- }
- }
-
- buf[i] = '\0';
- return i;
-}
-
-
-JSString*
-couch_readline(JSContext* cx, FILE* fp)
-{
- JSString* str;
- char* bytes = NULL;
- char* tmp = NULL;
- size_t used = 0;
- size_t byteslen = 256;
- size_t oldbyteslen = 256;
- size_t readlen = 0;
-
- bytes = static_cast<char*>(JS_malloc(cx, byteslen));
- if(bytes == NULL) return NULL;
-
- while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) {
- used += readlen;
-
- if(bytes[used-1] == '\n') {
- bytes[used-1] = '\0';
- break;
- }
-
- // Double our buffer and read more.
- oldbyteslen = byteslen;
- byteslen *= 2;
- tmp = static_cast<char*>(JS_realloc(cx, bytes, oldbyteslen, byteslen));
- if(!tmp) {
- JS_free(cx, bytes);
- return NULL;
- }
-
- bytes = tmp;
- }
-
- // Treat empty strings specially
- if(used == 0) {
- JS_free(cx, bytes);
- return JS_NewStringCopyZ(cx, nullptr);
- }
-
- // Shrink the buffer to the actual data size
- tmp = static_cast<char*>(JS_realloc(cx, bytes, byteslen, used));
- if(!tmp) {
- JS_free(cx, bytes);
- return NULL;
- }
- bytes = tmp;
- byteslen = used;
-
- str = string_to_js(cx, std::string(tmp));
- JS_free(cx, bytes);
- return str;
-}
-
-
-void
-couch_print(JSContext* cx, JS::HandleValue obj, bool use_stderr)
-{
- FILE *stream = stdout;
-
- if (use_stderr) {
- stream = stderr;
- }
- std::string val = js_to_string(cx, obj);
- fprintf(stream, "%s\n", val.c_str());
- fflush(stream);
-}
-
-
-void
-couch_error(JSContext* cx, JSErrorReport* report)
-{
- if(!report) {
- return;
- }
-
- if(report->isWarning()) {
- return;
- }
-
- std::ostringstream msg;
- msg << "error: " << report->message().c_str();
-
- mozilla::Maybe<JSAutoRealm> ar;
- JS::RootedValue exc(cx);
- JS::RootedObject exc_obj(cx);
- JS::RootedObject stack_obj(cx);
- JS::RootedString stack_str(cx);
- JS::RootedValue stack_val(cx);
- JSPrincipals* principals = GetRealmPrincipals(js::GetContextRealm(cx));
-
- if(!JS_GetPendingException(cx, &exc)) {
- goto done;
- }
-
- // Clear the exception before making any further JS calls, or the result
- // is infinite, recursive error report generation.
- JS_ClearPendingException(cx);
-
- exc_obj.set(exc.toObjectOrNull());
- stack_obj.set(JS::ExceptionStackOrNull(exc_obj));
-
- if(!stack_obj) {
- // Compilation errors don't have a stack
-
- msg << " at ";
-
- if(report->filename) {
- msg << report->filename;
- } else {
- msg << "<unknown>";
- }
-
- if(report->lineno) {
- msg << ':' << report->lineno << ':' << report->column;
- }
-
- goto done;
- }
-
- if(!JS::BuildStackString(cx, principals, stack_obj, &stack_str, 2)) {
- goto done;
- }
-
- stack_val.set(JS::StringValue(stack_str));
- msg << std::endl << std::endl << js_to_string(cx, stack_val).c_str();
-
-done:
- msg << std::endl;
- fprintf(stderr, "%s", msg.str().c_str());
-}
-
-
-void
-couch_oom(JSContext* cx, void* data)
-{
- fprintf(stderr, "out of memory\n");
- _Exit(1);
-}
-
-
-bool
-couch_load_funcs(JSContext* cx, JS::HandleObject obj, JSFunctionSpec* funcs)
-{
- JSFunctionSpec* f;
- for(f = funcs; f->name; f++) {
- if(!JS_DefineFunction(cx, obj, f->name.string(), f->call.op, f->nargs, f->flags)) {
- fprintf(stderr, "Failed to create function: %s\n", f->name.string());
- return false;
- }
- }
- return true;
-}
diff --git a/src/couch/priv/couch_js/86/util.h b/src/couch/priv/couch_js/86/util.h
deleted file mode 100644
index bd7843eb9..000000000
--- a/src/couch/priv/couch_js/86/util.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_UTIL_H
-#define COUCHJS_UTIL_H
-
-#include <jsapi.h>
-
-typedef struct {
- int eval;
- int use_http;
- int use_test_funs;
- int stack_size;
- const char** scripts;
- const char* uri_file;
- JSString* uri;
-} couch_args;
-
-std::string js_to_string(JSContext* cx, JS::HandleValue val);
-bool js_to_string(JSContext* cx, JS::HandleValue val, std::string& str);
-JSString* string_to_js(JSContext* cx, const std::string& s);
-
-couch_args* couch_parse_args(int argc, const char* argv[]);
-int couch_fgets(char* buf, int size, FILE* fp);
-JSString* couch_readline(JSContext* cx, FILE* fp);
-size_t couch_readfile(const char* file, char** outbuf_p);
-void couch_print(JSContext* cx, JS::HandleValue str, bool use_stderr);
-void couch_error(JSContext* cx, JSErrorReport* report);
-void couch_oom(JSContext* cx, void* data);
-bool couch_load_funcs(JSContext* cx, JS::HandleObject obj, JSFunctionSpec* funcs);
-
-#endif // Included util.h
diff --git a/src/couch/priv/spawnkillable/couchspawnkillable.sh b/src/couch/priv/spawnkillable/couchspawnkillable.sh
deleted file mode 100755
index f8d042e36..000000000
--- a/src/couch/priv/spawnkillable/couchspawnkillable.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#! /bin/sh -e
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# The purpose of this script is to echo an OS-specific command before launching
-# the actual process. This provides a way for Erlang to hard-kill its external
-# processes.
-
-echo "kill -9 $$"
-exec "$@"
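
For context, a hedged illustration of the hand-off (all values made up): launched as couchspawnkillable couchjs main.js, the script first writes its own kill command, e.g. kill -9 12345, to stdout so the Erlang side can record it, then execs couchjs main.js in its place; running the recorded command later hard-kills the external process.
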
diff --git a/src/couch/priv/spawnkillable/couchspawnkillable_win.c b/src/couch/priv/spawnkillable/couchspawnkillable_win.c
deleted file mode 100644
index 067823159..000000000
--- a/src/couch/priv/spawnkillable/couchspawnkillable_win.c
+++ /dev/null
@@ -1,145 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-// Do what the 2 lines of shell script in couchspawnkillable.sh do...
-// * Create a new suspended process with the same (duplicated) standard
-// handles as us.
-// * Write a line to stdout, consisting of the path to ourselves, plus
-// '--kill {pid}' where {pid} is the PID of the newly created process.
-// * Un-suspend the new process.
-// * Wait for the process to terminate.
-// * Terminate with the child's exit-code.
-
-// Later, couch will call us with --kill and the PID, so we dutifully
-// terminate the specified PID.
-
-#include <stdlib.h>
-#include "windows.h"
-
-char *get_child_cmdline(int argc, char **argv)
-{
- // make a new command-line, but skipping me.
- // XXX - todo - spaces etc in args???
- int i;
- char *p, *cmdline;
- int nchars = 0;
- int nthis = 1;
- for (i=1;i<argc;i++)
- nchars += strlen(argv[i])+1;
- cmdline = p = malloc(nchars+1);
- if (!cmdline)
- return NULL;
- for (i=1;i<argc;i++) {
- nthis = strlen(argv[i]);
- strncpy(p, argv[i], nthis);
- p[nthis] = ' ';
- p += nthis+1;
- }
- // Replace the last space we added above with a '\0'
- cmdline[nchars-1] = '\0';
- return cmdline;
-}
-
-// create the child process, returning 0, or the exit-code we will
-// terminate with.
-int create_child(int argc, char **argv, PROCESS_INFORMATION *pi)
-{
- char buf[1024];
- DWORD dwcreate;
- STARTUPINFO si;
- char *cmdline;
- if (argc < 2)
- return 1;
- cmdline = get_child_cmdline(argc, argv);
- if (!cmdline)
- return 2;
-
- memset(&si, 0, sizeof(si));
- si.cb = sizeof(si);
- // depending on how *our* parent is started, we may or may not have
- // a valid stderr stream - so although we try and duplicate it, only
- // failing to duplicate stdin and stdout are considered fatal.
- if (!DuplicateHandle(GetCurrentProcess(),
- GetStdHandle(STD_INPUT_HANDLE),
- GetCurrentProcess(),
- &si.hStdInput,
- 0,
- TRUE, // inheritable
- DUPLICATE_SAME_ACCESS) ||
- !DuplicateHandle(GetCurrentProcess(),
- GetStdHandle(STD_OUTPUT_HANDLE),
- GetCurrentProcess(),
- &si.hStdOutput,
- 0,
- TRUE, // inheritable
- DUPLICATE_SAME_ACCESS)) {
- return 3;
- }
- DuplicateHandle(GetCurrentProcess(),
- GetStdHandle(STD_ERROR_HANDLE),
- GetCurrentProcess(),
- &si.hStdError,
- 0,
- TRUE, // inheritable
- DUPLICATE_SAME_ACCESS);
-
- si.dwFlags = STARTF_USESTDHANDLES;
- dwcreate = CREATE_SUSPENDED;
- if (!CreateProcess( NULL, cmdline,
- NULL,
- NULL,
- TRUE, // inherit handles
- dwcreate,
- NULL, // environ
- NULL, // cwd
- &si,
- pi))
- return 4;
- return 0;
-}
-
-// and here we go...
-int main(int argc, char **argv)
-{
- char out_buf[1024];
- int rc;
- DWORD cbwritten;
- DWORD exitcode;
- PROCESS_INFORMATION pi;
- if (argc==3 && strcmp(argv[1], "--kill")==0) {
- HANDLE h = OpenProcess(PROCESS_TERMINATE, 0, atoi(argv[2]));
- if (!h)
- return 1;
- if (!TerminateProcess(h, 0))
- return 2;
- CloseHandle(h);
- return 0;
- }
- // spawn the new suspended process
- rc = create_child(argc, argv, &pi);
- if (rc)
- return rc;
- // Write the 'terminate' command, which includes this PID, back to couch.
- // *sob* - what about spaces etc?
- sprintf_s(out_buf, sizeof(out_buf), "%s --kill %d\n",
- argv[0], pi.dwProcessId);
- WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), out_buf, strlen(out_buf),
- &cbwritten, NULL);
- // Let the child process go...
- ResumeThread(pi.hThread);
- // Wait for the process to terminate so we can reflect the exit code
- // back to couch.
- WaitForSingleObject(pi.hProcess, INFINITE);
- if (!GetExitCodeProcess(pi.hProcess, &exitcode))
- return 6;
- return exitcode;
-}
diff --git a/src/couch/priv/stats_descriptions.cfg b/src/couch/priv/stats_descriptions.cfg
deleted file mode 100644
index 7c8fd94cb..000000000
--- a/src/couch/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,332 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-% Style guide for descriptions: Start with a lowercase letter & do not add
-% a trailing full-stop / period
-% Please keep this in alphabetical order
-
-{[couchdb, auth_cache_hits], [
- {type, counter},
- {desc, <<"number of authentication cache hits">>}
-]}.
-{[couchdb, auth_cache_misses], [
- {type, counter},
- {desc, <<"number of authentication cache misses">>}
-]}.
-{[couchdb, collect_results_time], [
- {type, histogram},
- {desc, <<"microsecond latency for calls to couch_db:collect_results/3">>}
-]}.
-{[couchdb, database_writes], [
- {type, counter},
- {desc, <<"number of times a database was changed">>}
-]}.
-{[couchdb, database_reads], [
- {type, counter},
- {desc, <<"number of times a document was read from a database">>}
-]}.
-{[couchdb, database_purges], [
- {type, counter},
- {desc, <<"number of times a database was purged">>}
-]}.
-{[couchdb, db_open_time], [
- {type, histogram},
- {desc, <<"milliseconds required to open a database">>}
-]}.
-{[couchdb, document_inserts], [
- {type, counter},
- {desc, <<"number of documents inserted">>}
-]}.
-{[couchdb, document_writes], [
- {type, counter},
- {desc, <<"number of document write operations">>}
-]}.
-{[couchdb, document_purges, total], [
- {type, counter},
- {desc, <<"number of total document purge operations">>}
-]}.
-{[couchdb, document_purges, success], [
- {type, counter},
- {desc, <<"number of successful document purge operations">>}
-]}.
-{[couchdb, document_purges, failure], [
- {type, counter},
- {desc, <<"number of failed document purge operations">>}
-]}.
-{[couchdb, local_document_writes], [
- {type, counter},
- {desc, <<"number of _local document write operations">>}
-]}.
-{[couchdb, httpd, bulk_docs], [
- {type, histogram},
- {desc, <<"distribution of the number of docs in _bulk_docs requests">>}
-]}.
-{[couchdb, httpd, bulk_requests], [
- {type, counter},
- {desc, <<"number of bulk requests">>}
-]}.
-{[couchdb, httpd, requests], [
- {type, counter},
- {desc, <<"number of HTTP requests">>}
-]}.
-{[couchdb, httpd, view_timeouts], [
- {type, counter},
- {desc, <<"number of HTTP view timeouts">>}
-]}.
-{[couchdb, httpd, find_timeouts], [
- {type, counter},
- {desc, <<"number of HTTP find timeouts">>}
-]}.
-{[couchdb, httpd, explain_timeouts], [
- {type, counter},
- {desc, <<"number of HTTP _explain timeouts">>}
-]}.
-{[couchdb, httpd, all_docs_timeouts], [
- {type, counter},
- {desc, <<"number of HTTP all_docs timeouts">>}
-]}.
-{[couchdb, httpd, partition_view_requests], [
- {type, counter},
- {desc, <<"number of partition HTTP view requests">>}
-]}.
-{[couchdb, httpd, partition_find_requests], [
- {type, counter},
- {desc, <<"number of partition HTTP _find requests">>}
-]}.
-{[couchdb, httpd, partition_explain_requests], [
- {type, counter},
- {desc, <<"number of partition HTTP _explain requests">>}
-]}.
-{[couchdb, httpd, partition_all_docs_requests], [
- {type, counter},
- {desc, <<"number of partition HTTP _all_docs requests">>}
-]}.
-{[couchdb, httpd, partition_view_timeouts], [
- {type, counter},
- {desc, <<"number of partition HTTP view timeouts">>}
-]}.
-{[couchdb, httpd, partition_find_timeouts], [
- {type, counter},
- {desc, <<"number of partition HTTP find timeouts">>}
-]}.
-{[couchdb, httpd, partition_explain_timeouts], [
- {type, counter},
- {desc, <<"number of partition HTTP _explain timeouts">>}
-]}.
-{[couchdb, httpd, partition_all_docs_timeouts], [
- {type, counter},
- {desc, <<"number of partition HTTP all_docs timeouts">>}
-]}.
-{[couchdb, httpd, temporary_view_reads], [
- {type, counter},
- {desc, <<"number of temporary view reads">>}
-]}.
-{[couchdb, httpd, view_reads], [
- {type, counter},
- {desc, <<"number of view reads">>}
-]}.
-{[couchdb, httpd, clients_requesting_changes], [
- {type, counter},
- {desc, <<"number of clients for continuous _changes">>}
-]}.
-{[couchdb, httpd, purge_requests], [
- {type, counter},
- {desc, <<"number of purge requests">>}
-]}.
-{[couchdb, httpd_request_methods, 'COPY'], [
- {type, counter},
- {desc, <<"number of HTTP COPY requests">>}
-]}.
-{[couchdb, httpd_request_methods, 'DELETE'], [
- {type, counter},
- {desc, <<"number of HTTP DELETE requests">>}
-]}.
-{[couchdb, httpd_request_methods, 'GET'], [
- {type, counter},
- {desc, <<"number of HTTP GET requests">>}
-]}.
-{[couchdb, httpd_request_methods, 'HEAD'], [
- {type, counter},
- {desc, <<"number of HTTP HEAD requests">>}
-]}.
-{[couchdb, httpd_request_methods, 'OPTIONS'], [
- {type, counter},
- {desc, <<"number of HTTP OPTIONS requests">>}
-]}.
-{[couchdb, httpd_request_methods, 'POST'], [
- {type, counter},
- {desc, <<"number of HTTP POST requests">>}
-]}.
-{[couchdb, httpd_request_methods, 'PUT'], [
- {type, counter},
- {desc, <<"number of HTTP PUT requests">>}
-]}.
-{[couchdb, httpd_status_codes, 200], [
- {type, counter},
- {desc, <<"number of HTTP 200 OK responses">>}
-]}.
-{[couchdb, httpd_status_codes, 201], [
- {type, counter},
- {desc, <<"number of HTTP 201 Created responses">>}
-]}.
-{[couchdb, httpd_status_codes, 202], [
- {type, counter},
- {desc, <<"number of HTTP 202 Accepted responses">>}
-]}.
-{[couchdb, httpd_status_codes, 204], [
- {type, counter},
- {desc, <<"number of HTTP 204 No Content responses">>}
-]}.
-{[couchdb, httpd_status_codes, 206], [
- {type, counter},
- {desc, <<"number of HTTP 206 Partial Content">>}
-]}.
-{[couchdb, httpd_status_codes, 301], [
- {type, counter},
- {desc, <<"number of HTTP 301 Moved Permanently responses">>}
-]}.
-{[couchdb, httpd_status_codes, 302], [
- {type, counter},
- {desc, <<"number of HTTP 302 Found responses">>}
-]}.
-{[couchdb, httpd_status_codes, 304], [
- {type, counter},
- {desc, <<"number of HTTP 304 Not Modified responses">>}
-]}.
-{[couchdb, httpd_status_codes, 400], [
- {type, counter},
- {desc, <<"number of HTTP 400 Bad Request responses">>}
-]}.
-{[couchdb, httpd_status_codes, 401], [
- {type, counter},
- {desc, <<"number of HTTP 401 Unauthorized responses">>}
-]}.
-{[couchdb, httpd_status_codes, 403], [
- {type, counter},
- {desc, <<"number of HTTP 403 Forbidden responses">>}
-]}.
-{[couchdb, httpd_status_codes, 404], [
- {type, counter},
- {desc, <<"number of HTTP 404 Not Found responses">>}
-]}.
-{[couchdb, httpd_status_codes, 405], [
- {type, counter},
- {desc, <<"number of HTTP 405 Method Not Allowed responses">>}
-]}.
-{[couchdb, httpd_status_codes, 406], [
- {type, counter},
- {desc, <<"number of HTTP 406 Not Acceptable responses">>}
-]}.
-{[couchdb, httpd_status_codes, 409], [
- {type, counter},
- {desc, <<"number of HTTP 409 Conflict responses">>}
-]}.
-{[couchdb, httpd_status_codes, 412], [
- {type, counter},
- {desc, <<"number of HTTP 412 Precondition Failed responses">>}
-]}.
-{[couchdb, httpd_status_codes, 413], [
- {type, counter},
- {desc, <<"number of HTTP 413 Request Entity Too Long responses">>}
-]}.
-{[couchdb, httpd_status_codes, 414], [
- {type, counter},
- {desc, <<"number of HTTP 414 Request URI Too Long responses">>}
-]}.
-{[couchdb, httpd_status_codes, 415], [
- {type, counter},
- {desc, <<"number of HTTP 415 Unsupported Media Type responses">>}
-]}.
-{[couchdb, httpd_status_codes, 416], [
- {type, counter},
- {desc, <<"number of HTTP 416 Requested Range Not Satisfiable responses">>}
-]}.
-{[couchdb, httpd_status_codes, 417], [
- {type, counter},
- {desc, <<"number of HTTP 417 Expectation Failed responses">>}
-]}.
-{[couchdb, httpd_status_codes, 500], [
- {type, counter},
- {desc, <<"number of HTTP 500 Internal Server Error responses">>}
-]}.
-{[couchdb, httpd_status_codes, 501], [
- {type, counter},
- {desc, <<"number of HTTP 501 Not Implemented responses">>}
-]}.
-{[couchdb, httpd_status_codes, 503], [
- {type, counter},
- {desc, <<"number of HTTP 503 Service unavailable responses">>}
-]}.
-{[couchdb, open_databases], [
- {type, counter},
- {desc, <<"number of open databases">>}
-]}.
-{[couchdb, open_os_files], [
- {type, counter},
- {desc, <<"number of file descriptors CouchDB has open">>}
-]}.
-{[couchdb, request_time], [
- {type, histogram},
- {desc, <<"length of a request inside CouchDB without MochiWeb">>}
-]}.
-{[couchdb, couch_server, lru_skip], [
- {type, counter},
- {desc, <<"number of couch_server LRU operations skipped">>}
-]}.
-{[couchdb, query_server, vdu_rejects], [
- {type, counter},
- {desc, <<"number of rejections by validate_doc_update function">>}
-]}.
-{[couchdb, query_server, vdu_process_time], [
- {type, histogram},
- {desc, <<"duration of validate_doc_update function calls">>}
-]}.
-{[pread, exceed_eof], [
- {type, counter},
- {desc, <<"number of the attempts to read beyond end of db file">>}
-]}.
-{[pread, exceed_limit], [
- {type, counter},
- {desc, <<"number of the attempts to read beyond set limit">>}
-]}.
-{[mango, unindexed_queries], [
- {type, counter},
- {desc, <<"number of mango queries that could not use an index">>}
-]}.
-{[mango, query_invalid_index], [
- {type, counter},
- {desc, <<"number of mango queries that generated an invalid index warning">>}
-]}.
-{[mango, too_many_docs_scanned], [
- {type, counter},
- {desc, <<"number of mango queries that generated an index scan warning">>}
-]}.
-{[mango, docs_examined], [
- {type, counter},
- {desc, <<"number of documents examined by mango queries coordinated by this node">>}
-]}.
-{[mango, quorum_docs_examined], [
- {type, counter},
- {desc, <<"number of documents examined by mango queries, using cluster quorum">>}
-]}.
-{[mango, results_returned], [
- {type, counter},
- {desc, <<"number of rows returned by mango queries">>}
-]}.
-{[mango, query_time], [
- {type, histogram},
- {desc, <<"length of time processing a mango query">>}
-]}.
-{[mango, evaluate_selector], [
- {type, counter},
- {desc, <<"number of mango selector evaluations">>}
-]}.
diff --git a/src/couch/rebar.config.script b/src/couch/rebar.config.script
deleted file mode 100644
index ba907b0a2..000000000
--- a/src/couch/rebar.config.script
+++ /dev/null
@@ -1,253 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-CopyIfDifferent = fun(Path, Contents) ->
- case filelib:is_file(Path) of
- true ->
- case file:read_file(Path) of
- {ok, Contents} ->
- ok;
- _ ->
- file:write_file(Path, Contents)
- end;
- false ->
- file:write_file(Path, Contents)
- end
-end.
-
-
-CouchJSName = case os:type() of
- {win32, _} ->
- "couchjs.exe";
- _ ->
- "couchjs"
-end.
-CouchJSPath = filename:join(["priv", CouchJSName]).
-Version = case os:getenv("COUCHDB_VERSION") of
- false ->
- string:strip(os:cmd("git describe --always"), right, $\n);
- Version0 ->
- string:strip(Version0, right)
-end.
-
-GitSha = case os:getenv("COUCHDB_GIT_SHA") of
- false ->
- ""; % release builds won\'t get a fallback
- GitSha0 ->
- string:strip(GitSha0, right)
-end.
-
-CouchConfig = case filelib:is_file(os:getenv("COUCHDB_CONFIG")) of
- true ->
- {ok, Result} = file:consult(os:getenv("COUCHDB_CONFIG")),
- Result;
- false ->
- []
-end.
-
-SMVsn = case lists:keyfind(spidermonkey_version, 1, CouchConfig) of
- {_, "1.8.5"} ->
- "1.8.5";
- {_, "60"} ->
- "60";
- {_, "68"} ->
- "68";
- {_, "78"} ->
- "78";
- {_, "86"} ->
- "86";
- {_, "91"} ->
- "91";
- undefined ->
- "1.8.5";
- {_, Unsupported} ->
- io:format(standard_error, "Unsupported SpiderMonkey version: ~s~n", [Unsupported]),
- erlang:halt(1);
- false ->
- "1.8.5"
-end.
-
-ConfigH = [
- {"SM185", ""},
- {"HAVE_JS_GET_STRING_CHARS_AND_LENGTH", "1"},
- {"JSSCRIPT_TYPE", "JSObject*"},
- {"COUCHJS_NAME", "\"" ++ CouchJSName++ "\""},
- {"PACKAGE", "\"apache-couchdb\""},
- {"PACKAGE_BUGREPORT", "\"https://github.com/apache/couchdb/issues\""},
- {"PACKAGE_NAME", "\"Apache CouchDB\""},
- {"PACKAGE_STRING", "\"Apache CouchDB " ++ Version ++ "\""},
- {"PACKAGE_VERSION", "\"" ++ Version ++ "\""}
-].
-
-CouchJSConfig = case SMVsn of
- "78" ->
- "priv/couch_js/86/config.h";
- "91" ->
- "priv/couch_js/86/config.h";
- _ ->
- "priv/couch_js/" ++ SMVsn ++ "/config.h"
-end.
-ConfigSrc = [["#define ", K, " ", V, $\n] || {K, V} <- ConfigH].
-ConfigBin = iolist_to_binary(ConfigSrc).
-ok = CopyIfDifferent(CouchJSConfig, ConfigBin).
-
-MD5Config = case lists:keyfind(erlang_md5, 1, CouchConfig) of
- {erlang_md5, true} ->
- [{d, 'ERLANG_MD5', true}];
- _ ->
- []
-end.
-
-ProperConfig = case code:lib_dir(proper) of
- {error, bad_name} -> [];
- _ -> [{d, 'WITH_PROPER'}]
-end.
-
-{JS_CFLAGS, JS_LDFLAGS} = case os:type() of
- {win32, _} when SMVsn == "1.8.5" ->
- {
- "/DXP_WIN",
- "mozjs185-1.0.lib"
- };
- {unix, _} when SMVsn == "1.8.5" ->
- {
- "-DXP_UNIX -I/usr/include/js -I/usr/local/include/js",
- "-L/usr/local/lib -lmozjs185 -lm"
- };
- {win32, _} when SMVsn == "60" ->
- {
- "/DXP_WIN",
- "mozjs-60.lib"
- };
- {unix, darwin} when SMVsn == "60" ->
- {
- "-DXP_UNIX -I/usr/include/mozjs-60 -I/usr/local/include/mozjs-60 -std=c++14",
- "-L/usr/local/lib -lmozjs-60 -lm -std=c++14 -lc++"
- };
- {unix, _} when SMVsn == "60" ->
- {
- "-DXP_UNIX -I/usr/include/mozjs-60 -I/usr/local/include/mozjs-60 -std=c++14 -Wno-invalid-offsetof",
- "-L/usr/local/lib -std=c++14 -lmozjs-60 -lm"
- };
- {unix, _} when SMVsn == "68" ->
- {
- "-DXP_UNIX -I/usr/include/mozjs-68 -I/usr/local/include/mozjs-68 -std=c++14 -Wno-invalid-offsetof",
- "-L/usr/local/lib -std=c++14 -lmozjs-68 -lm"
- };
- {unix, _} when SMVsn == "78" ->
- {
- "-DXP_UNIX -I/usr/include/mozjs-78 -I/usr/local/include/mozjs-78 -std=c++17 -Wno-invalid-offsetof",
- "-L/usr/local/lib -std=c++17 -lmozjs-78 -lm"
- };
- {unix, _} when SMVsn == "86" ->
- {
- "-DXP_UNIX -I/usr/include/mozjs-86 -I/usr/local/include/mozjs-86 -I/opt/homebrew/include/mozjs-86/ -std=c++17 -Wno-invalid-offsetof",
- "-L/usr/local/lib -L /opt/homebrew/lib/ -std=c++17 -lmozjs-86 -lm"
- };
- {unix, _} when SMVsn == "91" ->
- {
- "$CFLAGS -DXP_UNIX -I/usr/include/mozjs-91 -I/usr/local/include/mozjs-91 -I/opt/homebrew/include/mozjs-91/ -std=c++17 -Wno-invalid-offsetof",
- "$LDFLAGS -L/usr/local/lib -L /opt/homebrew/lib/ -std=c++17 -lmozjs-91 -lm"
- };
- {win32, _} when SMVsn == "91" ->
- {
- "/std:c++17 /DXP_WIN",
- "$LDFLAGS mozjs-91.lib"
- }
-end.
-
-CouchJSSrc = case SMVsn of
- "1.8.5" -> ["priv/couch_js/1.8.5/*.c"];
- "60" -> ["priv/couch_js/60/*.cpp"];
- "68" -> ["priv/couch_js/68/*.cpp"];
- "78" -> ["priv/couch_js/86/*.cpp"];
- "86" -> ["priv/couch_js/86/*.cpp"];
- "91" -> ["priv/couch_js/86/*.cpp"]
-end.
-
-CouchJSEnv = case SMVsn of
- "1.8.5" ->
- [
- {"CFLAGS", JS_CFLAGS},
- {"LDFLAGS", JS_LDFLAGS}
- ];
- _ ->
- [
- {"CXXFLAGS", JS_CFLAGS},
- {"LDFLAGS", JS_LDFLAGS}
- ]
-end.
-
-IcuEnv = [{"DRV_CFLAGS", "$DRV_CFLAGS -DPIC -O2 -fno-common"},
- {"DRV_LDFLAGS", "$DRV_LDFLAGS -lm -licuuc -licudata -licui18n -lpthread"}].
-IcuDarwinEnv = [{"CFLAGS", "-DXP_UNIX -I/usr/local/opt/icu4c/include -I/opt/homebrew/opt/icu4c/include"},
- {"LDFLAGS", "-L/usr/local/opt/icu4c/lib -L/opt/homebrew/opt/icu4c/lib"}].
-IcuBsdEnv = [{"CFLAGS", "-DXP_UNIX -I/usr/local/include"},
- {"LDFLAGS", "-L/usr/local/lib"}].
-IcuWinEnv = [{"CFLAGS", "$DRV_CFLAGS /DXP_WIN"},
- {"LDFLAGS", "$LDFLAGS icuin.lib icudt.lib icuuc.lib"}].
-
-ComparePath = "priv/couch_ejson_compare.so".
-CompareSrc = ["priv/couch_ejson_compare/*.c"].
-
-BaseSpecs = [
- %% couchjs
- {".*", CouchJSPath, CouchJSSrc, [{env, CouchJSEnv}]},
- % ejson_compare
- {"darwin", ComparePath, CompareSrc, [{env, IcuEnv ++ IcuDarwinEnv}]},
- {"linux", ComparePath, CompareSrc, [{env, IcuEnv}]},
- {"bsd", ComparePath, CompareSrc, [{env, IcuEnv ++ IcuBsdEnv}]},
- {"win32", ComparePath, CompareSrc, [{env, IcuWinEnv}]}
-].
-
-SpawnSpec = [
- {"priv/couchspawnkillable", ["priv/spawnkillable/*.c"]}
-].
-
-%% hack required until switch to enc/rebar3
-PortEnvOverrides = [
- {"win32", "EXE_LINK_CXX_TEMPLATE",
- "$LINKER $PORT_IN_FILES $LDFLAGS $EXE_LDFLAGS /OUT:$PORT_OUT_FILE"}
-].
-
-PortSpecs = case os:type() of
- {win32, _} ->
- BaseSpecs ++ SpawnSpec;
- _ ->
- {ok, CSK} = file:read_file("priv/spawnkillable/couchspawnkillable.sh"),
- ok = CopyIfDifferent("priv/couchspawnkillable", CSK),
- os:cmd("chmod +x priv/couchspawnkillable"),
- BaseSpecs
-end.
-PlatformDefines = [
- {platform_define, "win32", 'WINDOWS'}
-].
-AddConfig = [
- {port_specs, PortSpecs},
- {erl_opts, PlatformDefines ++ [
- {d, 'COUCHDB_VERSION', Version},
- {d, 'COUCHDB_GIT_SHA', GitSha},
- {d, 'COUCHDB_SPIDERMONKEY_VERSION', SMVsn},
- {i, "../"}
- ] ++ MD5Config ++ ProperConfig},
- {port_env, PortEnvOverrides},
- {eunit_compile_opts, PlatformDefines}
-].
-
-lists:foldl(fun({K, V}, CfgAcc) ->
- case lists:keyfind(K, 1, CfgAcc) of
- {K, Existent} when is_list(Existent) andalso is_list(V) ->
- lists:keystore(K, 1, CfgAcc, {K, Existent ++ V});
- false ->
- lists:keystore(K, 1, CfgAcc, {K, V})
- end
-end, CONFIG, AddConfig).
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
deleted file mode 100644
index c2d0e0e92..000000000
--- a/src/couch/src/couch.app.src
+++ /dev/null
@@ -1,86 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch, [
- {description, "Apache CouchDB"},
- {vsn, git},
- {registered, [
- couch_db_update,
- couch_db_update_notifier_sup,
- couch_httpd,
- couch_primary_services,
- couch_proc_manager,
- couch_secondary_services,
- couch_server,
- couch_sup,
- couch_task_status
- ]},
- {mod, {couch_app, []}},
- {applications, [
- % stdlib
- kernel,
- stdlib,
- crypto,
- sasl,
- inets,
- ssl,
-
- % Upstream deps
- ibrowse,
- mochiweb,
-
- % ASF deps
- couch_epi,
- b64url,
- couch_log,
- couch_event,
- ioq,
- couch_stats,
- hyper,
- couch_prometheus,
- couch_dist
- ]},
- {env, [
- { httpd_global_handlers, [
- {"/", "{couch_httpd_misc_handlers, handle_welcome_req, <<\"Welcome\">>}"},
- {"favicon.ico", "{couch_httpd_misc_handlers, handle_favicon_req, \"{{prefix}}/share/www\"}"},
- {"_utils", "{couch_httpd_misc_handlers, handle_utils_dir_req, \"{{prefix}}/share/www\"}"},
- {"_all_dbs", "{couch_httpd_misc_handlers, handle_all_dbs_req}"},
- {"_active_tasks", "{couch_httpd_misc_handlers, handle_task_status_req}"},
- {"_config", "{couch_httpd_misc_handlers, handle_config_req}"},
- {"_replicate", "{couch_replicator_httpd, handle_req}"},
- {"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
- {"_stats", "{couch_stats_httpd, handle_stats_req}"},
- {"_session", "{couch_httpd_auth, handle_session_req}"},
- {"_plugins", "{couch_plugins_httpd, handle_req}"}
- ]},
- { httpd_db_handlers, [
- {"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},
- {"_local_docs", "{couch_mrview_http, handle_local_docs_req}"},
- {"_design_docs", "{couch_mrview_http, handle_design_docs_req}"},
- {"_changes", "{couch_httpd_db, handle_db_changes_req}"},
- {"_compact", "{couch_httpd_db, handle_compact_req}"},
- {"_design", "{couch_httpd_db, handle_design_req}"},
- {"_temp_view", "{couch_mrview_http, handle_temp_view_req}"},
- {"_view_cleanup", "{couch_mrview_http, handle_cleanup_req}"}
- ]},
- { httpd_design_handlers, [
- {"_compact", "{couch_mrview_http, handle_compact_req}"},
- {"_info", "{couch_mrview_http, handle_info_req}"},
- {"_list", "{couch_mrview_show, handle_view_list_req}"},
- {"_rewrite", "{couch_httpd_rewrite, handle_rewrite_req}"},
- {"_show", "{couch_mrview_show, handle_doc_show_req}"},
- {"_update", "{couch_mrview_show, handle_doc_update_req}"},
- {"_view", "{couch_mrview_http, handle_view_req}"}
- ]}
- ]}
-]}.
diff --git a/src/couch/src/couch.erl b/src/couch/src/couch.erl
deleted file mode 100644
index 6952c16c8..000000000
--- a/src/couch/src/couch.erl
+++ /dev/null
@@ -1,62 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch).
-
--export([
- start/0,
- stop/0,
- restart/0
-]).
-
-deps() ->
- [
- sasl,
- inets,
- crypto,
- public_key,
- ssl,
- ibrowse,
- mochiweb,
- config,
- couch_log
- ].
-
-start() ->
- catch erlang:system_flag(scheduler_bind_type, default_bind),
- case start_apps(deps()) of
- ok ->
- ok = application:start(couch);
- Else ->
- throw(Else)
- end.
-
-stop() ->
- application:stop(couch).
-
-restart() ->
- init:restart().
-
-start_apps([]) ->
- ok;
-start_apps([App | Rest]) ->
- case application:start(App) of
- ok ->
- start_apps(Rest);
- {error, {already_started, App}} ->
- start_apps(Rest);
- {error, _Reason} when App =:= public_key ->
- % ignore on R12B5
- start_apps(Rest);
- {error, _Reason} ->
- {error, {app_would_not_start, App}}
- end.
diff --git a/src/couch/src/couch_app.erl b/src/couch/src/couch_app.erl
deleted file mode 100644
index 8acc71d51..000000000
--- a/src/couch/src/couch_app.erl
+++ /dev/null
@@ -1,40 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_app).
-
--behaviour(application).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([
- start/2,
- stop/1,
- uptime/0
-]).
-
-start(_Type, _) ->
- case couch_sup:start_link() of
- {ok, _} = Resp ->
- {Time, _} = statistics(wall_clock),
- application:set_env(couch, start_time, Time),
- Resp;
- Else ->
- throw(Else)
- end.
-
-stop(_) ->
- ok.
-
-uptime() ->
- {Time, _} = statistics(wall_clock),
- Time - application:get_env(couch, start_time, Time).
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
deleted file mode 100644
index b3b2f23eb..000000000
--- a/src/couch/src/couch_att.erl
+++ /dev/null
@@ -1,970 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_att).
-
--export([
- new/0,
- new/1,
- fetch/2,
- store/2,
- store/3,
- transform/3
-]).
-
--export([
- is_stub/1,
- merge_stubs/2
-]).
-
--export([
- size_info/1,
- to_disk_term/1,
- from_disk_term/2
-]).
-
--export([
- from_json/2,
- to_json/4
-]).
-
--export([
- flush/2,
- foldl/3,
- range_foldl/5,
- foldl_decode/3,
- to_binary/1
-]).
-
--export([
- upgrade/1,
- downgrade/1
-]).
-
--export([
- max_attachment_size/0,
- validate_attachment_size/3
-]).
-
--compile(nowarn_deprecated_type).
--export_type([att/0]).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% Legacy attachment record. This is going to be phased out by the new proplist
-%% based structure. It's needed for now to allow code to perform lazy upgrades
-%% while the patch is rolled out to the cluster. Attachments passed as records
-%% will remain so until they are required to be represented as property lists.
-%% Once this has been widely deployed, this record will be removed entirely and
-%% property lists will be the main format.
--record(att, {
- name :: binary(),
- type :: binary(),
- att_len :: non_neg_integer(),
-
- %% length of the attachment in its identity form
- %% (that is, without a content encoding applied to it)
- %% differs from att_len when encoding /= identity
- disk_len :: non_neg_integer(),
-
- md5 = <<>> :: binary(),
- revpos = 0 :: non_neg_integer(),
- data ::
- stub
- | follows
- | binary()
- | {any(), any()}
- | {follows, pid(), reference()}
- | fun(() -> binary()),
-
- %% Encoding of the attachment
- %% currently supported values are:
- %% identity, gzip
- %% additional values to support in the future:
- %% deflate, compress
- encoding = identity :: identity | gzip
-}).
-
-%% Extensible Attachment Type
-%%
-%% The following types describe the known properties for attachment fields
-%% encoded as property lists to allow easier upgrades. Values not in this list
-%% should be accepted at runtime but should be treated as opaque data, as they
-%% might be used by upgraded code. If you plan on operating on new data, please add
-%% an entry here as documentation.
-
-%% The name of the attachment is also used as the mime-part name for file
-%% downloads. These must be unique per document.
--type name_prop() :: {name, binary()}.
-
-%% The mime type of the attachment. This does affect compression of certain
-%% attachments if the type is found to be configured as a compressible type.
-%% This is commonly reserved for text/* types but could include other custom
-%% cases as well. See definition and use of couch_util:compressable_att_type/1.
--type type_prop() :: {type, binary()}.
-
-%% The attachment length is similar to disk-length but ignores additional
-%% encoding that may have occurred.
--type att_len_prop() :: {att_len, non_neg_integer()}.
-
-%% The size of the attachment as stored in a disk stream.
--type disk_len_prop() :: {disk_len, non_neg_integer()}.
-
-%% This is a digest of the original attachment data as uploaded by the client.
-%% It's useful for checking validity of contents against other attachment data
-%% as well as quick digest computation of the enclosing document.
--type md5_prop() :: {md5, binary()}.
-
--type revpos_prop() :: {revpos, 0}.
-
-%% This field is currently overloaded with just about everything. The
-%% {any(), any()} type is just there until I have time to check the actual
-%% values expected. Over time this should be split into more than one property
-%% to allow simpler handling.
--type data_prop() :: {
- data,
- stub
- | follows
- | binary()
- | {any(), any()}
- | {follows, pid(), reference()}
- | fun(() -> binary())
-}.
-
-%% We will occasionally compress our data. See type_prop() for more information
-%% on when this happens.
--type encoding_prop() :: {encoding, identity | gzip}.
-
--type attachment() :: [
- name_prop()
- | type_prop()
- | att_len_prop()
- | disk_len_prop()
- | md5_prop()
- | revpos_prop()
- | data_prop()
- | encoding_prop()
-].
-
--type disk_att_v1() :: {
- Name :: binary(),
- Type :: binary(),
- Sp :: any(),
- AttLen :: non_neg_integer(),
- RevPos :: non_neg_integer(),
- Md5 :: binary()
-}.
-
--type disk_att_v2() :: {
- Name :: binary(),
- Type :: binary(),
- Sp :: any(),
- AttLen :: non_neg_integer(),
- DiskLen :: non_neg_integer(),
- RevPos :: non_neg_integer(),
- Md5 :: binary(),
- Enc :: identity | gzip
-}.
-
--type disk_att_v3() :: {Base :: tuple(), Extended :: list()}.
-
--type disk_att() :: disk_att_v1() | disk_att_v2() | disk_att_v3().
-
--type att() :: #att{} | attachment() | disk_att().
-
--define(GB, (1024 * 1024 * 1024)).
-
-new() ->
- %% We construct a record by default for compatibility. This will be
- %% upgraded on demand. A subtle effect this has on all attachments
- %% constructed via new is that it will pick up the proper defaults
- %% from the #att record definition given above. Newer properties do
- %% not support special default values and will all be treated as
- %% undefined.
- #att{}.
-
--spec new([{atom(), any()}]) -> att().
-new(Props) ->
- store(Props, new()).
-
--spec fetch
- ([atom()], att()) -> [any()];
- (atom(), att()) -> any().
-fetch(Fields, Att) when is_list(Fields) ->
- [fetch(Field, Att) || Field <- Fields];
-fetch(Field, Att) when is_list(Att) ->
- case lists:keyfind(Field, 1, Att) of
- {Field, Value} -> Value;
- false -> undefined
- end;
-fetch(name, #att{name = Name}) ->
- Name;
-fetch(type, #att{type = Type}) ->
- Type;
-fetch(att_len, #att{att_len = AttLen}) ->
- AttLen;
-fetch(disk_len, #att{disk_len = DiskLen}) ->
- DiskLen;
-fetch(md5, #att{md5 = Digest}) ->
- Digest;
-fetch(revpos, #att{revpos = RevPos}) ->
- RevPos;
-fetch(data, #att{data = Data}) ->
- Data;
-fetch(encoding, #att{encoding = Encoding}) ->
- Encoding;
-fetch(_, _) ->
- undefined.
-
--spec store([{atom(), any()}], att()) -> att().
-store(Props, Att0) ->
- lists:foldl(
- fun({Field, Value}, Att) ->
- store(Field, Value, Att)
- end,
- Att0,
- Props
- ).
-
--spec store(atom(), any(), att()) -> att().
-store(Field, undefined, Att) when is_list(Att) ->
- lists:keydelete(Field, 1, Att);
-store(Field, Value, Att) when is_list(Att) ->
- lists:keystore(Field, 1, Att, {Field, Value});
-store(name, Name, Att) ->
- Att#att{name = Name};
-store(type, Type, Att) ->
- Att#att{type = Type};
-store(att_len, AttLen, Att) ->
- Att#att{att_len = AttLen};
-store(disk_len, DiskLen, Att) ->
- Att#att{disk_len = DiskLen};
-store(md5, Digest, Att) ->
- Att#att{md5 = Digest};
-store(revpos, RevPos, Att) ->
- Att#att{revpos = RevPos};
-store(data, Data, Att) ->
- Att#att{data = Data};
-store(encoding, Encoding, Att) ->
- Att#att{encoding = Encoding};
-store(Field, Value, Att) ->
- store(Field, Value, upgrade(Att)).
-
--spec transform(atom(), fun(), att()) -> att().
-transform(Field, Fun, Att) ->
- NewValue = Fun(fetch(Field, Att)),
- store(Field, NewValue, Att).
-
-is_stub(Att) ->
- stub == fetch(data, Att).
-
-%% merge_stubs takes all stub attachments and replaces them with on disk
-%% attachments. It will return {missing, Name} if a stub isn't matched with
-%% an existing attachment on disk. If the revpos is supplied with the stub
-%% it is only counted as a match if it is the same as the disk attachment's.
-merge_stubs(MemAtts, DiskAtts) ->
- OnDisk = dict:from_list(
- [{fetch(name, Att), Att} || Att <- DiskAtts]
- ),
- merge_stubs(MemAtts, OnDisk, []).
-
-%% restore spec when R14 support is dropped
-%% -spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
-merge_stubs([Att | Rest], OnDisk, Merged) ->
- case fetch(data, Att) of
- stub ->
- [Name, Pos] = fetch([name, revpos], Att),
- case dict:find(Name, OnDisk) of
- {ok, DiskAtt} ->
- RevPos = fetch(revpos, DiskAtt),
- if
- %% We want to check for consistency between the stub and
- %% disk revpos here. If the stub's revpos is undefined
- %% it means it wasn't provided by the user and does not
- %% require being matched.
- RevPos == Pos orelse Pos == undefined ->
- merge_stubs(Rest, OnDisk, [DiskAtt | Merged]);
- true ->
- {missing, Name}
- end;
- _ ->
- {missing, Name}
- end;
- _ ->
- merge_stubs(Rest, OnDisk, [Att | Merged])
- end;
-merge_stubs([], _, Merged) ->
- {ok, lists:reverse(Merged)}.
-
-size_info([]) ->
- {ok, []};
-size_info(Atts) ->
- Info = lists:map(
- fun(Att) ->
- AttLen = fetch(att_len, Att),
- case fetch(data, Att) of
- {stream, StreamEngine} ->
- {ok, SPos} = couch_stream:to_disk_term(StreamEngine),
- {SPos, AttLen};
- {_, SPos} ->
- {SPos, AttLen}
- end
- end,
- Atts
- ),
- {ok, lists:usort(Info)}.
-
-%% When converting an attachment to disk term format, attempt to stay with the
-%% old format when possible. This should help make the attachment lazy upgrade
-%% as safe as possible, avoiding the need for complicated disk versioning
-%% schemes.
-to_disk_term(#att{} = Att) ->
- {stream, StreamEngine} = fetch(data, Att),
- {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
- {
- fetch(name, Att),
- fetch(type, Att),
- Sp,
- fetch(att_len, Att),
- fetch(disk_len, Att),
- fetch(revpos, Att),
- fetch(md5, Att),
- fetch(encoding, Att)
- };
-to_disk_term(Att) ->
- BaseProps = [name, type, data, att_len, disk_len, revpos, md5, encoding],
- {Extended, Base} = lists:foldl(
- fun
- (data, {Props, Values}) ->
- case lists:keytake(data, 1, Props) of
- {value, {_, {stream, StreamEngine}}, Other} ->
- {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
- {Other, [Sp | Values]};
- {value, {_, Value}, Other} ->
- {Other, [Value | Values]};
- false ->
- {Props, [undefined | Values]}
- end;
- (Key, {Props, Values}) ->
- case lists:keytake(Key, 1, Props) of
- {value, {_, Value}, Other} -> {Other, [Value | Values]};
- false -> {Props, [undefined | Values]}
- end
- end,
- {Att, []},
- BaseProps
- ),
- {list_to_tuple(lists:reverse(Base)), Extended}.
-
-%% The new disk term format is a simple wrapper around the legacy format. Base
-%% properties will remain in a tuple while the new fields and possibly data from
-%% future extensions will be stored in a list of atom/value pairs. While this is
-%% slightly less efficient, future work should be able to make use of
-%% compression to remove these sorts of common bits (block level compression
-%% with something like a shared dictionary that is checkpointed every now and
-%% then).
-from_disk_term(StreamSrc, {Base, Extended}) when
- is_tuple(Base), is_list(Extended)
-->
- store(Extended, from_disk_term(StreamSrc, Base));
-from_disk_term(StreamSrc, {Name, Type, Sp, AttLen, DiskLen, RevPos, Md5, Enc}) ->
- {ok, Stream} = open_stream(StreamSrc, Sp),
- #att{
- name = Name,
- type = Type,
- att_len = AttLen,
- disk_len = DiskLen,
- md5 = Md5,
- revpos = RevPos,
- data = {stream, Stream},
- encoding = upgrade_encoding(Enc)
- };
-from_disk_term(StreamSrc, {Name, Type, Sp, AttLen, RevPos, Md5}) ->
- {ok, Stream} = open_stream(StreamSrc, Sp),
- #att{
- name = Name,
- type = Type,
- att_len = AttLen,
- disk_len = AttLen,
- md5 = Md5,
- revpos = RevPos,
- data = {stream, Stream}
- };
-from_disk_term(StreamSrc, {Name, {Type, Sp, AttLen}}) ->
- {ok, Stream} = open_stream(StreamSrc, Sp),
- #att{
- name = Name,
- type = Type,
- att_len = AttLen,
- disk_len = AttLen,
- md5 = <<>>,
- revpos = 0,
- data = {stream, Stream}
- }.
-
-%% from_json reads in embedded JSON attachments and creates usable attachment
-%% values. The attachment may be a stub, a follows marker, or inline data.
-from_json(Name, Props) ->
- Type = couch_util:get_value(
- <<"content_type">>, Props, ?DEFAULT_ATTACHMENT_CONTENT_TYPE
- ),
- Att = new([{name, Name}, {type, Type}]),
- IsStub = couch_util:get_value(<<"stub">>, Props),
- Follows = couch_util:get_value(<<"follows">>, Props),
- if
- IsStub -> stub_from_json(Att, Props);
- Follows -> follow_from_json(Att, Props);
- true -> inline_from_json(Att, Props)
- end.
-
-stub_from_json(Att, Props) ->
- {DiskLen, EncodedLen, Encoding} = encoded_lengths_from_json(Props),
- Digest = digest_from_json(Props),
- %% We specifically want undefined rather than the default 0 here to skip
- %% the revpos consistency check on stubs when it's not provided in the
- %% json object. See merge_stubs/3 for the stub check.
- RevPos = couch_util:get_value(<<"revpos">>, Props),
- store(
- [
- {md5, Digest},
- {revpos, RevPos},
- {data, stub},
- {disk_len, DiskLen},
- {att_len, EncodedLen},
- {encoding, Encoding}
- ],
- Att
- ).
-
-follow_from_json(Att, Props) ->
- {DiskLen, EncodedLen, Encoding} = encoded_lengths_from_json(Props),
- Digest = digest_from_json(Props),
- RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
- store(
- [
- {md5, Digest},
- {revpos, RevPos},
- {data, follows},
- {disk_len, DiskLen},
- {att_len, EncodedLen},
- {encoding, Encoding}
- ],
- Att
- ).
-
-inline_from_json(Att, Props) ->
- B64Data = couch_util:get_value(<<"data">>, Props),
- try base64:decode(B64Data) of
- Data ->
- Length = size(Data),
- RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
- store(
- [
- {data, Data},
- {revpos, RevPos},
- {disk_len, Length},
- {att_len, Length}
- ],
- Att
- )
- catch
- _:_ ->
- Name = fetch(name, Att),
- ErrMsg = <<"Invalid attachment data for ", Name/binary>>,
- throw({bad_request, ErrMsg})
- end.
-
-encoded_lengths_from_json(Props) ->
- Len = couch_util:get_value(<<"length">>, Props),
- case couch_util:get_value(<<"encoding">>, Props) of
- undefined ->
- Encoding = identity,
- EncodedLen = Len;
- EncodingValue ->
- EncodedLen = couch_util:get_value(<<"encoded_length">>, Props, Len),
- Encoding = list_to_existing_atom(binary_to_list(EncodingValue))
- end,
- {Len, EncodedLen, Encoding}.
-
-digest_from_json(Props) ->
- case couch_util:get_value(<<"digest">>, Props) of
- <<"md5-", EncodedMd5/binary>> -> base64:decode(EncodedMd5);
- _ -> <<>>
- end.
-
-to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
- [Name, Data, DiskLen, AttLen, Enc, Type, RevPos, Md5] = fetch(
- [name, data, disk_len, att_len, encoding, type, revpos, md5], Att
- ),
- Props = [
- {<<"content_type">>, Type},
- {<<"revpos">>, RevPos}
- ],
- DigestProp =
- case base64:encode(Md5) of
- <<>> -> [];
- Digest -> [{<<"digest">>, <<"md5-", Digest/binary>>}]
- end,
- DataProps =
- if
- not OutputData orelse Data == stub ->
- [{<<"length">>, DiskLen}, {<<"stub">>, true}];
- DataToFollow ->
- [{<<"length">>, DiskLen}, {<<"follows">>, true}];
- true ->
- AttData =
- case Enc of
- gzip -> zlib:gunzip(to_binary(Att));
- identity -> to_binary(Att)
- end,
- [{<<"data">>, base64:encode(AttData)}]
- end,
- EncodingProps =
- if
- ShowEncoding andalso Enc /= identity ->
- [
- {<<"encoding">>, couch_util:to_binary(Enc)},
- {<<"encoded_length">>, AttLen}
- ];
- true ->
- []
- end,
- HeadersProp =
- case fetch(headers, Att) of
- undefined -> [];
- Headers -> [{<<"headers">>, Headers}]
- end,
- {Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps ++ HeadersProp}}.
-
-flush(Db, Att) ->
- flush_data(Db, fetch(data, Att), Att).
-
-flush_data(Db, Data, Att) when is_binary(Data) ->
- couch_db:with_stream(Db, Att, fun(OutputStream) ->
- couch_stream:write(OutputStream, Data)
- end);
-flush_data(Db, Fun, Att) when is_function(Fun) ->
- AttName = fetch(name, Att),
- MaxAttSize = max_attachment_size(),
- case fetch(att_len, Att) of
- undefined ->
- couch_db:with_stream(Db, Att, fun(OutputStream) ->
- % Fun(MaxChunkSize, WriterFun) must call WriterFun
- % once for each chunk of the attachment.
- Fun(
- 4096,
- % WriterFun({Length, Binary}, State)
- % WriterFun({0, _Footers}, State)
- % Called with Length == 0 on the last time.
- % WriterFun returns NewState.
- fun
- ({0, Footers}, _Total) ->
- F = mochiweb_headers:from_binary(Footers),
- case mochiweb_headers:get_value("Content-MD5", F) of
- undefined ->
- ok;
- Md5 ->
- {md5, base64:decode(Md5)}
- end;
- ({Length, Chunk}, Total0) ->
- Total = Total0 + Length,
- validate_attachment_size(AttName, Total, MaxAttSize),
- couch_stream:write(OutputStream, Chunk),
- Total
- end,
- 0
- )
- end);
- AttLen ->
- validate_attachment_size(AttName, AttLen, MaxAttSize),
- couch_db:with_stream(Db, Att, fun(OutputStream) ->
- write_streamed_attachment(OutputStream, Fun, AttLen)
- end)
- end;
-flush_data(Db, {follows, Parser, Ref}, Att) ->
- ParserRef = erlang:monitor(process, Parser),
- Fun = fun() ->
- Parser ! {get_bytes, Ref, self()},
- receive
- {started_open_doc_revs, NewRef} ->
- couch_doc:restart_open_doc_revs(Parser, Ref, NewRef);
- {bytes, Ref, Bytes} ->
- Bytes;
- {'DOWN', ParserRef, _, _, Reason} ->
- throw({mp_parser_died, Reason})
- end
- end,
- try
- flush_data(Db, Fun, store(data, Fun, Att))
- after
- erlang:demonitor(ParserRef, [flush])
- end;
-flush_data(Db, {stream, StreamEngine}, Att) ->
- case couch_db:is_active_stream(Db, StreamEngine) of
- true ->
- % Already written
- Att;
- false ->
- couch_db:with_stream(Db, Att, fun(OutputStream) ->
- couch_stream:copy(StreamEngine, OutputStream)
- end)
- end.
-
-write_streamed_attachment(_Stream, _F, 0) ->
- ok;
-write_streamed_attachment(_Stream, _F, LenLeft) when LenLeft < 0 ->
- throw({bad_request, <<"attachment longer than expected">>});
-write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
- Bin =
- try
- read_next_chunk(F, LenLeft)
- catch
- {mp_parser_died, normal} ->
- throw({bad_request, <<"attachment shorter than expected">>})
- end,
- ok = couch_stream:write(Stream, Bin),
- write_streamed_attachment(Stream, F, LenLeft - iolist_size(Bin)).
-
-read_next_chunk(F, _) when is_function(F, 0) ->
- F();
-read_next_chunk(F, LenLeft) when is_function(F, 1) ->
- F(lists:min([LenLeft, 16#2000])).
-
-foldl(Att, Fun, Acc) ->
- foldl(fetch(data, Att), Att, Fun, Acc).
-
-foldl(Bin, _Att, Fun, Acc) when is_binary(Bin) ->
- Fun(Bin, Acc);
-foldl({stream, StreamEngine}, Att, Fun, Acc) ->
- Md5 = fetch(md5, Att),
- couch_stream:foldl(StreamEngine, Md5, Fun, Acc);
-foldl(DataFun, Att, Fun, Acc) when is_function(DataFun) ->
- Len = fetch(att_len, Att),
- fold_streamed_data(DataFun, Len, Fun, Acc);
-foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
- ParserRef = erlang:monitor(process, Parser),
- DataFun = fun() ->
- Parser ! {get_bytes, Ref, self()},
- receive
- {started_open_doc_revs, NewRef} ->
- couch_doc:restart_open_doc_revs(Parser, Ref, NewRef);
- {bytes, Ref, Bytes} ->
- Bytes;
- {'DOWN', ParserRef, _, _, Reason} ->
- throw({mp_parser_died, Reason})
- end
- end,
- try
- foldl(DataFun, store(data, DataFun, Att), Fun, Acc)
- after
- erlang:demonitor(ParserRef, [flush])
- end.
-
-range_foldl(Att, From, To, Fun, Acc) ->
- {stream, StreamEngine} = fetch(data, Att),
- couch_stream:range_foldl(StreamEngine, From, To, Fun, Acc).
-
-foldl_decode(Att, Fun, Acc) ->
- case fetch([data, encoding], Att) of
- [{stream, StreamEngine}, Enc] ->
- couch_stream:foldl_decode(
- StreamEngine, fetch(md5, Att), Enc, Fun, Acc
- );
- [Fun2, identity] ->
- fold_streamed_data(Fun2, fetch(att_len, Att), Fun, Acc)
- end.
-
-to_binary(Att) ->
- to_binary(fetch(data, Att), Att).
-
-to_binary(Bin, _Att) when is_binary(Bin) ->
- Bin;
-to_binary(Iolist, _Att) when is_list(Iolist) ->
- iolist_to_binary(Iolist);
-to_binary({stream, _StreamEngine}, Att) ->
- iolist_to_binary(
- lists:reverse(foldl(Att, fun(Bin, Acc) -> [Bin | Acc] end, []))
- );
-to_binary(DataFun, Att) when is_function(DataFun) ->
- Len = fetch(att_len, Att),
- iolist_to_binary(
- lists:reverse(
- fold_streamed_data(
- DataFun,
- Len,
- fun(Data, Acc) -> [Data | Acc] end,
- []
- )
- )
- ).
-
-fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
- Acc;
-fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0 ->
- Bin = RcvFun(),
- ResultAcc = Fun(Bin, Acc),
- fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
-
-%% Upgrade an attachment record to a property list on demand. This is a one-way
-%% operation as downgrading potentially truncates fields with important data.
--spec upgrade(#att{}) -> attachment().
-upgrade(#att{} = Att) ->
- Map = lists:zip(
- record_info(fields, att),
- lists:seq(2, record_info(size, att))
- ),
- %% Don't store undefined elements since that is default
- [{F, element(I, Att)} || {F, I} <- Map, element(I, Att) /= undefined];
-upgrade(Att) ->
- Att.
-
-%% Downgrade is exposed for interactive convenience. In practice, unless done
-%% manually, upgrades are always one-way.
-downgrade(#att{} = Att) ->
- Att;
-downgrade(Att) ->
- #att{
- name = fetch(name, Att),
- type = fetch(type, Att),
- att_len = fetch(att_len, Att),
- disk_len = fetch(disk_len, Att),
- md5 = fetch(md5, Att),
- revpos = fetch(revpos, Att),
- data = fetch(data, Att),
- encoding = fetch(encoding, Att)
- }.
-
-upgrade_encoding(true) -> gzip;
-upgrade_encoding(false) -> identity;
-upgrade_encoding(Encoding) -> Encoding.
-
-max_attachment_size() ->
- max_attachment_size(config:get("couchdb", "max_attachment_size", ?GB)).
-
-max_attachment_size(MaxAttSizeConfig) ->
- case MaxAttSizeConfig of
- "infinity" ->
- infinity;
- MaxAttSize when is_list(MaxAttSize) ->
- try list_to_integer(MaxAttSize) of
- Result -> Result
- catch
- _:_ ->
- couch_log:error("invalid config value for max attachment size: ~p ", [
- MaxAttSize
- ]),
- throw(internal_server_error)
- end;
- MaxAttSize when is_integer(MaxAttSize) ->
- MaxAttSize;
- MaxAttSize ->
- couch_log:error("invalid config value for max attachment size: ~p ", [MaxAttSize]),
- throw(internal_server_error)
- end.
-
-validate_attachment_size(AttName, AttSize, MaxAttSize) when
- is_integer(AttSize), AttSize > MaxAttSize
-->
- throw({request_entity_too_large, {attachment, AttName}});
-validate_attachment_size(_AttName, _AttSize, _MaxAttSize) ->
- ok.
-
-open_stream(StreamSrc, Data) ->
- case couch_db:is_db(StreamSrc) of
- true ->
- couch_db:open_read_stream(StreamSrc, Data);
- false ->
- case is_function(StreamSrc, 1) of
- true ->
- StreamSrc(Data);
- false ->
- erlang:error({invalid_stream_source, StreamSrc})
- end
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-% Eww...
--include("couch_bt_engine.hrl").
-
-%% Test utilities
-
-empty_att() -> new().
-
-upgraded_empty_att() ->
- new([{headers, undefined}]).
-
-%% Test groups
-
-attachment_upgrade_test_() ->
- {"Lazy record upgrade tests", [
- {"Existing record fields don't upgrade",
- {with, empty_att(), [fun test_non_upgrading_fields/1]}},
- {"New fields upgrade", {with, empty_att(), [fun test_upgrading_fields/1]}}
- ]}.
-
-attachment_defaults_test_() ->
- {"Attachment defaults tests", [
- {"Records retain old default values", [
- {with, empty_att(), [fun test_legacy_defaults/1]}
- ]},
- {"Upgraded records inherit defaults", [
- {with, upgraded_empty_att(), [fun test_legacy_defaults/1]}
- ]},
- {"Undefined entries are elided on upgrade", [
- {with, upgraded_empty_att(), [fun test_elided_entries/1]}
- ]}
- ]}.
-
-attachment_field_api_test_() ->
- {"Basic attachment field api", [
- fun test_construction/0,
- fun test_store_and_fetch/0,
- fun test_transform/0
- ]}.
-
-attachment_disk_term_test_() ->
- BaseAttachment = new([
- {name, <<"empty">>},
- {type, <<"application/octet-stream">>},
- {att_len, 0},
- {disk_len, 0},
- {md5, <<212, 29, 140, 217, 143, 0, 178, 4, 233, 128, 9, 152, 236, 248, 66, 126>>},
- {revpos, 4},
- {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
- {encoding, identity}
- ]),
- BaseDiskTerm = {
- <<"empty">>,
- <<"application/octet-stream">>,
- fake_sp,
- 0,
- 0,
- 4,
- <<212, 29, 140, 217, 143, 0, 178, 4, 233, 128, 9, 152, 236, 248, 66, 126>>,
- identity
- },
- Headers = [{<<"X-Foo">>, <<"bar">>}],
- ExtendedAttachment = store(headers, Headers, BaseAttachment),
- ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
- FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd = fake_fd}}}]),
- {"Disk term tests", [
- ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
- ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
- ?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
- ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
- ]}.
-
-attachment_json_term_test_() ->
- Props = [
- {<<"content_type">>, <<"application/json">>},
- {<<"digest">>, <<"md5-QCNtWUNXV0UzJnEjMk92YUk1JA==">>},
- {<<"length">>, 14},
- {<<"revpos">>, 1}
- ],
- PropsInline = [{<<"data">>, <<"eyJhbnN3ZXIiOiA0Mn0=">>}] ++ Props,
- InvalidProps = [{<<"data">>, <<"!Base64Encoded$">>}] ++ Props,
- Att = couch_att:new([
- {name, <<"attachment.json">>},
- {type, <<"application/json">>}
- ]),
- ResultStub = couch_att:new([
- {name, <<"attachment.json">>},
- {type, <<"application/json">>},
- {att_len, 14},
- {disk_len, 14},
- {md5, <<"@#mYCWWE3&q#2OvaI5$">>},
- {revpos, 1},
- {data, stub},
- {encoding, identity}
- ]),
- ResultFollows = ResultStub#att{data = follows},
- ResultInline = ResultStub#att{md5 = <<>>, data = <<"{\"answer\": 42}">>},
- {"JSON term tests", [
- ?_assertEqual(ResultStub, stub_from_json(Att, Props)),
- ?_assertEqual(ResultFollows, follow_from_json(Att, Props)),
- ?_assertEqual(ResultInline, inline_from_json(Att, PropsInline)),
- ?_assertThrow({bad_request, _}, inline_from_json(Att, Props)),
- ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
- ]}.
-
-attachment_stub_merge_test_() ->
- %% Stub merging needs to demonstrate revpos matching, skipping, and missing
- %% attachment errors.
- {"Attachment stub merging tests", []}.
-
-%% Test generators
-
-test_non_upgrading_fields(Attachment) ->
- Pairs = [
- {name, "cat.gif"},
- {type, "text/very-very-plain"},
- {att_len, 1024},
- {disk_len, 42},
- {md5, <<"md5-hashhashhash">>},
- {revpos, 4},
- {data, stub},
- {encoding, gzip}
- ],
- lists:foreach(
- fun({Field, Value}) ->
- ?assertMatch(#att{}, Attachment),
- Updated = store(Field, Value, Attachment),
- ?assertMatch(#att{}, Updated)
- end,
- Pairs
- ).
-
-test_upgrading_fields(Attachment) ->
- ?assertMatch(#att{}, Attachment),
- UpdatedHeaders = store(headers, [{<<"Ans">>, <<"42">>}], Attachment),
- ?assertMatch(X when is_list(X), UpdatedHeaders),
- UpdatedHeadersUndefined = store(headers, undefined, Attachment),
- ?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
-
-test_legacy_defaults(Attachment) ->
- ?assertEqual(<<>>, fetch(md5, Attachment)),
- ?assertEqual(0, fetch(revpos, Attachment)),
- ?assertEqual(identity, fetch(encoding, Attachment)).
-
-test_elided_entries(Attachment) ->
- ?assertNot(lists:keymember(name, 1, Attachment)),
- ?assertNot(lists:keymember(type, 1, Attachment)),
- ?assertNot(lists:keymember(att_len, 1, Attachment)),
- ?assertNot(lists:keymember(disk_len, 1, Attachment)),
- ?assertNot(lists:keymember(data, 1, Attachment)).
-
-test_construction() ->
- ?assert(new() == new()),
- Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
- ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
- ?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
-
-test_store_and_fetch() ->
- Attachment = empty_att(),
- ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
- ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
-
-test_transform() ->
- Attachment = new([{counter, 0}]),
- Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
- ?assertEqual(1, fetch(counter, Transformed)).
-
-max_attachment_size_test_() ->
- {"Max attachment size tests", [
- ?_assertEqual(infinity, max_attachment_size("infinity")),
- ?_assertEqual(5, max_attachment_size(5)),
- ?_assertEqual(5, max_attachment_size("5"))
- ]}.
-
--endif.
diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
deleted file mode 100644
index f361ab231..000000000
--- a/src/couch/src/couch_auth_cache.erl
+++ /dev/null
@@ -1,172 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_auth_cache).
-
--export([
- get_user_creds/1,
- get_user_creds/2,
- update_user_creds/3,
- get_admin/1,
- add_roles/2,
- auth_design_doc/1,
- ensure_users_db_exists/0
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_js_functions.hrl").
-
--spec get_user_creds(UserName :: string() | binary()) ->
- {ok, Credentials :: list(), term()} | nil.
-
-get_user_creds(UserName) ->
- get_user_creds(nil, UserName).
-
--spec get_user_creds(Req :: #httpd{} | nil, UserName :: string() | binary()) ->
- {ok, Credentials :: list(), term()} | nil.
-
-get_user_creds(Req, UserName) when is_list(UserName) ->
- get_user_creds(Req, ?l2b(UserName));
-get_user_creds(_Req, UserName) ->
- UserCreds =
- case get_admin(UserName) of
- nil ->
- get_from_db(UserName);
- Props ->
- case get_from_db(UserName) of
- nil ->
- Props;
- UserProps when is_list(UserProps) ->
- add_roles(Props, couch_util:get_value(<<"roles">>, UserProps))
- end
- end,
- validate_user_creds(UserCreds).
-
-update_user_creds(_Req, UserDoc, _AuthCtx) ->
- ok = ensure_users_db_exists(),
- couch_util:with_db(users_db(), fun(UserDb) ->
- {ok, _NewRev} = couch_db:update_doc(UserDb, UserDoc, []),
- ok
- end).
-
-add_roles(Props, ExtraRoles) ->
- CurrentRoles = couch_util:get_value(<<"roles">>, Props),
- lists:keyreplace(<<"roles">>, 1, Props, {<<"roles">>, CurrentRoles ++ ExtraRoles}).
-
-get_admin(UserName) when is_binary(UserName) ->
- get_admin(?b2l(UserName));
-get_admin(UserName) when is_list(UserName) ->
- case config:get("admins", UserName) of
- "-hashed-" ++ HashedPwdAndSalt ->
- % the name is an admin, now check to see if there is a user doc
- % which has a matching name, salt, and password_sha
- [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
- make_admin_doc(HashedPwd, Salt);
- "-pbkdf2-" ++ HashedPwdSaltAndIterations ->
- [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
- make_admin_doc(HashedPwd, Salt, Iterations);
- _Else ->
- nil
- end.
-
-make_admin_doc(HashedPwd, Salt) ->
- [
- {<<"roles">>, [<<"_admin">>]},
- {<<"salt">>, ?l2b(Salt)},
- {<<"password_scheme">>, <<"simple">>},
- {<<"password_sha">>, ?l2b(HashedPwd)}
- ].
-
-make_admin_doc(DerivedKey, Salt, Iterations) ->
- [
- {<<"roles">>, [<<"_admin">>]},
- {<<"salt">>, ?l2b(Salt)},
- {<<"iterations">>, list_to_integer(Iterations)},
- {<<"password_scheme">>, <<"pbkdf2">>},
- {<<"derived_key">>, ?l2b(DerivedKey)}
- ].
-
-get_from_db(UserName) ->
- ok = ensure_users_db_exists(),
- couch_util:with_db(users_db(), fun(Db) ->
- DocId = <<"org.couchdb.user:", UserName/binary>>,
- try
- {ok, Doc} = couch_db:open_doc(Db, DocId, [conflicts]),
- {DocProps} = couch_doc:to_json_obj(Doc, []),
- DocProps
- catch
- _:_Error ->
- nil
- end
- end).
-
-validate_user_creds(nil) ->
- nil;
-validate_user_creds(UserCreds) ->
- case couch_util:get_value(<<"_conflicts">>, UserCreds) of
- undefined ->
- ok;
- _ConflictList ->
- throw(
- {unauthorized,
- <<"User document conflicts must be resolved before the document",
- " is used for authentication purposes.">>}
- )
- end,
- {ok, UserCreds, nil}.
-
-users_db() ->
- DbNameList = config:get("couch_httpd_auth", "authentication_db", "_users"),
- ?l2b(DbNameList).
-
-ensure_users_db_exists() ->
- Options = [?ADMIN_CTX, nologifmissing],
- case couch_db:open(users_db(), Options) of
- {ok, Db} ->
- ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
- couch_db:close(Db);
- _Error ->
- {ok, Db} = couch_db:create(users_db(), Options),
- ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
- couch_db:close(Db)
- end,
- ok.
-
-ensure_auth_ddoc_exists(Db, DDocId) ->
- case couch_db:open_doc(Db, DDocId) of
- {not_found, _Reason} ->
- {ok, AuthDesign} = auth_design_doc(DDocId),
- {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
- ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
- ok;
- _ ->
- Props1 = lists:keyreplace(
- <<"validate_doc_update">>,
- 1,
- Props,
- {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
- ),
- couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), [])
- end
- end,
- ok.
-
-auth_design_doc(DocId) ->
- DocProps = [
- {<<"_id">>, DocId},
- {<<"language">>, <<"javascript">>},
- {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
- ],
- {ok, couch_doc:from_json_obj({DocProps})}.
diff --git a/src/couch/src/couch_base32.erl b/src/couch/src/couch_base32.erl
deleted file mode 100644
index 776fe773d..000000000
--- a/src/couch/src/couch_base32.erl
+++ /dev/null
@@ -1,156 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_base32).
-
--export([encode/1, decode/1]).
-
--define(SET, <<"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567">>).
-
--spec encode(binary()) -> binary().
-encode(Plain) when is_binary(Plain) ->
- IoList = encode(Plain, 0, byte_size(Plain) * 8, []),
- iolist_to_binary(lists:reverse(IoList)).
-
-encode(_Plain, _ByteOffset, 0, Acc) ->
- Acc;
-encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 8 ->
- <<A:5, B:3>> = binary:part(Plain, ByteOffset, 1),
- [<<(binary:at(?SET, A)), (binary:at(?SET, B bsl 2)), "======">> | Acc];
-encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 16 ->
- <<A:5, B:5, C:5, D:1>> = binary:part(Plain, ByteOffset, 2),
- [
- <<
- (binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D bsl 4)),
- "===="
- >>
- | Acc
- ];
-encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 24 ->
- <<A:5, B:5, C:5, D:5, E:4>> = binary:part(Plain, ByteOffset, 3),
- [
- <<
- (binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E bsl 1)),
- "==="
- >>
- | Acc
- ];
-encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 32 ->
- <<A:5, B:5, C:5, D:5, E:5, F:5, G:2>> = binary:part(Plain, ByteOffset, 4),
- [
- <<
- (binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E)),
- (binary:at(?SET, F)),
- (binary:at(?SET, G bsl 3)),
- "="
- >>
- | Acc
- ];
-encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining >= 40 ->
- <<A:5, B:5, C:5, D:5, E:5, F:5, G:5, H:5>> =
- binary:part(Plain, ByteOffset, 5),
- Output = <<
- (binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E)),
- (binary:at(?SET, F)),
- (binary:at(?SET, G)),
- (binary:at(?SET, H))
- >>,
- encode(Plain, ByteOffset + 5, BitsRemaining - 40, [Output | Acc]).
-
--spec decode(binary()) -> binary().
-decode(Encoded) when is_binary(Encoded) ->
- IoList = decode(Encoded, 0, []),
- iolist_to_binary(lists:reverse(IoList)).
-
-decode(Encoded, ByteOffset, Acc) when ByteOffset == byte_size(Encoded) ->
- Acc;
-decode(Encoded, ByteOffset, Acc) ->
- case binary:part(Encoded, ByteOffset, 8) of
- <<A:1/binary, B:1/binary, "======">> ->
- [<<(find_in_set(A)):5, (find_in_set(B) bsr 2):3>> | Acc];
- <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, "====">> ->
- [
- <<
- (find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D) bsr 4):1
- >>
- | Acc
- ];
- <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, "===">> ->
- [
- <<
- (find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E) bsr 1):4
- >>
- | Acc
- ];
- <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary, "=">> ->
- [
- <<
- (find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E)):5,
- (find_in_set(F)):5,
- (find_in_set(G) bsr 3):2
- >>
- | Acc
- ];
- <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary,
- H:1/binary>> ->
- decode(
- Encoded,
- ByteOffset + 8,
- [
- <<
- (find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E)):5,
- (find_in_set(F)):5,
- (find_in_set(G)):5,
- (find_in_set(H)):5
- >>
- | Acc
- ]
- )
- end.
-
-find_in_set(Char) ->
- case binary:match(?SET, Char) of
- nomatch ->
- erlang:error(not_base32);
- {Offset, _} ->
- Offset
- end.
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
deleted file mode 100644
index 486ed7cb0..000000000
--- a/src/couch/src/couch_bt_engine.erl
+++ /dev/null
@@ -1,1229 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine).
--behavior(couch_db_engine).
-
--export([
- exists/1,
-
- delete/3,
- delete_compaction_files/3,
-
- is_compacting/1,
-
- init/2,
- terminate/2,
- handle_db_updater_call/2,
- handle_db_updater_info/2,
-
- incref/1,
- decref/1,
- monitored_by/1,
-
- last_activity/1,
-
- get_compacted_seq/1,
- get_del_doc_count/1,
- get_disk_version/1,
- get_doc_count/1,
- get_epochs/1,
- get_purge_seq/1,
- get_oldest_purge_seq/1,
- get_purge_infos_limit/1,
- get_revs_limit/1,
- get_security/1,
- get_props/1,
- get_size_info/1,
- get_partition_info/2,
- get_update_seq/1,
- get_uuid/1,
-
- set_revs_limit/2,
- set_purge_infos_limit/2,
- set_security/2,
- set_props/2,
-
- set_update_seq/2,
-
- open_docs/2,
- open_local_docs/2,
- read_doc_body/2,
- load_purge_infos/2,
-
- serialize_doc/2,
- write_doc_body/2,
- write_doc_infos/3,
- purge_docs/3,
- copy_purge_infos/2,
-
- commit_data/1,
-
- open_write_stream/2,
- open_read_stream/2,
- is_active_stream/2,
-
- fold_docs/4,
- fold_local_docs/4,
- fold_changes/5,
- fold_purge_infos/5,
- count_changes_since/2,
-
- start_compaction/4,
- finish_compaction/4
-]).
-
--export([
- init_state/4
-]).
-
--export([
- id_tree_split/1,
- id_tree_join/2,
- id_tree_reduce/2,
-
- seq_tree_split/1,
- seq_tree_join/2,
- seq_tree_reduce/2,
-
- local_tree_split/1,
- local_tree_join/2,
-
- purge_tree_split/1,
- purge_tree_join/2,
- purge_tree_reduce/2,
- purge_seq_tree_split/1,
- purge_seq_tree_join/2
-]).
-
-% Used by the compactor
--export([
- update_header/2,
- copy_security/2,
- copy_props/2
-]).
-
--include_lib("kernel/include/file.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-exists(FilePath) ->
- case is_file(FilePath) of
- true ->
- true;
- false ->
- is_file(FilePath ++ ".compact")
- end.
-
-delete(RootDir, FilePath, Async) ->
- %% Delete any leftover compaction files. If we don't do this, a
- %% subsequent request for this DB will try to open them and use
- %% them for recovery.
- delete_compaction_files(RootDir, FilePath, [{context, compaction}]),
-
- % Delete the actual database file
- couch_file:delete(RootDir, FilePath, Async).
-
-delete_compaction_files(RootDir, FilePath, DelOpts) ->
- lists:foreach(
- fun(Ext) ->
- couch_file:delete(RootDir, FilePath ++ Ext, DelOpts)
- end,
- [".compact", ".compact.data", ".compact.meta"]
- ).
-
-is_compacting(DbName) ->
- lists:any(
- fun(Ext) ->
- filelib:is_regular(?b2l(DbName) ++ Ext)
- end,
- [".compact", ".compact.data", ".compact.meta"]
- ).
-
-init(FilePath, Options) ->
- {ok, Fd} = open_db_file(FilePath, Options),
- Header =
- case lists:member(create, Options) of
- true ->
- delete_compaction_files(FilePath),
- Header0 = couch_bt_engine_header:new(),
- Header1 = init_set_props(Fd, Header0, Options),
- ok = couch_file:write_header(Fd, Header1),
- Header1;
- false ->
- case couch_file:read_header(Fd) of
- {ok, Header0} ->
- Header0;
- no_valid_header ->
- delete_compaction_files(FilePath),
- Header0 = couch_bt_engine_header:new(),
- ok = couch_file:write_header(Fd, Header0),
- Header0
- end
- end,
- {ok, init_state(FilePath, Fd, Header, Options)}.
-
-terminate(_Reason, St) ->
- % If the reason we died is because our fd disappeared
- % then we don't need to try closing it again.
- Ref = St#st.fd_monitor,
- if
- Ref == closed ->
- ok;
- true ->
- ok = couch_file:close(St#st.fd),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- after 500 ->
- ok
- end
- end,
- couch_util:shutdown_sync(St#st.fd),
- ok.
-
-handle_db_updater_call(Msg, St) ->
- {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor = Ref} = St) ->
- {stop, normal, St#st{fd = undefined, fd_monitor = closed}}.
-
-incref(St) ->
- {ok, St#st{fd_monitor = erlang:monitor(process, St#st.fd)}}.
-
-decref(St) ->
- true = erlang:demonitor(St#st.fd_monitor, [flush]),
- ok.
-
-monitored_by(St) ->
- case erlang:process_info(St#st.fd, monitored_by) of
- {monitored_by, Pids} ->
- lists:filter(fun is_pid/1, Pids);
- _ ->
- []
- end.
-
-last_activity(#st{fd = Fd}) ->
- couch_file:last_read(Fd).
-
-get_compacted_seq(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, compacted_seq).
-
-get_del_doc_count(#st{} = St) ->
- {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
- element(2, Reds).
-
-get_disk_version(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, disk_version).
-
-get_doc_count(#st{} = St) ->
- {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
- element(1, Reds).
-
-get_epochs(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, epochs).
-
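-% The purge_seq_tree is keyed by purge_seq, so the latest purge seq is the
-% first row of a reverse fold and the oldest is the first row of a forward
-% fold; both folds below stop after a single row.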
-get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
- Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
- {stop, PurgeSeq}
- end,
- {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, [{dir, rev}]),
- PurgeSeq.
-
-get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
- Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
- {stop, PurgeSeq}
- end,
- {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, []),
- PurgeSeq.
-
-get_purge_infos_limit(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, purge_infos_limit).
-
-get_revs_limit(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, revs_limit).
-
-get_size_info(#st{} = St) ->
- {ok, FileSize} = couch_file:bytes(St#st.fd),
- {ok, DbReduction} = couch_btree:full_reduce(St#st.id_tree),
- SizeInfo0 = element(3, DbReduction),
- SizeInfo =
- case SizeInfo0 of
- SI when is_record(SI, size_info) ->
- SI;
- {AS, ES} ->
- #size_info{active = AS, external = ES};
- AS ->
- #size_info{active = AS}
- end,
- ActiveSize = active_size(St, SizeInfo),
- ExternalSize = SizeInfo#size_info.external,
- [
- {active, ActiveSize},
- {external, ExternalSize},
- {file, FileSize}
- ].
-
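-% Fold callback used by get_partition_info/2. On inner btree nodes (traverse)
-% a subtree whose reported key belongs to the partition is accounted for via
-% its precomputed reductions and then skipped; the visit clause tallies
-% individual docs, counting live and deleted ones separately.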
-partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, SizesAcc}) ->
- case couch_partition:is_member(Key, Partition) of
- true ->
- {skip, {Partition, DC + DCAcc, DDC + DDCAcc, reduce_sizes(Sizes, SizesAcc)}};
- false ->
- {ok, {Partition, DCAcc, DDCAcc, SizesAcc}}
- end;
-partition_size_cb(visit, FDI, _PrevReds, {Partition, DCAcc, DDCAcc, Acc}) ->
- InPartition = couch_partition:is_member(FDI#full_doc_info.id, Partition),
- Deleted = FDI#full_doc_info.deleted,
- case {InPartition, Deleted} of
- {true, true} ->
- {ok, {Partition, DCAcc, DDCAcc + 1, reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
- {true, false} ->
- {ok, {Partition, DCAcc + 1, DDCAcc, reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
- {false, _} ->
- {ok, {Partition, DCAcc, DDCAcc, Acc}}
- end.
-
-get_partition_info(#st{} = St, Partition) ->
- StartKey = couch_partition:start_key(Partition),
- EndKey = couch_partition:end_key(Partition),
- Fun = fun partition_size_cb/4,
- InitAcc = {Partition, 0, 0, #size_info{}},
- Options = [{start_key, StartKey}, {end_key, EndKey}],
- {ok, _, OutAcc} = couch_btree:fold(St#st.id_tree, Fun, InitAcc, Options),
- {Partition, DocCount, DocDelCount, SizeInfo} = OutAcc,
- [
- {partition, Partition},
- {doc_count, DocCount},
- {doc_del_count, DocDelCount},
- {sizes, [
- {active, SizeInfo#size_info.active},
- {external, SizeInfo#size_info.external}
- ]}
- ].
-
-get_security(#st{header = Header} = St) ->
- case couch_bt_engine_header:get(Header, security_ptr) of
- undefined ->
- [];
- Pointer ->
- {ok, SecProps} = couch_file:pread_term(St#st.fd, Pointer),
- SecProps
- end.
-
-get_props(#st{header = Header} = St) ->
- case couch_bt_engine_header:get(Header, props_ptr) of
- undefined ->
- [];
- Pointer ->
- {ok, Props} = couch_file:pread_term(St#st.fd, Pointer),
- Props
- end.
-
-get_update_seq(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, update_seq).
-
-get_uuid(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, uuid).
-
-set_revs_limit(#st{header = Header} = St, RevsLimit) ->
- NewSt = St#st{
- header = couch_bt_engine_header:set(Header, [
- {revs_limit, RevsLimit}
- ]),
- needs_commit = true
- },
- {ok, increment_update_seq(NewSt)}.
-
-set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) ->
- NewSt = St#st{
- header = couch_bt_engine_header:set(Header, [
- {purge_infos_limit, PurgeInfosLimit}
- ]),
- needs_commit = true
- },
- {ok, increment_update_seq(NewSt)}.
-
-set_security(#st{header = Header} = St, NewSecurity) ->
- Options = [{compression, St#st.compression}],
- {ok, Ptr, _} = couch_file:append_term(St#st.fd, NewSecurity, Options),
- NewSt = St#st{
- header = couch_bt_engine_header:set(Header, [
- {security_ptr, Ptr}
- ]),
- needs_commit = true
- },
- {ok, increment_update_seq(NewSt)}.
-
-set_props(#st{header = Header} = St, Props) ->
- Options = [{compression, St#st.compression}],
- {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
- NewSt = St#st{
- header = couch_bt_engine_header:set(Header, [
- {props_ptr, Ptr}
- ]),
- needs_commit = true
- },
- {ok, increment_update_seq(NewSt)}.
-
-open_docs(#st{} = St, DocIds) ->
- Results = couch_btree:lookup(St#st.id_tree, DocIds),
- lists:map(
- fun
- ({ok, FDI}) -> FDI;
- (not_found) -> not_found
- end,
- Results
- ).
-
-open_local_docs(#st{} = St, DocIds) ->
- Results = couch_btree:lookup(St#st.local_tree, DocIds),
- lists:map(
- fun
- ({ok, Doc}) -> Doc;
- (not_found) -> not_found
- end,
- Results
- ).
-
-read_doc_body(#st{} = St, #doc{} = Doc) ->
- {ok, {Body, Atts}} = couch_file:pread_term(St#st.fd, Doc#doc.body),
- Doc#doc{
- body = Body,
- atts = Atts
- }.
-
-load_purge_infos(St, UUIDs) ->
- Results = couch_btree:lookup(St#st.purge_tree, UUIDs),
- lists:map(
- fun
- ({ok, Info}) -> Info;
- (not_found) -> not_found
- end,
- Results
- ).
-
-serialize_doc(#st{} = St, #doc{} = Doc) ->
- Compress = fun(Term) ->
- case couch_compress:is_compressed(Term, St#st.compression) of
- true -> Term;
- false -> couch_compress:compress(Term, St#st.compression)
- end
- end,
- Body = Compress(Doc#doc.body),
- Atts = Compress(Doc#doc.atts),
- SummaryBin = ?term_to_bin({Body, Atts}),
- Md5 = couch_hash:md5_hash(SummaryBin),
- Data = couch_file:assemble_file_chunk(SummaryBin, Md5),
- % TODO: This is a terrible hack to get around the issues
- % in COUCHDB-3255. We'll need to come back and figure
- % out a better approach to handling the case when we
- % need to generate a new revision id after the doc
- % has been serialized.
- Doc#doc{
- body = Data,
- meta = [{comp_body, Body} | Doc#doc.meta]
- }.
-
-write_doc_body(St, #doc{} = Doc) ->
- #st{
- fd = Fd
- } = St,
- {ok, Ptr, Written} = couch_file:append_raw_chunk(Fd, Doc#doc.body),
- {ok, Doc#doc{body = Ptr}, Written}.
-
-write_doc_infos(#st{} = St, Pairs, LocalDocs) ->
- #st{
- id_tree = IdTree,
- seq_tree = SeqTree,
- local_tree = LocalTree
- } = St,
- FinalAcc = lists:foldl(
- fun({OldFDI, NewFDI}, Acc) ->
- {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc,
- case {OldFDI, NewFDI} of
- {not_found, #full_doc_info{}} ->
- {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc};
- {#full_doc_info{id = Id}, #full_doc_info{id = Id}} ->
- NewAddAcc = [NewFDI | AddAcc],
- NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
- {NewAddAcc, RemIdsAcc, NewRemSeqsAcc};
- {#full_doc_info{id = Id}, not_found} ->
- NewRemIdsAcc = [Id | RemIdsAcc],
- NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
- {AddAcc, NewRemIdsAcc, NewRemSeqsAcc}
- end
- end,
- {[], [], []},
- Pairs
- ),
-
- {Add, RemIds, RemSeqs} = FinalAcc,
- {ok, IdTree2} = couch_btree:add_remove(IdTree, Add, RemIds),
- {ok, SeqTree2} = couch_btree:add_remove(SeqTree, Add, RemSeqs),
-
- {AddLDocs, RemLDocIds} = lists:foldl(
- fun(Doc, {AddAcc, RemAcc}) ->
- case Doc#doc.deleted of
- true ->
- {AddAcc, [Doc#doc.id | RemAcc]};
- false ->
- {[Doc | AddAcc], RemAcc}
- end
- end,
- {[], []},
- LocalDocs
- ),
- {ok, LocalTree2} = couch_btree:add_remove(LocalTree, AddLDocs, RemLDocIds),
-
- NewUpdateSeq = lists:foldl(
- fun(#full_doc_info{update_seq = Seq}, Acc) ->
- erlang:max(Seq, Acc)
- end,
- get_update_seq(St),
- Add
- ),
-
- NewHeader = couch_bt_engine_header:set(St#st.header, [
- {update_seq, NewUpdateSeq}
- ]),
-
- {ok, St#st{
- header = NewHeader,
- id_tree = IdTree2,
- seq_tree = SeqTree2,
- local_tree = LocalTree2,
- needs_commit = true
- }}.
-
-purge_docs(#st{} = St, Pairs, PurgeInfos) ->
- #st{
- id_tree = IdTree,
- seq_tree = SeqTree,
- purge_tree = PurgeTree,
- purge_seq_tree = PurgeSeqTree
- } = St,
-
- RemDocIds = [Old#full_doc_info.id || {Old, not_found} <- Pairs],
- RemSeqs = [Old#full_doc_info.update_seq || {Old, _} <- Pairs],
- DocsToAdd = [New || {_, New} <- Pairs, New /= not_found],
- CurrSeq = couch_bt_engine_header:get(St#st.header, update_seq),
- Seqs = [FDI#full_doc_info.update_seq || FDI <- DocsToAdd],
- NewSeq = lists:max([CurrSeq | Seqs]),
-
- % We bump the update seq when it would otherwise be unchanged
- % to ensure that indexers see that they need to process the
- % new purge information.
- UpdateSeq =
- case NewSeq == CurrSeq of
- true -> CurrSeq + 1;
- false -> NewSeq
- end,
- Header = couch_bt_engine_header:set(St#st.header, [
- {update_seq, UpdateSeq}
- ]),
-
- {ok, IdTree2} = couch_btree:add_remove(IdTree, DocsToAdd, RemDocIds),
- {ok, SeqTree2} = couch_btree:add_remove(SeqTree, DocsToAdd, RemSeqs),
- {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
- {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
- {ok, St#st{
- header = Header,
- id_tree = IdTree2,
- seq_tree = SeqTree2,
- purge_tree = PurgeTree2,
- purge_seq_tree = PurgeSeqTree2,
- needs_commit = true
- }}.
-
-copy_purge_infos(#st{} = St, PurgeInfos) ->
- #st{
- purge_tree = PurgeTree,
- purge_seq_tree = PurgeSeqTree
- } = St,
- {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
- {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
- {ok, St#st{
- purge_tree = PurgeTree2,
- purge_seq_tree = PurgeSeqTree2,
- needs_commit = true
- }}.
-
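-% Sync any outstanding writes before writing the new header, then sync again
-% so that a header on disk never points at data that has not been flushed.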
-commit_data(St) ->
- #st{
- fd = Fd,
- header = OldHeader,
- needs_commit = NeedsCommit
- } = St,
-
- NewHeader = update_header(St, OldHeader),
-
- case NewHeader /= OldHeader orelse NeedsCommit of
- true ->
- couch_file:sync(Fd),
- ok = couch_file:write_header(Fd, NewHeader),
- couch_file:sync(Fd),
- {ok, St#st{
- header = NewHeader,
- needs_commit = false
- }};
- false ->
- {ok, St}
- end.
-
-open_write_stream(#st{} = St, Options) ->
- couch_stream:open({couch_bt_engine_stream, {St#st.fd, []}}, Options).
-
-open_read_stream(#st{} = St, StreamSt) ->
- {ok, {couch_bt_engine_stream, {St#st.fd, StreamSt}}}.
-
-is_active_stream(#st{} = St, {couch_bt_engine_stream, {Fd, _}}) ->
- St#st.fd == Fd;
-is_active_stream(_, _) ->
- false.
-
-fold_docs(St, UserFun, UserAcc, Options) ->
- fold_docs_int(St, St#st.id_tree, UserFun, UserAcc, Options).
-
-fold_local_docs(St, UserFun, UserAcc, Options) ->
- case fold_docs_int(St, St#st.local_tree, UserFun, UserAcc, Options) of
- {ok, _Reds, FinalAcc} -> {ok, null, FinalAcc};
- {ok, FinalAcc} -> {ok, FinalAcc}
- end.
-
-fold_changes(St, SinceSeq, UserFun, UserAcc, Options) ->
- Fun = fun drop_reductions/4,
- InAcc = {UserFun, UserAcc},
- Opts = [{start_key, SinceSeq + 1}] ++ Options,
- {ok, _, OutAcc} = couch_btree:fold(St#st.seq_tree, Fun, InAcc, Opts),
- {_, FinalUserAcc} = OutAcc,
- {ok, FinalUserAcc}.
-
-fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) ->
- PurgeSeqTree = St#st.purge_seq_tree,
- StartSeq = StartSeq0 + 1,
- MinSeq = get_oldest_purge_seq(St),
- if
- MinSeq =< StartSeq -> ok;
- true -> erlang:error({invalid_start_purge_seq, StartSeq0})
- end,
- Wrapper = fun(Info, _Reds, UAcc) ->
- UserFun(Info, UAcc)
- end,
- Opts = [{start_key, StartSeq}] ++ Options,
- {ok, _, OutAcc} = couch_btree:fold(PurgeSeqTree, Wrapper, UserAcc, Opts),
- {ok, OutAcc}.
-
-count_changes_since(St, SinceSeq) ->
- BTree = St#st.seq_tree,
- FoldFun = fun(_SeqStart, PartialReds, 0) ->
- {ok, couch_btree:final_reduce(BTree, PartialReds)}
- end,
- Opts = [{start_key, SinceSeq + 1}],
- {ok, Changes} = couch_btree:fold_reduce(BTree, FoldFun, 0, Opts),
- Changes.
-
-start_compaction(St, DbName, Options, Parent) ->
- Args = [St, DbName, Options, Parent],
- Pid = spawn_link(couch_bt_engine_compactor, start, Args),
- {ok, St, Pid}.
-
-finish_compaction(OldState, DbName, Options, CompactFilePath) ->
- {ok, NewState1} = ?MODULE:init(CompactFilePath, Options),
- OldSeq = get_update_seq(OldState),
- NewSeq = get_update_seq(NewState1),
- case OldSeq == NewSeq of
- true ->
- finish_compaction_int(OldState, NewState1);
- false ->
- Level = list_to_existing_atom(
- config:get(
- "couchdb", "compaction_log_level", "info"
- )
- ),
- couch_log:Level(
- "Compaction file still behind main file "
- "(update seq=~p. compact update seq=~p). Retrying.",
- [OldSeq, NewSeq]
- ),
- ok = decref(NewState1),
- start_compaction(OldState, DbName, Options, self())
- end.
-
-id_tree_split(#full_doc_info{} = Info) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = Deleted,
- sizes = SizeInfo,
- rev_tree = Tree
- } = Info,
- {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-id_tree_join(Id, {HighSeq, Deleted, DiskTree}) ->
- % Handle old formats before data_size was added
- id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree});
-id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
- #full_doc_info{
- id = Id,
- update_seq = HighSeq,
- deleted = ?i2b(Deleted),
- sizes = couch_db_updater:upgrade_sizes(Sizes),
- rev_tree = rev_tree(DiskTree)
- }.
-
-id_tree_reduce(reduce, FullDocInfos) ->
- lists:foldl(
- fun(Info, {NotDeleted, Deleted, Sizes}) ->
- Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
- case Info#full_doc_info.deleted of
- true ->
- {NotDeleted, Deleted + 1, Sizes2};
- false ->
- {NotDeleted + 1, Deleted, Sizes2}
- end
- end,
- {0, 0, #size_info{}},
- FullDocInfos
- );
-id_tree_reduce(rereduce, Reds) ->
- lists:foldl(
- fun
- ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
- % pre 1.2 format, will be upgraded on compaction
- {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
- ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
- AccSizes2 = reduce_sizes(AccSizes, Sizes),
- {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
- end,
- {0, 0, #size_info{}},
- Reds
- ).
-
-seq_tree_split(#full_doc_info{} = Info) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = Del,
- sizes = SizeInfo,
- rev_tree = Tree
- } = Info,
- {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
- seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree});
-seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = ?i2b(Del),
- sizes = join_sizes(Sizes),
- rev_tree = rev_tree(DiskTree)
- };
-seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
- % Older versions stored #doc_info records in the seq_tree.
- % Compact to upgrade.
- Revs = lists:map(
- fun({Rev, Seq, Bp}) ->
- #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp}
- end,
- RevInfos
- ),
- DeletedRevs = lists:map(
- fun({Rev, Seq, Bp}) ->
- #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp}
- end,
- DeletedRevInfos
- ),
- #doc_info{
- id = Id,
- high_seq = KeySeq,
- revs = Revs ++ DeletedRevs
- }.
-
-seq_tree_reduce(reduce, DocInfos) ->
- % count the number of documents
- length(DocInfos);
-seq_tree_reduce(rereduce, Reds) ->
- lists:sum(Reds).
-
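-% Local doc revisions are stored on disk as integers. Both representations
-% are accepted here for compatibility with older files; join converts stored
-% integer revisions back to the binary form used in #doc records.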
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) ->
- #doc{
- id = Id,
- body = BodyData
- } = Doc,
- {Id, {binary_to_integer(Rev), BodyData}};
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) ->
- #doc{
- id = Id,
- body = BodyData
- } = Doc,
- {Id, {Rev, BodyData}}.
-
-local_tree_join(Id, {Rev, BodyData}) when is_binary(Rev) ->
- #doc{
- id = Id,
- revs = {0, [Rev]},
- body = BodyData
- };
-local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) ->
- #doc{
- id = Id,
- revs = {0, [integer_to_binary(Rev)]},
- body = BodyData
- }.
-
-purge_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
- {UUID, {PurgeSeq, DocId, Revs}}.
-
-purge_tree_join(UUID, {PurgeSeq, DocId, Revs}) ->
- {PurgeSeq, UUID, DocId, Revs}.
-
-purge_seq_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
- {PurgeSeq, {UUID, DocId, Revs}}.
-
-purge_seq_tree_join(PurgeSeq, {UUID, DocId, Revs}) ->
- {PurgeSeq, UUID, DocId, Revs}.
-
-purge_tree_reduce(reduce, IdRevs) ->
- % count the number of purge requests
- length(IdRevs);
-purge_tree_reduce(rereduce, Reds) ->
- lists:sum(Reds).
-
-set_update_seq(#st{header = Header} = St, UpdateSeq) ->
- {ok, St#st{
- header = couch_bt_engine_header:set(Header, [
- {update_seq, UpdateSeq}
- ]),
- needs_commit = true
- }}.
-
-copy_security(#st{header = Header} = St, SecProps) ->
- Options = [{compression, St#st.compression}],
- {ok, Ptr, _} = couch_file:append_term(St#st.fd, SecProps, Options),
- {ok, St#st{
- header = couch_bt_engine_header:set(Header, [
- {security_ptr, Ptr}
- ]),
- needs_commit = true
- }}.
-
-copy_props(#st{header = Header} = St, Props) ->
- Options = [{compression, St#st.compression}],
- {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
- {ok, St#st{
- header = couch_bt_engine_header:set(Header, [
- {props_ptr, Ptr}
- ]),
- needs_commit = true
- }}.
-
-open_db_file(FilePath, Options) ->
- case couch_file:open(FilePath, Options) of
- {ok, Fd} ->
- {ok, Fd};
- {error, enoent} ->
- % Couldn't find the file. Is there a compact version? This can
- % happen (rarely) if we crashed during the file switch.
- case couch_file:open(FilePath ++ ".compact", [nologifmissing]) of
- {ok, Fd} ->
- Fmt = "Recovering from compaction file: ~s~s",
- couch_log:info(Fmt, [FilePath, ".compact"]),
- ok = file:rename(FilePath ++ ".compact", FilePath),
- ok = couch_file:sync(Fd),
- {ok, Fd};
- {error, enoent} ->
- throw({not_found, no_db_file})
- end;
- Error ->
- throw(Error)
- end.
-
-init_state(FilePath, Fd, Header0, Options) ->
- ok = couch_file:sync(Fd),
-
- Compression = couch_compress:get_compression_method(),
-
- Header1 = couch_bt_engine_header:upgrade(Header0),
- Header2 = set_default_security_object(Fd, Header1, Compression, Options),
- Header = upgrade_purge_info(Fd, Header2),
-
- IdTreeState = couch_bt_engine_header:id_tree_state(Header),
- {ok, IdTree} = couch_btree:open(IdTreeState, Fd, [
- {split, fun ?MODULE:id_tree_split/1},
- {join, fun ?MODULE:id_tree_join/2},
- {reduce, fun ?MODULE:id_tree_reduce/2},
- {compression, Compression}
- ]),
-
- SeqTreeState = couch_bt_engine_header:seq_tree_state(Header),
- {ok, SeqTree} = couch_btree:open(SeqTreeState, Fd, [
- {split, fun ?MODULE:seq_tree_split/1},
- {join, fun ?MODULE:seq_tree_join/2},
- {reduce, fun ?MODULE:seq_tree_reduce/2},
- {compression, Compression}
- ]),
-
- LocalTreeState = couch_bt_engine_header:local_tree_state(Header),
- {ok, LocalTree} = couch_btree:open(LocalTreeState, Fd, [
- {split, fun ?MODULE:local_tree_split/1},
- {join, fun ?MODULE:local_tree_join/2},
- {compression, Compression}
- ]),
-
- PurgeTreeState = couch_bt_engine_header:purge_tree_state(Header),
- {ok, PurgeTree} = couch_btree:open(PurgeTreeState, Fd, [
- {split, fun ?MODULE:purge_tree_split/1},
- {join, fun ?MODULE:purge_tree_join/2},
- {reduce, fun ?MODULE:purge_tree_reduce/2}
- ]),
-
- PurgeSeqTreeState = couch_bt_engine_header:purge_seq_tree_state(Header),
- {ok, PurgeSeqTree} = couch_btree:open(PurgeSeqTreeState, Fd, [
- {split, fun ?MODULE:purge_seq_tree_split/1},
- {join, fun ?MODULE:purge_seq_tree_join/2},
- {reduce, fun ?MODULE:purge_tree_reduce/2}
- ]),
-
- ok = couch_file:set_db_pid(Fd, self()),
-
- St = #st{
- filepath = FilePath,
- fd = Fd,
- fd_monitor = erlang:monitor(process, Fd),
- header = Header,
- needs_commit = false,
- id_tree = IdTree,
- seq_tree = SeqTree,
- local_tree = LocalTree,
- compression = Compression,
- purge_tree = PurgeTree,
- purge_seq_tree = PurgeSeqTree
- },
-
- % If this is a new database, we've just created a
- % new UUID and default security object, which need
- % to be written to disk.
- case Header /= Header0 of
- true ->
- {ok, NewSt} = commit_data(St#st{needs_commit = true}),
- NewSt;
- false ->
- St
- end.
-
-update_header(St, Header) ->
- couch_bt_engine_header:set(Header, [
- {seq_tree_state, couch_btree:get_state(St#st.seq_tree)},
- {id_tree_state, couch_btree:get_state(St#st.id_tree)},
- {local_tree_state, couch_btree:get_state(St#st.local_tree)},
- {purge_tree_state, couch_btree:get_state(St#st.purge_tree)},
- {purge_seq_tree_state, couch_btree:get_state(St#st.purge_seq_tree)}
- ]).
-
-increment_update_seq(#st{header = Header} = St) ->
- UpdateSeq = couch_bt_engine_header:get(Header, update_seq),
- St#st{
- header = couch_bt_engine_header:set(Header, [
- {update_seq, UpdateSeq + 1}
- ])
- }.
-
-set_default_security_object(Fd, Header, Compression, Options) ->
- case couch_bt_engine_header:get(Header, security_ptr) of
- Pointer when is_integer(Pointer) ->
- Header;
- _ ->
- Default = couch_util:get_value(default_security_object, Options),
- AppendOpts = [{compression, Compression}],
- {ok, Ptr, _} = couch_file:append_term(Fd, Default, AppendOpts),
- couch_bt_engine_header:set(Header, security_ptr, Ptr)
- end.
-
-% This function is here, and not in couch_bt_engine_header
-% because it requires modifying file contents
-upgrade_purge_info(Fd, Header) ->
- case couch_bt_engine_header:get(Header, purge_tree_state) of
- nil ->
- Header;
- Ptr when is_tuple(Ptr) ->
- Header;
- PurgeSeq when is_integer(PurgeSeq) ->
- % Pointer to old purged ids/revs is in purge_seq_tree_state
- Ptr = couch_bt_engine_header:get(Header, purge_seq_tree_state),
-
- case Ptr of
- nil ->
- PTS = couch_bt_engine_header:purge_tree_state(Header),
- PurgeTreeSt =
- case PTS of
- 0 -> nil;
- Else -> Else
- end,
- couch_bt_engine_header:set(Header, [
- {purge_tree_state, PurgeTreeSt}
- ]);
- _ ->
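- % Convert the old list of purged {Id, Revs} into one purge info per
- % doc, assigning fresh UUIDs and consecutive purge seqs, and seed
- % both new purge btrees with them.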
- {ok, PurgedIdsRevs} = couch_file:pread_term(Fd, Ptr),
-
- {Infos, _} = lists:foldl(
- fun({Id, Revs}, {InfoAcc, PSeq}) ->
- Info = {PSeq, couch_uuids:random(), Id, Revs},
- {[Info | InfoAcc], PSeq + 1}
- end,
- {[], PurgeSeq},
- PurgedIdsRevs
- ),
-
- {ok, PurgeTree} = couch_btree:open(nil, Fd, [
- {split, fun ?MODULE:purge_tree_split/1},
- {join, fun ?MODULE:purge_tree_join/2},
- {reduce, fun ?MODULE:purge_tree_reduce/2}
- ]),
- {ok, PurgeTree2} = couch_btree:add(PurgeTree, Infos),
- PurgeTreeSt = couch_btree:get_state(PurgeTree2),
-
- {ok, PurgeSeqTree} = couch_btree:open(nil, Fd, [
- {split, fun ?MODULE:purge_seq_tree_split/1},
- {join, fun ?MODULE:purge_seq_tree_join/2},
- {reduce, fun ?MODULE:purge_tree_reduce/2}
- ]),
- {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, Infos),
- PurgeSeqTreeSt = couch_btree:get_state(PurgeSeqTree2),
-
- couch_bt_engine_header:set(Header, [
- {purge_tree_state, PurgeTreeSt},
- {purge_seq_tree_state, PurgeSeqTreeSt}
- ])
- end
- end.
-
-init_set_props(Fd, Header, Options) ->
- case couch_util:get_value(props, Options) of
- undefined ->
- Header;
- InitialProps ->
- Compression = couch_compress:get_compression_method(),
- AppendOpts = [{compression, Compression}],
- {ok, Ptr, _} = couch_file:append_term(Fd, InitialProps, AppendOpts),
- couch_bt_engine_header:set(Header, props_ptr, Ptr)
- end.
-
-delete_compaction_files(FilePath) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- DelOpts = [{context, compaction}],
- delete_compaction_files(RootDir, FilePath, DelOpts).
-
-rev_tree(DiskTree) ->
- couch_key_tree:map(
- fun
- (_RevId, {Del, Ptr, Seq}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq
- };
- (_RevId, {Del, Ptr, Seq, Size}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq,
- sizes = couch_db_updater:upgrade_sizes(Size)
- };
- (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq,
- sizes = couch_db_updater:upgrade_sizes(Sizes),
- atts = Atts
- };
- (_RevId, ?REV_MISSING) ->
- ?REV_MISSING
- end,
- DiskTree
- ).
-
-disk_tree(RevTree) ->
- couch_key_tree:map(
- fun
- (_RevId, ?REV_MISSING) ->
- ?REV_MISSING;
- (_RevId, #leaf{} = Leaf) ->
- #leaf{
- deleted = Del,
- ptr = Ptr,
- seq = Seq,
- sizes = Sizes,
- atts = Atts
- } = Leaf,
- {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
- end,
- RevTree
- ).
-
-split_sizes(#size_info{} = SI) ->
- {SI#size_info.active, SI#size_info.external}.
-
-join_sizes({Active, External}) when is_integer(Active), is_integer(External) ->
- #size_info{active = Active, external = External}.
-
-reduce_sizes(nil, _) ->
- nil;
-reduce_sizes(_, nil) ->
- nil;
-reduce_sizes(#size_info{} = S1, #size_info{} = S2) ->
- #size_info{
- active = S1#size_info.active + S2#size_info.active,
- external = S1#size_info.external + S2#size_info.external
- };
-reduce_sizes(S1, S2) ->
- US1 = couch_db_updater:upgrade_sizes(S1),
- US2 = couch_db_updater:upgrade_sizes(S2),
- reduce_sizes(US1, US2).
-
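-% Add the on-disk size of each btree to the active size. If any tree reports
-% its size as nil (older file formats), the total is reported as null.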
-active_size(#st{} = St, #size_info{} = SI) ->
- Trees = [
- St#st.id_tree,
- St#st.seq_tree,
- St#st.local_tree,
- St#st.purge_tree,
- St#st.purge_seq_tree
- ],
- lists:foldl(
- fun(T, Acc) ->
- case couch_btree:size(T) of
- _ when Acc == null ->
- null;
- nil ->
- null;
- Size ->
- Acc + Size
- end
- end,
- SI#size_info.active,
- Trees
- ).
-
-fold_docs_int(St, Tree, UserFun, UserAcc, Options) ->
- Fun =
- case lists:member(include_deleted, Options) of
- true -> fun include_deleted/4;
- false -> fun skip_deleted/4
- end,
- RedFun =
- case lists:member(include_reductions, Options) of
- true -> fun include_reductions/4;
- false -> fun drop_reductions/4
- end,
- InAcc = {RedFun, {UserFun, UserAcc}},
- {ok, Reds, OutAcc} = couch_btree:fold(Tree, Fun, InAcc, Options),
- {_, {_, FinalUserAcc}} = OutAcc,
- case lists:member(include_reductions, Options) of
- true when Tree == St#st.id_tree ->
- {ok, fold_docs_reduce_to_count(Reds), FinalUserAcc};
- true when Tree == St#st.local_tree ->
- {ok, 0, FinalUserAcc};
- false ->
- {ok, FinalUserAcc}
- end.
-
-include_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
- {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
- {Go, {UserFun, NewUserAcc}}.
-
-% First element of the reductions is the total
-% number of undeleted documents.
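-% A subtree whose reduction shows zero live documents can therefore be
-% skipped outright, and individual deleted docs are dropped in the visit
-% clause.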
-skip_deleted(traverse, _Entry, {0, _, _} = _Reds, Acc) ->
- {skip, Acc};
-skip_deleted(visit, #full_doc_info{deleted = true}, _, Acc) ->
- {ok, Acc};
-skip_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
- {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
- {Go, {UserFun, NewUserAcc}}.
-
-include_reductions(visit, FDI, Reds, {UserFun, UserAcc}) ->
- {Go, NewUserAcc} = UserFun(FDI, Reds, UserAcc),
- {Go, {UserFun, NewUserAcc}};
-include_reductions(_, _, _, Acc) ->
- {ok, Acc}.
-
-drop_reductions(visit, FDI, _Reds, {UserFun, UserAcc}) ->
- {Go, NewUserAcc} = UserFun(FDI, UserAcc),
- {Go, {UserFun, NewUserAcc}};
-drop_reductions(_, _, _, Acc) ->
- {ok, Acc}.
-
-fold_docs_reduce_to_count(Reds) ->
- RedFun = fun id_tree_reduce/2,
- FinalRed = couch_btree:final_reduce(RedFun, Reds),
- element(1, FinalRed).
-
-finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) ->
- #st{
- filepath = FilePath,
- local_tree = OldLocal
- } = OldSt,
- #st{
- filepath = CompactDataPath,
- header = Header,
- local_tree = NewLocal1
- } = NewSt1,
-
- % suck up all the local docs into memory and write them to the new db
- LoadFun = fun(Value, _Offset, Acc) ->
- {ok, [Value | Acc]}
- end,
- {ok, _, LocalDocs} = couch_btree:foldl(OldLocal, LoadFun, []),
- {ok, NewLocal2} = couch_btree:add(NewLocal1, LocalDocs),
-
- {ok, NewSt2} = commit_data(NewSt1#st{
- header = couch_bt_engine_header:set(Header, [
- {compacted_seq, get_update_seq(OldSt)},
- {revs_limit, get_revs_limit(OldSt)},
- {purge_infos_limit, get_purge_infos_limit(OldSt)}
- ]),
- local_tree = NewLocal2
- }),
-
- % Rename our *.compact.data file to *.compact so that if we
- % die between deleting the old file and renaming *.compact
- % we can recover correctly.
- ok = file:rename(CompactDataPath, FilePath ++ ".compact"),
-
- % Remove the uncompacted database file
- RootDir = config:get("couchdb", "database_dir", "."),
- couch_file:delete(RootDir, FilePath),
-
- % Move our compacted file into its final location
- ok = file:rename(FilePath ++ ".compact", FilePath),
-
- % Delete the old meta compaction file after promoting
- % the compaction file.
- couch_file:delete(RootDir, FilePath ++ ".compact.meta"),
-
- % We're finished with our old state
- decref(OldSt),
-
- % And return our finished new state
- {ok,
- NewSt2#st{
- filepath = FilePath
- },
- undefined}.
-
-is_file(Path) ->
- case file:read_file_info(Path, [raw]) of
- {ok, #file_info{type = regular}} -> true;
- {ok, #file_info{type = directory}} -> true;
- _ -> false
- end.
diff --git a/src/couch/src/couch_bt_engine.hrl b/src/couch/src/couch_bt_engine.hrl
deleted file mode 100644
index e3c1d4983..000000000
--- a/src/couch/src/couch_bt_engine.hrl
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(st, {
- filepath,
- fd,
- fd_monitor,
- % deprecated but keeping it here to avoid altering the record size
- fsync_options_deprecated,
- header,
- needs_commit,
- id_tree,
- seq_tree,
- local_tree,
- compression,
- purge_tree,
- purge_seq_tree
-}).
diff --git a/src/couch/src/couch_bt_engine_compactor.erl b/src/couch/src/couch_bt_engine_compactor.erl
deleted file mode 100644
index 8ed55b5c3..000000000
--- a/src/couch/src/couch_bt_engine_compactor.erl
+++ /dev/null
@@ -1,767 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor).
-
--export([
- start/4
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
--record(comp_st, {
- db_name,
- old_st,
- new_st,
- meta_fd,
- retry
-}).
-
--record(comp_header, {
- db_header,
- meta_st
-}).
-
--record(merge_st, {
- src_fd,
- id_tree,
- seq_tree,
- curr,
- rem_seqs,
- locs
-}).
-
--ifdef(TEST).
--define(COMP_EVENT(Name), couch_bt_engine_compactor_ev:event(Name)).
--else.
--define(COMP_EVENT(Name), ignore).
--endif.
-
-start(#st{} = St, DbName, Options, Parent) ->
- erlang:put(io_priority, {db_compact, DbName}),
- couch_log:debug("Compaction process spawned for db \"~s\"", [DbName]),
-
- couch_db_engine:trigger_on_compact(DbName),
-
- ?COMP_EVENT(init),
- {ok, InitCompSt} = open_compaction_files(DbName, St, Options),
- ?COMP_EVENT(files_opened),
-
- Stages = [
- fun copy_purge_info/1,
- fun copy_compact/1,
- fun commit_compaction_data/1,
- fun sort_meta_data/1,
- fun commit_compaction_data/1,
- fun copy_meta_data/1,
- fun compact_final_sync/1
- ],
-
- FinalCompSt = lists:foldl(
- fun(Stage, CompSt) ->
- Stage(CompSt)
- end,
- InitCompSt,
- Stages
- ),
-
- #comp_st{
- new_st = FinalNewSt,
- meta_fd = MetaFd
- } = FinalCompSt,
-
- ok = couch_bt_engine:decref(FinalNewSt),
- ok = couch_file:close(MetaFd),
-
- ?COMP_EVENT(before_notify),
- Msg = {compact_done, couch_bt_engine, FinalNewSt#st.filepath},
- gen_server:cast(Parent, Msg).
-
-open_compaction_files(DbName, OldSt, Options) ->
- #st{
- filepath = DbFilePath,
- header = SrcHdr
- } = OldSt,
- DataFile = DbFilePath ++ ".compact.data",
- MetaFile = DbFilePath ++ ".compact.meta",
- {ok, DataFd, DataHdr} = open_compaction_file(DataFile),
- {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
- DataHdrIsDbHdr = couch_bt_engine_header:is_header(DataHdr),
- CompSt =
- case {DataHdr, MetaHdr} of
- {#comp_header{} = A, #comp_header{} = A} ->
- % We're restarting a compaction that did not finish
- % before trying to swap out with the original db
- DbHeader = A#comp_header.db_header,
- St0 = couch_bt_engine:init_state(
- DataFile, DataFd, DbHeader, Options
- ),
- St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_st),
- #comp_st{
- db_name = DbName,
- old_st = OldSt,
- new_st = St1,
- meta_fd = MetaFd,
- retry = St0#st.id_tree
- };
- _ when DataHdrIsDbHdr ->
- % We tried to swap out the compaction but there were
- % writes to the database during compaction. Start
- % a compaction retry.
- Header = couch_bt_engine_header:from(SrcHdr),
- ok = reset_compaction_file(MetaFd, Header),
- St0 = couch_bt_engine:init_state(
- DataFile, DataFd, DataHdr, Options
- ),
- St1 = bind_emsort(St0, MetaFd, nil),
- #comp_st{
- db_name = DbName,
- old_st = OldSt,
- new_st = St1,
- meta_fd = MetaFd,
- retry = St0#st.id_tree
- };
- _ ->
- % We're starting a compaction from scratch
- Header = couch_bt_engine_header:from(SrcHdr),
- ok = reset_compaction_file(DataFd, Header),
- ok = reset_compaction_file(MetaFd, Header),
- St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options),
- St1 = bind_emsort(St0, MetaFd, nil),
- #comp_st{
- db_name = DbName,
- old_st = OldSt,
- new_st = St1,
- meta_fd = MetaFd,
- retry = nil
- }
- end,
- unlink(DataFd),
- erlang:monitor(process, MetaFd),
- {ok, CompSt}.
-
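-% Copy purge infos added since the new file's purge seq from the old
-% purge_seq_tree into the new database, buffering them in doc_buffer_size
-% chunks and committing a checkpoint header after roughly checkpoint_after
-% bytes have been copied.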
-copy_purge_info(#comp_st{} = CompSt) ->
- #comp_st{
- db_name = DbName,
- old_st = OldSt,
- new_st = NewSt,
- retry = Retry
- } = CompSt,
- ?COMP_EVENT(purge_init),
- MinPurgeSeq = couch_util:with_db(DbName, fun(Db) ->
- couch_db:get_minimum_purge_seq(Db)
- end),
- OldPSTree = OldSt#st.purge_seq_tree,
- StartSeq = couch_bt_engine:get_purge_seq(NewSt) + 1,
- BufferSize = config:get_integer(
- "database_compaction", "doc_buffer_size", 524288
- ),
- CheckpointAfter = config:get(
- "database_compaction", "checkpoint_after", BufferSize * 10
- ),
-
- EnumFun = fun(Info, _Reds, {StAcc0, InfosAcc, InfosSize, CopiedSize}) ->
- NewInfosSize = InfosSize + ?term_size(Info),
- if
- NewInfosSize >= BufferSize ->
- StAcc1 = copy_purge_infos(
- OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry
- ),
- NewCopiedSize = CopiedSize + NewInfosSize,
- if
- NewCopiedSize >= CheckpointAfter ->
- StAcc2 = commit_compaction_data(StAcc1),
- {ok, {StAcc2, [], 0, 0}};
- true ->
- {ok, {StAcc1, [], 0, NewCopiedSize}}
- end;
- true ->
- NewInfosAcc = [Info | InfosAcc],
- {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}}
- end
- end,
-
- InitAcc = {NewSt, [], 0, 0},
- Opts = [{start_key, StartSeq}],
- {ok, _, FinalAcc} = couch_btree:fold(OldPSTree, EnumFun, InitAcc, Opts),
- {NewStAcc, Infos, _, _} = FinalAcc,
- FinalNewSt = copy_purge_infos(OldSt, NewStAcc, Infos, MinPurgeSeq, Retry),
-
- ?COMP_EVENT(purge_done),
- CompSt#comp_st{
- new_st = FinalNewSt
- }.
-
-copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
- #st{
- id_tree = OldIdTree
- } = OldSt,
-
- % Re-bind our id_tree to the backing btree
- NewIdTreeState = couch_bt_engine_header:id_tree_state(NewSt0#st.header),
- MetaFd = couch_emsort:get_fd(NewSt0#st.id_tree),
- MetaState = couch_emsort:get_state(NewSt0#st.id_tree),
- NewSt1 = bind_id_tree(NewSt0, NewSt0#st.fd, NewIdTreeState),
-
- #st{
- id_tree = NewIdTree0,
- seq_tree = NewSeqTree0,
- purge_tree = NewPurgeTree0,
- purge_seq_tree = NewPurgeSeqTree0
- } = NewSt1,
-
- % Copy over the purge infos
- InfosToAdd = lists:filter(
- fun({PSeq, _, _, _}) ->
- PSeq > MinPurgeSeq
- end,
- Infos
- ),
- {ok, NewPurgeTree1} = couch_btree:add(NewPurgeTree0, InfosToAdd),
- {ok, NewPurgeSeqTree1} = couch_btree:add(NewPurgeSeqTree0, InfosToAdd),
-
- NewSt2 = NewSt1#st{
- purge_tree = NewPurgeTree1,
- purge_seq_tree = NewPurgeSeqTree1
- },
-
- % If we're performing a retry compaction we have to check if
- % any of the referenced docs have been completely purged
- % from the database. Any doc that has been completely purged
- % must then be removed from our partially compacted database.
- NewSt3 =
- if
- Retry == nil ->
- NewSt2;
- true ->
- AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos],
- UniqDocIds = lists:usort(AllDocIds),
- OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds),
- OldZipped = lists:zip(UniqDocIds, OldIdResults),
-
- % The list of non-existent docs in the database being compacted
- MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped],
-
- % Remove any of those docs that still exist in the partially compacted database
- NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds),
- ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}],
-
- {RemIds, RemSeqs} = lists:unzip(
- lists:map(
- fun(FDI) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq
- } = FDI,
- {Id, Seq}
- end,
- ToRemove
- )
- ),
-
- {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds),
- {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs),
-
- NewSt2#st{
- id_tree = NewIdTree1,
- seq_tree = NewSeqTree1
- }
- end,
-
- Header = couch_bt_engine:update_header(NewSt3, NewSt3#st.header),
- NewSt4 = NewSt3#st{
- header = Header
- },
- bind_emsort(NewSt4, MetaFd, MetaState).
-
-copy_compact(#comp_st{} = CompSt) ->
- #comp_st{
- db_name = DbName,
- old_st = St,
- new_st = NewSt0,
- retry = Retry
- } = CompSt,
-
- Compression = couch_compress:get_compression_method(),
- NewSt = NewSt0#st{compression = Compression},
- NewUpdateSeq = couch_bt_engine:get_update_seq(NewSt0),
- TotalChanges = couch_bt_engine:count_changes_since(St, NewUpdateSeq),
- BufferSize = list_to_integer(
- config:get("database_compaction", "doc_buffer_size", "524288")
- ),
- CheckpointAfter = couch_util:to_integer(
- config:get(
- "database_compaction",
- "checkpoint_after",
- BufferSize * 10
- )
- ),
-
- EnumBySeqFun =
- fun(
- DocInfo,
- _Offset,
- {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}
- ) ->
- Seq =
- case DocInfo of
- #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
- #doc_info{} -> DocInfo#doc_info.high_seq
- end,
-
- AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
- if
- AccUncopiedSize2 >= BufferSize ->
- NewSt2 = copy_docs(
- St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry
- ),
- AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
- if
- AccCopiedSize2 >= CheckpointAfter ->
- {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
- CommNewSt3 = commit_compaction_data(NewSt3),
- {ok, {CommNewSt3, [], 0, 0}};
- true ->
- {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
- {ok, {NewSt3, [], 0, AccCopiedSize2}}
- end;
- true ->
- {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2, AccCopiedSize}}
- end
- end,
-
- TaskProps0 = [
- {type, database_compaction},
- {database, DbName},
- {phase, document_copy},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ],
- case (Retry =/= nil) and couch_task_status:is_task_added() of
- true ->
- couch_task_status:update([
- {retry, true},
- {phase, document_copy},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ]);
- false ->
- couch_task_status:add_task(TaskProps0),
- couch_task_status:set_update_frequency(500)
- end,
-
- ?COMP_EVENT(seq_init),
- {ok, _, {NewSt2, Uncopied, _, _}} =
- couch_btree:foldl(
- St#st.seq_tree,
- EnumBySeqFun,
- {NewSt, [], 0, 0},
- [{start_key, NewUpdateSeq + 1}]
- ),
-
- NewSt3 = copy_docs(St, NewSt2, lists:reverse(Uncopied), Retry),
-
- ?COMP_EVENT(seq_done),
-
- % Copy the security information over
- SecProps = couch_bt_engine:get_security(St),
- {ok, NewSt4} = couch_bt_engine:copy_security(NewSt3, SecProps),
-
- % Copy general properties over
- Props = couch_bt_engine:get_props(St),
- {ok, NewSt5} = couch_bt_engine:set_props(NewSt4, Props),
-
- FinalUpdateSeq = couch_bt_engine:get_update_seq(St),
- {ok, NewSt6} = couch_bt_engine:set_update_seq(NewSt5, FinalUpdateSeq),
-
- CompSt#comp_st{
- new_st = NewSt6
- }.
-
-copy_docs(St, #st{} = NewSt, MixedInfos, Retry) ->
- DocInfoIds = [Id || #doc_info{id = Id} <- MixedInfos],
- LookupResults = couch_btree:lookup(St#st.id_tree, DocInfoIds),
- % COUCHDB-968, make sure we prune duplicates during compaction
- NewInfos0 = lists:usort(
- fun(#full_doc_info{id = A}, #full_doc_info{id = B}) ->
- A =< B
- end,
- merge_lookups(MixedInfos, LookupResults)
- ),
-
- NewInfos1 = lists:map(
- fun(Info) ->
- {NewRevTree, FinalAcc} = couch_key_tree:mapfold(
- fun
- ({RevPos, RevId}, #leaf{ptr = Sp} = Leaf, leaf, SizesAcc) ->
- {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt),
- #size_info{external = OldExternalSize} = Leaf#leaf.sizes,
- ExternalSize =
- case OldExternalSize of
- 0 when is_binary(Body) ->
- couch_compress:uncompressed_size(Body);
- 0 ->
- couch_ejson_size:encoded_size(Body);
- N ->
- N
- end,
- Doc0 = #doc{
- id = Info#full_doc_info.id,
- revs = {RevPos, [RevId]},
- deleted = Leaf#leaf.deleted,
- body = Body,
- atts = AttInfos
- },
- Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0),
- {ok, Doc2, ActiveSize} =
- couch_bt_engine:write_doc_body(NewSt, Doc1),
- AttSizes = [{element(3, A), element(4, A)} || A <- AttInfos],
- NewLeaf = Leaf#leaf{
- ptr = Doc2#doc.body,
- sizes = #size_info{
- active = ActiveSize,
- external = ExternalSize
- },
- atts = AttSizes
- },
- {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)};
- (_Rev, _Leaf, branch, SizesAcc) ->
- {?REV_MISSING, SizesAcc}
- end,
- {0, 0, []},
- Info#full_doc_info.rev_tree
- ),
- {FinalAS, FinalES, FinalAtts} = FinalAcc,
- TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
- NewActiveSize = FinalAS + TotalAttSize,
- NewExternalSize = FinalES + TotalAttSize,
- ?COMP_EVENT(seq_copy),
- Info#full_doc_info{
- rev_tree = NewRevTree,
- sizes = #size_info{
- active = NewActiveSize,
- external = NewExternalSize
- }
- }
- end,
- NewInfos0
- ),
-
- Limit = couch_bt_engine:get_revs_limit(St),
- NewInfos = lists:map(
- fun(FDI) ->
- FDI#full_doc_info{
- rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit)
- }
- end,
- NewInfos1
- ),
-
- RemoveSeqs =
- case Retry of
- nil ->
- [];
- OldDocIdTree ->
- % Compaction is being rerun to catch up to writes during the
- % first pass. This means we may have docs that already exist
- % in the seq_tree in the .data file. Here we look up any old
- % update_seqs so that they can be removed.
- Ids = [Id || #full_doc_info{id = Id} <- NewInfos],
- Existing = couch_btree:lookup(OldDocIdTree, Ids),
- [Seq || {ok, #full_doc_info{update_seq = Seq}} <- Existing]
- end,
-
- {ok, SeqTree} = couch_btree:add_remove(
- NewSt#st.seq_tree, NewInfos, RemoveSeqs
- ),
-
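- % During compaction the id_tree field holds a couch_emsort handle backed
- % by the .compact.meta file: append the new full_doc_infos there and record
- % {Id, Seq} -> Location entries, which copy_meta_data/1 later merges into
- % the real id btree.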
- EMSortFd = couch_emsort:get_fd(NewSt#st.id_tree),
- {ok, LocSizes} = couch_file:append_terms(EMSortFd, NewInfos),
- EMSortEntries = lists:zipwith(
- fun(FDI, {Loc, _}) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq
- } = FDI,
- {{Id, Seq}, Loc}
- end,
- NewInfos,
- LocSizes
- ),
- {ok, IdEms} = couch_emsort:add(NewSt#st.id_tree, EMSortEntries),
- update_compact_task(length(NewInfos)),
- NewSt#st{id_tree = IdEms, seq_tree = SeqTree}.
-
-copy_doc_attachments(#st{} = SrcSt, SrcSp, DstSt) ->
- {ok, {BodyData, BinInfos0}} = couch_file:pread_term(SrcSt#st.fd, SrcSp),
- BinInfos =
- case BinInfos0 of
- _ when is_binary(BinInfos0) ->
- couch_compress:decompress(BinInfos0);
- _ when is_list(BinInfos0) ->
- % pre 1.2 file format
- BinInfos0
- end,
- % copy the bin values
- NewBinInfos = lists:map(
- fun
- ({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
- % 010 UPGRADE CODE
- {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
- {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
- ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
- {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
- {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- Enc =
- case Enc1 of
- true ->
- % 0110 UPGRADE CODE
- gzip;
- false ->
- % 0110 UPGRADE CODE
- identity;
- _ ->
- Enc1
- end,
- {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
- end,
- BinInfos
- ),
- {BodyData, NewBinInfos}.
-
-sort_meta_data(#comp_st{new_st = St0} = CompSt) ->
- ?COMP_EVENT(md_sort_init),
- NumKVs = couch_emsort:num_kvs(St0#st.id_tree),
- NumMerges = couch_emsort:num_merges(St0#st.id_tree),
- couch_task_status:update([
- {phase, docid_sort},
- {progress, 0},
- {changes_done, 0},
- {total_changes, NumMerges * NumKVs}
- ]),
- Reporter = fun update_compact_task/1,
- {ok, Ems} = couch_emsort:merge(St0#st.id_tree, Reporter),
- ?COMP_EVENT(md_sort_done),
- CompSt#comp_st{
- new_st = St0#st{
- id_tree = Ems
- }
- }.
-
-copy_meta_data(#comp_st{new_st = St} = CompSt) ->
- #st{
- fd = Fd,
- header = Header,
- id_tree = Src
- } = St,
- SrcFd = couch_emsort:get_fd(Src),
- DstState = couch_bt_engine_header:id_tree_state(Header),
- {ok, IdTree0} = couch_btree:open(DstState, Fd, [
- {split, fun couch_bt_engine:id_tree_split/1},
- {join, fun couch_bt_engine:id_tree_join/2},
- {reduce, fun couch_bt_engine:id_tree_reduce/2}
- ]),
- {ok, Iter} = couch_emsort:iter(Src),
- Acc0 = #merge_st{
- src_fd = SrcFd,
- id_tree = IdTree0,
- seq_tree = St#st.seq_tree,
- rem_seqs = [],
- locs = []
- },
- ?COMP_EVENT(md_copy_init),
- NumKVs = couch_emsort:num_kvs(Src),
- couch_task_status:update([
- {phase, docid_copy},
- {progress, 0},
- {changes_done, 0},
- {total_changes, NumKVs}
- ]),
- Acc = merge_docids(Iter, Acc0),
- {ok, Infos} = couch_file:pread_terms(SrcFd, Acc#merge_st.locs),
- {ok, IdTree} = couch_btree:add(Acc#merge_st.id_tree, Infos),
- {ok, SeqTree} = couch_btree:add_remove(
- Acc#merge_st.seq_tree, [], Acc#merge_st.rem_seqs
- ),
- update_compact_task(NumKVs),
- ?COMP_EVENT(md_copy_done),
- CompSt#comp_st{
- new_st = St#st{
- id_tree = IdTree,
- seq_tree = SeqTree
- }
- }.
-
-compact_final_sync(#comp_st{new_st = St0} = CompSt) ->
- ?COMP_EVENT(before_final_sync),
- {ok, St1} = couch_bt_engine:commit_data(St0),
- ?COMP_EVENT(after_final_sync),
- CompSt#comp_st{
- new_st = St1
- }.
-
-open_compaction_file(FilePath) ->
- case couch_file:open(FilePath, [nologifmissing]) of
- {ok, Fd} ->
- case couch_file:read_header(Fd) of
- {ok, Header} -> {ok, Fd, Header};
- no_valid_header -> {ok, Fd, nil}
- end;
- {error, enoent} ->
- {ok, Fd} = couch_file:open(FilePath, [create]),
- {ok, Fd, nil}
- end.
-
-reset_compaction_file(Fd, Header) ->
- ok = couch_file:truncate(Fd, 0),
- ok = couch_file:write_header(Fd, Header).
-
-commit_compaction_data(#comp_st{new_st = St} = CompSt) ->
- % Compaction needs to write headers to both the data file
- % and the meta file so if we need to restart we can pick
- % back up from where we left off.
- CompSt#comp_st{
- new_st = commit_compaction_data(St)
- };
-commit_compaction_data(#st{} = St) ->
- commit_compaction_data(St, couch_emsort:get_fd(St#st.id_tree)),
- commit_compaction_data(St, St#st.fd).
-
-commit_compaction_data(#st{header = OldHeader} = St0, Fd) ->
- DataState = couch_bt_engine_header:id_tree_state(OldHeader),
- MetaFd = couch_emsort:get_fd(St0#st.id_tree),
- MetaState = couch_emsort:get_state(St0#st.id_tree),
- St1 = bind_id_tree(St0, St0#st.fd, DataState),
- Header = couch_bt_engine:update_header(St1, St1#st.header),
- CompHeader = #comp_header{
- db_header = Header,
- meta_st = MetaState
- },
- ok = couch_file:sync(Fd),
- ok = couch_file:write_header(Fd, CompHeader),
- St2 = St1#st{
- header = Header
- },
- bind_emsort(St2, MetaFd, MetaState).
-
-bind_emsort(St, Fd, nil) ->
- {ok, Ems} = couch_emsort:open(Fd),
- St#st{id_tree = Ems};
-bind_emsort(St, Fd, {BB, _} = Root) when is_list(BB) ->
- % Upgrade clause when we find old compaction files
- bind_emsort(St, Fd, [{root, Root}]);
-bind_emsort(St, Fd, State) ->
- {ok, Ems} = couch_emsort:open(Fd, State),
- St#st{id_tree = Ems}.
-
-bind_id_tree(St, Fd, State) ->
- {ok, IdBtree} = couch_btree:open(State, Fd, [
- {split, fun couch_bt_engine:id_tree_split/1},
- {join, fun couch_bt_engine:id_tree_join/2},
- {reduce, fun couch_bt_engine:id_tree_reduce/2}
- ]),
- St#st{id_tree = IdBtree}.
-
-merge_lookups(Infos, []) ->
- Infos;
-merge_lookups([], _) ->
- [];
-merge_lookups([#doc_info{} = DI | RestInfos], [{ok, FDI} | RestLookups]) ->
- % Assert we've matched our lookups
- if
- DI#doc_info.id == FDI#full_doc_info.id -> ok;
- true -> erlang:error({mismatched_doc_infos, DI#doc_info.id})
- end,
- [FDI | merge_lookups(RestInfos, RestLookups)];
-merge_lookups([FDI | RestInfos], Lookups) ->
- [FDI | merge_lookups(RestInfos, Lookups)].
-
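-% Flush the merged doc infos to the target btrees in batches: once more than
-% a thousand locations have accumulated, read the buffered infos from the
-% meta file, add them to the id btree, drop superseded update seqs from the
-% seq btree, and continue merging.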
-merge_docids(Iter, #merge_st{locs = Locs} = Acc) when length(Locs) > 1000 ->
- #merge_st{
- src_fd = SrcFd,
- id_tree = IdTree0,
- seq_tree = SeqTree0,
- rem_seqs = RemSeqs
- } = Acc,
- {ok, Infos} = couch_file:pread_terms(SrcFd, Locs),
- {ok, IdTree1} = couch_btree:add(IdTree0, Infos),
- {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
- Acc1 = Acc#merge_st{
- id_tree = IdTree1,
- seq_tree = SeqTree1,
- rem_seqs = [],
- locs = []
- },
- update_compact_task(length(Locs)),
- merge_docids(Iter, Acc1);
-merge_docids(Iter, #merge_st{curr = Curr} = Acc) ->
- case next_info(Iter, Curr, []) of
- {NextIter, NewCurr, Loc, Seqs} ->
- Acc1 = Acc#merge_st{
- locs = [Loc | Acc#merge_st.locs],
- rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
- curr = NewCurr
- },
- ?COMP_EVENT(md_copy_row),
- merge_docids(NextIter, Acc1);
- {finished, Loc, Seqs} ->
- Acc#merge_st{
- locs = [Loc | Acc#merge_st.locs],
- rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
- curr = undefined
- };
- empty ->
- Acc
- end.
-
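-% The emsort iterator yields {Id, Seq} keys in sorted order, so entries for
-% the same doc id are adjacent. Keep the location of the entry with the
-% highest update seq and collect the older seqs so they can be removed from
-% the seq_tree.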
-next_info(Iter, undefined, []) ->
- case couch_emsort:next(Iter) of
- {ok, {{Id, Seq}, Loc}, NextIter} ->
- next_info(NextIter, {Id, Seq, Loc}, []);
- finished ->
- empty
- end;
-next_info(Iter, {Id, Seq, Loc}, Seqs) ->
- case couch_emsort:next(Iter) of
- {ok, {{Id, NSeq}, NLoc}, NextIter} ->
- next_info(NextIter, {Id, NSeq, NLoc}, [Seq | Seqs]);
- {ok, {{NId, NSeq}, NLoc}, NextIter} ->
- {NextIter, {NId, NSeq, NLoc}, Loc, Seqs};
- finished ->
- {finished, Loc, Seqs}
- end.
-
-update_compact_task(NumChanges) ->
- [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
- Changes2 = Changes + NumChanges,
- Progress =
- case Total of
- 0 ->
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
- couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
deleted file mode 100644
index e28f07723..000000000
--- a/src/couch/src/couch_bt_engine_header.erl
+++ /dev/null
@@ -1,451 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_header).
-
--export([
- new/0,
- from/1,
- is_header/1,
- upgrade/1,
- get/2,
- get/3,
- set/2,
- set/3
-]).
-
--export([
- disk_version/1,
- latest_disk_version/0,
- update_seq/1,
- id_tree_state/1,
- seq_tree_state/1,
- latest/1,
- local_tree_state/1,
- purge_tree_state/1,
- purge_seq_tree_state/1,
- purge_infos_limit/1,
- security_ptr/1,
- revs_limit/1,
- uuid/1,
- epochs/1,
- compacted_seq/1
-]).
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, then there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 8).
-
--record(db_header, {
- disk_version = ?LATEST_DISK_VERSION,
- update_seq = 0,
- unused = 0,
- id_tree_state = nil,
- seq_tree_state = nil,
- local_tree_state = nil,
- purge_tree_state = nil,
- % purge_seq_tree: purge_seq -> {uuid, docid, revs}
- purge_seq_tree_state = nil,
- security_ptr = nil,
- revs_limit = 1000,
- uuid,
- epochs,
- compacted_seq,
- purge_infos_limit = 1000,
- props_ptr
-}).
-
--define(PARTITION_DISK_VERSION, 8).
-
-new() ->
- #db_header{
- uuid = couch_uuids:random(),
- epochs = [{node(), 0}]
- }.
-
-from(Header0) ->
- Header = upgrade(Header0),
- #db_header{
- uuid = Header#db_header.uuid,
- epochs = Header#db_header.epochs,
- compacted_seq = Header#db_header.compacted_seq
- }.
-
-is_header(Header) ->
- try
- upgrade(Header),
- true
- catch
- _:_ ->
- false
- end.
-
-upgrade(Header) ->
- Funs = [
- fun upgrade_tuple/1,
- fun upgrade_disk_version/1,
- fun upgrade_uuid/1,
- fun upgrade_epochs/1,
- fun upgrade_compacted_seq/1
- ],
- lists:foldl(
- fun(F, HdrAcc) ->
- F(HdrAcc)
- end,
- Header,
- Funs
- ).
-
-get(Header, Key) ->
- ?MODULE:get(Header, Key, undefined).
-
-get(Header, Key, Default) ->
- get_field(Header, Key, Default).
-
-set(Header, Key, Value) ->
- ?MODULE:set(Header, [{Key, Value}]).
-
-set(Header0, Fields) ->
- % A subtlety here is that if a database was open during
- % the release upgrade that introduced uuids and epochs, then
- % this dynamic upgrade also assigns it a uuid and epoch.
- Header = upgrade(Header0),
- lists:foldl(
- fun({Field, Value}, HdrAcc) ->
- set_field(HdrAcc, Field, Value)
- end,
- Header,
- Fields
- ).
-
-disk_version(Header) ->
- get_field(Header, disk_version).
-
-latest_disk_version() ->
- ?LATEST_DISK_VERSION.
-
-update_seq(Header) ->
- get_field(Header, update_seq).
-
-id_tree_state(Header) ->
- get_field(Header, id_tree_state).
-
-seq_tree_state(Header) ->
- get_field(Header, seq_tree_state).
-
-local_tree_state(Header) ->
- get_field(Header, local_tree_state).
-
-purge_tree_state(Header) ->
- get_field(Header, purge_tree_state).
-
-purge_seq_tree_state(Header) ->
- get_field(Header, purge_seq_tree_state).
-
-security_ptr(Header) ->
- get_field(Header, security_ptr).
-
-revs_limit(Header) ->
- get_field(Header, revs_limit).
-
-uuid(Header) ->
- get_field(Header, uuid).
-
-epochs(Header) ->
- get_field(Header, epochs).
-
-compacted_seq(Header) ->
- get_field(Header, compacted_seq).
-
-purge_infos_limit(Header) ->
- get_field(Header, purge_infos_limit).
-
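-% Headers from older releases may be shorter tuples than the current record;
-% a field index past the end of the stored tuple falls back to the supplied
-% default instead of crashing.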
-get_field(Header, Field) ->
- get_field(Header, Field, undefined).
-
-get_field(Header, Field, Default) ->
- Idx = index(Field),
- case Idx > tuple_size(Header) of
- true -> Default;
- false -> element(index(Field), Header)
- end.
-
-set_field(Header, Field, Value) ->
- setelement(index(Field), Header, Value).
-
-index(Field) ->
- couch_util:get_value(Field, indexes()).
-
-indexes() ->
- Fields = record_info(fields, db_header),
- Indexes = lists:seq(2, record_info(size, db_header)),
- lists:zip(Fields, Indexes).
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
- Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
- NewSize = record_info(size, db_header),
- if
- tuple_size(Old) < NewSize -> ok;
- true -> erlang:error({invalid_header_size, Old})
- end,
- {_, New} = lists:foldl(
- fun(Val, {Idx, Hdr}) ->
- {Idx + 1, setelement(Idx, Hdr, Val)}
- end,
- {1, #db_header{}},
- tuple_to_list(Old)
- ),
- if
- is_record(New, db_header) -> ok;
- true -> erlang:error({invalid_header_extension, {Old, New}})
- end,
- New.
-
--define(OLD_DISK_VERSION_ERROR,
- "Database files from versions smaller than 0.10.0 are no longer supported"
-).
-
-upgrade_disk_version(#db_header{} = Header) ->
- case element(2, Header) of
- 1 ->
- throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 2 ->
- throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 3 ->
- throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- % [0.10 - 0.11)
- 4 ->
- Header#db_header{security_ptr = nil};
- % pre 1.2
- 5 ->
- Header#db_header{disk_version = ?LATEST_DISK_VERSION};
- % pre clustered purge
- 6 ->
- Header#db_header{disk_version = ?LATEST_DISK_VERSION};
- % pre partitioned dbs
- 7 ->
- Header#db_header{disk_version = ?LATEST_DISK_VERSION};
- ?LATEST_DISK_VERSION ->
- Header;
- _ ->
- Reason = "Incorrect disk header version",
- throw({database_disk_version_error, Reason})
- end.
-
-upgrade_uuid(#db_header{} = Header) ->
- case Header#db_header.uuid of
- undefined ->
- % Upgrading this old db file to a newer
- % on-disk format that includes a UUID.
- Header#db_header{uuid = couch_uuids:random()};
- _ ->
- Header
- end.
-
-upgrade_epochs(#db_header{} = Header) ->
- NewEpochs =
- case Header#db_header.epochs of
- undefined ->
- % This node is taking over ownership of a shard with
- % an old version of couch file. Before epochs there
- % was always an implicit assumption that a file was
- % owned since eternity by the node it was on. This
- % just codifies that assumption.
- [{node(), 0}];
- [{Node, _} | _] = Epochs0 when Node == node() ->
- % Current node is the current owner of this db
- Epochs0;
- Epochs1 ->
- % This node is taking over ownership of this db
- % and marking the update sequence where it happened.
- [{node(), Header#db_header.update_seq} | Epochs1]
- end,
- % It's possible for a node to open a db and claim
- % ownership but never make a write to the db. This
- % removes nodes that claimed ownership but never
- % changed the database.
- DedupedEpochs = remove_dup_epochs(NewEpochs),
- Header#db_header{epochs = DedupedEpochs}.
-
-% This relies slightly on the update_seqs in epochs being
-% sorted, due to how we only ever push things onto the
-% front. Although if we ever had a case where the update_seq
-% is not monotonically increasing, it's not clear that we'd
-% want to remove dupes (by sorting the input to this
-% function). So for now we don't sort but rely on the
-% idea that epochs is always sorted.
-remove_dup_epochs([_] = Epochs) ->
- Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
- % Seqs match, keep the most recent owner
- [{N1, S}];
-remove_dup_epochs([_, _] = Epochs) ->
- % Seqs don't match.
- Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
- % Seqs match, keep the most recent owner
- remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
- % Seqs don't match, recurse to check others
- [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
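To make the dedup concrete, a hypothetical evaluation (node names and seqs are invented, and the call is shown as if the function were exported):

    remove_dup_epochs([{b, 10}, {a, 10}, {a, 0}])
    %=> [{b, 10}, {a, 0}]  (the duplicate seq-10 claim collapses to the newest owner)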
-upgrade_compacted_seq(#db_header{} = Header) ->
- case Header#db_header.compacted_seq of
- undefined ->
- Header#db_header{compacted_seq = 0};
- _ ->
- Header
- end.
-
-latest(?LATEST_DISK_VERSION) ->
- true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
- false;
-latest(_Else) ->
- undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
- {
- % record name
- db_header,
- % disk version
- Vsn,
- % update_seq
- 100,
- % unused
- 0,
- % id_tree_state
- foo,
- % seq_tree_state
- bar,
- % local_tree_state
- bam,
- % was purge_seq - now purge_tree_state
- flam,
- % was purged_docs - now purge_seq_tree_state
- baz,
- % security_ptr
- bang,
- % revs_limit
- 999
- }.
-
--ifdef(run_broken_tests).
-
-upgrade_v3_test() ->
- Vsn3Header = mk_header(3),
- NewHeader = upgrade_tuple(Vsn3Header),
-
- % Tuple upgrades don't change any of the existing field values
- ?assert(is_record(NewHeader, db_header)),
- ?assertEqual(3, disk_version(NewHeader)),
- ?assertEqual(100, update_seq(NewHeader)),
- ?assertEqual(foo, id_tree_state(NewHeader)),
- ?assertEqual(bar, seq_tree_state(NewHeader)),
- ?assertEqual(bam, local_tree_state(NewHeader)),
- ?assertEqual(flam, purge_tree_state(NewHeader)),
- ?assertEqual(baz, purge_seq_tree_state(NewHeader)),
- ?assertEqual(bang, security_ptr(NewHeader)),
- ?assertEqual(999, revs_limit(NewHeader)),
- ?assertEqual(undefined, uuid(NewHeader)),
- ?assertEqual(undefined, epochs(NewHeader)),
-
- % Security ptr isn't changed until upgrade_disk_version/1
- NewNewHeader = upgrade_disk_version(NewHeader),
- ?assert(is_record(NewNewHeader, db_header)),
- ?assertEqual(nil, security_ptr(NewNewHeader)),
-
- % Assert upgrade works on really old headers
- NewestHeader = upgrade(Vsn3Header),
- ?assertMatch(<<_:32/binary>>, uuid(NewestHeader)),
- ?assertEqual([{node(), 0}], epochs(NewestHeader)).
-
--endif.
-
-upgrade_v5_to_v8_test() ->
- Vsn5Header = mk_header(5),
- NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
- ?assert(is_record(NewHeader, db_header)),
- ?assertEqual(8, disk_version(NewHeader)),
-
- % Security ptr isn't changed for v5 headers
- ?assertEqual(bang, security_ptr(NewHeader)).
-
-upgrade_uuid_test() ->
- Vsn5Header = mk_header(5),
-
- % Upgraded headers get a new UUID
- NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
- ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
- % Headers with a UUID don't have their UUID changed
- NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
- ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
- % Derived empty headers maintain the same UUID
- ResetHeader = from(NewNewHeader),
- ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-upgrade_epochs_test() ->
- Vsn5Header = mk_header(5),
-
- % Upgraded headers get a default epochs set
- NewHeader = upgrade(Vsn5Header),
- ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
- % Fake an old entry in epochs
- FakeFields = [
- {update_seq, 20},
- {epochs, [{'someothernode@someotherhost', 0}]}
- ],
- NotOwnedHeader = set(NewHeader, FakeFields),
-
- OwnedEpochs = [
- {node(), 20},
- {'someothernode@someotherhost', 0}
- ],
-
- % Upgrading a header not owned by the local node updates
- % the epochs appropriately.
- NowOwnedHeader = upgrade(NotOwnedHeader),
- ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
- % Headers with epochs stay the same after upgrades
- NewNewHeader = upgrade(NowOwnedHeader),
- ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
- % Getting a reset header maintains the epoch data
- ResetHeader = from(NewNewHeader),
- ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-get_uuid_from_old_header_test() ->
- Vsn5Header = mk_header(5),
- ?assertEqual(undefined, uuid(Vsn5Header)).
-
-get_epochs_from_old_header_test() ->
- Vsn5Header = mk_header(5),
- ?assertEqual(undefined, epochs(Vsn5Header)).
-
--endif.
diff --git a/src/couch/src/couch_bt_engine_stream.erl b/src/couch/src/couch_bt_engine_stream.erl
deleted file mode 100644
index 253877e77..000000000
--- a/src/couch/src/couch_bt_engine_stream.erl
+++ /dev/null
@@ -1,60 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_stream).
-
--export([
- foldl/3,
- seek/2,
- write/2,
- finalize/1,
- to_disk_term/1
-]).
-
-foldl({_Fd, []}, _Fun, Acc) ->
- Acc;
-foldl({Fd, [{Pos, _} | Rest]}, Fun, Acc) ->
- foldl({Fd, [Pos | Rest]}, Fun, Acc);
-foldl({Fd, [Bin | Rest]}, Fun, Acc) when is_binary(Bin) ->
- % We're processing the first bit of data
- % after we did a seek for a range fold.
- foldl({Fd, Rest}, Fun, Fun(Bin, Acc));
-foldl({Fd, [Pos | Rest]}, Fun, Acc) when is_integer(Pos) ->
- {ok, Bin} = couch_file:pread_binary(Fd, Pos),
- foldl({Fd, Rest}, Fun, Fun(Bin, Acc)).
-
-seek({Fd, [{Pos, Length} | Rest]}, Offset) ->
- case Length =< Offset of
- true ->
- seek({Fd, Rest}, Offset - Length);
- false ->
- seek({Fd, [Pos | Rest]}, Offset)
- end;
-seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) ->
- {ok, Bin} = couch_file:pread_binary(Fd, Pos),
- case iolist_size(Bin) =< Offset of
- true ->
- seek({Fd, Rest}, Offset - size(Bin));
- false ->
- <<_:Offset/binary, Tail/binary>> = Bin,
- {ok, {Fd, [Tail | Rest]}}
- end.
-
-write({Fd, Written}, Data) when is_pid(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, Data),
- {ok, {Fd, [{Pos, iolist_size(Data)} | Written]}}.
-
-finalize({Fd, Written}) ->
- {ok, {Fd, lists:reverse(Written)}}.
-
-to_disk_term({_Fd, Written}) ->
- {ok, Written}.
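For reference, a minimal write/read round trip through this stream API (a hypothetical helper; Fd is assumed to be an already opened couch_file pid):

    stream_example(Fd) ->
        {ok, St1} = couch_bt_engine_stream:write({Fd, []}, <<"hello ">>),
        {ok, St2} = couch_bt_engine_stream:write(St1, <<"world">>),
        {ok, St3} = couch_bt_engine_stream:finalize(St2),
        % foldl/3 walks the {Pos, Length} list in write order, reading each
        % chunk back from the file and feeding it to the fun.
        <<"hello world">> = couch_bt_engine_stream:foldl(
            St3,
            fun(Bin, Acc) -> <<Acc/binary, Bin/binary>> end,
            <<>>
        ).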
diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
deleted file mode 100644
index b974a22ee..000000000
--- a/src/couch/src/couch_btree.erl
+++ /dev/null
@@ -1,1175 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree).
-
--export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
--export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
--export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
--export([extract/2, assemble/3, less/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(FILL_RATIO, 0.5).
-
-extract(#btree{extract_kv = undefined}, Value) ->
- Value;
-extract(#btree{extract_kv = Extract}, Value) ->
- Extract(Value).
-
-assemble(#btree{assemble_kv = undefined}, Key, Value) ->
- {Key, Value};
-assemble(#btree{assemble_kv = Assemble}, Key, Value) ->
- Assemble(Key, Value).
-
-less(#btree{less = undefined}, A, B) ->
- A < B;
-less(#btree{less = Less}, A, B) ->
- Less(A, B).
-
-% Pass in 'nil' for State if it's a new Btree.
-open(State, Fd) ->
- {ok, #btree{root = State, fd = Fd}}.
-
-set_options(Bt, []) ->
- Bt;
-set_options(Bt, [{split, Extract} | Rest]) ->
- set_options(Bt#btree{extract_kv = Extract}, Rest);
-set_options(Bt, [{join, Assemble} | Rest]) ->
- set_options(Bt#btree{assemble_kv = Assemble}, Rest);
-set_options(Bt, [{less, Less} | Rest]) ->
- set_options(Bt#btree{less = Less}, Rest);
-set_options(Bt, [{reduce, Reduce} | Rest]) ->
- set_options(Bt#btree{reduce = Reduce}, Rest);
-set_options(Bt, [{compression, Comp} | Rest]) ->
- set_options(Bt#btree{compression = Comp}, Rest).
-
-open(State, Fd, Options) ->
- {ok, set_options(#btree{root = State, fd = Fd}, Options)}.
-
-get_state(#btree{root = Root}) ->
- Root.
-
-final_reduce(#btree{reduce = Reduce}, Val) ->
- final_reduce(Reduce, Val);
-final_reduce(Reduce, {[], []}) ->
- Reduce(reduce, []);
-final_reduce(_Bt, {[], [Red]}) ->
- Red;
-final_reduce(Reduce, {[], Reductions}) ->
- Reduce(rereduce, Reductions);
-final_reduce(Reduce, {KVs, Reductions}) ->
- Red = Reduce(reduce, KVs),
- final_reduce(Reduce, {[], [Red | Reductions]}).
-
-fold_reduce(#btree{root = Root} = Bt, Fun, Acc, Options) ->
- Dir = couch_util:get_value(dir, Options, fwd),
- StartKey = couch_util:get_value(start_key, Options),
- InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
- KeyGroupFun = get_group_fun(Bt, Options),
- try
- {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(
- Bt,
- Dir,
- Root,
- StartKey,
- InEndRangeFun,
- undefined,
- [],
- [],
- KeyGroupFun,
- Fun,
- Acc
- ),
- if
- GroupedKey2 == undefined ->
- {ok, Acc2};
- true ->
- case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
- {ok, Acc3} -> {ok, Acc3};
- {stop, Acc3} -> {ok, Acc3}
- end
- end
- catch
- throw:{stop, AccDone} -> {ok, AccDone}
- end.
-
-full_reduce(#btree{root = nil, reduce = Reduce}) ->
- {ok, Reduce(reduce, [])};
-full_reduce(#btree{root = Root}) ->
- {ok, element(2, Root)}.
-
-size(#btree{root = nil}) ->
- 0;
-size(#btree{root = {_P, _Red}}) ->
- % pre 1.2 format
- nil;
-size(#btree{root = {_P, _Red, Size}}) ->
- Size.
-
-get_group_fun(Bt, Options) ->
- case couch_util:get_value(key_group_level, Options) of
- exact ->
- make_group_fun(Bt, exact);
- 0 ->
- fun(_, _) -> true end;
- N when is_integer(N), N > 0 ->
- make_group_fun(Bt, N);
- undefined ->
- couch_util:get_value(key_group_fun, Options, fun(_, _) -> true end)
- end.
-
-make_group_fun(Bt, exact) ->
- fun({Key1, _}, {Key2, _}) ->
- case less(Bt, {Key1, nil}, {Key2, nil}) of
- false ->
- case less(Bt, {Key2, nil}, {Key1, nil}) of
- false ->
- true;
- _ ->
- false
- end;
- _ ->
- false
- end
- end;
-make_group_fun(Bt, GroupLevel) when is_integer(GroupLevel), GroupLevel > 0 ->
- fun
- GF({{p, Partition, Key1}, Val1}, {{p, Partition, Key2}, Val2}) ->
- GF({Key1, Val1}, {Key2, Val2});
- GF({[_ | _] = Key1, _}, {[_ | _] = Key2, _}) ->
- SL1 = lists:sublist(Key1, GroupLevel),
- SL2 = lists:sublist(Key2, GroupLevel),
- case less(Bt, {SL1, nil}, {SL2, nil}) of
- false ->
- case less(Bt, {SL2, nil}, {SL1, nil}) of
- false ->
- true;
- _ ->
- false
- end;
- _ ->
- false
- end;
- GF({Key1, _}, {Key2, _}) ->
- case less(Bt, {Key1, nil}, {Key2, nil}) of
- false ->
- case less(Bt, {Key2, nil}, {Key1, nil}) of
- false ->
- true;
- _ ->
- false
- end;
- _ ->
- false
- end
- end.
-
-% wraps a 2 or 3 arity function with the proper 4 arity function
-convert_fun_arity(Fun) when is_function(Fun, 2) ->
- fun
- (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
- (traverse, _K, _Red, AccIn) -> {ok, AccIn}
- end;
-convert_fun_arity(Fun) when is_function(Fun, 3) ->
- fun
- (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
- (traverse, _K, _Red, AccIn) -> {ok, AccIn}
- end;
-convert_fun_arity(Fun) when is_function(Fun, 4) ->
- % Already arity 4
- Fun.
-
-make_key_in_end_range_function(Bt, fwd, Options) ->
- case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- case couch_util:get_value(end_key, Options) of
- undefined ->
- fun(_Key) -> true end;
- LastKey ->
- fun(Key) -> not less(Bt, LastKey, Key) end
- end;
- EndKey ->
- fun(Key) -> less(Bt, Key, EndKey) end
- end;
-make_key_in_end_range_function(Bt, rev, Options) ->
- case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- case couch_util:get_value(end_key, Options) of
- undefined ->
- fun(_Key) -> true end;
- LastKey ->
- fun(Key) -> not less(Bt, Key, LastKey) end
- end;
- EndKey ->
- fun(Key) -> less(Bt, EndKey, Key) end
- end.
-
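In other words, reading the comparisons above: for a fwd fold, end_key is inclusive (the range check is not less(Bt, LastKey, Key), i.e. Key =< end_key in the tree's ordering), while end_key_gt is exclusive (less(Bt, Key, EndKey), i.e. Key < end_key_gt); the rev clauses mirror the same checks in the opposite direction. For example, with end_key = 5 a key equal to 5 is still visited, with end_key_gt = 5 it is not.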
-foldl(Bt, Fun, Acc) ->
- fold(Bt, Fun, Acc, []).
-
-foldl(Bt, Fun, Acc, Options) ->
- fold(Bt, Fun, Acc, Options).
-
-fold(#btree{root = nil}, _Fun, Acc, _Options) ->
- {ok, {[], []}, Acc};
-fold(#btree{root = Root} = Bt, Fun, Acc, Options) ->
- Dir = couch_util:get_value(dir, Options, fwd),
- InRange = make_key_in_end_range_function(Bt, Dir, Options),
- Result =
- case couch_util:get_value(start_key, Options) of
- undefined ->
- stream_node(
- Bt,
- [],
- Bt#btree.root,
- InRange,
- Dir,
- convert_fun_arity(Fun),
- Acc
- );
- StartKey ->
- stream_node(
- Bt,
- [],
- Bt#btree.root,
- StartKey,
- InRange,
- Dir,
- convert_fun_arity(Fun),
- Acc
- )
- end,
- case Result of
- {ok, Acc2} ->
- FullReduction = element(2, Root),
- {ok, {[], [FullReduction]}, Acc2};
- {stop, LastReduction, Acc2} ->
- {ok, LastReduction, Acc2}
- end.
-
-add(Bt, InsertKeyValues) ->
- add_remove(Bt, InsertKeyValues, []).
-
-add_remove(Bt, InsertKeyValues, RemoveKeys) ->
- {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
- {ok, Bt2}.
-
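A minimal usage sketch of the write/fold path (hypothetical helper; it assumes Fd is a couch_file pid and that the #btree record defaults, notably reduce = nil, apply):

    btree_example(Fd) ->
        {ok, Bt0} = couch_btree:open(nil, Fd),
        {ok, Bt1} = couch_btree:add(Bt0, [{1, a}, {2, b}, {3, c}]),
        % The 2-arity fold fun is wrapped to the full 4-arity form by
        % convert_fun_arity/1; here it simply collects every KV.
        {ok, _Reds, Acc} = couch_btree:foldl(
            Bt1,
            fun(KV, Acc0) -> {ok, [KV | Acc0]} end,
            []
        ),
        [{1, a}, {2, b}, {3, c}] = lists:reverse(Acc).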
-query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
- #btree{root = Root} = Bt,
- InsertActions = lists:map(
- fun(KeyValue) ->
- {Key, Value} = extract(Bt, KeyValue),
- {insert, Key, Value}
- end,
- InsertValues
- ),
- RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
- FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
- SortFun =
- fun({OpA, A, _}, {OpB, B, _}) ->
- case A == B of
- % A and B are equal, sort by op.
- true -> op_order(OpA) < op_order(OpB);
- false -> less(Bt, A, B)
- end
- end,
- Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
- {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
- {ok, NewRoot} = complete_root(Bt, KeyPointers),
- {ok, QueryResults, Bt#btree{root = NewRoot}}.
-
-% for ordering different operations with the same key.
-% fetch < remove < insert
-op_order(fetch) -> 1;
-op_order(remove) -> 2;
-op_order(insert) -> 3.
-
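As a small illustration of that ordering (hypothetical actions for a single key K): sorted with the SortFun above, [{insert, K, v}, {fetch, K, nil}, {remove, K, nil}] comes out as [{fetch, K, nil}, {remove, K, nil}, {insert, K, v}], so a fetch on a key sees the existing value before a remove or insert on that same key takes effect.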
-lookup(#btree{root = Root, less = Less} = Bt, Keys) ->
- SortedKeys =
- case Less of
- undefined -> lists:sort(Keys);
- _ -> lists:sort(Less, Keys)
- end,
- {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
- % We want to return the results in the same order as the keys were input
- % but we may have changed the order when we sorted. So we need to put the
- % order back into the results.
- couch_util:reorder_results(Keys, SortedResults).
-
-lookup(_Bt, nil, Keys) ->
- {ok, [{Key, not_found} || Key <- Keys]};
-lookup(Bt, Node, Keys) ->
- Pointer = element(1, Node),
- {NodeType, NodeList} = get_node(Bt, Pointer),
- case NodeType of
- kp_node ->
- lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
- kv_node ->
- lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
- end.
-
-lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
- {ok, lists:reverse(Output)};
-lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
- {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
- N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
- {Key, PointerInfo} = element(N, NodeTuple),
- SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
- case lists:splitwith(SplitFun, LookupKeys) of
- {[], GreaterQueries} ->
- lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
- {LessEqQueries, GreaterQueries} ->
- {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
- lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
- end.
-
-lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
- {ok, lists:reverse(Output)};
-lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
- % keys not found
- {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
- N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
- {Key, Value} = element(N, NodeTuple),
- case less(Bt, LookupKey, Key) of
- true ->
- % LookupKey is less than Key
- lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
- false ->
- case less(Bt, Key, LookupKey) of
- true ->
- % LookupKey is greater than Key
- lookup_kvnode(Bt, NodeTuple, N + 1, RestLookupKeys, [
- {LookupKey, not_found} | Output
- ]);
- false ->
- % LookupKey is equal to Key
- lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [
- {LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output
- ])
- end
- end.
-
-complete_root(_Bt, []) ->
- {ok, nil};
-complete_root(_Bt, [{_Key, PointerInfo}]) ->
- {ok, PointerInfo};
-complete_root(Bt, KPs) ->
- {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
- complete_root(Bt, ResultKeyPointers).
-
-%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
-% It is inaccurate as it does not account for compression when blocks are
-% written. Plus the repeated ?term_size/1 calls probably make it really
-% inefficient.
-
-chunkify(InList) ->
- BaseChunkSize = get_chunk_size(),
- case ?term_size(InList) of
- Size when Size > BaseChunkSize ->
- NumberOfChunksLikely = ((Size div BaseChunkSize) + 1),
- ChunkThreshold = Size div NumberOfChunksLikely,
- chunkify(InList, ChunkThreshold, [], 0, []);
- _Else ->
- [InList]
- end.
-
-chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
- lists:reverse(OutputChunks);
-chunkify([], _ChunkThreshold, [Item], _OutListSize, [PrevChunk | RestChunks]) ->
- NewPrevChunk = PrevChunk ++ [Item],
- lists:reverse(RestChunks, [NewPrevChunk]);
-chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
- lists:reverse([lists:reverse(OutList) | OutputChunks]);
-chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
- case ?term_size(InElement) of
- Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
- chunkify(RestInList, ChunkThreshold, [], 0, [
- lists:reverse([InElement | OutList]) | OutputChunks
- ]);
- Size ->
- chunkify(
- RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks
- )
- end.
-
--compile({inline, [get_chunk_size/0]}).
-get_chunk_size() ->
- try
- list_to_integer(config:get("couchdb", "btree_chunk_size", "1279"))
- catch
- error:badarg ->
- 1279
- end.
-
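A brief worked example of the chunking arithmetic above (illustrative sizes only): with the default btree_chunk_size of 1279 bytes, an input list whose ?term_size is 3000 bytes gives NumberOfChunksLikely = (3000 div 1279) + 1 = 3 and ChunkThreshold = 3000 div 3 = 1000, so elements are accumulated into the current chunk until it passes roughly 1000 bytes, at which point the chunk is closed and a new one started.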
-modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
- {NodeType, NodeList} =
- case RootPointerInfo of
- nil ->
- {kv_node, []};
- _Tuple ->
- Pointer = element(1, RootPointerInfo),
- get_node(Bt, Pointer)
- end,
- NodeTuple = list_to_tuple(NodeList),
-
- {ok, NewNodeList, QueryOutput2} =
- case NodeType of
- kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
- kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
- end,
- case NewNodeList of
- % no nodes remain
- [] ->
- {ok, [], QueryOutput2};
- % nothing changed
- NodeList ->
- {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
- {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
- _Else2 ->
- {ok, ResultList} =
- case RootPointerInfo of
- nil ->
- write_node(Bt, NodeType, NewNodeList);
- _ ->
- {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
- OldNode = {LastKey, RootPointerInfo},
- write_node(Bt, OldNode, NodeType, NodeList, NewNodeList)
- end,
- {ok, ResultList, QueryOutput2}
- end.
-
-reduce_node(#btree{reduce = nil}, _NodeType, _NodeList) ->
- [];
-reduce_node(#btree{reduce = R}, kp_node, NodeList) ->
- R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
-reduce_node(#btree{reduce = R} = Bt, kv_node, NodeList) ->
- R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
-
-reduce_tree_size(kv_node, NodeSize, _KvList) ->
- NodeSize;
-reduce_tree_size(kp_node, NodeSize, []) ->
- NodeSize;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
- % pre 1.2 format
- nil;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
- nil;
-reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
- reduce_tree_size(kp_node, NodeSize + Sz, NodeList).
-
-get_node(#btree{fd = Fd}, NodePos) ->
- {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
- {NodeType, NodeList}.
-
-write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
- % split up nodes into smaller sizes
- Chunks = chunkify(NodeList),
- % now write out each chunk and return the KeyPointer pairs for those nodes
- ToWrite = [{NodeType, Chunk} || Chunk <- Chunks],
- WriteOpts = [{compression, Comp}],
- {ok, PtrSizes} = couch_file:append_terms(Fd, ToWrite, WriteOpts),
- {ok, group_kps(Bt, NodeType, Chunks, PtrSizes)}.
-
-group_kps(_Bt, _NodeType, [], []) ->
- [];
-group_kps(Bt, NodeType, [Chunk | RestChunks], [{Ptr, Size} | RestPtrSizes]) ->
- {LastKey, _} = lists:last(Chunk),
- SubTreeSize = reduce_tree_size(NodeType, Size, Chunk),
- KP = {LastKey, {Ptr, reduce_node(Bt, NodeType, Chunk), SubTreeSize}},
- [KP | group_kps(Bt, NodeType, RestChunks, RestPtrSizes)].
-
-write_node(Bt, _OldNode, NodeType, [], NewList) ->
- write_node(Bt, NodeType, NewList);
-write_node(Bt, _OldNode, NodeType, [_], NewList) ->
- write_node(Bt, NodeType, NewList);
-write_node(Bt, OldNode, NodeType, OldList, NewList) ->
- case can_reuse_old_node(OldList, NewList) of
- {true, Prefix, Suffix} ->
- {ok, PrefixKVs} =
- case Prefix of
- [] -> {ok, []};
- _ -> write_node(Bt, NodeType, Prefix)
- end,
- {ok, SuffixKVs} =
- case Suffix of
- [] -> {ok, []};
- _ -> write_node(Bt, NodeType, Suffix)
- end,
- Result = PrefixKVs ++ [OldNode] ++ SuffixKVs,
- {ok, Result};
- false ->
- write_node(Bt, NodeType, NewList)
- end.
-
-can_reuse_old_node(OldList, NewList) ->
- {Prefix, RestNewList} = remove_prefix_kvs(hd(OldList), NewList),
- case old_list_is_prefix(OldList, RestNewList, 0) of
- {true, Size, Suffix} ->
- ReuseThreshold = get_chunk_size() * ?FILL_RATIO,
- if
- Size < ReuseThreshold -> false;
- true -> {true, Prefix, Suffix}
- end;
- false ->
- false
- end.
-
-remove_prefix_kvs(KV1, [KV2 | Rest]) when KV2 < KV1 ->
- {Prefix, RestNewList} = remove_prefix_kvs(KV1, Rest),
- {[KV2 | Prefix], RestNewList};
-remove_prefix_kvs(_, RestNewList) ->
- {[], RestNewList}.
-
-% No more KVs in the old node, so it's a prefix
-old_list_is_prefix([], Suffix, Size) ->
- {true, Size, Suffix};
-% Some KV's have been removed from the old node
-old_list_is_prefix(_OldList, [], _Size) ->
- false;
-% KV is equal in both old and new node so continue
-old_list_is_prefix([KV | Rest1], [KV | Rest2], Acc) ->
- old_list_is_prefix(Rest1, Rest2, ?term_size(KV) + Acc);
-% KV mismatch between old and new node so not a prefix
-old_list_is_prefix(_OldList, _NewList, _Acc) ->
- false.
-
-modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
- modify_node(Bt, nil, Actions, QueryOutput);
-modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
- {ok,
- lists:reverse(
- ResultNode,
- bounded_tuple_to_list(
- NodeTuple,
- LowerBound,
- tuple_size(NodeTuple),
- []
- )
- ),
- QueryOutput};
-modify_kpnode(
- Bt,
- NodeTuple,
- LowerBound,
- [{_, FirstActionKey, _} | _] = Actions,
- ResultNode,
- QueryOutput
-) ->
- Sz = tuple_size(NodeTuple),
- N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
- case N =:= Sz of
- true ->
- % perform remaining actions on last node
- {_, PointerInfo} = element(Sz, NodeTuple),
- {ok, ChildKPs, QueryOutput2} =
- modify_node(Bt, PointerInfo, Actions, QueryOutput),
- NodeList = lists:reverse(
- ResultNode,
- bounded_tuple_to_list(
- NodeTuple,
- LowerBound,
- Sz - 1,
- ChildKPs
- )
- ),
- {ok, NodeList, QueryOutput2};
- false ->
- {NodeKey, PointerInfo} = element(N, NodeTuple),
- SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
- not less(Bt, NodeKey, ActionKey)
- end,
- {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
- {ok, ChildKPs, QueryOutput2} =
- modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
- ResultNode2 = lists:reverse(
- ChildKPs,
- bounded_tuple_to_revlist(
- NodeTuple,
- LowerBound,
- N - 1,
- ResultNode
- )
- ),
- modify_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, ResultNode2, QueryOutput2)
- end.
-
-bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
- Tail;
-bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
- bounded_tuple_to_revlist(Tuple, Start + 1, End, [element(Start, Tuple) | Tail]).
-
-bounded_tuple_to_list(Tuple, Start, End, Tail) ->
- bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-
-bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
- lists:reverse(Acc, Tail);
-bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
- bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
-
-find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
- End;
-find_first_gteq(Bt, Tuple, Start, End, Key) ->
- Mid = Start + ((End - Start) div 2),
- {TupleKey, _} = element(Mid, Tuple),
- case less(Bt, TupleKey, Key) of
- true ->
- find_first_gteq(Bt, Tuple, Mid + 1, End, Key);
- false ->
- find_first_gteq(Bt, Tuple, Start, Mid, Key)
- end.
-
-modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
- {ok,
- lists:reverse(
- ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])
- ),
- QueryOutput};
-modify_kvnode(
- Bt,
- NodeTuple,
- LowerBound,
- [{ActionType, ActionKey, ActionValue} | RestActions],
- ResultNode,
- QueryOutput
-) when LowerBound > tuple_size(NodeTuple) ->
- case ActionType of
- insert ->
- modify_kvnode(
- Bt,
- NodeTuple,
- LowerBound,
- RestActions,
- [{ActionKey, ActionValue} | ResultNode],
- QueryOutput
- );
- remove ->
- % just drop the action
- modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
- fetch ->
- % the key/value must not exist in the tree
- modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [
- {not_found, {ActionKey, nil}} | QueryOutput
- ])
- end;
-modify_kvnode(
- Bt,
- NodeTuple,
- LowerBound,
- [{ActionType, ActionKey, ActionValue} | RestActions],
- AccNode,
- QueryOutput
-) ->
- N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
- {Key, Value} = element(N, NodeTuple),
- ResultNode = bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
- case less(Bt, ActionKey, Key) of
- true ->
- case ActionType of
- insert ->
- % ActionKey is less than the Key, so insert
- modify_kvnode(
- Bt,
- NodeTuple,
- N,
- RestActions,
- [{ActionKey, ActionValue} | ResultNode],
- QueryOutput
- );
- remove ->
- % ActionKey is less than the Key, just drop the action
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
- fetch ->
- % ActionKey is less than the Key, the key/value must not exist in the tree
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [
- {not_found, {ActionKey, nil}} | QueryOutput
- ])
- end;
- false ->
- % ActionKey and Key may be equal.
- case less(Bt, Key, ActionKey) of
- false ->
- case ActionType of
- insert ->
- modify_kvnode(
- Bt,
- NodeTuple,
- N + 1,
- RestActions,
- [{ActionKey, ActionValue} | ResultNode],
- QueryOutput
- );
- remove ->
- modify_kvnode(
- Bt, NodeTuple, N + 1, RestActions, ResultNode, QueryOutput
- );
- fetch ->
- % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
- % since an identical action key can follow it.
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [
- {ok, assemble(Bt, Key, Value)} | QueryOutput
- ])
- end;
- true ->
- modify_kvnode(
- Bt,
- NodeTuple,
- N + 1,
- [{ActionType, ActionKey, ActionValue} | RestActions],
- [{Key, Value} | ResultNode],
- QueryOutput
- )
- end
- end.
-
-reduce_stream_node(
- _Bt,
- _Dir,
- nil,
- _KeyStart,
- _InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- _KeyGroupFun,
- _Fun,
- Acc
-) ->
- {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(
- Bt,
- Dir,
- Node,
- KeyStart,
- InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
-) ->
- P = element(1, Node),
- case get_node(Bt, P) of
- {kp_node, NodeList} ->
- NodeList2 = adjust_dir(Dir, NodeList),
- reduce_stream_kp_node(
- Bt,
- Dir,
- NodeList2,
- KeyStart,
- InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
- );
- {kv_node, KVs} ->
- KVs2 = adjust_dir(Dir, KVs),
- reduce_stream_kv_node(
- Bt,
- Dir,
- KVs2,
- KeyStart,
- InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
- )
- end.
-
-reduce_stream_kv_node(
- Bt,
- Dir,
- KVs,
- KeyStart,
- InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
-) ->
- GTEKeyStartKVs =
- case KeyStart of
- undefined ->
- KVs;
- _ ->
- DropFun =
- case Dir of
- fwd ->
- fun({Key, _}) -> less(Bt, Key, KeyStart) end;
- rev ->
- fun({Key, _}) -> less(Bt, KeyStart, Key) end
- end,
- lists:dropwhile(DropFun, KVs)
- end,
- KVs2 = lists:takewhile(
- fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs
- ),
- reduce_stream_kv_node2(
- Bt,
- KVs2,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
- ).
-
-reduce_stream_kv_node2(
- _Bt,
- [],
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- _KeyGroupFun,
- _Fun,
- Acc
-) ->
- {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_kv_node2(
- Bt,
- [{Key, Value} | RestKVs],
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
-) ->
- case GroupedKey of
- undefined ->
- reduce_stream_kv_node2(
- Bt,
- RestKVs,
- Key,
- [assemble(Bt, Key, Value)],
- [],
- KeyGroupFun,
- Fun,
- Acc
- );
- _ ->
- case KeyGroupFun(GroupedKey, Key) of
- true ->
- reduce_stream_kv_node2(
- Bt,
- RestKVs,
- GroupedKey,
- [assemble(Bt, Key, Value) | GroupedKVsAcc],
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
- );
- false ->
- case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
- {ok, Acc2} ->
- reduce_stream_kv_node2(
- Bt,
- RestKVs,
- Key,
- [assemble(Bt, Key, Value)],
- [],
- KeyGroupFun,
- Fun,
- Acc2
- );
- {stop, Acc2} ->
- throw({stop, Acc2})
- end
- end
- end.
-
-reduce_stream_kp_node(
- Bt,
- Dir,
- NodeList,
- KeyStart,
- InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
-) ->
- Nodes =
- case KeyStart of
- undefined ->
- NodeList;
- _ ->
- case Dir of
- fwd ->
- lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
- rev ->
- RevKPs = lists:reverse(NodeList),
- case
- lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs)
- of
- {_Before, []} ->
- NodeList;
- {Before, [FirstAfter | _]} ->
- [FirstAfter | lists:reverse(Before)]
- end
- end
- end,
- {InRange, MaybeInRange} = lists:splitwith(
- fun({Key, _}) -> InEndRangeFun(Key) end, Nodes
- ),
- NodesInRange =
- case MaybeInRange of
- [FirstMaybeInRange | _] when Dir =:= fwd ->
- InRange ++ [FirstMaybeInRange];
- _ ->
- InRange
- end,
- reduce_stream_kp_node2(
- Bt,
- Dir,
- NodesInRange,
- KeyStart,
- InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
- ).
-
-reduce_stream_kp_node2(
- Bt,
- Dir,
- [{_Key, NodeInfo} | RestNodeList],
- KeyStart,
- InEndRangeFun,
- undefined,
- [],
- [],
- KeyGroupFun,
- Fun,
- Acc
-) ->
- {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(
- Bt,
- Dir,
- NodeInfo,
- KeyStart,
- InEndRangeFun,
- undefined,
- [],
- [],
- KeyGroupFun,
- Fun,
- Acc
- ),
- reduce_stream_kp_node2(
- Bt,
- Dir,
- RestNodeList,
- KeyStart,
- InEndRangeFun,
- GroupedKey2,
- GroupedKVsAcc2,
- GroupedRedsAcc2,
- KeyGroupFun,
- Fun,
- Acc2
- );
-reduce_stream_kp_node2(
- Bt,
- Dir,
- NodeList,
- KeyStart,
- InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
-) ->
- {Grouped0, Ungrouped0} = lists:splitwith(
- fun({Key, _}) ->
- KeyGroupFun(GroupedKey, Key)
- end,
- NodeList
- ),
- {GroupedNodes, UngroupedNodes} =
- case Grouped0 of
- [] ->
- {Grouped0, Ungrouped0};
- _ ->
- [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
- {RestGrouped, [FirstGrouped | Ungrouped0]}
- end,
- GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
- case UngroupedNodes of
- [{_Key, NodeInfo} | RestNodes] ->
- {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(
- Bt,
- Dir,
- NodeInfo,
- KeyStart,
- InEndRangeFun,
- GroupedKey,
- GroupedKVsAcc,
- GroupedReds ++ GroupedRedsAcc,
- KeyGroupFun,
- Fun,
- Acc
- ),
- reduce_stream_kp_node2(
- Bt,
- Dir,
- RestNodes,
- KeyStart,
- InEndRangeFun,
- GroupedKey2,
- GroupedKVsAcc2,
- GroupedRedsAcc2,
- KeyGroupFun,
- Fun,
- Acc2
- );
- [] ->
- {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
- end.
-
-adjust_dir(fwd, List) ->
- List;
-adjust_dir(rev, List) ->
- lists:reverse(List).
-
-stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
- Pointer = element(1, Node),
- {NodeType, NodeList} = get_node(Bt, Pointer),
- case NodeType of
- kp_node ->
- stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
- kv_node ->
- stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
- end.
-
-stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
- Pointer = element(1, Node),
- {NodeType, NodeList} = get_node(Bt, Pointer),
- case NodeType of
- kp_node ->
- stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
- kv_node ->
- stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
- end.
-
-stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
- {ok, Acc};
-stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
- Red = element(2, Node),
- case Fun(traverse, Key, Red, Acc) of
- {ok, Acc2} ->
- case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
- {ok, Acc3} ->
- stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
- {stop, LastReds, Acc3} ->
- {stop, LastReds, Acc3}
- end;
- {skip, Acc2} ->
- stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
- {stop, Acc2} ->
- {stop, Reds, Acc2}
- end.
-
-drop_nodes(_Bt, Reds, _StartKey, []) ->
- {Reds, []};
-drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
- case less(Bt, NodeKey, StartKey) of
- true ->
- drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
- false ->
- {Reds, [{NodeKey, Node} | RestKPs]}
- end.
-
-stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
- {NewReds, NodesToStream} =
- case Dir of
- fwd ->
- % drop all nodes sorting before the key
- drop_nodes(Bt, Reds, StartKey, KPs);
- rev ->
- % keep all nodes sorting before the key, AND the first node to sort after
- RevKPs = lists:reverse(KPs),
- case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
- {_RevsBefore, []} ->
- % everything sorts before it
- {Reds, KPs};
- {RevBefore, [FirstAfter | Drop]} ->
- {[element(2, Node) || {_K, Node} <- Drop] ++ Reds, [
- FirstAfter | lists:reverse(RevBefore)
- ]}
- end
- end,
- case NodesToStream of
- [] ->
- {ok, Acc};
- [{_Key, Node} | Rest] ->
- case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
- {ok, Acc2} ->
- Red = element(2, Node),
- stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
- {stop, LastReds, Acc2} ->
- {stop, LastReds, Acc2}
- end
- end.
-
-stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
- DropFun =
- case Dir of
- fwd ->
- fun({Key, _}) -> less(Bt, Key, StartKey) end;
- rev ->
- fun({Key, _}) -> less(Bt, StartKey, Key) end
- end,
- {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
- AssembleLTKVs = [assemble(Bt, K, V) || {K, V} <- LTKVs],
- stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
-
-stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
- {ok, Acc};
-stream_kv_node2(Bt, Reds, PrevKVs, [{K, V} | RestKVs], InRange, Dir, Fun, Acc) ->
- case InRange(K) of
- false ->
- {stop, {PrevKVs, Reds}, Acc};
- true ->
- AssembledKV = assemble(Bt, K, V),
- case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
- {ok, Acc2} ->
- stream_kv_node2(
- Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2
- );
- {stop, Acc2} ->
- {stop, {PrevKVs, Reds}, Acc2}
- end
- end.
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
deleted file mode 100644
index 089cda975..000000000
--- a/src/couch/src/couch_changes.erl
+++ /dev/null
@@ -1,777 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes).
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([
- handle_db_changes/3,
- get_changes_timeout/2,
- wait_updated/3,
- get_rest_updated/1,
- configure_filter/4,
- filter/3,
- handle_db_event/3,
- handle_view_event/3,
- send_changes_doc_ids/6,
- send_changes_design_docs/6
-]).
-
--export([changes_enumerator/2]).
-
-%% export so we can use fully qualified calls to facilitate hot-code upgrade
--export([
- keep_sending_changes/3
-]).
-
--record(changes_acc, {
- db,
- seq,
- prepend,
- filter,
- callback,
- user_acc,
- resp_type,
- limit,
- include_docs,
- doc_options,
- conflicts,
- timeout,
- timeout_fun,
- aggregation_kvs,
- aggregation_results
-}).
-
-handle_db_changes(Args0, Req, Db0) ->
- #changes_args{
- style = Style,
- filter = FilterName,
- feed = Feed,
- dir = Dir,
- since = Since
- } = Args0,
- Filter = configure_filter(FilterName, Style, Req, Db0),
- Args = Args0#changes_args{filter_fun = Filter},
- DbName = couch_db:name(Db0),
- StartListenerFun = fun() ->
- couch_event:link_listener(
- ?MODULE, handle_db_event, self(), [{dbname, DbName}]
- )
- end,
- Start = fun() ->
- {ok, Db} = couch_db:reopen(Db0),
- StartSeq =
- case Dir of
- rev ->
- couch_db:get_update_seq(Db);
- fwd ->
- Since
- end,
- {Db, StartSeq}
- end,
- % begin timer to deal with heartbeat when filter function fails
- case Args#changes_args.heartbeat of
- undefined ->
- erlang:erase(last_changes_heartbeat);
- Val when is_integer(Val); Val =:= true ->
- put(last_changes_heartbeat, os:timestamp())
- end,
-
- case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
- true ->
- fun(CallbackAcc) ->
- {Callback, UserAcc} = get_callback_acc(CallbackAcc),
- {ok, Listener} = StartListenerFun(),
-
- {Db, StartSeq} = Start(),
- UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
- {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
- Acc0 = build_acc(
- Args,
- Callback,
- UserAcc2,
- Db,
- StartSeq,
- <<"">>,
- Timeout,
- TimeoutFun
- ),
- try
- keep_sending_changes(
- Args#changes_args{dir = fwd},
- Acc0,
- true
- )
- after
- couch_event:stop_listener(Listener),
- % clean out any remaining update messages
- get_rest_updated(ok)
- end
- end;
- false ->
- fun(CallbackAcc) ->
- {Callback, UserAcc} = get_callback_acc(CallbackAcc),
- UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
- {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
- {Db, StartSeq} = Start(),
- Acc0 = build_acc(
- Args#changes_args{feed = "normal"},
- Callback,
- UserAcc2,
- Db,
- StartSeq,
- <<>>,
- Timeout,
- TimeoutFun
- ),
- {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
- send_changes(
- Acc0,
- Dir,
- true
- ),
- end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
- end
- end.
-
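In short: for "continuous", "longpoll" and "eventsource" feeds the returned fun registers a couch_event listener and loops in keep_sending_changes/3 until the limit is reached, the feed times out, or the database is deleted, whereas any other feed value performs a single send_changes/3 pass and then calls end_sending_changes/4.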
-handle_db_event(_DbName, updated, Parent) ->
- Parent ! updated,
- {ok, Parent};
-handle_db_event(_DbName, deleted, Parent) ->
- Parent ! deleted,
- {ok, Parent};
-handle_db_event(_DbName, _Event, Parent) ->
- {ok, Parent}.
-
-handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
- case Msg of
- {index_commit, DDocId} ->
- Parent ! updated;
- {index_delete, DDocId} ->
- Parent ! deleted;
- _ ->
- ok
- end,
- {ok, {Parent, DDocId}}.
-
-get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
- Pair;
-get_callback_acc(Callback) when is_function(Callback, 2) ->
- {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
-
-configure_filter("_doc_ids", Style, Req, _Db) ->
- {doc_ids, Style, get_doc_ids(Req)};
-configure_filter("_selector", Style, Req, _Db) ->
- {selector, Style, get_selector_and_fields(Req)};
-configure_filter("_design", Style, _Req, _Db) ->
- {design_docs, Style};
-configure_filter("_view", Style, Req, Db) ->
- ViewName = get_view_qs(Req),
- if
- ViewName /= "" -> ok;
- true -> throw({bad_request, "`view` filter parameter is not provided."})
- end,
- ViewNameParts = string:tokens(ViewName, "/"),
- case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
- [DName, VName] ->
- {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
- check_member_exists(DDoc, [<<"views">>, VName]),
- case couch_db:is_clustered(Db) of
- true ->
- DIR = fabric_util:doc_id_and_rev(DDoc),
- {fetch, view, Style, DIR, VName};
- false ->
- {view, Style, DDoc, VName}
- end;
- [] ->
- Msg = "`view` must be of the form `designname/viewname`",
- throw({bad_request, Msg})
- end;
-configure_filter([$_ | _], _Style, _Req, _Db) ->
- throw({bad_request, "unknown builtin filter name"});
-configure_filter("", main_only, _Req, _Db) ->
- {default, main_only};
-configure_filter("", all_docs, _Req, _Db) ->
- {default, all_docs};
-configure_filter(FilterName, Style, Req, Db) ->
- FilterNameParts = string:tokens(FilterName, "/"),
- case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
- [DName, FName] ->
- {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
- check_member_exists(DDoc, [<<"filters">>, FName]),
- case couch_db:is_clustered(Db) of
- true ->
- DIR = fabric_util:doc_id_and_rev(DDoc),
- {fetch, custom, Style, Req, DIR, FName};
- false ->
- {custom, Style, Req, DDoc, FName}
- end;
- [] ->
- {default, Style};
- _Else ->
- Msg = "`filter` must be of the form `designname/filtername`",
- throw({bad_request, Msg})
- end.
-
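For instance (hypothetical names): "_design" yields {design_docs, Style}; "_doc_ids" yields {doc_ids, Style, DocIds} with the ids read from the request; and a user filter such as "app/by_type" opens _design/app, checks that filters.by_type exists, and returns {custom, Style, Req, DDoc, <<"by_type">>} on a single node, or a {fetch, custom, ...} tuple carrying the design doc id and rev when the database is clustered.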
-filter(Db, #full_doc_info{} = FDI, Filter) ->
- filter(Db, couch_doc:to_doc_info(FDI), Filter);
-filter(_Db, DocInfo, {default, Style}) ->
- apply_style(DocInfo, Style);
-filter(_Db, DocInfo, {doc_ids, Style, DocIds}) ->
- case lists:member(DocInfo#doc_info.id, DocIds) of
- true ->
- apply_style(DocInfo, Style);
- false ->
- []
- end;
-filter(Db, DocInfo, {selector, Style, {Selector, _Fields}}) ->
- Docs = open_revs(Db, DocInfo, Style),
- Passes = [
- mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
- || Doc <- Docs
- ],
- filter_revs(Passes, Docs);
-filter(_Db, DocInfo, {design_docs, Style}) ->
- case DocInfo#doc_info.id of
- <<"_design", _/binary>> ->
- apply_style(DocInfo, Style);
- _ ->
- []
- end;
-filter(Db, DocInfo, {view, Style, DDoc, VName}) ->
- Docs = open_revs(Db, DocInfo, Style),
- {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
- filter_revs(Passes, Docs);
-filter(Db, DocInfo, {custom, Style, Req0, DDoc, FName}) ->
- Req =
- case Req0 of
- {json_req, _} -> Req0;
- #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)}
- end,
- Docs = open_revs(Db, DocInfo, Style),
- {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
- filter_revs(Passes, Docs).
-
-get_view_qs({json_req, {Props}}) ->
- {Query} = couch_util:get_value(<<"query">>, Props, {[]}),
- binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
-get_view_qs(Req) ->
- couch_httpd:qs_value(Req, "view", "").
-
-get_doc_ids({json_req, {Props}}) ->
- check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method = 'POST'} = Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {Props} = couch_httpd:json_body_obj(Req),
- check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method = 'GET'} = Req) ->
- DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
- check_docids(DocIds);
-get_doc_ids(_) ->
- throw({bad_request, no_doc_ids_provided}).
-
-get_selector_and_fields({json_req, {Props}}) ->
- Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
- Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
- {Selector, Fields};
-get_selector_and_fields(#httpd{method = 'POST'} = Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)});
-get_selector_and_fields(_) ->
- throw({bad_request, "Selector must be specified in POST payload"}).
-
-check_docids(DocIds) when is_list(DocIds) ->
- lists:foreach(
- fun
- (DocId) when not is_binary(DocId) ->
- Msg = "`doc_ids` filter parameter is not a list of doc ids.",
- throw({bad_request, Msg});
- (_) ->
- ok
- end,
- DocIds
- ),
- DocIds;
-check_docids(_) ->
- Msg = "`doc_ids` filter parameter is not a list of doc ids.",
- throw({bad_request, Msg}).
-
-check_selector(Selector = {_}) ->
- try
- mango_selector:normalize(Selector)
- catch
- {mango_error, Mod, Reason0} ->
- {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
- throw({bad_request, Reason})
- end;
-check_selector(_Selector) ->
- throw({bad_request, "Selector error: expected a JSON object"}).
-
-check_fields(nil) ->
- nil;
-check_fields(Fields) when is_list(Fields) ->
- try
- {ok, Fields1} = mango_fields:new(Fields),
- Fields1
- catch
- {mango_error, Mod, Reason0} ->
- {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
- throw({bad_request, Reason})
- end;
-check_fields(_Fields) ->
- throw({bad_request, "Selector error: fields must be JSON array"}).
-
-open_ddoc(Db, DDocId) ->
- DbName = couch_db:name(Db),
- case couch_db:is_clustered(Db) of
- true ->
- case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
- {ok, _} = Resp -> Resp;
- Else -> throw(Else)
- end;
- false ->
- case couch_db:open_doc(Db, DDocId, [ejson_body]) of
- {ok, _} = Resp -> Resp;
- Else -> throw(Else)
- end
- end.
-
-check_member_exists(#doc{body = {Props}}, Path) ->
- couch_util:get_nested_json_value({Props}, Path).
-
-apply_style(#doc_info{revs = Revs}, main_only) ->
- [#rev_info{rev = Rev} | _] = Revs,
- [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
-apply_style(#doc_info{revs = Revs}, all_docs) ->
- [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev = R} <- Revs].
-
-open_revs(Db, DocInfo, Style) ->
- DocInfos =
- case Style of
- main_only -> [DocInfo];
- all_docs -> [DocInfo#doc_info{revs = [R]} || R <- DocInfo#doc_info.revs]
- end,
- OpenOpts = [deleted, conflicts],
- % Relying on list comprehensions to silence errors
- OpenResults = [couch_db:open_doc(Db, DI, OpenOpts) || DI <- DocInfos],
- [Doc || {ok, Doc} <- OpenResults].
-
-filter_revs(Passes, Docs) ->
- lists:flatmap(
- fun
- ({true, #doc{revs = {RevPos, [RevId | _]}}}) ->
- RevStr = couch_doc:rev_to_str({RevPos, RevId}),
- Change = {[{<<"rev">>, RevStr}]},
- [Change];
- (_) ->
- []
- end,
- lists:zip(Passes, Docs)
- ).
-
-get_changes_timeout(Args, Callback) ->
- #changes_args{
- heartbeat = Heartbeat,
- timeout = Timeout,
- feed = ResponseType
- } = Args,
- DefaultTimeout = chttpd_util:get_chttpd_config_integer(
- "changes_timeout", 60000
- ),
- case Heartbeat of
- undefined ->
- case Timeout of
- undefined ->
- {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
- infinity ->
- {infinity, fun(UserAcc) -> {stop, UserAcc} end};
- _ ->
- {lists:min([DefaultTimeout, Timeout]), fun(UserAcc) -> {stop, UserAcc} end}
- end;
- true ->
- {DefaultTimeout, fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
- _ ->
- {lists:min([DefaultTimeout, Heartbeat]), fun(UserAcc) ->
- {ok, Callback(timeout, ResponseType, UserAcc)}
- end}
- end.
-
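A quick worked example of the timeout selection above (illustrative values): with heartbeat = 5000 and the default changes_timeout of 60000 the result is {5000, Fun} where Fun sends a timeout event through the callback and keeps the feed alive, whereas with no heartbeat and timeout = 10000 the result is {10000, Fun} where Fun stops the feed.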
-start_sending_changes(_Callback, UserAcc, ResponseType) when
- ResponseType =:= "continuous" orelse
- ResponseType =:= "eventsource"
-->
- UserAcc;
-start_sending_changes(Callback, UserAcc, ResponseType) ->
- Callback(start, ResponseType, UserAcc).
-
-build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
- #changes_args{
- include_docs = IncludeDocs,
- doc_options = DocOpts,
- conflicts = Conflicts,
- limit = Limit,
- feed = ResponseType,
- filter_fun = Filter
- } = Args,
- #changes_acc{
- db = Db,
- seq = StartSeq,
- prepend = Prepend,
- filter = Filter,
- callback = Callback,
- user_acc = UserAcc,
- resp_type = ResponseType,
- limit = Limit,
- include_docs = IncludeDocs,
- doc_options = DocOpts,
- conflicts = Conflicts,
- timeout = Timeout,
- timeout_fun = TimeoutFun,
- aggregation_results = [],
- aggregation_kvs = []
- }.
-
-send_changes(Acc, Dir, FirstRound) ->
- #changes_acc{
- db = Db,
- seq = StartSeq,
- filter = Filter
- } = maybe_upgrade_changes_acc(Acc),
- DbEnumFun = fun changes_enumerator/2,
- case can_optimize(FirstRound, Filter) of
- {true, Fun} ->
- Fun(Db, StartSeq, Dir, DbEnumFun, Acc, Filter);
- _ ->
- Opts = [{dir, Dir}],
- couch_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts)
- end.
-
-can_optimize(true, {doc_ids, _Style, DocIds}) ->
- MaxDocIds = config:get_integer(
- "couchdb",
- "changes_doc_ids_optimization_threshold",
- 100
- ),
- if
- length(DocIds) =< MaxDocIds ->
- {true, fun send_changes_doc_ids/6};
- true ->
- false
- end;
-can_optimize(true, {design_docs, _Style}) ->
- {true, fun send_changes_design_docs/6};
-can_optimize(_, _) ->
- false.
-
-send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
- Results = couch_db:get_full_doc_infos(Db, DocIds),
- FullInfos = lists:foldl(
- fun
- (#full_doc_info{} = FDI, Acc) -> [FDI | Acc];
- (not_found, Acc) -> Acc
- end,
- [],
- Results
- ),
- send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- Opts = [
- include_deleted,
- {start_key, <<"_design/">>},
- {end_key_gt, <<"_design0">>}
- ],
- {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
- send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
- FoldFun =
- case Dir of
- fwd -> fun lists:foldl/3;
- rev -> fun lists:foldr/3
- end,
- GreaterFun =
- case Dir of
- fwd -> fun(A, B) -> A > B end;
- rev -> fun(A, B) -> A =< B end
- end,
- DocInfos = lists:foldl(
- fun(FDI, Acc) ->
- DI = couch_doc:to_doc_info(FDI),
- case GreaterFun(DI#doc_info.high_seq, StartSeq) of
- true -> [DI | Acc];
- false -> Acc
- end
- end,
- [],
- FullDocInfos
- ),
- SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
- FinalAcc =
- try
- FoldFun(
- fun(DocInfo, Acc) ->
- case Fun(DocInfo, Acc) of
- {ok, NewAcc} ->
- NewAcc;
- {stop, NewAcc} ->
- throw({stop, NewAcc})
- end
- end,
- Acc0,
- SortedDocInfos
- )
- catch
- {stop, Acc} -> Acc
- end,
- case Dir of
- fwd ->
- FinalAcc0 =
- case element(1, FinalAcc) of
- % we came here via couch_http or internal call
- changes_acc ->
- FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)};
- % we came here via chttpd / fabric / rexi
- fabric_changes_acc ->
- FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
- end,
- {ok, FinalAcc0};
- rev ->
- {ok, FinalAcc}
- end.
-
-keep_sending_changes(Args, Acc0, FirstRound) ->
- #changes_args{
- feed = ResponseType,
- limit = Limit,
- db_open_options = DbOptions
- } = Args,
-
- {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
-
- #changes_acc{
- db = Db,
- callback = Callback,
- timeout = Timeout,
- timeout_fun = TimeoutFun,
- seq = EndSeq,
- prepend = Prepend2,
- user_acc = UserAcc2,
- limit = NewLimit
- } = maybe_upgrade_changes_acc(ChangesAcc),
-
- couch_db:close(Db),
- if
- Limit > NewLimit, ResponseType == "longpoll" ->
- end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
- true ->
- case wait_updated(Timeout, TimeoutFun, UserAcc2) of
- {updated, UserAcc4} ->
- DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
- case couch_db:open(couch_db:name(Db), DbOptions1) of
- {ok, Db2} ->
- ?MODULE:keep_sending_changes(
- Args#changes_args{limit = NewLimit},
- ChangesAcc#changes_acc{
- db = Db2,
- user_acc = UserAcc4,
- seq = EndSeq,
- prepend = Prepend2,
- timeout = Timeout,
- timeout_fun = TimeoutFun
- },
- false
- );
- _Else ->
- end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
- end;
- {stop, UserAcc4} ->
- end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
- end
- end.
-
-end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
- Callback({stop, EndSeq}, ResponseType, UserAcc).
-
-changes_enumerator(Value, Acc) ->
- #changes_acc{
- filter = Filter,
- callback = Callback,
- prepend = Prepend,
- user_acc = UserAcc,
- limit = Limit,
- resp_type = ResponseType,
- db = Db,
- timeout = Timeout,
- timeout_fun = TimeoutFun
- } = maybe_upgrade_changes_acc(Acc),
- Results0 = filter(Db, Value, Filter),
- Results = [Result || Result <- Results0, Result /= null],
- Seq =
- case Value of
- #full_doc_info{} ->
- Value#full_doc_info.update_seq;
- #doc_info{} ->
- Value#doc_info.high_seq
- end,
- Go =
- if
- (Limit =< 1) andalso Results =/= [] -> stop;
- true -> ok
- end,
- case Results of
- [] ->
- {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
- case Done of
- stop ->
- {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
- ok ->
- {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
- end;
- _ ->
- if
- ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
- ChangesRow = changes_row(Results, Value, Acc),
- UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
- reset_heartbeat(),
- {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}};
- true ->
- ChangesRow = changes_row(Results, Value, Acc),
- UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
- reset_heartbeat(),
- {Go, Acc#changes_acc{
- seq = Seq,
- prepend = <<",\n">>,
- user_acc = UserAcc2,
- limit = Limit - 1
- }}
- end
- end.
-
-changes_row(Results, #full_doc_info{} = FDI, Acc) ->
- changes_row(Results, couch_doc:to_doc_info(FDI), Acc);
-changes_row(Results, DocInfo, Acc0) ->
- Acc = maybe_upgrade_changes_acc(Acc0),
- #doc_info{
- id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
- } = DocInfo,
- {
- [{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
- deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc)
- }.
-
-maybe_get_changes_doc(Value, #changes_acc{include_docs = true} = Acc) ->
- #changes_acc{
- db = Db,
- doc_options = DocOpts,
- conflicts = Conflicts,
- filter = Filter
- } = Acc,
- Opts =
- case Conflicts of
- true -> [deleted, conflicts];
- false -> [deleted]
- end,
- load_doc(Db, Value, Opts, DocOpts, Filter);
-maybe_get_changes_doc(_Value, _Acc) ->
- [].
-
-load_doc(Db, Value, Opts, DocOpts, Filter) ->
- case couch_index_util:load_doc(Db, Value, Opts) of
- null ->
- [{doc, null}];
- Doc ->
- [{doc, doc_to_json(Doc, DocOpts, Filter)}]
- end.
-
-doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}}) when
- Fields =/= nil
-->
- mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
-doc_to_json(Doc, DocOpts, _Filter) ->
- couch_doc:to_json_obj(Doc, DocOpts).
-
-deleted_item(true) -> [{<<"deleted">>, true}];
-deleted_item(_) -> [].
-
-% waits for an updated msg; if there are multiple msgs, collects them.
-wait_updated(Timeout, TimeoutFun, UserAcc) ->
- receive
- updated ->
- get_rest_updated(UserAcc);
- deleted ->
- {stop, UserAcc}
- after Timeout ->
- {Go, UserAcc2} = TimeoutFun(UserAcc),
- case Go of
- ok ->
- ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
- stop ->
- {stop, UserAcc2}
- end
- end.
-
-get_rest_updated(UserAcc) ->
- receive
- updated ->
- get_rest_updated(UserAcc)
- after 0 ->
- {updated, UserAcc}
- end.
-
-reset_heartbeat() ->
- case get(last_changes_heartbeat) of
- undefined ->
- ok;
- _ ->
- put(last_changes_heartbeat, os:timestamp())
- end.
-
-maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
- Before = get(last_changes_heartbeat),
- case Before of
- undefined ->
- {ok, Acc};
- _ ->
- Now = os:timestamp(),
- case timer:now_diff(Now, Before) div 1000 >= Timeout of
- true ->
- Acc2 = TimeoutFun(Acc),
- put(last_changes_heartbeat, Now),
- Acc2;
- false ->
- {ok, Acc}
- end
- end.
-
-maybe_upgrade_changes_acc(#changes_acc{} = Acc) ->
- Acc;
-maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 ->
- #changes_acc{
- db = element(2, Acc),
- seq = element(6, Acc),
- prepend = element(7, Acc),
- filter = element(8, Acc),
- callback = element(9, Acc),
- user_acc = element(10, Acc),
- resp_type = element(11, Acc),
- limit = element(12, Acc),
- include_docs = element(13, Acc),
- doc_options = element(14, Acc),
- conflicts = element(15, Acc),
- timeout = element(16, Acc),
- timeout_fun = element(17, Acc),
- aggregation_kvs = element(18, Acc),
- aggregation_results = element(19, Acc)
- }.
diff --git a/src/couch/src/couch_compress.erl b/src/couch/src/couch_compress.erl
deleted file mode 100644
index 59d692058..000000000
--- a/src/couch/src/couch_compress.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compress).
-
--export([compress/2, decompress/1, is_compressed/2]).
--export([get_compression_method/0]).
--export([uncompressed_size/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% binaries compressed with snappy have their first byte set to this value
--define(SNAPPY_PREFIX, 1).
-% Term prefixes documented at:
-% http://www.erlang.org/doc/apps/erts/erl_ext_dist.html
--define(TERM_PREFIX, 131).
--define(COMPRESSED_TERM_PREFIX, 131, 80).
-
-get_compression_method() ->
- case config:get("couchdb", "file_compression") of
- undefined ->
- ?DEFAULT_COMPRESSION;
- Method1 ->
- case string:tokens(Method1, "_") of
- [Method] ->
- list_to_existing_atom(Method);
- [Method, Level] ->
- {list_to_existing_atom(Method), list_to_integer(Level)}
- end
- end.
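-
-% For illustration, a "file_compression" value of "snappy" yields snappy,
-% "none" yields none, and "deflate_6" yields {deflate, 6}; an unset value
-% falls back to ?DEFAULT_COMPRESSION.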
-
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
- Bin;
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
- compress(decompress(Bin), Method);
-compress(<<?COMPRESSED_TERM_PREFIX, _/binary>> = Bin, {deflate, _Level}) ->
- Bin;
-compress(<<?TERM_PREFIX, _/binary>> = Bin, Method) ->
- compress(decompress(Bin), Method);
-compress(Term, none) ->
- ?term_to_bin(Term);
-compress(Term, {deflate, Level}) ->
- term_to_binary(Term, [{minor_version, 1}, {compressed, Level}]);
-compress(Term, snappy) ->
- Bin = ?term_to_bin(Term),
- try
- {ok, CompressedBin} = snappy:compress(Bin),
- <<?SNAPPY_PREFIX, CompressedBin/binary>>
- catch
- exit:snappy_nif_not_loaded ->
- Bin
- end.
-
-decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
- {ok, TermBin} = snappy:decompress(Rest),
- binary_to_term(TermBin);
-decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
- binary_to_term(Bin);
-decompress(_) ->
- error(invalid_compression).
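-
-% Illustrative round trip through this module, assuming the snappy NIF is
-% loaded (otherwise compress/2 falls back to the plain term encoding):
-%   Bin = couch_compress:compress({[{<<"a">>, 1}]}, snappy),
-%   true = couch_compress:is_compressed(Bin, snappy),
-%   {[{<<"a">>, 1}]} = couch_compress:decompress(Bin).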
-
-is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
- Method =:= snappy;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
- true;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, _Method) ->
- false;
-is_compressed(<<?TERM_PREFIX, _/binary>>, Method) ->
- Method =:= none;
-is_compressed(Term, _Method) when not is_binary(Term) ->
- false;
-is_compressed(_, _) ->
- error(invalid_compression).
-
-uncompressed_size(<<?SNAPPY_PREFIX, Rest/binary>>) ->
- {ok, Size} = snappy:uncompressed_length(Rest),
- Size;
-uncompressed_size(<<?COMPRESSED_TERM_PREFIX, Size:32, _/binary>> = _Bin) ->
- % See http://erlang.org/doc/apps/erts/erl_ext_dist.html
- % The uncompressed binary would be encoded with <<131, Rest/binary>>
- % so we need to add 1 for the leading 131 byte
- Size + 1;
-uncompressed_size(<<?TERM_PREFIX, _/binary>> = Bin) ->
- byte_size(Bin);
-uncompressed_size(_) ->
- error(invalid_compression).
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
deleted file mode 100644
index 572746229..000000000
--- a/src/couch/src/couch_db.erl
+++ /dev/null
@@ -1,2374 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db).
-
--export([
- create/2,
- open/2,
- open_int/2,
- incref/1,
- reopen/1,
- close/1,
- exists/1,
-
- clustered_db/2,
- clustered_db/3,
-
- monitor/1,
- monitored_by/1,
- is_idle/1,
-
- is_admin/1,
- check_is_admin/1,
- check_is_member/1,
-
- name/1,
- get_after_doc_read_fun/1,
- get_before_doc_update_fun/1,
- get_committed_update_seq/1,
- get_compacted_seq/1,
- get_compactor_pid/1,
- get_compactor_pid_sync/1,
- get_db_info/1,
- get_partition_info/2,
- get_del_doc_count/1,
- get_doc_count/1,
- get_epochs/1,
- get_filepath/1,
- get_instance_start_time/1,
- get_pid/1,
- get_revs_limit/1,
- get_security/1,
- get_update_seq/1,
- get_user_ctx/1,
- get_uuid/1,
- get_purge_seq/1,
- get_oldest_purge_seq/1,
- get_purge_infos_limit/1,
-
- is_db/1,
- is_system_db/1,
- is_clustered/1,
- is_system_db_name/1,
- is_partitioned/1,
-
- set_revs_limit/2,
- set_purge_infos_limit/2,
- set_security/2,
- set_user_ctx/2,
-
- load_validation_funs/1,
- reload_validation_funs/1,
-
- open_doc/2,
- open_doc/3,
- open_doc_revs/4,
- open_doc_int/3,
- get_doc_info/2,
- get_full_doc_info/2,
- get_full_doc_infos/2,
- get_missing_revs/2,
- get_design_doc/2,
- get_design_docs/1,
- get_design_doc_count/1,
- get_purge_infos/2,
-
- get_minimum_purge_seq/1,
- purge_client_exists/3,
-
- validate_docid/2,
- doc_from_json_obj_validate/2,
-
- update_doc/3,
- update_doc/4,
- update_docs/4,
- update_docs/2,
- update_docs/3,
- delete_doc/3,
-
- purge_docs/2,
- purge_docs/3,
-
- with_stream/3,
- open_write_stream/2,
- open_read_stream/2,
- is_active_stream/2,
-
- fold_docs/3,
- fold_docs/4,
- fold_local_docs/4,
- fold_design_docs/4,
- fold_changes/4,
- fold_changes/5,
- count_changes_since/2,
- fold_purge_infos/4,
- fold_purge_infos/5,
-
- calculate_start_seq/3,
- owner_of/2,
-
- start_compact/1,
- cancel_compact/1,
- wait_for_compaction/1,
- wait_for_compaction/2,
- is_compacting/1,
-
- dbname_suffix/1,
- normalize_dbname/1,
- validate_dbname/1,
-
- make_doc/5,
- new_revid/1
-]).
-
--export([
- start_link/4
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
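-% For example (illustrative), ?DBNAME_REGEX below accepts <<"accounts">>,
-% <<"accounts/2022">> and a shard-style name with a trailing timestamp such
-% as <<"accounts.1654000000">>.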
--define(DBNAME_REGEX,
- % use the stock CouchDB regex
- "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*"
- % but allow an optional shard timestamp at the end
- "(\\.[0-9]{10,})?$"
-).
--define(DEFAULT_COMPRESSIBLE_TYPES,
- "text/*, application/javascript, application/json, application/xml"
-).
-
-start_link(Engine, DbName, Filepath, Options) ->
- Arg = {Engine, DbName, Filepath, Options},
- proc_lib:start_link(couch_db_updater, init, [Arg]).
-
-create(DbName, Options) ->
- couch_server:create(DbName, Options).
-
-% this is for opening a database for internal purposes like the replicator
-% or the view indexer. it never throws a reader error.
-open_int(DbName, Options) ->
- couch_server:open(DbName, Options).
-
-% this should be called anytime an http request opens the database.
-% it ensures that the http userCtx is a valid reader
-open(DbName, Options) ->
- case couch_server:open(DbName, Options) of
- {ok, Db} ->
- try
- check_is_member(Db),
- {ok, Db}
- catch
- throw:Error ->
- close(Db),
- throw(Error)
- end;
- Else ->
- Else
- end.
-
-reopen(#db{} = Db) ->
- % We could have just swapped out the storage engine
- % for this database during a compaction so we just
- % reimplement this as a close/open pair now.
- try
- open(Db#db.name, [{user_ctx, Db#db.user_ctx} | Db#db.options])
- after
- close(Db)
- end.
-
-% You shouldn't call this. It's part of the ref counting between
-% couch_server and couch_db instances.
-incref(#db{} = Db) ->
- couch_db_engine:incref(Db).
-
-clustered_db(DbName, Options) when is_list(Options) ->
- UserCtx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
- SecProps = couch_util:get_value(security, Options, []),
- Props = couch_util:get_value(props, Options, []),
- {ok, #db{
- name = DbName,
- user_ctx = UserCtx,
- security = SecProps,
- options = [{props, Props}]
- }};
-clustered_db(DbName, #user_ctx{} = UserCtx) ->
- clustered_db(DbName, [{user_ctx, UserCtx}]).
-
-clustered_db(DbName, UserCtx, SecProps) ->
- clustered_db(DbName, [{user_ctx, UserCtx}, {security, SecProps}]).
-
-is_db(#db{}) ->
- true;
-is_db(_) ->
- false.
-
-is_system_db(#db{options = Options}) ->
- lists:member(sys_db, Options).
-
-is_clustered(#db{main_pid = nil}) ->
- true;
-is_clustered(#db{}) ->
- false;
-is_clustered(?OLD_DB_REC = Db) ->
- ?OLD_DB_MAIN_PID(Db) == undefined.
-
-is_partitioned(#db{options = Options}) ->
- Props = couch_util:get_value(props, Options, []),
- couch_util:get_value(partitioned, Props, false).
-
-close(#db{} = Db) ->
- ok = couch_db_engine:decref(Db);
-close(?OLD_DB_REC) ->
- ok.
-
-exists(DbName) ->
- couch_server:exists(DbName).
-
-is_idle(#db{compactor_pid = nil} = Db) ->
- monitored_by(Db) == [];
-is_idle(_Db) ->
- false.
-
-monitored_by(Db) ->
- case couch_db_engine:monitored_by(Db) of
- Pids when is_list(Pids) ->
- PidTracker = whereis(couch_stats_process_tracker),
- Pids -- [Db#db.main_pid, PidTracker];
- undefined ->
- []
- end.
-
-monitor(#db{main_pid = MainPid}) ->
- erlang:monitor(process, MainPid).
-
-start_compact(#db{} = Db) ->
- gen_server:call(Db#db.main_pid, start_compact).
-
-cancel_compact(#db{main_pid = Pid}) ->
- gen_server:call(Pid, cancel_compact).
-
-wait_for_compaction(Db) ->
- wait_for_compaction(Db, infinity).
-
-wait_for_compaction(#db{main_pid = Pid} = Db, Timeout) ->
- Start = os:timestamp(),
- case gen_server:call(Pid, compactor_pid) of
- CPid when is_pid(CPid) ->
- Ref = erlang:monitor(process, CPid),
- receive
- {'DOWN', Ref, _, _, normal} when Timeout == infinity ->
- wait_for_compaction(Db, Timeout);
- {'DOWN', Ref, _, _, normal} ->
- Elapsed = timer:now_diff(os:timestamp(), Start) div 1000,
- wait_for_compaction(Db, Timeout - Elapsed);
- {'DOWN', Ref, _, _, Reason} ->
- {error, Reason}
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- {error, Timeout}
- end;
- _ ->
- ok
- end.
-
-is_compacting(DbName) ->
- couch_server:is_compacting(DbName).
-
-delete_doc(Db, Id, Revisions) ->
- DeletedDocs = [#doc{id = Id, revs = [Rev], deleted = true} || Rev <- Revisions],
- {ok, [Result]} = update_docs(Db, DeletedDocs, []),
- {ok, Result}.
-
-open_doc(Db, IdOrDocInfo) ->
- open_doc(Db, IdOrDocInfo, []).
-
-open_doc(Db, Id, Options) ->
- increment_stat(Db, [couchdb, database_reads]),
- case open_doc_int(Db, Id, Options) of
- {ok, #doc{deleted = true} = Doc} ->
- case lists:member(deleted, Options) of
- true ->
- apply_open_options({ok, Doc}, Options);
- false ->
- {not_found, deleted}
- end;
- Else ->
- apply_open_options(Else, Options)
- end.
-
-apply_open_options({ok, Doc}, Options) ->
- apply_open_options2(Doc, Options);
-apply_open_options(Else, _Options) ->
- Else.
-
-apply_open_options2(Doc, []) ->
- {ok, Doc};
-apply_open_options2(
- #doc{atts = Atts0, revs = Revs} = Doc,
- [{atts_since, PossibleAncestors} | Rest]
-) ->
- RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
- Atts = lists:map(
- fun(Att) ->
- [AttPos, Data] = couch_att:fetch([revpos, data], Att),
- if
- AttPos > RevPos -> couch_att:store(data, Data, Att);
- true -> couch_att:store(data, stub, Att)
- end
- end,
- Atts0
- ),
- apply_open_options2(Doc#doc{atts = Atts}, Rest);
-apply_open_options2(Doc, [ejson_body | Rest]) ->
- apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
-apply_open_options2(Doc, [_ | Rest]) ->
- apply_open_options2(Doc, Rest).
-
-find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
- 0;
-find_ancestor_rev_pos(_DocRevs, []) ->
- 0;
-find_ancestor_rev_pos({RevPos, [RevId | Rest]}, AttsSinceRevs) ->
- case lists:member({RevPos, RevId}, AttsSinceRevs) of
- true ->
- RevPos;
- false ->
- find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
- end.
-
-open_doc_revs(Db, Id, Revs, Options) ->
- increment_stat(Db, [couchdb, database_reads]),
- [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
- {ok, [apply_open_options(Result, Options) || Result <- Results]}.
-
-% The result is a list with one tuple per requested id:
-% {Id, MissingRevs, PossibleAncestors}
-% if no revs are missing, MissingRevs is []
-get_missing_revs(Db, IdRevsList) ->
- FDIs = get_full_doc_infos(Db, [Id || {Id, _Revs} <- IdRevsList]),
- Results = lists:zipwith(
- fun
- ({Id, Revs}, #full_doc_info{rev_tree = RevTree} = FDI) ->
- MissingRevs = couch_key_tree:find_missing(RevTree, Revs),
- {Id, MissingRevs, possible_ancestors(FDI, MissingRevs)};
- ({Id, Revs}, not_found) ->
- {Id, Revs, []}
- end,
- IdRevsList,
- FDIs
- ),
- {ok, Results}.
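-
-% For example (illustrative, with made-up revs): asking about a rev that is
-% not in the tree might return
-%   {ok, [{<<"doc1">>, [{2, <<"def">>}], [{1, <<"abc">>}]}]}
-% i.e. the missing rev plus a possible ancestor; when nothing is missing the
-% middle list is [].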
-
-get_doc_info(Db, Id) ->
- case get_full_doc_info(Db, Id) of
- #full_doc_info{} = FDI ->
- {ok, couch_doc:to_doc_info(FDI)};
- Else ->
- Else
- end.
-
-get_full_doc_info(Db, Id) ->
- [Result] = get_full_doc_infos(Db, [Id]),
- Result.
-
-get_full_doc_infos(Db, Ids) ->
- couch_db_engine:open_docs(Db, Ids).
-
-purge_docs(Db, IdRevs) ->
- purge_docs(Db, IdRevs, []).
-
--spec purge_docs(#db{}, [{UUId, Id, [Rev]}], [PurgeOption]) ->
- {ok, [Reply]}
-when
- UUId :: binary(),
- Id :: binary() | list(),
- Rev :: {non_neg_integer(), binary()},
- PurgeOption :: interactive_edit | replicated_changes,
- Reply :: {ok, []} | {ok, [Rev]}.
-purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) ->
- UUIDsIdsRevs2 = [
- {UUID, couch_util:to_binary(Id), Revs}
- || {UUID, Id, Revs} <- UUIDsIdsRevs
- ],
- % Check here if any UUIDs already exist when
- % we're not replicating purge infos
- IsRepl = lists:member(replicated_changes, Options),
- if
- IsRepl ->
- ok;
- true ->
- UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2],
- lists:foreach(
- fun(Resp) ->
- if
- Resp == not_found ->
- ok;
- true ->
- Fmt = "Duplicate purge info UIUD: ~s",
- Reason = io_lib:format(Fmt, [element(2, Resp)]),
- throw({badreq, Reason})
- end
- end,
- get_purge_infos(Db, UUIDs)
- )
- end,
- increment_stat(Db, [couchdb, database_purges]),
- gen_server:call(Pid, {purge_docs, UUIDsIdsRevs2, Options}).
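-
-% Illustrative call shape matching the purge_docs/3 spec above; the uuid,
-% doc id and rev hash are made-up values:
-%   couch_db:purge_docs(Db, [{<<"uuid-1">>, <<"doc1">>, [{1, <<"abc123">>}]}])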
-
--spec get_purge_infos(#db{}, [UUId]) -> [PurgeInfo] when
- UUId :: binary(),
- PurgeInfo :: {PurgeSeq, UUId, Id, [Rev]} | not_found,
- PurgeSeq :: non_neg_integer(),
- Id :: binary(),
- Rev :: {non_neg_integer(), binary()}.
-get_purge_infos(Db, UUIDs) ->
- couch_db_engine:load_purge_infos(Db, UUIDs).
-
-get_minimum_purge_seq(#db{} = Db) ->
- PurgeSeq = couch_db_engine:get_purge_seq(Db),
- OldestPurgeSeq = couch_db_engine:get_oldest_purge_seq(Db),
- PurgeInfosLimit = couch_db_engine:get_purge_infos_limit(Db),
-
- FoldFun = fun(#doc{id = DocId, body = {Props}}, SeqAcc) ->
- case DocId of
- <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
- ClientSeq = couch_util:get_value(<<"purge_seq">>, Props),
- DbName = couch_db:name(Db),
- % If there's a broken doc we have to keep every
- % purge info until the doc is fixed or removed.
- Fmt = "Invalid purge doc '~s' on ~p with purge_seq '~w'",
- case ClientSeq of
- CS when is_integer(CS), CS >= PurgeSeq - PurgeInfosLimit ->
- {ok, SeqAcc};
- CS when is_integer(CS) ->
- case purge_client_exists(DbName, DocId, Props) of
- true ->
- {ok, erlang:min(CS, SeqAcc)};
- false ->
- couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
- {ok, SeqAcc}
- end;
- _ ->
- couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
- {ok, erlang:min(OldestPurgeSeq, SeqAcc)}
- end;
- _ ->
- {stop, SeqAcc}
- end
- end,
- InitMinSeq = PurgeSeq - PurgeInfosLimit,
- Opts = [
- {start_key, list_to_binary(?LOCAL_DOC_PREFIX ++ "purge-")}
- ],
- {ok, MinIdxSeq} = couch_db:fold_local_docs(Db, FoldFun, InitMinSeq, Opts),
- FinalSeq =
- case MinIdxSeq < PurgeSeq - PurgeInfosLimit of
- true -> MinIdxSeq;
- false -> erlang:max(0, PurgeSeq - PurgeInfosLimit)
- end,
- % Log a warning if we've got a purge sequence exceeding the
- % configured threshold.
- if
- FinalSeq >= (PurgeSeq - PurgeInfosLimit) ->
- ok;
- true ->
- Fmt = "The purge sequence for '~s' exceeds configured threshold",
- couch_log:warning(Fmt, [couch_db:name(Db)])
- end,
- FinalSeq.
-
-purge_client_exists(DbName, DocId, Props) ->
- % Warn about clients that have not updated their purge
- % checkpoints in the last "index_lag_warn_seconds"
- LagWindow = config:get_integer(
- % Default 24 hours
- "purge",
- "index_lag_warn_seconds",
- 86400
- ),
-
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- LagThreshold = NowSecs - LagWindow,
-
- try
- Exists = couch_db_plugin:is_valid_purge_client(DbName, Props),
- if
- not Exists ->
- ok;
- true ->
- Updated = couch_util:get_value(<<"updated_on">>, Props),
- if
- is_integer(Updated) andalso Updated > LagThreshold ->
- ok;
- true ->
- Diff = NowSecs - Updated,
- Fmt1 =
- "Purge checkpoint '~s' not updated in ~p seconds\n"
- " in database ~p",
- couch_log:error(Fmt1, [DocId, Diff, DbName])
- end
- end,
- Exists
- catch
- _:_ ->
- % If we fail to check for a client we have to assume that
- % it exists.
- Fmt2 =
- "Failed to check purge checkpoint using\n"
- " document '~p' in database ~p",
- couch_log:error(Fmt2, [DocId, DbName]),
- true
- end.
-
-set_purge_infos_limit(#db{main_pid = Pid} = Db, Limit) when Limit > 0 ->
- check_is_admin(Db),
- gen_server:call(Pid, {set_purge_infos_limit, Limit}, infinity);
-set_purge_infos_limit(_Db, _Limit) ->
- throw(invalid_purge_infos_limit).
-
-get_after_doc_read_fun(#db{after_doc_read = Fun}) ->
- Fun.
-
-get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
- Fun.
-
-get_committed_update_seq(#db{committed_update_seq = Seq}) ->
- Seq.
-
-get_update_seq(#db{} = Db) ->
- couch_db_engine:get_update_seq(Db).
-
-get_user_ctx(#db{user_ctx = UserCtx}) ->
- UserCtx;
-get_user_ctx(?OLD_DB_REC = Db) ->
- ?OLD_DB_USER_CTX(Db).
-
-get_purge_seq(#db{} = Db) ->
- couch_db_engine:get_purge_seq(Db).
-
-get_oldest_purge_seq(#db{} = Db) ->
- couch_db_engine:get_oldest_purge_seq(Db).
-
-get_purge_infos_limit(#db{} = Db) ->
- couch_db_engine:get_purge_infos_limit(Db).
-
-get_pid(#db{main_pid = Pid}) ->
- Pid.
-
-get_del_doc_count(Db) ->
- {ok, couch_db_engine:get_del_doc_count(Db)}.
-
-get_doc_count(Db) ->
- {ok, couch_db_engine:get_doc_count(Db)}.
-
-get_uuid(#db{} = Db) ->
- couch_db_engine:get_uuid(Db).
-
-get_epochs(#db{} = Db) ->
- Epochs = couch_db_engine:get_epochs(Db),
- validate_epochs(Epochs),
- Epochs.
-
-get_filepath(#db{filepath = FilePath}) ->
- FilePath.
-
-get_instance_start_time(#db{instance_start_time = IST}) ->
- IST.
-
-get_compacted_seq(#db{} = Db) ->
- couch_db_engine:get_compacted_seq(Db).
-
-get_compactor_pid(#db{compactor_pid = Pid}) ->
- Pid.
-
-get_compactor_pid_sync(#db{main_pid = Pid}) ->
- case gen_server:call(Pid, compactor_pid, infinity) of
- CPid when is_pid(CPid) ->
- CPid;
- _ ->
- nil
- end.
-
-get_db_info(Db) ->
- #db{
- name = Name,
- compactor_pid = Compactor,
- instance_start_time = StartTime,
- committed_update_seq = CommittedUpdateSeq
- } = Db,
- {ok, DocCount} = get_doc_count(Db),
- {ok, DelDocCount} = get_del_doc_count(Db),
- SizeInfo = couch_db_engine:get_size_info(Db),
- DiskVersion = couch_db_engine:get_disk_version(Db),
- Uuid =
- case get_uuid(Db) of
- undefined -> null;
- Uuid0 -> Uuid0
- end,
- CompactedSeq =
- case get_compacted_seq(Db) of
- undefined -> null;
- Else1 -> Else1
- end,
- Props =
- case couch_db_engine:get_props(Db) of
- undefined -> null;
- Else2 -> {Else2}
- end,
- InfoList = [
- {db_name, Name},
- {engine, couch_db_engine:get_engine(Db)},
- {doc_count, DocCount},
- {doc_del_count, DelDocCount},
- {update_seq, get_update_seq(Db)},
- {purge_seq, couch_db_engine:get_purge_seq(Db)},
- {compact_running, Compactor /= nil},
- {sizes, {SizeInfo}},
- {instance_start_time, StartTime},
- {disk_format_version, DiskVersion},
- {committed_update_seq, CommittedUpdateSeq},
- {compacted_seq, CompactedSeq},
- {props, Props},
- {uuid, Uuid}
- ],
- {ok, InfoList}.
-
-get_partition_info(#db{} = Db, Partition) when is_binary(Partition) ->
- Info = couch_db_engine:get_partition_info(Db, Partition),
- {ok, Info};
-get_partition_info(_Db, _Partition) ->
- throw({bad_request, <<"`partition` is not valid">>}).
-
-get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) ->
- DDocId = couch_util:normalize_ddoc_id(DDocId0),
- DbName = mem3:dbname(ShardDbName),
- {_, Ref} = spawn_monitor(fun() ->
- exit(fabric:open_doc(DbName, DDocId, []))
- end),
- receive
- {'DOWN', Ref, _, _, Response} ->
- Response
- end;
-get_design_doc(#db{} = Db, DDocId0) ->
- DDocId = couch_util:normalize_ddoc_id(DDocId0),
- couch_db:open_doc_int(Db, DDocId, [ejson_body]).
-
-get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) ->
- DbName = mem3:dbname(ShardDbName),
- {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
- receive
- {'DOWN', Ref, _, _, Response} ->
- Response
- end;
-get_design_docs(#db{} = Db) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- {ok, Docs} = fold_design_docs(Db, FoldFun, [], []),
- {ok, lists:reverse(Docs)}.
-
-get_design_doc_count(#db{} = Db) ->
- FoldFun = fun(_, Acc) -> {ok, Acc + 1} end,
- fold_design_docs(Db, FoldFun, 0, []).
-
-check_is_admin(#db{user_ctx = UserCtx} = Db) ->
- case is_admin(Db) of
- true ->
- ok;
- false ->
- Reason = <<"You are not a db or server admin.">>,
- throw_security_error(UserCtx, Reason)
- end.
-
-check_is_member(#db{user_ctx = UserCtx} = Db) ->
- case is_member(Db) of
- true -> ok;
- false -> throw_security_error(UserCtx)
- end.
-
-is_admin(#db{user_ctx = UserCtx} = Db) ->
- case couch_db_plugin:check_is_admin(Db) of
- true ->
- true;
- false ->
- {Admins} = get_admins(Db),
- is_authorized(UserCtx, Admins)
- end.
-
-is_member(#db{user_ctx = UserCtx} = Db) ->
- case is_admin(Db) of
- true ->
- true;
- false ->
- case is_public_db(Db) of
- true ->
- true;
- false ->
- {Members} = get_members(Db),
- is_authorized(UserCtx, Members)
- end
- end.
-
-is_public_db(#db{} = Db) ->
- {Members} = get_members(Db),
- Names = couch_util:get_value(<<"names">>, Members, []),
- Roles = couch_util:get_value(<<"roles">>, Members, []),
- Names =:= [] andalso Roles =:= [].
-
-is_authorized(#user_ctx{name = UserName, roles = UserRoles}, Security) ->
- Names = couch_util:get_value(<<"names">>, Security, []),
- Roles = couch_util:get_value(<<"roles">>, Security, []),
- case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
- true -> true;
- false -> check_security(names, UserName, Names)
- end.
-
-check_security(roles, [], _) ->
- false;
-check_security(roles, UserRoles, Roles) ->
- UserRolesSet = ordsets:from_list(UserRoles),
- RolesSet = ordsets:from_list(Roles),
- not ordsets:is_disjoint(UserRolesSet, RolesSet);
-check_security(names, _, []) ->
- false;
-check_security(names, null, _) ->
- false;
-check_security(names, UserName, Names) ->
- lists:member(UserName, Names).
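-
-% Illustrative check (made-up names): a #user_ctx{} with roles
-% [<<"accountant">>] is authorized by a section such as
-% [{<<"names">>, []}, {<<"roles">>, [<<"accountant">>]}], and any user
-% holding the <<"_admin">> role is authorized regardless of the section.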
-
-throw_security_error(#user_ctx{name = null} = UserCtx) ->
- Reason = <<"You are not authorized to access this db.">>,
- throw_security_error(UserCtx, Reason);
-throw_security_error(#user_ctx{name = _} = UserCtx) ->
- Reason = <<"You are not allowed to access this db.">>,
- throw_security_error(UserCtx, Reason).
-throw_security_error(#user_ctx{} = UserCtx, Reason) ->
- Error = security_error_type(UserCtx),
- throw({Error, Reason}).
-
-security_error_type(#user_ctx{name = null}) ->
- unauthorized;
-security_error_type(#user_ctx{name = _}) ->
- forbidden.
-
-get_admins(#db{security = SecProps}) ->
- couch_util:get_value(<<"admins">>, SecProps, {[]}).
-
-get_members(#db{security = SecProps}) ->
- % we fall back to readers here for backwards compatibility
- couch_util:get_value(
- <<"members">>,
- SecProps,
- couch_util:get_value(<<"readers">>, SecProps, {[]})
- ).
-
-get_security(#db{security = SecProps}) ->
- {SecProps};
-get_security(?OLD_DB_REC = Db) ->
- {?OLD_DB_SECURITY(Db)}.
-
-set_security(#db{main_pid = Pid} = Db, {NewSecProps}) when is_list(NewSecProps) ->
- check_is_admin(Db),
- ok = validate_security_object(NewSecProps),
- gen_server:call(Pid, {set_security, NewSecProps}, infinity);
-set_security(_, _) ->
- throw(bad_request).
-
-set_user_ctx(#db{} = Db, UserCtx) ->
- {ok, Db#db{user_ctx = UserCtx}}.
-
-validate_security_object(SecProps) ->
- Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
- % we fall back to readers here for backwards compatibility
- Members = couch_util:get_value(
- <<"members">>,
- SecProps,
- couch_util:get_value(<<"readers">>, SecProps, {[]})
- ),
- ok = validate_names_and_roles(Admins),
- ok = validate_names_and_roles(Members),
- ok.
-
-% validate user input
-validate_names_and_roles({Props}) when is_list(Props) ->
- case couch_util:get_value(<<"names">>, Props, []) of
- Ns when is_list(Ns) ->
- [throw("names must be a JSON list of strings") || N <- Ns, not is_binary(N)],
- Ns;
- _ ->
- throw("names must be a JSON list of strings")
- end,
- case couch_util:get_value(<<"roles">>, Props, []) of
- Rs when is_list(Rs) ->
- [throw("roles must be a JSON list of strings") || R <- Rs, not is_binary(R)],
- Rs;
- _ ->
- throw("roles must be a JSON list of strings")
- end,
- ok;
-validate_names_and_roles(_) ->
- throw("admins or members must be a JSON list of strings").
-
-get_revs_limit(#db{} = Db) ->
- couch_db_engine:get_revs_limit(Db).
-
-set_revs_limit(#db{main_pid = Pid} = Db, Limit) when Limit > 0 ->
- check_is_admin(Db),
- gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
-set_revs_limit(_Db, _Limit) ->
- throw(invalid_revs_limit).
-
-name(#db{name = Name}) ->
- Name;
-name(?OLD_DB_REC = Db) ->
- ?OLD_DB_NAME(Db).
-
-validate_docid(#db{} = Db, DocId) when is_binary(DocId) ->
- couch_doc:validate_docid(DocId, name(Db)),
- case is_partitioned(Db) of
- true ->
- couch_partition:validate_docid(DocId);
- false ->
- ok
- end.
-
-doc_from_json_obj_validate(#db{} = Db, DocJson) ->
- Doc = couch_doc:from_json_obj_validate(DocJson, name(Db)),
- {Props} = DocJson,
- case couch_util:get_value(<<"_id">>, Props) of
- DocId when is_binary(DocId) ->
- % Only validate the docid if it was provided
- validate_docid(Db, DocId);
- _ ->
- ok
- end,
- Doc.
-
-update_doc(Db, Doc, Options) ->
- update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(Db, Doc, Options, UpdateType) ->
- case update_docs(Db, [Doc], Options, UpdateType) of
- {ok, [{ok, NewRev}]} ->
- {ok, NewRev};
- {ok, [{{_Id, _Rev}, Error}]} ->
- throw(Error);
- {ok, [Error]} ->
- throw(Error);
- {ok, []} ->
- % replication success
- {Pos, [RevId | _]} = Doc#doc.revs,
- {ok, {Pos, RevId}}
- end.
-
-update_docs(Db, Docs) ->
- update_docs(Db, Docs, []).
-
-% group_alike_docs groups the sorted documents into sublist buckets, by id.
-% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
-group_alike_docs(Docs) ->
- % Here we're just asserting that our doc sort is stable so that
- % if we have duplicate docids we don't have to worry about the
- % behavior of lists:sort/2 which isn't documented anywhere as
- % being stable.
- WithPos = lists:zip(Docs, lists:seq(1, length(Docs))),
- SortFun = fun({D1, P1}, {D2, P2}) -> {D1#doc.id, P1} =< {D2#doc.id, P2} end,
- SortedDocs = [D || {D, _} <- lists:sort(SortFun, WithPos)],
- group_alike_docs(SortedDocs, []).
-
-group_alike_docs([], Buckets) ->
- lists:reverse(lists:map(fun lists:reverse/1, Buckets));
-group_alike_docs([Doc | Rest], []) ->
- group_alike_docs(Rest, [[Doc]]);
-group_alike_docs([Doc | Rest], [Bucket | RestBuckets]) ->
- [#doc{id = BucketId} | _] = Bucket,
- case Doc#doc.id == BucketId of
- true ->
- % add to existing bucket
- group_alike_docs(Rest, [[Doc | Bucket] | RestBuckets]);
- false ->
- % add to new bucket
- group_alike_docs(Rest, [[Doc] | [Bucket | RestBuckets]])
- end.
-
-validate_doc_update(#db{} = Db, #doc{id = <<"_design/", _/binary>>} = Doc, _GetDiskDocFun) ->
- case catch check_is_admin(Db) of
- ok -> validate_ddoc(Db, Doc);
- Error -> Error
- end;
-validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) ->
- ValidationFuns = load_validation_funs(Db),
- validate_doc_update(Db#db{validate_doc_funs = ValidationFuns}, Doc, Fun);
-validate_doc_update(#db{validate_doc_funs = []}, _Doc, _GetDiskDocFun) ->
- ok;
-validate_doc_update(_Db, #doc{id = <<"_local/", _/binary>>}, _GetDiskDocFun) ->
- ok;
-validate_doc_update(Db, Doc, GetDiskDocFun) ->
- case get(io_priority) of
- {internal_repl, _} ->
- ok;
- _ ->
- validate_doc_update_int(Db, Doc, GetDiskDocFun)
- end.
-
-validate_ddoc(Db, DDoc) ->
- try
- ok = couch_index_server:validate(Db, couch_doc:with_ejson_body(DDoc))
- catch
- throw:{invalid_design_doc, Reason} ->
- {bad_request, invalid_design_doc, Reason};
- throw:{compilation_error, Reason} ->
- {bad_request, compilation_error, Reason};
- throw:Error ->
- Error
- end.
-
-validate_doc_update_int(Db, Doc, GetDiskDocFun) ->
- Fun = fun() ->
- DiskDoc = GetDiskDocFun(),
- JsonCtx = couch_util:json_user_ctx(Db),
- SecObj = get_security(Db),
- try
- [
- case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
- ok -> ok;
- Error -> throw(Error)
- end
- || Fun <- Db#db.validate_doc_funs
- ],
- ok
- catch
- throw:Error ->
- Error
- end
- end,
- couch_stats:update_histogram(
- [couchdb, query_server, vdu_process_time],
- Fun
- ).
-
-% to be safe, spawn a middleman here
-load_validation_funs(#db{main_pid = Pid, name = <<"shards/", _/binary>>} = Db) ->
- {_, Ref} = spawn_monitor(fun() ->
- exit(ddoc_cache:open(mem3:dbname(Db#db.name), validation_funs))
- end),
- receive
- {'DOWN', Ref, _, _, {ok, Funs}} ->
- gen_server:cast(Pid, {load_validation_funs, Funs}),
- Funs;
- {'DOWN', Ref, _, _, {database_does_not_exist, _StackTrace}} ->
- ok = couch_server:close_db_if_idle(Db#db.name),
- erlang:error(database_does_not_exist);
- {'DOWN', Ref, _, _, Reason} ->
- couch_log:error("could not load validation funs ~p", [Reason]),
- throw(internal_server_error)
- end;
-load_validation_funs(#db{main_pid = Pid} = Db) ->
- {ok, DDocInfos} = get_design_docs(Db),
- OpenDocs = fun(#full_doc_info{} = D) ->
- {ok, Doc} = open_doc_int(Db, D, [ejson_body]),
- Doc
- end,
- DDocs = lists:map(OpenDocs, DDocInfos),
- Funs = lists:flatmap(
- fun(DDoc) ->
- case couch_doc:get_validate_doc_fun(DDoc) of
- nil -> [];
- Fun -> [Fun]
- end
- end,
- DDocs
- ),
- gen_server:cast(Pid, {load_validation_funs, Funs}),
- Funs.
-
-reload_validation_funs(#db{} = Db) ->
- gen_server:cast(Db#db.main_pid, {load_validation_funs, undefined}).
-
-prep_and_validate_update(
- Db,
- #doc{id = Id, revs = {RevStart, Revs}} = Doc,
- OldFullDocInfo,
- LeafRevsDict,
- AllowConflict
-) ->
- case Revs of
- [PrevRev | _] ->
- case dict:find({RevStart, PrevRev}, LeafRevsDict) of
- {ok, {#leaf{deleted = Deleted, ptr = DiskSp}, DiskRevs}} ->
- case couch_doc:has_stubs(Doc) of
- true ->
- DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
- Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
- {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
- false ->
- LoadDiskDoc = fun() -> make_doc(Db, Id, Deleted, DiskSp, DiskRevs) end,
- {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
- end;
- error when AllowConflict ->
- % will generate an error if there are stubs
- couch_doc:merge_stubs(Doc, #doc{}),
- {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
- error ->
- {conflict, Doc}
- end;
- [] ->
- % new doc, and we have existing revs.
- % reuse existing deleted doc
- if
- OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
- {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
- true ->
- {conflict, Doc}
- end
- end.
-
-prep_and_validate_updates(
- _Db,
- [],
- [],
- _AllowConflict,
- AccPrepped,
- AccFatalErrors
-) ->
- AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
- {AccPrepped2, AccFatalErrors};
-prep_and_validate_updates(
- Db,
- [DocBucket | RestBuckets],
- [not_found | RestLookups],
- AllowConflict,
- AccPrepped,
- AccErrors
-) ->
- % no existing revs are known,
- {PreppedBucket, AccErrors3} = lists:foldl(
- fun(#doc{revs = Revs} = Doc, {AccBucket, AccErrors2}) ->
- case couch_doc:has_stubs(Doc) of
- true ->
- % will throw exception
- couch_doc:merge_stubs(Doc, #doc{});
- false ->
- ok
- end,
- case Revs of
- {0, []} ->
- case validate_doc_update(Db, Doc, fun() -> nil end) of
- ok ->
- {[Doc | AccBucket], AccErrors2};
- Error ->
- {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]}
- end;
- _ ->
- % old revs specified but none exist, a conflict
- {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]}
- end
- end,
- {[], AccErrors},
- DocBucket
- ),
-
- prep_and_validate_updates(
- Db,
- RestBuckets,
- RestLookups,
- AllowConflict,
- [PreppedBucket | AccPrepped],
- AccErrors3
- );
-prep_and_validate_updates(
- Db,
- [DocBucket | RestBuckets],
- [#full_doc_info{rev_tree = OldRevTree} = OldFullDocInfo | RestLookups],
- AllowConflict,
- AccPrepped,
- AccErrors
-) ->
- Leafs = couch_key_tree:get_all_leafs(OldRevTree),
- LeafRevsDict = dict:from_list([
- {{Start, RevId}, {Leaf, Revs}}
- || {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
- ]),
- {PreppedBucket, AccErrors3} = lists:foldl(
- fun(Doc, {Docs2Acc, AccErrors2}) ->
- case
- prep_and_validate_update(
- Db,
- Doc,
- OldFullDocInfo,
- LeafRevsDict,
- AllowConflict
- )
- of
- {ok, Doc2} ->
- {[Doc2 | Docs2Acc], AccErrors2};
- {Error, _} ->
- % Record the error
- {Docs2Acc, [{doc_tag(Doc), Error} | AccErrors2]}
- end
- end,
- {[], AccErrors},
- DocBucket
- ),
- prep_and_validate_updates(
- Db,
- RestBuckets,
- RestLookups,
- AllowConflict,
- [PreppedBucket | AccPrepped],
- AccErrors3
- ).
-
-update_docs(Db, Docs, Options) ->
- update_docs(Db, Docs, Options, interactive_edit).
-
-prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
- Errors2 = [
- {{Id, {Pos, Rev}}, Error}
- || {#doc{id = Id, revs = {Pos, [Rev | _]}}, Error} <- AccErrors
- ],
- AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
- {AccPrepped2, lists:reverse(Errors2)};
-prep_and_validate_replicated_updates(
- Db, [Bucket | RestBuckets], [OldInfo | RestOldInfo], AccPrepped, AccErrors
-) ->
- case OldInfo of
- not_found ->
- {ValidatedBucket, AccErrors3} = lists:foldl(
- fun(Doc, {AccPrepped2, AccErrors2}) ->
- case couch_doc:has_stubs(Doc) of
- true ->
- % will throw exception
- couch_doc:merge_stubs(Doc, #doc{});
- false ->
- ok
- end,
- case validate_doc_update(Db, Doc, fun() -> nil end) of
- ok ->
- {[Doc | AccPrepped2], AccErrors2};
- Error ->
- {AccPrepped2, [{Doc, Error} | AccErrors2]}
- end
- end,
- {[], AccErrors},
- Bucket
- ),
- prep_and_validate_replicated_updates(
- Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3
- );
- #full_doc_info{rev_tree = OldTree} ->
- OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
- OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _} | _]} <- OldLeafs],
- NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
- NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
- Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
- LeafRevsFullDict = dict:from_list([
- {{Start, RevId}, FullPath}
- || {Start, [{RevId, _} | _]} = FullPath <- Leafs
- ]),
- {ValidatedBucket, AccErrors3} =
- lists:foldl(
- fun(#doc{id = Id, revs = {Pos, [RevId | _]}} = Doc, {AccValidated, AccErrors2}) ->
- IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU),
- case dict:find({Pos, RevId}, LeafRevsFullDict) of
- {ok, {Start, Path}} when not IsOldLeaf ->
- % our unflushed doc is a leaf node. Go back on the path
- % to find the previous rev that's on disk.
-
- LoadPrevRevFun = fun() ->
- make_first_doc_on_disk(Db, Id, Start - 1, tl(Path))
- end,
-
- case couch_doc:has_stubs(Doc) of
- true ->
- DiskDoc =
- case LoadPrevRevFun() of
- #doc{} = DiskDoc0 ->
- DiskDoc0;
- _ ->
- % Force a missing_stub exception
- couch_doc:merge_stubs(Doc, #doc{})
- end,
- Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
- GetDiskDocFun = fun() -> DiskDoc end;
- false ->
- Doc2 = Doc,
- GetDiskDocFun = LoadPrevRevFun
- end,
-
- case validate_doc_update(Db, Doc2, GetDiskDocFun) of
- ok ->
- {[Doc2 | AccValidated], AccErrors2};
- Error ->
- {AccValidated, [{Doc, Error} | AccErrors2]}
- end;
- _ ->
- % this doc isn't a leaf or already exists in the tree.
- % ignore but consider it a success.
- {AccValidated, AccErrors2}
- end
- end,
- {[], AccErrors},
- Bucket
- ),
- prep_and_validate_replicated_updates(
- Db,
- RestBuckets,
- RestOldInfo,
- [ValidatedBucket | AccPrepped],
- AccErrors3
- )
- end.
-
-new_revid(#doc{body = Body, revs = {OldStart, OldRevs}, atts = Atts, deleted = Deleted}) ->
- DigestedAtts = lists:foldl(
- fun(Att, Acc) ->
- [N, T, M] = couch_att:fetch([name, type, md5], Att),
- case M == <<>> of
- true -> Acc;
- false -> [{N, T, M} | Acc]
- end
- end,
- [],
- Atts
- ),
- case DigestedAtts of
- Atts2 when length(Atts) =/= length(Atts2) ->
- % We must have old style non-md5 attachments
- ?l2b(integer_to_list(couch_util:rand32()));
- Atts2 ->
- OldRev =
- case OldRevs of
- [] -> 0;
- [OldRev0 | _] -> OldRev0
- end,
- couch_hash:md5_hash(
- term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}])
- )
- end.
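-
-% Note (illustrative): the new revision id is a deterministic md5 over the
-% deleted flag, the previous revision, the body and the attachment digests,
-% so replaying the same update against the same parent yields the same rev
-% (docs with old-style attachments lacking md5s get a random id instead).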
-
-new_revs([], OutBuckets, IdRevsAcc) ->
- {lists:reverse(OutBuckets), IdRevsAcc};
-new_revs([Bucket | RestBuckets], OutBuckets, IdRevsAcc) ->
- {NewBucket, IdRevsAcc3} = lists:mapfoldl(
- fun(#doc{revs = {Start, RevIds}} = Doc, IdRevsAcc2) ->
- NewRevId = new_revid(Doc),
- {Doc#doc{revs = {Start + 1, [NewRevId | RevIds]}}, [
- {doc_tag(Doc), {ok, {Start + 1, NewRevId}}} | IdRevsAcc2
- ]}
- end,
- IdRevsAcc,
- Bucket
- ),
- new_revs(RestBuckets, [NewBucket | OutBuckets], IdRevsAcc3).
-
-check_dup_atts(#doc{atts = Atts} = Doc) ->
- lists:foldl(
- fun(Att, Names) ->
- Name = couch_att:fetch(name, Att),
- case ordsets:is_element(Name, Names) of
- true -> throw({bad_request, <<"Duplicate attachments">>});
- false -> ordsets:add_element(Name, Names)
- end
- end,
- ordsets:new(),
- Atts
- ),
- Doc.
-
-tag_docs([]) ->
- [];
-tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
- [Doc#doc{meta = [{ref, make_ref()} | Meta]} | tag_docs(Rest)].
-
-doc_tag(#doc{meta = Meta}) ->
- case lists:keyfind(ref, 1, Meta) of
- {ref, Ref} when is_reference(Ref) -> Ref;
- false -> throw(doc_not_tagged);
- Else -> throw({invalid_doc_tag, Else})
- end.
-
-update_docs(Db, Docs0, Options, replicated_changes) ->
- Docs = tag_docs(Docs0),
-
- PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
- prep_and_validate_replicated_updates(
- Db0,
- DocBuckets0,
- ExistingDocInfos,
- [],
- []
- )
- end,
-
- {ok, DocBuckets, NonRepDocs, DocErrors} =
- before_docs_update(Db, Docs, PrepValidateFun, replicated_changes),
-
- DocBuckets2 = [
- [
- doc_flush_atts(Db, check_dup_atts(Doc))
- || Doc <- Bucket
- ]
- || Bucket <- DocBuckets
- ],
- {ok, _} = write_and_commit(
- Db,
- DocBuckets2,
- NonRepDocs,
- [merge_conflicts | Options]
- ),
- {ok, DocErrors};
-update_docs(Db, Docs0, Options, interactive_edit) ->
- Docs = tag_docs(Docs0),
-
- AllOrNothing = lists:member(all_or_nothing, Options),
- PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
- prep_and_validate_updates(
- Db0,
- DocBuckets0,
- ExistingDocInfos,
- AllOrNothing,
- [],
- []
- )
- end,
-
- {ok, DocBuckets, NonRepDocs, DocErrors} =
- before_docs_update(Db, Docs, PrepValidateFun, interactive_edit),
-
- if
- (AllOrNothing) and (DocErrors /= []) ->
- RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
- {aborted,
- lists:map(
- fun({Ref, Error}) ->
- #doc{id = Id, revs = {Start, RevIds}} = dict:fetch(Ref, RefErrorDict),
- case {Start, RevIds} of
- {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error};
- {0, []} -> {{Id, {0, <<>>}}, Error}
- end
- end,
- DocErrors
- )};
- true ->
- Options2 =
- if
- AllOrNothing -> [merge_conflicts];
- true -> []
- end ++ Options,
- DocBuckets2 = [
- [
- doc_flush_atts(
- Db,
- set_new_att_revpos(
- check_dup_atts(Doc)
- )
- )
- || Doc <- B
- ]
- || B <- DocBuckets
- ],
- {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []),
-
- {ok, CommitResults} = write_and_commit(
- Db,
- DocBuckets3,
- NonRepDocs,
- Options2
- ),
-
- ResultsDict = lists:foldl(
- fun({Key, Resp}, ResultsAcc) ->
- dict:store(Key, Resp, ResultsAcc)
- end,
- dict:from_list(IdRevs),
- CommitResults ++ DocErrors
- ),
- {ok,
- lists:map(
- fun(Doc) ->
- dict:fetch(doc_tag(Doc), ResultsDict)
- end,
- Docs
- )}
- end.
-
-% Returns the first available document on disk. Input list is a full rev path
-% for the doc.
-make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
- nil;
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
- make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING} | RestPath]) ->
- make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted = IsDel, ptr = Sp}} | _] = DocPath) ->
- Revs = [Rev || {Rev, _} <- DocPath],
- make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
-
-collect_results_with_metrics(Pid, MRef, []) ->
- Begin = os:timestamp(),
- try
- collect_results(Pid, MRef, [])
- after
- ResultsTime = timer:now_diff(os:timestamp(), Begin) div 1000,
- couch_stats:update_histogram(
- [couchdb, collect_results_time],
- ResultsTime
- )
- end.
-
-collect_results(Pid, MRef, ResultsAcc) ->
- receive
- {result, Pid, Result} ->
- collect_results(Pid, MRef, [Result | ResultsAcc]);
- {done, Pid} ->
- {ok, ResultsAcc};
- {retry, Pid} ->
- retry;
- {'DOWN', MRef, _, _, Reason} ->
- exit(Reason)
- end.
-
-write_and_commit(
- #db{main_pid = Pid, user_ctx = Ctx} = Db,
- DocBuckets1,
- NonRepDocs,
- Options
-) ->
- DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
- MergeConflicts = lists:member(merge_conflicts, Options),
- MRef = erlang:monitor(process, Pid),
- try
- Pid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts},
- case collect_results_with_metrics(Pid, MRef, []) of
- {ok, Results} ->
- {ok, Results};
- retry ->
- % This can happen if the db file we wrote to was swapped out by
- % compaction. Retry by reopening the db and writing to the current file
- {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
- DocBuckets2 = [
- [doc_flush_atts(Db2, Doc) || Doc <- Bucket]
- || Bucket <- DocBuckets1
- ],
- % We only retry once
- DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
- close(Db2),
- Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts},
- case collect_results_with_metrics(Pid, MRef, []) of
- {ok, Results} -> {ok, Results};
- retry -> throw({update_error, compaction_retry})
- end
- end
- after
- erlang:demonitor(MRef, [flush])
- end.
-
-prepare_doc_summaries(Db, BucketList) ->
- [
- lists:map(
- fun(#doc{body = Body, atts = Atts} = Doc0) ->
- DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts],
- {ok, SizeInfo} = couch_att:size_info(Atts),
- AttsStream =
- case Atts of
- [Att | _] ->
- {stream, StreamEngine} = couch_att:fetch(data, Att),
- StreamEngine;
- [] ->
- nil
- end,
- Doc1 = Doc0#doc{
- atts = DiskAtts,
- meta =
- [
- {size_info, SizeInfo},
- {atts_stream, AttsStream},
- {ejson_size, couch_ejson_size:encoded_size(Body)}
- ] ++ Doc0#doc.meta
- },
- couch_db_engine:serialize_doc(Db, Doc1)
- end,
- Bucket
- )
- || Bucket <- BucketList
- ].
-
-before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType) ->
- increment_stat(Db, [couchdb, database_writes]),
-
- % Separate _local docs from normal docs
- IsLocal = fun
- (#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
- (_) -> false
- end,
- {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs),
-
- BucketList = group_alike_docs(Docs2),
-
- DocBuckets = lists:map(
- fun(Bucket) ->
- lists:map(
- fun(Doc) ->
- DocWithBody = couch_doc:with_ejson_body(Doc),
- couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType)
- end,
- Bucket
- )
- end,
- BucketList
- ),
-
- ValidatePred = fun
- (#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
- (#doc{atts = Atts}) -> Atts /= []
- end,
-
- case (VDFuns /= []) orelse lists:any(ValidatePred, Docs2) of
- true ->
- % lookup the doc by id and get the most recent
- Ids = [Id || [#doc{id = Id} | _] <- DocBuckets],
- ExistingDocs = get_full_doc_infos(Db, Ids),
- {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs),
- % remove empty buckets
- DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []],
- {ok, DocBuckets3, NonRepDocs, DocErrors};
- false ->
- {ok, DocBuckets, NonRepDocs, []}
- end.
-
-set_new_att_revpos(#doc{revs = {RevPos, _Revs}, atts = Atts0} = Doc) ->
- Atts = lists:map(
- fun(Att) ->
- case couch_att:fetch(data, Att) of
- % already committed to disk, don't set new rev
- {stream, _} -> Att;
- {Fd, _} when is_pid(Fd) -> Att;
- % write required so update RevPos
- _ -> couch_att:store(revpos, RevPos + 1, Att)
- end
- end,
- Atts0
- ),
- Doc#doc{atts = Atts}.
-
-doc_flush_atts(Db, Doc) ->
- Doc#doc{atts = [couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}.
-
-compressible_att_type(MimeType) when is_binary(MimeType) ->
- compressible_att_type(?b2l(MimeType));
-compressible_att_type(MimeType) ->
- TypeExpList = re:split(
- config:get(
- "attachments",
- "compressible_types",
- ?DEFAULT_COMPRESSIBLE_TYPES
- ),
- "\\s*,\\s*",
- [{return, list}]
- ),
- lists:any(
- fun(TypeExp) ->
- Regexp = [
- "^\\s*",
- re:replace(TypeExp, "\\*", ".*"),
- "(?:\\s*;.*?)?\\s*",
- $$
- ],
- re:run(MimeType, Regexp, [caseless]) =/= nomatch
- end,
- [T || T <- TypeExpList, T /= []]
- ).
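-
-% For example (illustrative), with the default compressible_types above,
-% <<"text/plain">> and <<"text/plain; charset=utf-8">> both match "text/*",
-% while <<"image/png">> matches none of the configured patterns.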
-
-% From RFC 2616 3.6.1 - Chunked Transfer Coding
-%
-% In other words, the origin server is willing to accept
-% the possibility that the trailer fields might be silently
-% discarded along the path to the client.
-%
-% I take this to mean that if "Trailers: Content-MD5\r\n"
-% is present in the request, but there is no Content-MD5
-% trailer, we're free to ignore this inconsistency and
-% pretend that no Content-MD5 exists.
-with_stream(Db, Att, Fun) ->
- [InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att),
- BufferSize = config:get_integer(
- "couchdb",
- "attachment_stream_buffer_size",
- 4096
- ),
- Options =
- case (Enc =:= identity) andalso compressible_att_type(Type) of
- true ->
- CompLevel = config:get_integer(
- "attachments", "compression_level", 8
- ),
- [
- {buffer_size, BufferSize},
- {encoding, gzip},
- {compression_level, CompLevel}
- ];
- _ ->
- [{buffer_size, BufferSize}]
- end,
- {ok, OutputStream} = open_write_stream(Db, Options),
- ReqMd5 =
- case Fun(OutputStream) of
- {md5, FooterMd5} ->
- case InMd5 of
- md5_in_footer -> FooterMd5;
- _ -> InMd5
- end;
- _ ->
- InMd5
- end,
- {StreamEngine, Len, IdentityLen, Md5, IdentityMd5} =
- couch_stream:close(OutputStream),
- couch_util:check_md5(IdentityMd5, ReqMd5),
- {AttLen, DiskLen, NewEnc} =
- case Enc of
- identity ->
- case {Md5, IdentityMd5} of
- {Same, Same} ->
- {Len, IdentityLen, identity};
- _ ->
- {Len, IdentityLen, gzip}
- end;
- gzip ->
- case couch_att:fetch([att_len, disk_len], Att) of
- [AL, DL] when AL =:= undefined orelse DL =:= undefined ->
- % Compressed attachment uploaded through the standalone API.
- {Len, Len, gzip};
- [AL, DL] ->
- % This case is used for efficient push-replication, where a
- % compressed attachment is located in the body of multipart
- % content-type request.
- {AL, DL, gzip}
- end
- end,
- couch_att:store(
- [
- {data, {stream, StreamEngine}},
- {att_len, AttLen},
- {disk_len, DiskLen},
- {md5, Md5},
- {encoding, NewEnc}
- ],
- Att
- ).
-
-open_write_stream(Db, Options) ->
- couch_db_engine:open_write_stream(Db, Options).
-
-open_read_stream(Db, AttState) ->
- couch_db_engine:open_read_stream(Db, AttState).
-
-is_active_stream(Db, StreamEngine) ->
- couch_db_engine:is_active_stream(Db, StreamEngine).
-
-calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
- Seq;
-calculate_start_seq(Db, Node, {Seq, Uuid}) ->
- % Treat the current node as the epoch node
- calculate_start_seq(Db, Node, {Seq, Uuid, Node});
-calculate_start_seq(Db, _Node, {Seq, {split, Uuid}, EpochNode}) ->
- case is_owner(EpochNode, Seq, get_epochs(Db)) of
- true ->
- % Find last replicated sequence from split source to target
- mem3_rep:find_split_target_seq(Db, EpochNode, Uuid, Seq);
- false ->
- couch_log:warning(
- "~p calculate_start_seq not owner "
- "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
- [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)]
- ),
- 0
- end;
-calculate_start_seq(Db, Node, {Seq, Uuid, EpochNode}) ->
- case is_prefix(Uuid, get_uuid(Db)) of
- true ->
- case is_owner(EpochNode, Seq, get_epochs(Db)) of
- true ->
- Seq;
- false ->
- %% Shard might have been moved from another node. We
- %% matched the uuid already, try to find last viable
- %% sequence we can use
- couch_log:warning(
- "~p calculate_start_seq not owner, "
- " trying replacement db: ~p, seq: ~p, uuid: ~p, "
- "epoch_node: ~p, epochs: ~p",
- [
- ?MODULE,
- Db#db.name,
- Seq,
- Uuid,
- EpochNode,
- get_epochs(Db)
- ]
- ),
- calculate_start_seq(Db, Node, {replace, EpochNode, Uuid, Seq})
- end;
- false ->
- couch_log:warning(
- "~p calculate_start_seq uuid prefix mismatch "
- "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
- [?MODULE, Db#db.name, Seq, Uuid, EpochNode]
- ),
- %% The file was rebuilt, most likely in a different
- %% order, so rewind.
- 0
- end;
-calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) ->
- case is_prefix(Uuid, couch_db:get_uuid(Db)) of
- true ->
- try
- start_seq(get_epochs(Db), OriginalNode, Seq)
- catch
- throw:epoch_mismatch ->
- couch_log:warning(
- "~p start_seq duplicate uuid on node: ~p "
- "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
- [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode]
- ),
- 0
- end;
- false ->
- {replace, OriginalNode, Uuid, Seq}
- end.
-
-validate_epochs(Epochs) ->
- %% Assert uniqueness.
- case length(Epochs) == length(lists:ukeysort(2, Epochs)) of
- true -> ok;
- false -> erlang:error(duplicate_epoch)
- end,
- %% Assert order.
- case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of
- true -> ok;
- false -> erlang:error(epoch_order)
- end.
-
-is_prefix(Pattern, Subject) ->
- binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
-
-is_owner(Node, Seq, Epochs) ->
- Node =:= owner_of(Epochs, Seq).
-
-owner_of(Db, Seq) when not is_list(Db) ->
- owner_of(get_epochs(Db), Seq);
-owner_of([], _Seq) ->
- undefined;
-owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq >= EpochSeq ->
- EpochNode;
-owner_of([_ | Rest], Seq) ->
- owner_of(Rest, Seq).
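-
-% Epochs are kept sorted with the newest first, e.g. (illustrative)
-% [{node3, 300}, {node2, 100}, {node1, 0}]: owner_of/2 walks the list and
-% returns the first node whose epoch starts at or below the sequence, so
-% sequence 150 above belongs to node2 and sequence 400 to node3.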
-
-start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq >= EpochSeq ->
- %% OrigNode is the owner of the Seq so we can safely stream from there
- Seq;
-start_seq([{_, NewSeq}, {OrigNode, _} | _], OrigNode, Seq) when Seq >= NewSeq ->
- %% We transferred this file before Seq was written on OrigNode, so we need
- %% to stream from the beginning of the next epoch. Note that it is _not_
- %% necessary for the current node to own the epoch beginning at NewSeq
- NewSeq;
-start_seq([_ | Rest], OrigNode, Seq) ->
- start_seq(Rest, OrigNode, Seq);
-start_seq([], _OrigNode, _Seq) ->
- throw(epoch_mismatch).
-
-fold_docs(Db, UserFun, UserAcc) ->
- fold_docs(Db, UserFun, UserAcc, []).
-
-fold_docs(Db, UserFun, UserAcc, Options) ->
- couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options).
-
-fold_local_docs(Db, UserFun, UserAcc, Options) ->
- couch_db_engine:fold_local_docs(Db, UserFun, UserAcc, Options).
-
-fold_design_docs(Db, UserFun, UserAcc, Options1) ->
- Options2 = set_design_doc_keys(Options1),
- couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options2).
-
-fold_changes(Db, StartSeq, UserFun, UserAcc) ->
- fold_changes(Db, StartSeq, UserFun, UserAcc, []).
-
-fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) ->
- couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts).
-
-fold_purge_infos(Db, StartPurgeSeq, Fun, Acc) ->
- fold_purge_infos(Db, StartPurgeSeq, Fun, Acc, []).
-
-fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts) ->
- couch_db_engine:fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts).
-
-count_changes_since(Db, SinceSeq) ->
- couch_db_engine:count_changes_since(Db, SinceSeq).
-
-%%% Internal functions %%%
-open_doc_revs_int(Db, IdRevs, Options) ->
- Ids = [Id || {Id, _Revs} <- IdRevs],
- LookupResults = get_full_doc_infos(Db, Ids),
- lists:zipwith(
- fun({Id, Revs}, Lookup) ->
- case Lookup of
- #full_doc_info{rev_tree = RevTree} ->
- {FoundRevs, MissingRevs} =
- case Revs of
- all ->
- {couch_key_tree:get_all_leafs(RevTree), []};
- _ ->
- case lists:member(latest, Options) of
- true ->
- couch_key_tree:get_key_leafs(RevTree, Revs);
- false ->
- couch_key_tree:get(RevTree, Revs)
- end
- end,
- FoundResults =
- lists:map(
- fun({Value, {Pos, [Rev | _]} = FoundRevPath}) ->
- case Value of
- ?REV_MISSING ->
- % we have the rev in our list but know nothing about it
- {{not_found, missing}, {Pos, Rev}};
- #leaf{deleted = IsDeleted, ptr = SummaryPtr} ->
- {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
- end
- end,
- FoundRevs
- ),
- Results =
- FoundResults ++
- [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
- {ok, Results};
- not_found when Revs == all ->
- {ok, []};
- not_found ->
- {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
- end
- end,
- IdRevs,
- LookupResults
- ).
-
-open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
- case couch_db_engine:open_local_docs(Db, [Id]) of
- [#doc{} = Doc] ->
- apply_open_options({ok, Doc}, Options);
- [not_found] ->
- {not_found, missing}
- end;
-open_doc_int(Db, #doc_info{id = Id, revs = [RevInfo | _]} = DocInfo, Options) ->
- #rev_info{deleted = IsDeleted, rev = {Pos, RevId}, body_sp = Bp} = RevInfo,
- Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos, [RevId]}),
- apply_open_options(
- {ok, Doc#doc{meta = doc_meta_info(DocInfo, [], Options)}}, Options
- );
-open_doc_int(Db, #full_doc_info{id = Id, rev_tree = RevTree} = FullDocInfo, Options) ->
- #doc_info{revs = [#rev_info{deleted = IsDeleted, rev = Rev, body_sp = Bp} | _]} =
- DocInfo = couch_doc:to_doc_info(FullDocInfo),
- {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
- Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
- apply_open_options(
- {ok, Doc#doc{meta = doc_meta_info(DocInfo, RevTree, Options)}}, Options
- );
-open_doc_int(Db, Id, Options) ->
- case get_full_doc_info(Db, Id) of
- #full_doc_info{} = FullDocInfo ->
- open_doc_int(Db, FullDocInfo, Options);
- not_found ->
- {not_found, missing}
- end.
-
-doc_meta_info(
- #doc_info{high_seq = Seq, revs = [#rev_info{rev = Rev} | RestInfo]}, RevTree, Options
-) ->
- case lists:member(revs_info, Options) of
- false ->
- [];
- true ->
- {[{Pos, RevPath}], []} =
- couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
- [
- {revs_info, Pos,
- lists:map(
- fun
- ({Rev1, ?REV_MISSING}) ->
- {Rev1, missing};
- ({Rev1, Leaf}) ->
- case Leaf#leaf.deleted of
- true ->
- {Rev1, deleted};
- false ->
- {Rev1, available}
- end
- end,
- RevPath
- )}
- ]
- end ++
- case lists:member(conflicts, Options) of
- false ->
- [];
- true ->
- case [Rev1 || #rev_info{rev = Rev1, deleted = false} <- RestInfo] of
- [] -> [];
- ConflictRevs -> [{conflicts, ConflictRevs}]
- end
- end ++
- case lists:member(deleted_conflicts, Options) of
- false ->
- [];
- true ->
- case [Rev1 || #rev_info{rev = Rev1, deleted = true} <- RestInfo] of
- [] -> [];
- DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
- end
- end ++
- case lists:member(local_seq, Options) of
- false -> [];
- true -> [{local_seq, Seq}]
- end.
-
-make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) ->
- #doc{
- id = Id,
- revs = RevisionPath,
- body = [],
- atts = [],
- deleted = Deleted
- };
-make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) ->
- RevsLimit = get_revs_limit(Db),
- Doc0 = couch_db_engine:read_doc_body(Db, #doc{
- id = Id,
- revs = {Pos, lists:sublist(Revs, 1, RevsLimit)},
- body = Bp,
- deleted = Deleted
- }),
- Doc1 =
- case Doc0#doc.atts of
- BinAtts when is_binary(BinAtts) ->
- Doc0#doc{
- atts = couch_compress:decompress(BinAtts)
- };
- ListAtts when is_list(ListAtts) ->
- Doc0
- end,
- after_doc_read(Db, Doc1#doc{
- atts = [couch_att:from_disk_term(Db, T) || T <- Doc1#doc.atts]
- }).
-
-after_doc_read(#db{} = Db, Doc) ->
- DocWithBody = couch_doc:with_ejson_body(Doc),
- couch_db_plugin:after_doc_read(Db, DocWithBody).
-
-increment_stat(#db{options = Options}, Stat) ->
- case lists:member(sys_db, Options) of
- true ->
- ok;
- false ->
- couch_stats:increment_counter(Stat)
- end.
-
--spec normalize_dbname(list() | binary()) -> binary().
-
-normalize_dbname(DbName) when is_list(DbName) ->
- normalize_dbname(list_to_binary(DbName));
-normalize_dbname(DbName) when is_binary(DbName) ->
- mem3:dbname(couch_util:drop_dot_couch_ext(DbName)).
-
--spec dbname_suffix(list() | binary()) -> binary().
-
-dbname_suffix(DbName) ->
- filename:basename(normalize_dbname(DbName)).
-
-validate_dbname(DbName) when is_list(DbName) ->
- validate_dbname(?l2b(DbName));
-validate_dbname(DbName) when is_binary(DbName) ->
- Normalized = normalize_dbname(DbName),
- couch_db_plugin:validate_dbname(
- DbName, Normalized, fun validate_dbname_int/2
- ).
-
-validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
- DbNoExt = couch_util:drop_dot_couch_ext(DbName),
- case re:run(DbNoExt, ?DBNAME_REGEX, [{capture, none}, dollar_endonly]) of
- match ->
- ok;
- nomatch ->
- case is_system_db_name(Normalized) of
- true -> ok;
- false -> {error, {illegal_database_name, DbName}}
- end
- end.
-
-is_system_db_name(DbName) when is_list(DbName) ->
- is_system_db_name(?l2b(DbName));
-is_system_db_name(DbName) when is_binary(DbName) ->
- Normalized = normalize_dbname(DbName),
- Suffix = filename:basename(Normalized),
- case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of
- {<<".">>, Result} ->
- Result;
- {_Prefix, false} ->
- false;
- {Prefix, true} ->
- ReOpts = [{capture, none}, dollar_endonly],
- re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
- end.
-
-set_design_doc_keys(Options1) ->
- Dir =
- case lists:keyfind(dir, 1, Options1) of
- {dir, D0} -> D0;
- _ -> fwd
- end,
- Options2 = set_design_doc_start_key(Options1, Dir),
- set_design_doc_end_key(Options2, Dir).
-
--define(FIRST_DDOC_KEY, <<"_design/">>).
--define(LAST_DDOC_KEY, <<"_design0">>).
-
-set_design_doc_start_key(Options, fwd) ->
- Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
- Key2 =
- case Key1 < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> Key1
- end,
- lists:keystore(start_key, 1, Options, {start_key, Key2});
-set_design_doc_start_key(Options, rev) ->
- Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
- Key2 =
- case Key1 > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> Key1
- end,
- lists:keystore(start_key, 1, Options, {start_key, Key2}).
-
-set_design_doc_end_key(Options, fwd) ->
- case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
- Key2 =
- case Key1 > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> Key1
- end,
- lists:keystore(end_key, 1, Options, {end_key, Key2});
- EKeyGT ->
- Key2 =
- case EKeyGT > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> EKeyGT
- end,
- lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
- end;
-set_design_doc_end_key(Options, rev) ->
- case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
- Key2 =
- case Key1 < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> Key1
- end,
- lists:keystore(end_key, 1, Options, {end_key, Key2});
- EKeyGT ->
- Key2 =
- case EKeyGT < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> EKeyGT
- end,
- lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
- end.
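As a rough usage sketch (not from the original file), the clamping above means a design-doc fold can never escape the <<"_design/">> .. <<"_design0">> key range no matter what keys a caller passes; this assumes fold_design_docs/4 is exported as defined earlier in this module and that Db is an open #db{} handle:

    FoldFun = fun(#full_doc_info{id = Id}, Acc) -> {ok, [Id | Acc]} end,
    {ok, DDocIds} = couch_db:fold_design_docs(Db, FoldFun, [], []),
    % An out-of-range start_key is clamped back to <<"_design/">>,
    % so this fold visits the same design documents as the one above:
    {ok, DDocIds} = couch_db:fold_design_docs(Db, FoldFun, [],
        [{dir, fwd}, {start_key, <<"0">>}]).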
-
-possible_ancestors(_FullInfo, []) ->
- [];
-possible_ancestors(FullInfo, MissingRevs) ->
- #doc_info{revs = RevsInfo} = couch_doc:to_doc_info(FullInfo),
- LeafRevs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
- % Find the revs that are possible parents of this rev
- lists:foldl(
- fun({LeafPos, LeafRevId}, Acc) ->
- % this leaf is a "possible ancestor" of the missing
- % revs if this LeafPos is less than any of the missing revs
- case
- lists:any(
- fun({MissingPos, _}) ->
- LeafPos < MissingPos
- end,
- MissingRevs
- )
- of
- true ->
- [{LeafPos, LeafRevId} | Acc];
- false ->
- Acc
- end
- end,
- [],
- LeafRevs
- ).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- ok = meck:new(couch_epi, [passthrough]),
- ok = meck:expect(couch_epi, decide, fun(_, _, _, _, _) -> no_decision end),
- ok.
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([couch_epi]).
-
-teardown(_) ->
- ok.
-
-validate_dbname_success_test_() ->
- Cases =
- generate_cases_with_shards("long/co$mplex-/path+/something") ++
- generate_cases_with_shards("something") ++
- lists:append(
- [
- generate_cases_with_shards(?b2l(SystemDb))
- || SystemDb <- ?SYSTEM_DATABASES
- ]
- ),
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [should_pass_validate_dbname(A) || {_, A} <- Cases]
- }
- }.
-
-validate_dbname_fail_test_() ->
- Cases =
- generate_cases("_long/co$mplex-/path+/_something") ++
- generate_cases("_something") ++
- generate_cases_with_shards("long/co$mplex-/path+/_something#") ++
- generate_cases_with_shards("long/co$mplex-/path+/some.thing") ++
- generate_cases("!abcdefg/werwej/_users") ++
- generate_cases_with_shards("!abcdefg/werwej/_users"),
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [should_fail_validate_dbname(A) || {_, A} <- Cases]
- }
- }.
-
-normalize_dbname_test_() ->
- Cases =
- generate_cases_with_shards("long/co$mplex-/path+/_something") ++
- generate_cases_with_shards("_something"),
- WithExpected = [{?l2b(filename:rootname(A)), B} || {A, B} <- Cases],
- [
- {test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))}
- || {Expected, Db} <- WithExpected
- ].
-
-dbname_suffix_test_() ->
- Cases =
- generate_cases_with_shards("long/co$mplex-/path+/_something") ++
- generate_cases_with_shards("_something"),
- WithExpected = [{?l2b(filename:basename(Arg)), Db} || {Arg, Db} <- Cases],
- [
- {test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))}
- || {Expected, Db} <- WithExpected
- ].
-
-is_system_db_name_test_() ->
- Cases = lists:append(
- [
- generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db))
- || Db <- ?SYSTEM_DATABASES
- ] ++
- [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES]
- ),
- WithExpected = [
- {?l2b(filename:basename(filename:rootname(Arg))), Db}
- || {Arg, Db} <- Cases
- ],
- [
- {test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES", ?_assert(is_system_db_name(Db))}
- || {Expected, Db} <- WithExpected
- ].
-
-should_pass_validate_dbname(DbName) ->
- {test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}.
-
-should_fail_validate_dbname(DbName) ->
- {
- test_name(DbName),
- ?_test(begin
- Result = validate_dbname(DbName),
- ?assertMatch({error, {illegal_database_name, _}}, Result),
- {error, {illegal_database_name, FailedDbName}} = Result,
- ?assertEqual(to_binary(DbName), FailedDbName),
- ok
- end)
- }.
-
-calculate_start_seq_test_() ->
- {
- setup,
- fun setup_start_seq_all/0,
- fun teardown_start_seq_all/1,
- {
- foreach,
- fun setup_start_seq/0,
- fun teardown_start_seq/1,
- [
- t_calculate_start_seq_uuid_mismatch(),
- t_calculate_start_seq_is_owner(),
- t_calculate_start_seq_not_owner(),
- t_calculate_start_seq_raw(),
- t_calculate_start_seq_epoch_mismatch(),
- t_calculate_start_seq_shard_move()
- ]
- }
- }.
-
-setup_start_seq_all() ->
- meck:new(couch_db_engine, [passthrough]),
- meck:expect(couch_db_engine, get_uuid, fun(_) -> <<"foo">> end),
- ok = meck:expect(couch_log, warning, 2, ok),
- Epochs = [
- {node2, 10},
- {node1, 1}
- ],
- meck:expect(couch_db_engine, get_epochs, fun(_) -> Epochs end).
-
-teardown_start_seq_all(_) ->
- meck:unload().
-
-setup_start_seq() ->
- meck:reset([
- couch_db_engine,
- couch_log
- ]).
-
-teardown_start_seq(_) ->
- ok.
-
-t_calculate_start_seq_uuid_mismatch() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- Seq = calculate_start_seq(Db, node2, {15, <<"baz">>}),
- ?assertEqual(0, Seq)
- end).
-
-t_calculate_start_seq_is_owner() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- Seq = calculate_start_seq(Db, node2, {15, <<"foo">>}),
- ?assertEqual(15, Seq)
- end).
-
-t_calculate_start_seq_not_owner() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- Seq = calculate_start_seq(Db, node3, {15, <<"foo">>}),
- ?assertEqual(0, Seq)
- end).
-
-t_calculate_start_seq_raw() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- Seq = calculate_start_seq(Db, node1, 13),
- ?assertEqual(13, Seq)
- end).
-
-t_calculate_start_seq_epoch_mismatch() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- SeqIn = {replace, not_this_node, get_uuid(Db), 42},
- Seq = calculate_start_seq(Db, node1, SeqIn),
- ?assertEqual(0, Seq)
- end).
-
-t_calculate_start_seq_shard_move() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- % Sequence when shard was on node1
- ?assertEqual(2, calculate_start_seq(Db, node1, {2, <<"foo">>})),
- % Shard moved to node2 with no other updates after the move to node2
- ?assertEqual(10, calculate_start_seq(Db, node2, {10, <<"foo">>})),
- % Sequence from node1 after the move happened, we reset back to the
- % start of the epoch on node2 = 10
- ?assertEqual(10, calculate_start_seq(Db, node1, {16, <<"foo">>})),
- % Invalid node, epoch mismatch, start at 0
- ?assertEqual(0, calculate_start_seq(Db, node3, {16, <<"foo">>}))
- end).
-
-is_owner_test() ->
- ?assertNot(is_owner(foo, 1, [])),
- ?assertNot(is_owner(foo, 1, [{foo, 2}])),
- ?assert(is_owner(foo, 1, [{foo, 1}])),
- ?assert(is_owner(foo, 2, [{foo, 1}])),
- ?assert(is_owner(foo, 50, [{bar, 100}, {foo, 1}])),
- ?assert(is_owner(foo, 50, [{baz, 200}, {bar, 100}, {foo, 1}])),
- ?assert(is_owner(bar, 150, [{baz, 200}, {bar, 100}, {foo, 1}])),
- ?assert(is_owner(bar, 100, [{baz, 200}, {bar, 100}, {foo, 1}])),
- ?assertNot(is_owner(bar, 99, [{baz, 200}, {bar, 100}, {foo, 1}])),
- ?assertNot(is_owner(baz, 199, [{baz, 200}, {bar, 100}, {foo, 1}])),
- ?assertError(duplicate_epoch, validate_epochs([{foo, 1}, {bar, 1}])),
- ?assertError(epoch_order, validate_epochs([{foo, 100}, {bar, 200}])).
-
-to_binary(DbName) when is_list(DbName) ->
- ?l2b(DbName);
-to_binary(DbName) when is_binary(DbName) ->
- DbName.
-
-test_name({Expected, DbName}) ->
- lists:flatten(io_lib:format("~p -> ~p", [DbName, Expected]));
-test_name(DbName) ->
- lists:flatten(io_lib:format("~p", [DbName])).
-
-generate_cases_with_shards(DbName) ->
- DbNameWithShard = add_shard(DbName),
- DbNameWithShardAndExtension = add_shard(DbName) ++ ".couch",
- Cases = [
- DbName,
- ?l2b(DbName),
- DbNameWithShard,
- ?l2b(DbNameWithShard),
- DbNameWithShardAndExtension,
- ?l2b(DbNameWithShardAndExtension)
- ],
- [{DbName, Case} || Case <- Cases].
-
-add_shard(DbName) ->
- "shards/00000000-3fffffff/" ++ DbName ++ ".1415960794".
-
-generate_cases(DbName) ->
- [{DbName, DbName}, {DbName, ?l2b(DbName)}].
-
--endif.
diff --git a/src/couch/src/couch_db_engine.erl b/src/couch/src/couch_db_engine.erl
deleted file mode 100644
index 9e46b816b..000000000
--- a/src/couch/src/couch_db_engine.erl
+++ /dev/null
@@ -1,1034 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_engine).
-
--include("couch_db.hrl").
--include("couch_db_int.hrl").
-
--type filepath() :: iolist().
--type docid() :: binary().
--type rev() :: {non_neg_integer(), binary()}.
--type revs() :: [rev()].
--type json() :: any().
--type uuid() :: binary().
--type purge_seq() :: non_neg_integer().
-
--type doc_pair() :: {
- #full_doc_info{} | not_found,
- #full_doc_info{} | not_found
-}.
-
--type doc_pairs() :: [doc_pair()].
-
--type db_open_options() :: [
- create
-].
-
--type delete_options() :: [
- {context, delete | compaction}
- | sync
-].
-
--type purge_info() :: {purge_seq(), uuid(), docid(), revs()}.
--type epochs() :: [{Node :: atom(), UpdateSeq :: non_neg_integer()}].
--type size_info() :: [{Name :: atom(), Size :: non_neg_integer()}].
--type partition_info() :: [
- {partition, Partition :: binary()}
- | {doc_count, DocCount :: non_neg_integer()}
- | {doc_del_count, DocDelCount :: non_neg_integer()}
- | {sizes, size_info()}
-].
-
--type write_stream_options() :: [
- {buffer_size, Size :: pos_integer()}
- | {encoding, atom()}
- | {compression_level, non_neg_integer()}
-].
-
--type doc_fold_options() :: [
- {start_key, Key :: any()}
- | {end_key, Key :: any()}
- | {end_key_gt, Key :: any()}
- | {dir, fwd | rev}
- | include_reductions
- | include_deleted
-].
-
--type changes_fold_options() :: [
- {dir, fwd | rev}
-].
-
--type purge_fold_options() :: [
- {start_key, Key :: any()}
- | {end_key, Key :: any()}
- | {end_key_gt, Key :: any()}
- | {dir, fwd | rev}
-].
-
--type db_handle() :: any().
-
--type doc_fold_fun() :: fun(
- (#full_doc_info{}, UserAcc :: any()) ->
- {ok, NewUserAcc :: any()}
- | {stop, NewUserAcc :: any()}
-).
-
--type local_doc_fold_fun() :: fun(
- (#doc{}, UserAcc :: any()) ->
- {ok, NewUserAcc :: any()}
- | {stop, NewUserAcc :: any()}
-).
-
--type changes_fold_fun() :: fun(
- (#doc_info{}, UserAcc :: any()) ->
- {ok, NewUserAcc :: any()}
- | {stop, NewUserAcc :: any()}
-).
-
--type purge_fold_fun() :: fun(
- (purge_info(), UserAcc :: any()) ->
- {ok, NewUserAcc :: any()}
- | {stop, NewUserAcc :: any()}
-).
-
-% This is called by couch_server to determine which
-% engine should be used for the given database. DbPath
-% is calculated based on the DbName and the configured
-% extension for a given engine. The first engine to
-% return true is the engine that will be used for the
-% database.
--callback exists(DbPath :: filepath()) -> boolean().
-
-% This is called by couch_server to delete a database. It
-% is called from inside the couch_server process which
-% means that the storage engine does not have to guarantee
-% its own consistency checks when executing in this
-% context. However, since this is executed in the context
-% of couch_server it should return relatively quickly.
--callback delete(
- RootDir :: filepath(),
- DbPath :: filepath(),
- DelOpts :: delete_options()
-) ->
- ok | {error, Reason :: atom()}.
-
-% This function can be called from multiple contexts. It
-% will either be called just before a call to delete/3 above
-% or when a compaction is cancelled which executes in the
-% context of a couch_db_updater process. It is intended to
-% remove any temporary files used during compaction that
-% may be used to recover from a failed compaction swap.
--callback delete_compaction_files(
- RootDir :: filepath(),
- DbPath :: filepath(),
- DelOpts :: delete_options()
-) ->
- ok.
-
-% This is called from the couch_db_updater:init/1 context. As
-% such this means that it is guaranteed to only have one process
-% executing for a given DbPath argument (ie, opening a given
-% database is guaranteed to only happen in a single process).
-% However, multiple processes may be trying to open different
-% databases concurrently, so if a database requires a shared
-% resource that resource will need concurrency control at the
-% storage engine layer.
-%
-% The returned DbHandle should be a term that can be freely
-% copied between processes and accessed concurrently. However
-% it's guaranteed that the handle will only ever be mutated
-% in a single threaded context (ie, within the couch_db_updater
-% process).
--callback init(DbPath :: filepath(), db_open_options()) ->
- {ok, DbHandle :: db_handle()}.
-
-% This is called in the context of couch_db_updater:terminate/2
-% and as such has the same properties as init/2. It's guaranteed
-% to be consistent for a given database but may be called by many
-% databases concurrently.
--callback terminate(Reason :: any(), DbHandle :: db_handle()) -> Ignored :: any().
-
-% This is called in the context of couch_db_updater:handle_call/3
-% for any message that is unknown. It can be used to handle messages
-% from asynchronous processes like the engine's compactor if it has one.
--callback handle_db_updater_call(Msg :: any(), DbHandle :: db_handle()) ->
- {reply, Resp :: any(), NewDbHandle :: db_handle()}
- | {stop, Reason :: any(), Resp :: any(), NewDbHandle :: db_handle()}.
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and has the same properties as handle_call/3.
--callback handle_db_updater_info(Msg :: any(), DbHandle :: db_handle()) ->
- {noreply, NewDbHandle :: db_handle()}
- | {noreply, NewDbHandle :: db_handle(), Timeout :: timeout()}
- | {stop, Reason :: any(), NewDbHandle :: db_handle()}.
-
-% These functions are called by any process opening or closing
-% a database. As such they need to be able to handle being
-% called concurrently. For example, the legacy engine uses these
-% to add monitors to the main engine process.
--callback incref(DbHandle :: db_handle()) -> {ok, NewDbHandle :: db_handle()}.
--callback decref(DbHandle :: db_handle()) -> ok.
--callback monitored_by(DbHandle :: db_handle()) -> [pid()].
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and should return the timestamp of the last activity of
-% the database. If a storage engine has no notion of activity or the
-% value would be hard to report, it's ok to just return the
-% result of os:timestamp/0 as this merely prevents idle
-% databases from closing automatically.
--callback last_activity(DbHandle :: db_handle()) -> erlang:timestamp().
-
-% All of the get_* functions may be called from many
-% processes concurrently.
-
-% The database should make a note of the update sequence when it
-% was last compacted. If the database doesn't need compacting it
-% can just hard code a return value of 0.
--callback get_compacted_seq(DbHandle :: db_handle()) ->
- CompactedSeq :: non_neg_integer().
-
-% The number of documents in the database which have all leaf
-% revisions marked as deleted.
--callback get_del_doc_count(DbHandle :: db_handle()) ->
- DelDocCount :: non_neg_integer().
-
-% This number is reported in the database info properties and
-% as such can be any JSON value.
--callback get_disk_version(DbHandle :: db_handle()) -> Version :: json().
-
-% The number of documents in the database that have one or more
-% leaf revisions not marked as deleted.
--callback get_doc_count(DbHandle :: db_handle()) -> DocCount :: non_neg_integer().
-
-% The epochs track which node owned the database starting at
-% a given update sequence. Each time a database is opened it
-% should look at the epochs. If the most recent entry is not
-% for the current node it should add an entry that will be
-% written the next time a write is performed. An entry is
-% simply a {node(), CurrentUpdateSeq} tuple.
--callback get_epochs(DbHandle :: db_handle()) -> Epochs :: epochs().
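A small illustrative value (node names made up): the list is newest-first, so a shard created on node1 and taken over by node2 at update sequence 10 would carry

    Epochs = [{'node2@127.0.0.1', 10}, {'node1@127.0.0.1', 0}],
    % sequences at or above 10 are attributed to node2,
    % earlier ones to node1.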
-
-% Get the current purge sequence known to the engine. This
-% value should be updated during calls to purge_docs.
--callback get_purge_seq(DbHandle :: db_handle()) -> purge_seq().
-
-% Get the oldest purge sequence known to the engine
--callback get_oldest_purge_seq(DbHandle :: db_handle()) -> purge_seq().
-
-% Get the purged infos limit. This should just return the last
-% value that was passed to set_purge_infos_limit/2.
--callback get_purge_infos_limit(DbHandle :: db_handle()) -> pos_integer().
-
-% Get the revision limit. This should just return the last
-% value that was passed to set_revs_limit/2.
--callback get_revs_limit(DbHandle :: db_handle()) -> RevsLimit :: pos_integer().
-
-% Get the current security properties. This should just return
-% the last value that was passed to set_security/2.
--callback get_security(DbHandle :: db_handle()) -> SecProps :: any().
-
-% Get the current properties.
--callback get_props(DbHandle :: db_handle()) -> Props :: [any()].
-
-% This information is displayed in the database info properties. It
-% should just be a list of {Name::atom(), Size::non_neg_integer()}
-% tuples that will then be combined across shards. Currently,
-% various modules expect there to at least be values for:
-%
-% file - Number of bytes on disk
-%
-% active - Theoretical minimum number of bytes to store this db on disk
-% which is used to guide decisions on compaction
-%
-% external - Number of bytes that would be required to represent the
-% contents outside of the database (for capacity and backup
-% planning)
--callback get_size_info(DbHandle :: db_handle()) -> SizeInfo :: size_info().
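A minimal example of a plausible return value (byte counts are made up):

    [{file, 1048576}, {active, 524288}, {external, 262144}]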
-
-% This returns the information for the given partition.
-% It should be a property list (see partition_info()) containing
-% the partition name, doc count, deleted doc count and two sizes:
-%
-% active - Theoretical minimum number of bytes to store this partition on disk
-%
-% external - Number of bytes that would be required to represent the
-% contents of this partition outside of the database
--callback get_partition_info(DbHandle :: db_handle(), Partition :: binary()) ->
- partition_info().
-
-% The current update sequence of the database. The update
-% sequence should be incremented for every revision added to
-% the database.
--callback get_update_seq(DbHandle :: db_handle()) -> UpdateSeq :: non_neg_integer().
-
-% Whenever a database is created it should generate a
-% persistent UUID for identification in case the shard should
-% ever need to be moved between nodes in a cluster.
--callback get_uuid(DbHandle :: db_handle()) -> UUID :: binary().
-
-% These functions are only called by couch_db_updater and
-% as such are guaranteed to be single threaded calls. The
-% database should simply store these values somewhere so
-% they can be returned by the corresponding get_* calls.
-
--callback set_revs_limit(DbHandle :: db_handle(), RevsLimit :: pos_integer()) ->
- {ok, NewDbHandle :: db_handle()}.
-
--callback set_purge_infos_limit(DbHandle :: db_handle(), Limit :: pos_integer()) ->
- {ok, NewDbHandle :: db_handle()}.
-
--callback set_security(DbHandle :: db_handle(), SecProps :: any()) ->
- {ok, NewDbHandle :: db_handle()}.
-
-% This function is only called by couch_db_updater and
-% as such is guaranteed to be a single threaded call. The
-% database should simply store the provided property list
-% unaltered.
-
--callback set_props(DbHandle :: db_handle(), Props :: any()) ->
- {ok, NewDbHandle :: db_handle()}.
-
-% Set the current update sequence of the database. The intention is to use this
-% when copying a database such that the destination update sequence should
-% match exactly the source update sequence.
--callback set_update_seq(
- DbHandle :: db_handle(),
- UpdateSeq :: non_neg_integer()
-) ->
- {ok, NewDbHandle :: db_handle()}.
-
-% This function will be called by many processes concurrently.
-% It should return a #full_doc_info{} record or not_found for
-% every provided DocId in the order those DocIds appear in
-% the input.
-%
-% Traditionally this function will only return documents that
-% were present in the database when the DbHandle was retrieved
-% from couch_server. It is currently unknown what would break
-% if a storage engine deviated from that property.
--callback open_docs(DbHandle :: db_handle(), DocIds :: [docid()]) ->
- [#full_doc_info{} | not_found].
-
-% This function will be called by many processes concurrently.
-% It should return a #doc{} record or not_found for every
-% provided DocId in the order they appear in the input.
-%
-% The same caveats around database snapshots from open_docs
-% apply to this function (although this function is called
-% rather less frequently so it may not be as big of an
-% issue).
--callback open_local_docs(DbHandle :: db_handle(), DocIds :: [docid()]) ->
- [#doc{} | not_found].
-
-% This function will be called from many contexts concurrently.
-% The provided RawDoc is a #doc{} record that has its body
-% value set to the body value returned from write_doc_body/2.
-%
-% This API exists so that storage engines can store document
-% bodies externally from the #full_doc_info{} record (which
-% is the traditional approach and is recommended).
--callback read_doc_body(DbHandle :: db_handle(), RawDoc :: doc()) ->
- doc().
-
-% This function will be called from many contexts concurrently.
-% If the storage engine has a purge_info() record for any of the
-% provided UUIDs, those purge_info() records should be returned. The
-% resulting list should have the same length as the input list of
-% UUIDs.
--callback load_purge_infos(DbHandle :: db_handle(), [uuid()]) ->
- [purge_info() | not_found].
-
-% This function is called concurrently by any client process
-% that is writing a document. It should accept a #doc{}
-% record and return a #doc{} record with a mutated body it
-% wishes to have written to disk by write_doc_body/2.
-%
-% This API exists so that storage engines can compress
-% document bodies in parallel by client processes rather
-% than forcing all compression to occur single threaded
-% in the context of the couch_db_updater process.
--callback serialize_doc(DbHandle :: db_handle(), Doc :: doc()) ->
- doc().
-
-% This function is called in the context of a couch_db_updater
-% which means its single threaded for the given DbHandle.
-%
-% The returned #doc{} record should have its Body set to a value
-% that will be stored in the #full_doc_info{} record's revision
-% tree leaves which is passed to read_doc_body/2 above when
-% a client wishes to read a document.
-%
-% The BytesWritten return value is used to determine the number
-% of active bytes in the database, which in turn is used to
-% decide when to compact this database.
--callback write_doc_body(DbHandle :: db_handle(), Doc :: doc()) ->
- {ok, FlushedDoc :: doc(), BytesWritten :: non_neg_integer()}.
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% This is probably the most complicated function in the entire
-% API due to a few subtle behavior requirements required by
-% CouchDB's storage model.
-%
-% The Pairs argument is a list of pairs (2-tuples) of
-% #full_doc_info{} records. The first element of the pair is
-% the #full_doc_info{} that exists on disk. The second element
-% is the new version that should be written to disk. There are
-% two basic cases that should be followed:
-%
-% 1. {not_found, #full_doc_info{}} - A new document was created
-% 2. {#full_doc_info{}, #full_doc_info{}} - A document was updated
-%
-% The cases are fairly straightforward as long as entries that
-% move in the update sequence are properly accounted for.
-%
-% The LocalDocs variable is applied separately. It's important to
-% note for new storage engine authors that these documents are
-% separate because they should *not* be included as part of the
-% changes index for the database.
-%
-% Traditionally an invocation of write_doc_infos should be all
-% or nothing, in the sense that if an error occurs (or the VM dies)
-% then the database doesn't retain any of the changes. However
-% as long as a storage engine maintains consistency this should
-% not be an issue as it has never been a guarantee and the
-% batches are non-deterministic (from the point of view of the
-% client).
--callback write_doc_infos(
- DbHandle :: db_handle(),
- Pairs :: doc_pairs(),
- LocalDocs :: [#doc{}]
-) ->
- {ok, NewDbHandle :: db_handle()}.
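Illustrative shapes for the Pairs argument (record fields left at their defaults; the doc ids are made up):

    Pairs = [
        {not_found, #full_doc_info{id = <<"newdoc">>}},                    % case 1: created
        {#full_doc_info{id = <<"doc">>}, #full_doc_info{id = <<"doc">>}}  % case 2: updated
    ].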
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% Each doc_pair() is a 2-tuple of #full_doc_info{} records. The
-% first element of the pair is the #full_doc_info{} that exists
-% on disk. The second element is the new version that should be
-% written to disk. There are three basic cases that should be considered:
-%
-% 1. {#full_doc_info{}, #full_doc_info{}} - A document was partially purged
-% 2. {#full_doc_info{}, not_found} - A document was completely purged
-% 3. {not_found, not_found} - A no-op purge
-%
-% In case 1, non-tail-append engines may have to remove revisions
-% specifically rather than rely on compaction to remove them. Also
-% note that the new #full_doc_info{} will have a different update_seq
-% that will need to be reflected in the changes feed.
-%
-% In case 2 the document is "purged completely", which
-% means it needs to be removed from the database, including its
-% entry in the update sequence.
-%
-% In case 3 we just need to store the purge_info() to know that it
-% was processed even though it produced no changes to the database.
-%
-% The purge_info() tuples contain the purge_seq, uuid, docid and
-% revisions that were requested to be purged. This should be persisted
-% in such a way that we can efficiently load purge_info() by its UUID
-% as well as iterate over purge_info() entries in order of their PurgeSeq.
--callback purge_docs(DbHandle :: db_handle(), [doc_pair()], [purge_info()]) ->
- {ok, NewDbHandle :: db_handle()}.
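An illustrative purge_info() term matching the type above (all values made up):

    {3, <<"uuid-abc123">>, <<"mydoc">>, [{1, <<"967a00dff5e02add">>}]}
    % i.e. {PurgeSeq, UUID, DocId, Revs}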
-
-% This function should be called from a single threaded context and
-% should be used to copy purge infos from one database to another
-% when copying a database.
--callback copy_purge_infos(DbHandle :: db_handle(), [purge_info()]) ->
- {ok, NewDbHandle :: db_handle()}.
-
-% This function is called in the context of couch_db_updater and
-% as such is single threaded for any given DbHandle.
-%
-% This call is made periodically to ensure that the database has
-% stored all updates on stable storage. (ie, here is where you fsync).
--callback commit_data(DbHandle :: db_handle()) ->
- {ok, NewDbHandle :: db_handle()}.
-
-% This function is called by multiple processes concurrently.
-%
-% This function along with open_read_stream are part of the
-% attachments API. For the time being I'm leaving these mostly
-% undocumented. There are implementations of this in both the
-% legacy btree engine as well as the alternative engine
-% implementations for the curious, however this is a part of the
-% API for which I'd like feedback.
-%
-% Currently an engine can elect to not implement these APIs
-% by throwing the atom not_supported.
--callback open_write_stream(
- DbHandle :: db_handle(),
- Options :: write_stream_options()
-) ->
- {ok, pid()}.
-
-% See the documentation for open_write_stream
--callback open_read_stream(DbHandle :: db_handle(), StreamDiskInfo :: any()) ->
- {ok, {Module :: atom(), ReadStreamState :: any()}}.
-
-% See the documentation for open_write_stream
--callback is_active_stream(DbHandle :: db_handle(), ReadStreamState :: any()) ->
- boolean().
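A minimal sketch of an engine opting out of the attachment stream API as described above (the arities follow the behaviour; the bodies are hypothetical):

    open_write_stream(_St, _Options) ->
        throw(not_supported).

    open_read_stream(_St, _StreamDiskInfo) ->
        throw(not_supported).

    is_active_stream(_St, _ReadStreamState) ->
        false.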
-
-% This function is called by many processes concurrently.
-%
-% This function is called to fold over the documents in
-% the database sorted by the raw byte collation order of
-% the document id. For each document id, the supplied user
-% function should be invoked with the first argument set
-% to the #full_doc_info{} record and the second argument
-% set to the current user supplied accumulator. The return
-% value of the user function is a 2-tuple of {Go, NewUserAcc}.
-% The NewUserAcc value should then replace the current
-% user accumulator. If Go is the atom ok, iteration over
-% documents should continue. If Go is the atom stop, then
-% iteration should halt and the return value should be
-% {ok, NewUserAcc}.
-%
-% Possible options to this function include:
-%
-% 1. start_key - Start iteration at the provided key, or
-% just after it if the key doesn't exist
-% 2. end_key - Stop iteration just after the provided key
-% 3. end_key_gt - Stop iteration prior to visiting the provided
-% key
-% 4. dir - The atom fwd or rev. This is to be able to iterate
-% over documents in reverse order. The logic for comparing
-% start_key, end_key, and end_key_gt is then reversed (ie,
-% when rev, start_key should be greater than end_key if the
-% user wishes to see results)
-% 5. include_reductions - This is a hack for _all_docs since
-% it currently relies on reductions to count an offset. This
-% is a terrible hack that will need to be addressed by the
-% API in the future. If this option is present the supplied
-% user function expects three arguments, where the first
-% argument is a #full_doc_info{} record, the second argument
-% is the current list of reductions to the left of the current
-% document, and the third argument is the current user
-% accumulator. The return value from the user function is
-% unaffected. However the final return value of the function
-% should include the final total reductions as the second
-% element of a 3-tuple. Like I said, this is a hack.
-% 6. include_deleted - By default deleted documents are not
-% included in fold_docs calls. However in some special
-% cases we do want to see them (as of now, just in couch_changes
-% during the design document changes optimization)
-%
-% Historically, if a process calls this function repeatedly it
-% would see the same results returned even if there were concurrent
-% updates happening. However there doesn't seem to be any instance of
-% that actually happening so a storage engine that includes new results
-% between invocations shouldn't have any issues.
--callback fold_docs(
- DbHandle :: db_handle(),
- UserFold :: doc_fold_fun(),
- UserAcc :: any(),
- doc_fold_options()
-) ->
- {ok, LastUserAcc :: any()}.
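A rough usage sketch of the options described above (not from the original file; Db is assumed to be an open #db{} handle):

    FoldFun = fun(#full_doc_info{id = Id}, Acc) ->
        case length(Acc) < 100 of
            true -> {ok, [Id | Acc]};
            false -> {stop, Acc}
        end
    end,
    Options = [{start_key, <<"a">>}, {end_key, <<"b">>}, {dir, fwd}],
    {ok, Ids} = couch_db_engine:fold_docs(Db, FoldFun, [], Options).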
-
-% This function may be called by many processes concurrently.
-%
-% This should behave exactly the same as fold_docs/4 except that it
-% should only return local documents and the first argument to the
-% user function is a #doc{} record, not a #full_doc_info{}.
--callback fold_local_docs(
- DbHandle :: db_handle(),
- UserFold :: local_doc_fold_fun(),
- UserAcc :: any(),
- doc_fold_options()
-) ->
- {ok, LastUserAcc :: any()}.
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over the documents (not local
-% documents) in order of their most recent update. Each document
-% in the database should have exactly one entry in this sequence.
-% If a document is updated during a call to this function it should
-% not be included twice as that will probably lead to Very Bad Things.
-%
-% This should behave similarly to fold_docs/4 in that the supplied
-% user function should be invoked with a #full_doc_info{} record
-% as the first argument and the current user accumulator as the
-% second argument. The same semantics for the return value from the
-% user function should be handled as in fold_docs/4.
-%
-% The StartSeq parameter indicates where the fold should start
-% *after*. As in, if a change with a value of StartSeq exists in the
-% database it should not be included in the fold.
-%
-% The only option currently supported by the API is the `dir`
-% option that should behave the same as for fold_docs.
--callback fold_changes(
- DbHandle :: db_handle(),
- StartSeq :: non_neg_integer(),
- UserFold :: changes_fold_fun(),
- UserAcc :: any(),
- changes_fold_options()
-) ->
- {ok, LastUserAcc :: any()}.
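Similarly, a sketch of collecting the ids of documents changed after a given sequence (SinceSeq and Db are assumed to be bound):

    ChangesFun = fun(#doc_info{id = Id, high_seq = Seq}, {_, Acc}) ->
        {ok, {Seq, [Id | Acc]}}
    end,
    {ok, {LastSeq, ChangedIds}} =
        couch_db_engine:fold_changes(Db, SinceSeq, ChangesFun, {SinceSeq, []}, []).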
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over purged requests in order of
-% their oldest purge (increasing purge_seq order)
-%
-% The StartPurgeSeq parameter indicates where the fold should start *after*.
--callback fold_purge_infos(
- DbHandle :: db_handle(),
- StartPurgeSeq :: purge_seq(),
- UserFold :: purge_fold_fun(),
- UserAcc :: any(),
- purge_fold_options()
-) ->
- {ok, LastUserAcc :: any()}.
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to count the number of documents changed
-% since the given UpdateSeq (ie, not including the possible change
-% at exactly UpdateSeq). It is currently only used internally to
-% provide a status update in a replication's _active_tasks entry
-% to indicate how many documents are left to be processed.
-%
-% This is a fairly difficult thing to support in engines that don't
-% behave exactly like a tree with efficient support for counting rows
-% between keys. As such, returning 0 or even just the difference between
-% the current update sequence and the given UpdateSeq is possibly
-% the best some storage engines
-% can provide. This may lead to some confusion when interpreting the
-% _active_tasks entry if the storage engine isn't accounted for by the
-% client.
--callback count_changes_since(
- DbHandle :: db_handle(),
- UpdateSeq :: non_neg_integer()
-) ->
- TotalChanges :: non_neg_integer().
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% If a storage engine requires compaction this is a trigger to start
-% it off. However a storage engine can do whatever it wants here. As
-% this is fairly engine specific, there's not a lot of guidance that is
-% generally applicable.
-%
-% When compaction is finished the compactor should use
-% gen_server:cast/2 to send a {compact_done, CompactEngine, CompactInfo}
-% message to the Parent pid provided. Currently CompactEngine
-% must be the same engine that started the compaction and CompactInfo
-% is an arbitrary term that's passed to finish_compaction/4.
--callback start_compaction(
- DbHandle :: db_handle(),
- DbName :: binary(),
- Options :: db_open_options(),
- Parent :: pid()
-) ->
- {ok, NewDbHandle :: db_handle(), CompactorPid :: pid()}.
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% Same as for start_compaction, this will be extremely specific to
-% any given storage engine.
-%
-% The split in the API here is so that if the storage engine needs
-% to update the DbHandle state of the couch_db_updater it can as
-% finish_compaction/4 is called in the context of the couch_db_updater.
--callback finish_compaction(
- OldDbHandle :: db_handle(),
- DbName :: binary(),
- Options :: db_open_options(),
- CompactInfo :: any()
-) ->
- {ok, CompactedDbHandle :: db_handle(), CompactorPid :: pid() | undefined}.
-
--export([
- exists/2,
- delete/4,
- delete_compaction_files/4,
- is_compacting/2,
-
- init/3,
- terminate/2,
- handle_db_updater_call/3,
- handle_db_updater_info/2,
-
- incref/1,
- decref/1,
- monitored_by/1,
-
- last_activity/1,
-
- get_engine/1,
- get_compacted_seq/1,
- get_del_doc_count/1,
- get_disk_version/1,
- get_doc_count/1,
- get_epochs/1,
- get_purge_seq/1,
- get_oldest_purge_seq/1,
- get_purge_infos_limit/1,
- get_revs_limit/1,
- get_security/1,
- get_props/1,
- get_size_info/1,
- get_partition_info/2,
- get_update_seq/1,
- get_uuid/1,
-
- set_revs_limit/2,
- set_security/2,
- set_purge_infos_limit/2,
- set_props/2,
-
- set_update_seq/2,
-
- open_docs/2,
- open_local_docs/2,
- read_doc_body/2,
- load_purge_infos/2,
-
- serialize_doc/2,
- write_doc_body/2,
- write_doc_infos/3,
- purge_docs/3,
- copy_purge_infos/2,
- commit_data/1,
-
- open_write_stream/2,
- open_read_stream/2,
- is_active_stream/2,
-
- fold_docs/4,
- fold_local_docs/4,
- fold_changes/5,
- fold_purge_infos/5,
- count_changes_since/2,
-
- start_compaction/1,
- finish_compaction/2,
- trigger_on_compact/1
-]).
-
-exists(Engine, DbPath) ->
- Engine:exists(DbPath).
-
-delete(Engine, RootDir, DbPath, DelOpts) when is_list(DelOpts) ->
- Engine:delete(RootDir, DbPath, DelOpts).
-
-delete_compaction_files(Engine, RootDir, DbPath, DelOpts) when
- is_list(DelOpts)
-->
- Engine:delete_compaction_files(RootDir, DbPath, DelOpts).
-
-is_compacting(Engine, DbName) ->
- Engine:is_compacting(DbName).
-
-init(Engine, DbPath, Options) ->
- case Engine:init(DbPath, Options) of
- {ok, EngineState} ->
- {ok, {Engine, EngineState}};
- Error ->
- throw(Error)
- end.
-
-terminate(Reason, #db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:terminate(Reason, EngineState).
-
-handle_db_updater_call(Msg, _From, #db{} = Db) ->
- #db{
- engine = {Engine, EngineState}
- } = Db,
- case Engine:handle_db_updater_call(Msg, EngineState) of
- {reply, Resp, NewState} ->
- {reply, Resp, Db#db{engine = {Engine, NewState}}};
- {stop, Reason, Resp, NewState} ->
- {stop, Reason, Resp, Db#db{engine = {Engine, NewState}}}
- end.
-
-handle_db_updater_info(Msg, #db{} = Db) ->
- #db{
- name = Name,
- engine = {Engine, EngineState}
- } = Db,
- case Engine:handle_db_updater_info(Msg, EngineState) of
- {noreply, NewState} ->
- {noreply, Db#db{engine = {Engine, NewState}}};
- {noreply, NewState, Timeout} ->
- {noreply, Db#db{engine = {Engine, NewState}}, Timeout};
- {stop, Reason, NewState} ->
- couch_log:error("DB ~s shutting down: ~p", [Name, Msg]),
- {stop, Reason, Db#db{engine = {Engine, NewState}}}
- end.
-
-incref(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewState} = Engine:incref(EngineState),
- {ok, Db#db{engine = {Engine, NewState}}}.
-
-decref(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:decref(EngineState).
-
-monitored_by(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:monitored_by(EngineState).
-
-last_activity(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:last_activity(EngineState).
-
-get_engine(#db{} = Db) ->
- #db{engine = {Engine, _}} = Db,
- Engine.
-
-get_compacted_seq(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_compacted_seq(EngineState).
-
-get_del_doc_count(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_del_doc_count(EngineState).
-
-get_disk_version(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_disk_version(EngineState).
-
-get_doc_count(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_doc_count(EngineState).
-
-get_epochs(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_epochs(EngineState).
-
-get_purge_seq(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_purge_seq(EngineState).
-
-get_oldest_purge_seq(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_oldest_purge_seq(EngineState).
-
-get_purge_infos_limit(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_purge_infos_limit(EngineState).
-
-get_revs_limit(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_revs_limit(EngineState).
-
-get_security(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_security(EngineState).
-
-get_props(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_props(EngineState).
-
-get_size_info(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_size_info(EngineState).
-
-get_partition_info(#db{} = Db, Partition) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_partition_info(EngineState, Partition).
-
-get_update_seq(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_update_seq(EngineState).
-
-get_uuid(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_uuid(EngineState).
-
-set_revs_limit(#db{} = Db, RevsLimit) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_revs_limit(EngineState, RevsLimit),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-set_purge_infos_limit(#db{} = Db, PurgedDocsLimit) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_purge_infos_limit(EngineState, PurgedDocsLimit),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-set_security(#db{} = Db, SecProps) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_security(EngineState, SecProps),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-set_props(#db{} = Db, Props) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_props(EngineState, Props),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-set_update_seq(#db{} = Db, UpdateSeq) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_update_seq(EngineState, UpdateSeq),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-open_docs(#db{} = Db, DocIds) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:open_docs(EngineState, DocIds).
-
-open_local_docs(#db{} = Db, DocIds) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:open_local_docs(EngineState, DocIds).
-
-read_doc_body(#db{} = Db, RawDoc) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:read_doc_body(EngineState, RawDoc).
-
-load_purge_infos(#db{} = Db, UUIDs) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:load_purge_infos(EngineState, UUIDs).
-
-serialize_doc(#db{} = Db, #doc{} = Doc) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:serialize_doc(EngineState, Doc).
-
-write_doc_body(#db{} = Db, #doc{} = Doc) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:write_doc_body(EngineState, Doc).
-
-write_doc_infos(#db{} = Db, DocUpdates, LocalDocs) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:write_doc_infos(EngineState, DocUpdates, LocalDocs),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-purge_docs(#db{} = Db, DocUpdates, Purges) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:purge_docs(
- EngineState, DocUpdates, Purges
- ),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-copy_purge_infos(#db{} = Db, Purges) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:copy_purge_infos(
- EngineState, Purges
- ),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-commit_data(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:commit_data(EngineState),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-open_write_stream(#db{} = Db, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:open_write_stream(EngineState, Options).
-
-open_read_stream(#db{} = Db, StreamDiskInfo) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:open_read_stream(EngineState, StreamDiskInfo).
-
-is_active_stream(#db{} = Db, ReadStreamState) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:is_active_stream(EngineState, ReadStreamState).
-
-fold_docs(#db{} = Db, UserFun, UserAcc, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:fold_docs(EngineState, UserFun, UserAcc, Options).
-
-fold_local_docs(#db{} = Db, UserFun, UserAcc, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:fold_local_docs(EngineState, UserFun, UserAcc, Options).
-
-fold_changes(#db{} = Db, StartSeq, UserFun, UserAcc, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:fold_changes(EngineState, StartSeq, UserFun, UserAcc, Options).
-
-fold_purge_infos(#db{} = Db, StartPurgeSeq, UserFun, UserAcc, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:fold_purge_infos(
- EngineState, StartPurgeSeq, UserFun, UserAcc, Options
- ).
-
-count_changes_since(#db{} = Db, StartSeq) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:count_changes_since(EngineState, StartSeq).
-
-start_compaction(#db{} = Db) ->
- #db{
- engine = {Engine, EngineState},
- name = DbName,
- options = Options
- } = Db,
- {ok, NewEngineState, Pid} = Engine:start_compaction(
- EngineState, DbName, Options, self()
- ),
- {ok, Db#db{
- engine = {Engine, NewEngineState},
- compactor_pid = Pid
- }}.
-
-finish_compaction(Db, CompactInfo) ->
- #db{
- engine = {Engine, St},
- name = DbName,
- options = Options
- } = Db,
- NewDb =
- case Engine:finish_compaction(St, DbName, Options, CompactInfo) of
- {ok, NewState, undefined} ->
- couch_event:notify(DbName, compacted),
- Db#db{
- engine = {Engine, NewState},
- compactor_pid = nil
- };
- {ok, NewState, CompactorPid} when is_pid(CompactorPid) ->
- Db#db{
- engine = {Engine, NewState},
- compactor_pid = CompactorPid
- }
- end,
- ok = couch_server:db_updated(NewDb),
- {ok, NewDb}.
-
-trigger_on_compact(DbName) ->
- {ok, DDocs} = get_ddocs(DbName),
- couch_db_plugin:on_compact(DbName, DDocs).
-
-get_ddocs(<<"shards/", _/binary>> = DbName) ->
- {_, Ref} = spawn_monitor(fun() ->
- exit(fabric:design_docs(mem3:dbname(DbName)))
- end),
- receive
- {'DOWN', Ref, _, _, {ok, JsonDDocs}} ->
- {ok,
- lists:map(
- fun(JsonDDoc) ->
- couch_doc:from_json_obj(JsonDDoc)
- end,
- JsonDDocs
- )};
- {'DOWN', Ref, _, _, Else} ->
- Else
- end;
-get_ddocs(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- FoldFun = fun(FDI, Acc) ->
- {ok, Doc} = couch_db:open_doc_int(Db, FDI, []),
- {ok, [Doc | Acc]}
- end,
- {ok, Docs} = couch_db:fold_design_docs(Db, FoldFun, [], []),
- {ok, lists:reverse(Docs)}
- end).
diff --git a/src/couch/src/couch_db_epi.erl b/src/couch/src/couch_db_epi.erl
deleted file mode 100644
index 870202bad..000000000
--- a/src/couch/src/couch_db_epi.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- couch.
-
-providers() ->
- [
- {chttpd_handlers, couch_httpd_handlers}
- ].
-
-services() ->
- [
- {couch_db, couch_db_plugin},
- {feature_flags, couch_flags}
- ].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [couch_flags_config:data_provider()].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/couch/src/couch_db_header.erl b/src/couch/src/couch_db_header.erl
deleted file mode 100644
index 9c81ba6d0..000000000
--- a/src/couch/src/couch_db_header.erl
+++ /dev/null
@@ -1,408 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_header).
-
--export([
- new/0,
- from/1,
- is_header/1,
- upgrade/1,
- set/2
-]).
-
--export([
- disk_version/1,
- update_seq/1,
- id_tree_state/1,
- seq_tree_state/1,
- latest/1,
- local_tree_state/1,
- purge_seq/1,
- purged_docs/1,
- security_ptr/1,
- revs_limit/1,
- uuid/1,
- epochs/1,
- compacted_seq/1
-]).
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 6).
-
--record(db_header, {
- disk_version = ?LATEST_DISK_VERSION,
- update_seq = 0,
- unused = 0,
- id_tree_state = nil,
- seq_tree_state = nil,
- local_tree_state = nil,
- purge_seq = 0,
- purged_docs = nil,
- security_ptr = nil,
- revs_limit = 1000,
- uuid,
- epochs,
- compacted_seq
-}).
-
-new() ->
- #db_header{
- uuid = couch_uuids:random(),
- epochs = [{node(), 0}]
- }.
-
-from(Header0) ->
- Header = upgrade(Header0),
- #db_header{
- uuid = Header#db_header.uuid,
- epochs = Header#db_header.epochs,
- compacted_seq = Header#db_header.compacted_seq
- }.
-
-is_header(Header) ->
- try
- upgrade(Header),
- true
- catch
- _:_ ->
- false
- end.
-
-upgrade(Header) ->
- Funs = [
- fun upgrade_tuple/1,
- fun upgrade_disk_version/1,
- fun upgrade_uuid/1,
- fun upgrade_epochs/1,
- fun upgrade_compacted_seq/1
- ],
- lists:foldl(
- fun(F, HdrAcc) ->
- F(HdrAcc)
- end,
- Header,
- Funs
- ).
-
-set(Header0, Fields) ->
- % A subtlety here is that if a database was open during
- % the release upgrade that added uuids and epochs, then
- % this dynamic upgrade also assigns a uuid and epoch.
- Header = upgrade(Header0),
- lists:foldl(
- fun({Field, Value}, HdrAcc) ->
- set_field(HdrAcc, Field, Value)
- end,
- Header,
- Fields
- ).
-
-disk_version(Header) ->
- get_field(Header, disk_version).
-
-update_seq(Header) ->
- get_field(Header, update_seq).
-
-id_tree_state(Header) ->
- get_field(Header, id_tree_state).
-
-seq_tree_state(Header) ->
- get_field(Header, seq_tree_state).
-
-local_tree_state(Header) ->
- get_field(Header, local_tree_state).
-
-purge_seq(Header) ->
- get_field(Header, purge_seq).
-
-purged_docs(Header) ->
- get_field(Header, purged_docs).
-
-security_ptr(Header) ->
- get_field(Header, security_ptr).
-
-revs_limit(Header) ->
- get_field(Header, revs_limit).
-
-uuid(Header) ->
- get_field(Header, uuid).
-
-epochs(Header) ->
- get_field(Header, epochs).
-
-compacted_seq(Header) ->
- get_field(Header, compacted_seq).
-
-get_field(Header, Field) ->
- Idx = index(Field),
- case Idx > tuple_size(Header) of
- true -> undefined;
- false -> element(index(Field), Header)
- end.
-
-set_field(Header, Field, Value) ->
- setelement(index(Field), Header, Value).
-
-index(Field) ->
- couch_util:get_value(Field, indexes()).
-
-indexes() ->
- Fields = record_info(fields, db_header),
- Indexes = lists:seq(2, record_info(size, db_header)),
- lists:zip(Fields, Indexes).
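As a worked example of the mapping above (hypothetical in-module checks): record_info(fields, db_header) begins with disk_version, so field positions start at element 2, just after the record tag.

    2 = index(disk_version),
    3 = index(update_seq),
    ?LATEST_DISK_VERSION = get_field(#db_header{}, disk_version).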
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
- Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
- NewSize = record_info(size, db_header),
- if
- tuple_size(Old) < NewSize -> ok;
- true -> erlang:error({invalid_header_size, Old})
- end,
- {_, New} = lists:foldl(
- fun(Val, {Idx, Hdr}) ->
- {Idx + 1, setelement(Idx, Hdr, Val)}
- end,
- {1, #db_header{}},
- tuple_to_list(Old)
- ),
- if
- is_record(New, db_header) -> ok;
- true -> erlang:error({invalid_header_extension, {Old, New}})
- end,
- New.
-
--define(OLD_DISK_VERSION_ERROR,
- "Database files from versions smaller than 0.10.0 are no longer supported"
-).
-
-upgrade_disk_version(#db_header{} = Header) ->
- case element(2, Header) of
- 1 ->
- throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 2 ->
- throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 3 ->
- throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- % [0.10 - 0.11)
- 4 ->
- Header#db_header{security_ptr = nil};
- % pre 1.2
- 5 ->
- Header;
- ?LATEST_DISK_VERSION ->
- Header;
- _ ->
- Reason = "Incorrect disk header version",
- throw({database_disk_version_error, Reason})
- end.
-
-upgrade_uuid(#db_header{} = Header) ->
- case Header#db_header.uuid of
- undefined ->
- % Upgrading this old db file to a newer
- % on disk format that includes a UUID.
- Header#db_header{uuid = couch_uuids:random()};
- _ ->
- Header
- end.
-
-upgrade_epochs(#db_header{} = Header) ->
- NewEpochs =
- case Header#db_header.epochs of
- undefined ->
-                % This node is taking over ownership of a shard with
-                % an old version of the couch file. Before epochs there
-                % was always an implicit assumption that a file was
-                % owned since eternity by the node it was on. This
-                % just codifies that assumption.
- [{node(), 0}];
- [{Node, _} | _] = Epochs0 when Node == node() ->
- % Current node is the current owner of this db
- Epochs0;
- Epochs1 ->
- % This node is taking over ownership of this db
- % and marking the update sequence where it happened.
- [{node(), Header#db_header.update_seq} | Epochs1]
- end,
-    % It's possible for a node to open a db and claim
-    % ownership but never make a write to the db. This
-    % removes nodes that claimed ownership but never
-    % changed the database.
- DedupedEpochs = remove_dup_epochs(NewEpochs),
- Header#db_header{epochs = DedupedEpochs}.
-
-% This relies slightly on the update_seqs in epochs being sorted,
-% because we only ever push new entries onto the front. If we ever
-% had a case where the update_seq is not monotonically increasing,
-% it's not clear we'd want to remove dupes (by sorting the input to
-% this function), so for now we don't sort and instead rely on the
-% idea that epochs is always sorted.
-remove_dup_epochs([_] = Epochs) ->
- Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
- % Seqs match, keep the most recent owner
- [{N1, S}];
-remove_dup_epochs([_, _] = Epochs) ->
- % Seqs don't match.
- Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
- % Seqs match, keep the most recent owner
- remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
- % Seqs don't match, recurse to check others
- [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
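-% A small worked example of the dedup above (illustrative only; the node
-% names are made up):
-%
-%   remove_dup_epochs([{'n1@host', 5}, {'n2@host', 5}, {'n3@host', 0}])
-%
-% returns [{'n1@host', 5}, {'n3@host', 0}]: the older claimant at the same
-% update_seq is dropped while entries with distinct seqs are kept.
-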
-upgrade_compacted_seq(#db_header{} = Header) ->
- case Header#db_header.compacted_seq of
- undefined ->
- Header#db_header{compacted_seq = 0};
- _ ->
- Header
- end.
-
-latest(?LATEST_DISK_VERSION) ->
- true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
- false;
-latest(_Else) ->
- undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
- {
- % record name
- db_header,
- % disk version
- Vsn,
- % update_seq
- 100,
- % unused
- 0,
- % id_tree_state
- foo,
- % seq_tree_state
- bar,
- % local_tree_state
- bam,
- % purge_seq
- 1,
- % purged_docs
- baz,
- % security_ptr
- bang,
- % revs_limit
- 999
- }.
-
-upgrade_v3_test() ->
- Vsn3Header = mk_header(3),
- NewHeader = upgrade_tuple(Vsn3Header),
-
-    % Tuple upgrades don't change any existing field values
- ?assert(is_record(NewHeader, db_header)),
- ?assertEqual(3, disk_version(NewHeader)),
- ?assertEqual(100, update_seq(NewHeader)),
- ?assertEqual(foo, id_tree_state(NewHeader)),
- ?assertEqual(bar, seq_tree_state(NewHeader)),
- ?assertEqual(bam, local_tree_state(NewHeader)),
- ?assertEqual(1, purge_seq(NewHeader)),
- ?assertEqual(baz, purged_docs(NewHeader)),
- ?assertEqual(bang, security_ptr(NewHeader)),
- ?assertEqual(999, revs_limit(NewHeader)),
- ?assertEqual(undefined, uuid(NewHeader)),
- ?assertEqual(undefined, epochs(NewHeader)),
-
- ?assertThrow(
- {database_disk_version_error, _},
- upgrade_disk_version(NewHeader)
- ).
-
-upgrade_v5_test() ->
- Vsn5Header = mk_header(5),
- NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
- ?assert(is_record(NewHeader, db_header)),
- ?assertEqual(5, disk_version(NewHeader)),
-
- % Security ptr isn't changed for v5 headers
- ?assertEqual(bang, security_ptr(NewHeader)).
-
-upgrade_uuid_test() ->
- Vsn5Header = mk_header(5),
-
- % Upgraded headers get a new UUID
- NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
- ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
- % Headers with a UUID don't have their UUID changed
- NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
- ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
- % Derived empty headers maintain the same UUID
- ResetHeader = from(NewNewHeader),
- ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-upgrade_epochs_test() ->
- Vsn5Header = mk_header(5),
-
- % Upgraded headers get a default epochs set
- NewHeader = upgrade(Vsn5Header),
- ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
- % Fake an old entry in epochs
- FakeFields = [
- {update_seq, 20},
- {epochs, [{'someothernode@someotherhost', 0}]}
- ],
- NotOwnedHeader = set(NewHeader, FakeFields),
-
- OwnedEpochs = [
- {node(), 20},
- {'someothernode@someotherhost', 0}
- ],
-
- % Upgrading a header not owned by the local node updates
- % the epochs appropriately.
- NowOwnedHeader = upgrade(NotOwnedHeader),
- ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
- % Headers with epochs stay the same after upgrades
- NewNewHeader = upgrade(NowOwnedHeader),
- ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
- % Getting a reset header maintains the epoch data
- ResetHeader = from(NewNewHeader),
- ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-get_uuid_from_old_header_test() ->
- Vsn5Header = mk_header(5),
- ?assertEqual(undefined, uuid(Vsn5Header)).
-
-get_epochs_from_old_header_test() ->
- Vsn5Header = mk_header(5),
- ?assertEqual(undefined, epochs(Vsn5Header)).
-
--endif.
diff --git a/src/couch/src/couch_db_int.hrl b/src/couch/src/couch_db_int.hrl
deleted file mode 100644
index 7da0ce5df..000000000
--- a/src/couch/src/couch_db_int.hrl
+++ /dev/null
@@ -1,76 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--record(db, {
- vsn = 1,
- name,
- filepath,
-
- engine = {couch_bt_engine, undefined},
-
- main_pid = nil,
- compactor_pid = nil,
-
- committed_update_seq,
-
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
-
- user_ctx = #user_ctx{},
- security = [],
- validate_doc_funs = undefined,
-
- before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
- after_doc_read = nil, % nil | fun(Doc, Db) -> NewDoc
-
- % feature removed in 3.x, but field kept to avoid changing db record size
- % and breaking rolling cluster upgrade
- waiting_delayed_commit_deprecated,
-
- options = [],
- compression
-}).
-
-
--define(OLD_DB_REC, {
- db,
- _, % MainPid
- _, % CompactorPid
- _, % InstanceStartTime
- _, % Fd
- _, % FdMonitor
- _, % Header
- _, % CommittedUpdateSeq
- _, % IdTree
- _, % SeqTree
- _, % LocalTree
- _, % UpdateSeq
- _, % Name
- _, % FilePath
- _, % ValidateDocFuns
- _, % Security
- _, % SecurityPtr
- _, % UserCtx
- _, % WaitingDelayedCommit
- _, % RevsLimit
- _, % FsyncOptions
- _, % Options
- _, % Compression
- _, % BeforeDocUpdate
- _ % AfterDocRead
-}).
-
-
--define(OLD_DB_NAME(Db), element(13, Db)).
--define(OLD_DB_MAIN_PID(Db), element(2, Db)).
--define(OLD_DB_USER_CTX(Db), element(18, Db)).
--define(OLD_DB_SECURITY(Db), element(16, Db)).
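-
-% Usage sketch (illustrative, not from the original header): code that may
-% still receive a pre-PSE #db tuple during a rolling upgrade can match it
-% with ?OLD_DB_REC and read fields positionally, e.g.
-%
-%   ?OLD_DB_REC = OldDb,
-%   Name = ?OLD_DB_NAME(OldDb),
-%   Security = ?OLD_DB_SECURITY(OldDb).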
diff --git a/src/couch/src/couch_db_plugin.erl b/src/couch/src/couch_db_plugin.erl
deleted file mode 100644
index c84edc1b7..000000000
--- a/src/couch/src/couch_db_plugin.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_plugin).
-
--export([
- validate_dbname/3,
- before_doc_update/3,
- after_doc_read/2,
- validate_docid/1,
- check_is_admin/1,
- is_valid_purge_client/2,
- on_compact/2,
- on_delete/2
-]).
-
--define(SERVICE_ID, couch_db).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-validate_dbname(DbName, Normalized, Default) ->
- maybe_handle(validate_dbname, [DbName, Normalized], Default).
-
-before_doc_update(Db, Doc0, UpdateType) ->
- Fun = couch_db:get_before_doc_update_fun(Db),
- case with_pipe(before_doc_update, [Doc0, Db, UpdateType]) of
- [Doc1, _Db, UpdateType1] when is_function(Fun) ->
- Fun(Doc1, Db, UpdateType1);
- [Doc1, _Db, _UpdateType] ->
- Doc1
- end.
-
-after_doc_read(Db, Doc0) ->
- Fun = couch_db:get_after_doc_read_fun(Db),
- case with_pipe(after_doc_read, [Doc0, Db]) of
- [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
- [Doc1, _Db] -> Doc1
- end.
-
-validate_docid(Id) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if they specifically allow the given Id
- couch_epi:any(Handle, ?SERVICE_ID, validate_docid, [Id], []).
-
-check_is_admin(Db) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if they specifically grant admin access for the given Db
- couch_epi:any(Handle, ?SERVICE_ID, check_is_admin, [Db], []).
-
-is_valid_purge_client(DbName, Props) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if they specifically allow the given purge client
- couch_epi:any(Handle, ?SERVICE_ID, is_valid_purge_client, [DbName, Props], []).
-
-on_compact(DbName, DDocs) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, on_compact, [DbName, DDocs], []).
-
-on_delete(DbName, Options) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, on_delete, [DbName, Options], []).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-with_pipe(Func, Args) ->
- do_apply(Func, Args, [pipe]).
-
-do_apply(Func, Args, Opts) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
-
-maybe_handle(Func, Args, Default) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
- no_decision when is_function(Default) ->
- apply(Default, Args);
- no_decision ->
- Default;
- {decided, Result} ->
- Result
- end.
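-
-% A minimal sketch of the decision flow above (illustrative; the db name and
-% the default fun are hypothetical): when no registered plugin decides on
-% validate_dbname, the Default is applied to Args, so
-%
-%   ok = maybe_handle(validate_dbname, [<<"mydb">>, <<"mydb">>], fun(_, _) -> ok end).
-%
-% A plugin answering {decided, Result} short-circuits to Result instead.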
diff --git a/src/couch/src/couch_db_split.erl b/src/couch/src/couch_db_split.erl
deleted file mode 100644
index d219e3731..000000000
--- a/src/couch/src/couch_db_split.erl
+++ /dev/null
@@ -1,523 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_split).
-
--export([
- split/3,
- copy_local_docs/3,
- cleanup_target/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% 16 MiB
--define(DEFAULT_BUFFER_SIZE, 16777216).
-
--record(state, {
- source_db,
- source_uuid,
- targets,
- pickfun,
- max_buffer_size = ?DEFAULT_BUFFER_SIZE,
- hashfun
-}).
-
--record(target, {
- db,
- uuid,
- buffer = [],
- buffer_size = 0
-}).
-
--record(racc, {
- id,
- source_db,
- target_db,
- active = 0,
- external = 0,
- atts = []
-}).
-
-% Public API
-
-split(Source, #{} = Targets, PickFun) when
- map_size(Targets) >= 2, is_function(PickFun, 3)
-->
- case couch_db:open_int(Source, [?ADMIN_CTX]) of
- {ok, SourceDb} ->
- Engine = get_engine(SourceDb),
- Partitioned = couch_db:is_partitioned(SourceDb),
- HashFun = mem3_hash:get_hash_fun(couch_db:name(SourceDb)),
- try
- split(SourceDb, Partitioned, Engine, Targets, PickFun, HashFun)
- catch
- throw:{target_create_error, DbName, Error, TargetDbs} ->
- cleanup_targets(TargetDbs, Engine),
- {error, {target_create_error, DbName, Error}}
- after
- couch_db:close(SourceDb)
- end;
- {not_found, _} ->
- {error, missing_source}
- end.
-
-copy_local_docs(Source, #{} = Targets0, PickFun) when
- is_binary(Source), is_function(PickFun, 3)
-->
- case couch_db:open_int(Source, [?ADMIN_CTX]) of
- {ok, SourceDb} ->
- try
- Targets = maps:map(
- fun(_, DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- #target{db = Db, uuid = couch_db:get_uuid(Db)}
- end,
- Targets0
- ),
- SourceName = couch_db:name(SourceDb),
- try
- State = #state{
- source_db = SourceDb,
- source_uuid = couch_db:get_uuid(SourceDb),
- targets = Targets,
- pickfun = PickFun,
- hashfun = mem3_hash:get_hash_fun(SourceName)
- },
- copy_local_docs(State),
- ok
- after
- maps:map(
- fun(_, #target{db = Db} = T) ->
- couch_db:close(Db),
- T#target{db = undefined}
- end,
- Targets
- )
- end
- after
- couch_db:close(SourceDb)
- end;
- {not_found, _} ->
- {error, missing_source}
- end.
-
-cleanup_target(Source, Target) when is_binary(Source), is_binary(Target) ->
- case couch_db:open_int(Source, [?ADMIN_CTX]) of
- {ok, SourceDb} ->
- try
- delete_target(Target, get_engine(SourceDb))
- after
- couch_db:close(SourceDb)
- end;
- {not_found, _} ->
- {error, missing_source}
- end.
-
-% Private Functions
-
-split(SourceDb, Partitioned, Engine, Targets0, PickFun, {M, F, A} = HashFun) ->
- Targets = maps:fold(
- fun(Key, DbName, Map) ->
- case couch_db:validate_dbname(DbName) of
- ok ->
- ok;
- {error, E} ->
- throw({target_create_error, DbName, E, Map})
- end,
- case couch_server:lock(DbName, <<"shard splitting">>) of
- ok ->
- ok;
- {error, Err} ->
- throw({target_create_error, DbName, Err, Map})
- end,
- {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
- Opts =
- [create, ?ADMIN_CTX] ++
- case Partitioned of
- true -> [{props, [{partitioned, true}, {hash, [M, F, A]}]}];
- false -> []
- end,
- case couch_db:start_link(Engine, DbName, Filepath, Opts) of
- {ok, Db} ->
- Map#{Key => #target{db = Db}};
- {error, Error} ->
- throw({target_create_error, DbName, Error, Map})
- end
- end,
- #{},
- Targets0
- ),
- Seq = couch_db:get_update_seq(SourceDb),
- State1 = #state{
- source_db = SourceDb,
- targets = Targets,
- pickfun = PickFun,
- hashfun = HashFun,
- max_buffer_size = get_max_buffer_size()
- },
- State2 = copy_docs(State1),
- State3 = copy_checkpoints(State2),
- State4 = copy_meta(State3),
- State5 = copy_purge_info(State4),
- State6 = set_targets_update_seq(State5),
- stop_targets(State6#state.targets),
- {ok, Seq}.
-
-cleanup_targets(#{} = Targets, Engine) ->
- maps:map(
- fun(_, #target{db = Db} = T) ->
- ok = stop_target_db(Db),
- DbName = couch_db:name(Db),
- delete_target(DbName, Engine),
- couch_server:unlock(DbName),
- T
- end,
- Targets
- ).
-
-stop_targets(#{} = Targets) ->
- maps:map(
- fun(_, #target{db = Db} = T) ->
- {ok, Db1} = couch_db_engine:commit_data(Db),
- ok = stop_target_db(Db1),
- T
- end,
- Targets
- ).
-
-stop_target_db(Db) ->
- couch_db:close(Db),
- Pid = couch_db:get_pid(Db),
- catch unlink(Pid),
- catch exit(Pid, kill),
- couch_server:unlock(couch_db:name(Db)),
- ok.
-
-delete_target(DbName, Engine) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
- DelOpt = [{context, compaction}, sync],
- couch_db_engine:delete(Engine, RootDir, Filepath, DelOpt).
-
-pick_target(DocId, #state{} = State, #{} = Targets) ->
- #state{pickfun = PickFun, hashfun = HashFun} = State,
- Key = PickFun(DocId, maps:keys(Targets), HashFun),
- {Key, maps:get(Key, Targets)}.
-
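-% Illustration (a sketch; the key shape is hypothetical): with targets keyed
-% by hash ranges such as [16#00000000, 16#7fffffff], PickFun(DocId,
-% maps:keys(Targets), HashFun) returns the range the hashed doc id falls
-% into, and that key selects the #target{} the doc is buffered in.
-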
-set_targets_update_seq(#state{targets = Targets} = State) ->
- Seq = couch_db:get_update_seq(State#state.source_db),
- Targets1 = maps:map(
- fun(_, #target{db = Db} = Target) ->
- {ok, Db1} = couch_db_engine:set_update_seq(Db, Seq),
- Target#target{db = Db1}
- end,
- Targets
- ),
- State#state{targets = Targets1}.
-
-copy_checkpoints(#state{} = State) ->
- #state{source_db = Db, source_uuid = SrcUUID, targets = Targets} = State,
- FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
- UpdatedAcc =
- case Id of
- <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
- % Transform mem3 internal replicator checkpoints to avoid
- % rewinding the changes feed when it sees the new shards
- maps:map(
- fun(_, #target{uuid = TgtUUID, buffer = Docs} = T) ->
- Doc1 = update_checkpoint_doc(SrcUUID, TgtUUID, Doc),
- T#target{buffer = [Doc1 | Docs]}
- end,
- Acc
- );
- <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
- % Copy purge checkpoints to all shards
- maps:map(
- fun(_, #target{buffer = Docs} = T) ->
- T#target{buffer = [Doc | Docs]}
- end,
- Acc
- );
- <<?LOCAL_DOC_PREFIX, _/binary>> ->
-                    % Skip copying these; they will be copied during the
-                    % local docs top-off right before the shards are switched
- Acc
- end,
- {ok, UpdatedAcc}
- end,
- {ok, Targets1} = couch_db_engine:fold_local_docs(Db, FoldFun, Targets, []),
- Targets2 = maps:map(
- fun(_, #target{db = TDb, buffer = Docs} = T) ->
- case Docs of
- [] ->
- T;
- [_ | _] ->
- Docs1 = lists:reverse(Docs),
- {ok, TDb1} = couch_db_engine:write_doc_infos(TDb, [], Docs1),
- {ok, TDb2} = couch_db_engine:commit_data(TDb1),
- T#target{db = TDb2, buffer = []}
- end
- end,
- Targets1
- ),
- State#state{targets = Targets2}.
-
-update_checkpoint_doc(Old, New, #doc{body = {Props}} = Doc) ->
- NewProps =
- case couch_util:get_value(<<"target_uuid">>, Props) of
- Old ->
- replace_kv(Props, {<<"target_uuid">>, Old, New});
- Other when is_binary(Other) ->
- replace_kv(Props, {<<"source_uuid">>, Old, New})
- end,
- NewId = update_checkpoint_id(Doc#doc.id, Old, New),
- Doc#doc{id = NewId, body = {NewProps}}.
-
-update_checkpoint_id(Id, Old, New) ->
- OldHash = mem3_rep:local_id_hash(Old),
- NewHash = mem3_rep:local_id_hash(New),
- binary:replace(Id, OldHash, NewHash).
-
-replace_kv({[]}, _) ->
- {[]};
-replace_kv({KVs}, Replacement) ->
- {[replace_kv(KV, Replacement) || KV <- KVs]};
-replace_kv([], _) ->
- [];
-replace_kv(List, Replacement) when is_list(List) ->
- [replace_kv(V, Replacement) || V <- List];
-replace_kv({K, V}, {K, V, NewV}) ->
- {K, NewV};
-replace_kv({K, V}, Replacement) ->
- {K, replace_kv(V, Replacement)};
-replace_kv(V, _) ->
- V.
-
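-% For illustration (a sketch; the uuids are made up): rewriting the
-% target_uuid of a checkpoint body works like
-%
-%   {[{<<"target_uuid">>, <<"new">>}]} =
-%       replace_kv({[{<<"target_uuid">>, <<"old">>}]},
-%                  {<<"target_uuid">>, <<"old">>, <<"new">>}).
-%
-% Every other key/value is passed through unchanged.
-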
-copy_meta(#state{source_db = SourceDb, targets = Targets} = State) ->
- RevsLimit = couch_db:get_revs_limit(SourceDb),
- {SecProps} = couch_db:get_security(SourceDb),
- PurgeLimit = couch_db:get_purge_infos_limit(SourceDb),
- Targets1 = maps:map(
- fun(_, #target{db = Db} = T) ->
- {ok, Db1} = couch_db_engine:set_revs_limit(Db, RevsLimit),
- {ok, Db2} = couch_db_engine:set_security(Db1, SecProps),
- {ok, Db3} = couch_db_engine:set_purge_infos_limit(Db2, PurgeLimit),
- T#target{db = Db3}
- end,
- Targets
- ),
- State#state{targets = Targets1}.
-
-copy_purge_info(#state{source_db = Db} = State) ->
- Seq = max(0, couch_db:get_oldest_purge_seq(Db) - 1),
- {ok, NewState} = couch_db:fold_purge_infos(Db, Seq, fun purge_cb/2, State),
- Targets = maps:map(
- fun(_, #target{} = T) ->
- commit_purge_infos(T)
- end,
- NewState#state.targets
- ),
- NewState#state{targets = Targets}.
-
-acc_and_flush(Item, #target{} = Target, MaxBuffer, FlushCb) ->
- #target{buffer = Buffer, buffer_size = BSize} = Target,
- BSize1 = BSize + ?term_size(Item),
- Target1 = Target#target{buffer = [Item | Buffer], buffer_size = BSize1},
- case BSize1 > MaxBuffer of
- true -> FlushCb(Target1);
- false -> Target1
- end.
-
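-% A note on the buffering above (descriptive; sizes are arbitrary): each item
-% is prepended to the target's buffer and its ?term_size/1 added to
-% buffer_size; only once the running size exceeds MaxBuffer is FlushCb
-% invoked (commit_purge_infos/1 or commit_docs/1 below), which writes the
-% reversed buffer and resets buffer_size to 0.
-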
-purge_cb({_PSeq, _UUID, Id, _Revs} = PI, #state{targets = Targets} = State) ->
- {Key, Target} = pick_target(Id, State, Targets),
- MaxBuffer = State#state.max_buffer_size,
- Target1 = acc_and_flush(PI, Target, MaxBuffer, fun commit_purge_infos/1),
- {ok, State#state{targets = Targets#{Key => Target1}}}.
-
-commit_purge_infos(#target{buffer = [], db = Db} = Target) ->
- Target#target{db = Db};
-commit_purge_infos(#target{buffer = PIs0, db = Db} = Target) ->
- PIs = lists:reverse(PIs0),
- {ok, Db1} = couch_db_engine:copy_purge_infos(Db, PIs),
- {ok, Db2} = couch_db_engine:commit_data(Db1),
- Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
-copy_docs(#state{source_db = Db} = State) ->
- {ok, NewState} = couch_db:fold_changes(Db, 0, fun changes_cb/2, State),
- CommitTargets = maps:map(
- fun(_, #target{} = T) ->
- commit_docs(T)
- end,
- NewState#state.targets
- ),
- NewState#state{targets = CommitTargets}.
-
-% Backwards compatibility clause. Seq trees used to hold #doc_infos at one time
-changes_cb(#doc_info{id = Id}, #state{source_db = Db} = State) ->
- [FDI = #full_doc_info{}] = couch_db_engine:open_docs(Db, [Id]),
- changes_cb(FDI, State);
-changes_cb(#full_doc_info{id = Id} = FDI, #state{} = State) ->
- #state{source_db = SourceDb, targets = Targets} = State,
- {Key, Target} = pick_target(Id, State, Targets),
- FDI1 = process_fdi(FDI, SourceDb, Target#target.db),
- MaxBuffer = State#state.max_buffer_size,
- Target1 = acc_and_flush(FDI1, Target, MaxBuffer, fun commit_docs/1),
- {ok, State#state{targets = Targets#{Key => Target1}}}.
-
-commit_docs(#target{buffer = [], db = Db} = Target) ->
- Target#target{db = Db};
-commit_docs(#target{buffer = FDIs, db = Db} = Target) ->
- Pairs = [{not_found, FDI} || FDI <- lists:reverse(FDIs)],
- {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, []),
- {ok, Db2} = couch_db_engine:commit_data(Db1),
- Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
-process_fdi(FDI, SourceDb, TargetDb) ->
- #full_doc_info{id = Id, rev_tree = RTree} = FDI,
- Acc = #racc{id = Id, source_db = SourceDb, target_db = TargetDb},
- {NewRTree, NewAcc} = couch_key_tree:mapfold(fun revtree_cb/4, Acc, RTree),
- {Active, External} = total_sizes(NewAcc),
- FDI#full_doc_info{
- rev_tree = NewRTree,
- sizes = #size_info{active = Active, external = External}
- }.
-
-revtree_cb(_Rev, _Leaf, branch, Acc) ->
- {[], Acc};
-revtree_cb({Pos, RevId}, Leaf, leaf, Acc) ->
- #racc{id = Id, source_db = SourceDb, target_db = TargetDb} = Acc,
- #leaf{deleted = Deleted, ptr = Ptr, sizes = LeafSizes} = Leaf,
- Doc0 = #doc{
- id = Id,
- revs = {Pos, [RevId]},
- deleted = Deleted,
- body = Ptr
- },
- Doc1 = couch_db_engine:read_doc_body(SourceDb, Doc0),
- #doc{body = Body, atts = AttInfos0} = Doc1,
- External =
- case LeafSizes#size_info.external of
- 0 when is_binary(Body) ->
- couch_compress:uncompressed_size(Body);
- 0 ->
- couch_ejson_size:encoded_size(Body);
- N ->
- N
- end,
- AttInfos =
- if
- not is_binary(AttInfos0) -> AttInfos0;
- true -> couch_compress:decompress(AttInfos0)
- end,
- Atts = [process_attachment(Att, SourceDb, TargetDb) || Att <- AttInfos],
- Doc2 = Doc1#doc{atts = Atts},
- Doc3 = couch_db_engine:serialize_doc(TargetDb, Doc2),
- {ok, Doc4, Active} = couch_db_engine:write_doc_body(TargetDb, Doc3),
-    % element(3, ...) and element(4, ...) are the stream pointer and size,
-    % respectively (see couch_att.erl). They are numeric for compatibility
-    % with older formats.
- AttSizes = [{element(3, A), element(4, A)} || A <- Atts],
- NewLeaf = Leaf#leaf{
- ptr = Doc4#doc.body,
- sizes = #size_info{active = Active, external = External},
- atts = AttSizes
- },
- {NewLeaf, add_sizes(Active, External, AttSizes, Acc)}.
-
-% This is copied almost verbatim from the compactor
-process_attachment(
- {Name, Type, BinSp, AttLen, RevPos, ExpectedMd5},
- SourceDb,
- TargetDb
-) ->
-    % 0.10 upgrade code
- {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
- {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
-process_attachment(
- {Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}, SourceDb, TargetDb
-) ->
- {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
- {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- Enc =
- case Enc1 of
-            % 0.11.0 upgrade code
- true -> gzip;
-            % 0.11.0 upgrade code
- false -> identity;
- _ -> Enc1
- end,
- {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}.
-
-get_engine(Db) ->
- {ok, DbInfoProps} = couch_db:get_db_info(Db),
- proplists:get_value(engine, DbInfoProps).
-
-add_sizes(Active, External, Atts, #racc{} = Acc) ->
- #racc{active = ActiveAcc, external = ExternalAcc, atts = AttsAcc} = Acc,
- NewActiveAcc = ActiveAcc + Active,
- NewExternalAcc = ExternalAcc + External,
- NewAttsAcc = lists:umerge(Atts, AttsAcc),
- Acc#racc{
- active = NewActiveAcc,
- external = NewExternalAcc,
- atts = NewAttsAcc
- }.
-
-total_sizes(#racc{active = Active, external = External, atts = Atts}) ->
- TotalAtts = lists:foldl(fun({_, S}, A) -> S + A end, 0, Atts),
- {Active + TotalAtts, External + TotalAtts}.
-
-get_max_buffer_size() ->
- config:get_integer("reshard", "split_buffer_size", ?DEFAULT_BUFFER_SIZE).
-
-copy_local_docs(#state{source_db = Db, targets = Targets} = State) ->
- FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
- UpdatedAcc =
- case Id of
- <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
- Acc;
- <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
- Acc;
- <<?LOCAL_DOC_PREFIX, _/binary>> ->
- % Users' and replicator app's checkpoints go to their
- % respective shards based on the general hashing algorithm
- {Key, Target} = pick_target(Id, State, Acc),
- #target{buffer = Docs} = Target,
- Acc#{Key => Target#target{buffer = [Doc | Docs]}}
- end,
- {ok, UpdatedAcc}
- end,
- {ok, Targets1} = couch_db:fold_local_docs(Db, FoldFun, Targets, []),
- Targets2 = maps:map(
- fun(_, #target{db = TDb, buffer = Docs} = T) ->
- case Docs of
- [] ->
- T;
- [_ | _] ->
- Docs1 = lists:reverse(Docs),
- {ok, _} = couch_db:update_docs(TDb, Docs1),
- T#target{buffer = []}
- end
- end,
- Targets1
- ),
- State#state{targets = Targets2}.
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
deleted file mode 100644
index 17a1e9160..000000000
--- a/src/couch/src/couch_db_updater.erl
+++ /dev/null
@@ -1,1029 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_updater).
--behaviour(gen_server).
--vsn(1).
-
--export([add_sizes/3, upgrade_sizes/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
--define(IDLE_LIMIT_DEFAULT, 61000).
-% 10 GiB
--define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000).
-
--record(merge_acc, {
- revs_limit,
- merge_conflicts,
- add_infos = [],
- rem_seqs = [],
- cur_seq,
- full_partitions = []
-}).
-
-init({Engine, DbName, FilePath, Options0}) ->
- erlang:put(io_priority, {db_update, DbName}),
- update_idle_limit_from_config(),
- DefaultSecObj = default_security_object(DbName),
- Options = [{default_security_object, DefaultSecObj} | Options0],
- try
- {ok, EngineState} = couch_db_engine:init(Engine, FilePath, Options),
- Db = init_db(DbName, FilePath, EngineState, Options),
- case lists:member(sys_db, Options) of
- false ->
- couch_stats_process_tracker:track([couchdb, open_databases]);
- true ->
- ok
- end,
- % Don't load validation funs here because the fabric query is
- % liable to race conditions. Instead see
- % couch_db:validate_doc_update, which loads them lazily.
- NewDb = Db#db{main_pid = self()},
- proc_lib:init_ack({ok, NewDb}),
- gen_server:enter_loop(?MODULE, [], NewDb, idle_limit())
- catch
- throw:InitError ->
- proc_lib:init_ack(InitError)
- end.
-
-terminate(Reason, Db) ->
- couch_util:shutdown_sync(Db#db.compactor_pid),
- couch_db_engine:terminate(Reason, Db),
- ok.
-
-handle_call(get_db, _From, Db) ->
- {reply, {ok, Db}, Db, idle_limit()};
-handle_call(start_compact, _From, Db) ->
- {noreply, NewDb, _Timeout} = handle_cast(start_compact, Db),
- {reply, {ok, NewDb#db.compactor_pid}, NewDb, idle_limit()};
-handle_call(compactor_pid, _From, #db{compactor_pid = Pid} = Db) ->
- {reply, Pid, Db, idle_limit()};
-handle_call(cancel_compact, _From, #db{compactor_pid = nil} = Db) ->
- {reply, ok, Db, idle_limit()};
-handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
- unlink(Pid),
- exit(Pid, kill),
- couch_server:delete_compaction_files(Db#db.name),
- Db2 = Db#db{compactor_pid = nil},
- ok = couch_server:db_updated(Db2),
- {reply, ok, Db2, idle_limit()};
-handle_call({set_security, NewSec}, _From, #db{} = Db) ->
- {ok, NewDb} = couch_db_engine:set_security(Db, NewSec),
- NewSecDb = commit_data(NewDb#db{
- security = NewSec
- }),
- ok = couch_server:db_updated(NewSecDb),
- {reply, ok, NewSecDb, idle_limit()};
-handle_call({set_revs_limit, Limit}, _From, Db) ->
- {ok, Db2} = couch_db_engine:set_revs_limit(Db, Limit),
- Db3 = commit_data(Db2),
- ok = couch_server:db_updated(Db3),
- {reply, ok, Db3, idle_limit()};
-handle_call({set_purge_infos_limit, Limit}, _From, Db) ->
- {ok, Db2} = couch_db_engine:set_purge_infos_limit(Db, Limit),
- ok = couch_server:db_updated(Db2),
- {reply, ok, Db2, idle_limit()};
-handle_call({purge_docs, [], _}, _From, Db) ->
- {reply, {ok, []}, Db, idle_limit()};
-handle_call({purge_docs, PurgeReqs0, Options}, _From, Db) ->
- % Filter out any previously applied updates during
- % internal replication
- IsRepl = lists:member(replicated_changes, Options),
- PurgeReqs =
- if
- not IsRepl ->
- PurgeReqs0;
- true ->
- UUIDs = [UUID || {UUID, _Id, _Revs} <- PurgeReqs0],
- PurgeInfos = couch_db_engine:load_purge_infos(Db, UUIDs),
- lists:flatmap(
- fun
- ({not_found, PReq}) -> [PReq];
- ({{_, _, _, _}, _}) -> []
- end,
- lists:zip(PurgeInfos, PurgeReqs0)
- )
- end,
- {ok, NewDb, Replies} = purge_docs(Db, PurgeReqs),
- {reply, {ok, Replies}, NewDb, idle_limit()};
-handle_call(Msg, From, Db) ->
- case couch_db_engine:handle_db_updater_call(Msg, From, Db) of
- {reply, Resp, NewDb} ->
- {reply, Resp, NewDb, idle_limit()};
- Else ->
- Else
- end.
-
-handle_cast({load_validation_funs, ValidationFuns}, Db) ->
- Db2 = Db#db{validate_doc_funs = ValidationFuns},
- ok = couch_server:db_updated(Db2),
- {noreply, Db2, idle_limit()};
-handle_cast(start_compact, Db) ->
- case Db#db.compactor_pid of
- nil ->
- % For now we only support compacting to the same
- % storage engine. After the first round of patches
- % we'll add a field that sets the target engine
- % type to compact to with a new copy compactor.
- UpdateSeq = couch_db_engine:get_update_seq(Db),
- Args = [Db#db.name, UpdateSeq],
- Level = list_to_existing_atom(
- config:get(
- "couchdb", "compaction_log_level", "info"
- )
- ),
- couch_log:Level("Starting compaction for db \"~s\" at ~p", Args),
- {ok, Db2} = couch_db_engine:start_compaction(Db),
- ok = couch_server:db_updated(Db2),
- {noreply, Db2, idle_limit()};
- _ ->
- % compact currently running, this is a no-op
- {noreply, Db, idle_limit()}
- end;
-handle_cast({compact_done, _Engine, CompactInfo}, #db{} = OldDb) ->
- {ok, NewDb} = couch_db_engine:finish_compaction(OldDb, CompactInfo),
- {noreply, NewDb};
-handle_cast(wakeup, Db) ->
- {noreply, Db, idle_limit()};
-handle_cast(Msg, #db{name = Name} = Db) ->
- couch_log:error(
- "Database `~s` updater received unexpected cast: ~p",
- [Name, Msg]
- ),
- {stop, Msg, Db}.
-
-handle_info(
- {update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts},
- Db
-) ->
- GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
- if
- NonRepDocs == [] ->
- {GroupedDocs3, Clients} = collect_updates(
- GroupedDocs2,
- [Client],
- MergeConflicts
- );
- true ->
- GroupedDocs3 = GroupedDocs2,
- Clients = [Client]
- end,
- NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
- try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts) of
- {ok, Db2, UpdatedDDocIds} ->
- ok = couch_server:db_updated(Db2),
- case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of
- {Seq, Seq} -> ok;
- _ -> couch_event:notify(Db2#db.name, updated)
- end,
- if
- NonRepDocs2 /= [] ->
- couch_event:notify(Db2#db.name, local_updated);
- true ->
- ok
- end,
- [catch (ClientPid ! {done, self()}) || ClientPid <- Clients],
- Db3 =
- case length(UpdatedDDocIds) > 0 of
- true ->
- % Ken and ddoc_cache are the only things that
- % use the unspecified ddoc_updated message. We
- % should update them to use the new message per
- % ddoc.
- lists:foreach(
- fun(DDocId) ->
- couch_event:notify(Db2#db.name, {ddoc_updated, DDocId})
- end,
- UpdatedDDocIds
- ),
- couch_event:notify(Db2#db.name, ddoc_updated),
- ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds),
- refresh_validate_doc_funs(Db2);
- false ->
- Db2
- end,
- {noreply, Db3, hibernate_if_no_idle_limit()}
- catch
- throw:retry ->
- [catch (ClientPid ! {retry, self()}) || ClientPid <- Clients],
- {noreply, Db, hibernate_if_no_idle_limit()}
- end;
-handle_info({'EXIT', _Pid, normal}, Db) ->
- {noreply, Db, idle_limit()};
-handle_info({'EXIT', _Pid, Reason}, Db) ->
- {stop, Reason, Db};
-handle_info(timeout, #db{name = DbName} = Db) ->
- IdleLimitMSec = update_idle_limit_from_config(),
- case couch_db:is_idle(Db) of
- true ->
- LastActivity = couch_db_engine:last_activity(Db),
- DtMSec = timer:now_diff(os:timestamp(), LastActivity) div 1000,
- MSecSinceLastActivity = max(0, DtMSec),
- case MSecSinceLastActivity > IdleLimitMSec of
- true ->
- ok = couch_server:close_db_if_idle(DbName);
- false ->
- ok
- end;
- false ->
- ok
- end,
- % Send a message to wake up and then hibernate. Hibernation here is done to
- % force a thorough garbage collection.
- gen_server:cast(self(), wakeup),
- {noreply, Db, hibernate};
-handle_info(Msg, Db) ->
- case couch_db_engine:handle_db_updater_info(Msg, Db) of
- {noreply, NewDb} ->
- {noreply, NewDb, idle_limit()};
- Else ->
- Else
- end.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-sort_and_tag_grouped_docs(Client, GroupedDocs) ->
- % These groups should already be sorted but sometimes clients misbehave.
- % The merge_updates function will fail and the database can end up with
- % duplicate documents if the incoming groups are not sorted, so as a sanity
- % check we sort them again here. See COUCHDB-2735.
- Cmp = fun([#doc{id = A} | _], [#doc{id = B} | _]) -> A < B end,
- lists:map(
- fun(DocGroup) ->
- [{Client, maybe_tag_doc(D)} || D <- DocGroup]
- end,
- lists:sort(Cmp, GroupedDocs)
- ).
-
-maybe_tag_doc(#doc{id = Id, revs = {Pos, [_Rev | PrevRevs]}, meta = Meta0} = Doc) ->
- case lists:keymember(ref, 1, Meta0) of
- true ->
- Doc;
- false ->
- Key = {Id, {Pos - 1, PrevRevs}},
- Doc#doc{meta = [{ref, Key} | Meta0]}
- end.
-
-merge_updates([[{_, #doc{id = X}} | _] = A | RestA], [[{_, #doc{id = X}} | _] = B | RestB]) ->
- [A ++ B | merge_updates(RestA, RestB)];
-merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X < Y ->
- [hd(A) | merge_updates(tl(A), B)];
-merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X > Y ->
- [hd(B) | merge_updates(A, tl(B))];
-merge_updates([], RestB) ->
- RestB;
-merge_updates(RestA, []) ->
- RestA.
-
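-% Worked example (illustrative only; C1 and C2 stand for client pids and the
-% doc ids are made up): merging two already-sorted groups,
-%
-%   merge_updates(
-%       [[{C1, #doc{id = <<"a">>}}]],
-%       [[{C2, #doc{id = <<"a">>}}], [{C2, #doc{id = <<"b">>}}]]
-%   )
-%
-% yields [[{C1, #doc{id = <<"a">>}}, {C2, #doc{id = <<"a">>}}],
-%         [{C2, #doc{id = <<"b">>}}]]: groups for the same id are appended
-% and groups for distinct ids keep their order.
-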
-collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts) ->
- receive
- % Only collect updates with the same MergeConflicts flag and without
- % local docs. It's easier to just avoid multiple _local doc
-        % updaters than deal with their possible conflicts, and local doc
-        % writes are relatively rare. This can be optimized later if really needed.
- {update_docs, Client, GroupedDocs, [], MergeConflicts} ->
- GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
- GroupedDocsAcc2 =
- merge_updates(GroupedDocsAcc, GroupedDocs2),
- collect_updates(
- GroupedDocsAcc2,
- [Client | ClientsAcc],
- MergeConflicts
- )
- after 0 ->
- {GroupedDocsAcc, ClientsAcc}
- end.
-
-init_db(DbName, FilePath, EngineState, Options) ->
- % convert start time tuple to microsecs and store as a binary string
- {MegaSecs, Secs, MicroSecs} = os:timestamp(),
- StartTime = ?l2b(
- io_lib:format(
- "~p",
- [(MegaSecs * 1000000 * 1000000) + (Secs * 1000000) + MicroSecs]
- )
- ),
-
- BDU = couch_util:get_value(before_doc_update, Options, nil),
- ADR = couch_util:get_value(after_doc_read, Options, nil),
-
- NonCreateOpts = [Opt || Opt <- Options, Opt /= create],
-
- InitDb = #db{
- name = DbName,
- filepath = FilePath,
- engine = EngineState,
- instance_start_time = StartTime,
- options = NonCreateOpts,
- before_doc_update = BDU,
- after_doc_read = ADR
- },
-
- DbProps = couch_db_engine:get_props(InitDb),
-
- InitDb#db{
- committed_update_seq = couch_db_engine:get_update_seq(InitDb),
- security = couch_db_engine:get_security(InitDb),
- options = lists:keystore(props, 1, NonCreateOpts, {props, DbProps})
- }.
-
-refresh_validate_doc_funs(#db{name = <<"shards/", _/binary>> = Name} = Db) ->
- spawn(fabric, reset_validation_funs, [mem3:dbname(Name)]),
- Db#db{validate_doc_funs = undefined};
-refresh_validate_doc_funs(Db0) ->
- Db = Db0#db{user_ctx = ?ADMIN_USER},
- {ok, DesignDocs} = couch_db:get_design_docs(Db),
- ProcessDocFuns = lists:flatmap(
- fun(DesignDocInfo) ->
- {ok, DesignDoc} = couch_db:open_doc_int(
- Db, DesignDocInfo, [ejson_body]
- ),
- case couch_doc:get_validate_doc_fun(DesignDoc) of
- nil -> [];
- Fun -> [Fun]
- end
- end,
- DesignDocs
- ),
- Db#db{validate_doc_funs = ProcessDocFuns}.
-
-% rev tree functions
-
-flush_trees(_Db, [], AccFlushedTrees) ->
- {ok, lists:reverse(AccFlushedTrees)};
-flush_trees(
- #db{} = Db,
- [InfoUnflushed | RestUnflushed],
- AccFlushed
-) ->
- #full_doc_info{update_seq = UpdateSeq, rev_tree = Unflushed} = InfoUnflushed,
- {Flushed, FinalAcc} = couch_key_tree:mapfold(
- fun(_Rev, Value, Type, SizesAcc) ->
- case Value of
- % This node is a document summary that needs to be
- % flushed to disk.
- #doc{} = Doc ->
- check_doc_atts(Db, Doc),
- ExternalSize = get_meta_body_size(Value#doc.meta),
- {size_info, AttSizeInfo} =
- lists:keyfind(size_info, 1, Doc#doc.meta),
- {ok, NewDoc, WrittenSize} =
- couch_db_engine:write_doc_body(Db, Doc),
- Leaf = #leaf{
- deleted = Doc#doc.deleted,
- ptr = NewDoc#doc.body,
- seq = UpdateSeq,
- sizes = #size_info{
- active = WrittenSize,
- external = ExternalSize
- },
- atts = AttSizeInfo
- },
- {Leaf, add_sizes(Type, Leaf, SizesAcc)};
- #leaf{} ->
- {Value, add_sizes(Type, Value, SizesAcc)};
- _ ->
- {Value, SizesAcc}
- end
- end,
- {0, 0, []},
- Unflushed
- ),
- {FinalAS, FinalES, FinalAtts} = FinalAcc,
- TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
- NewInfo = InfoUnflushed#full_doc_info{
- rev_tree = Flushed,
- sizes = #size_info{
- active = FinalAS + TotalAttSize,
- external = FinalES + TotalAttSize
- }
- },
- flush_trees(Db, RestUnflushed, [NewInfo | AccFlushed]).
-
-check_doc_atts(Db, Doc) ->
- {atts_stream, Stream} = lists:keyfind(atts_stream, 1, Doc#doc.meta),
- % Make sure that the attachments were written to the currently
- % active attachment stream. If compaction swaps during a write
- % request we may have to rewrite our attachment bodies.
- if
- Stream == nil ->
- ok;
- true ->
- case couch_db:is_active_stream(Db, Stream) of
- true ->
- ok;
- false ->
- % Stream where the attachments were written to is
- % no longer the current attachment stream. This
- % can happen when a database is switched at
- % compaction time.
- couch_log:debug(
- "Stream where the attachments were"
- " written has changed."
- " Possibly retrying.",
- []
- ),
- throw(retry)
- end
- end.
-
-add_sizes(Type, #leaf{sizes = Sizes, atts = AttSizes}, Acc) ->
- % Maybe upgrade from disk_size only
- #size_info{
- active = ActiveSize,
- external = ExternalSize
- } = upgrade_sizes(Sizes),
- {ASAcc, ESAcc, AttsAcc} = Acc,
- NewASAcc = ActiveSize + ASAcc,
- NewESAcc =
- ESAcc +
- if
- Type == leaf -> ExternalSize;
- true -> 0
- end,
- NewAttsAcc = lists:umerge(AttSizes, AttsAcc),
- {NewASAcc, NewESAcc, NewAttsAcc}.
-
-upgrade_sizes(#size_info{} = SI) ->
- SI;
-upgrade_sizes({D, E}) ->
- #size_info{active = D, external = E};
-upgrade_sizes(S) when is_integer(S) ->
- #size_info{active = S, external = 0}.
-
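-% For illustration (a sketch): upgrade_sizes/1 accepts all three historical
-% on-disk shapes, e.g.
-%
-%   #size_info{active = 10, external = 20} = upgrade_sizes({10, 20}),
-%   #size_info{active = 30, external = 0} = upgrade_sizes(30).
-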
-send_result(Client, Doc, NewResult) ->
- % used to send a result to the client
- catch (Client ! {result, self(), {doc_tag(Doc), NewResult}}).
-
-doc_tag(#doc{meta = Meta}) ->
- case lists:keyfind(ref, 1, Meta) of
- {ref, Ref} -> Ref;
- false -> throw(no_doc_tag);
- Else -> throw({invalid_doc_tag, Else})
- end.
-
-merge_rev_trees([], [], Acc) ->
- {ok, Acc#merge_acc{
- add_infos = lists:reverse(Acc#merge_acc.add_infos)
- }};
-merge_rev_trees([NewDocs | RestDocsList], [OldDocInfo | RestOldInfo], Acc) ->
- #merge_acc{
- revs_limit = Limit,
- merge_conflicts = MergeConflicts,
- full_partitions = FullPartitions
- } = Acc,
-
- % Track doc ids so we can debug large revision trees
- erlang:put(last_id_merged, OldDocInfo#full_doc_info.id),
- NewDocInfo0 = lists:foldl(
- fun({Client, NewDoc}, OldInfoAcc) ->
- NewInfo = merge_rev_tree(OldInfoAcc, NewDoc, Client, MergeConflicts),
- case is_overflowed(NewInfo, OldInfoAcc, FullPartitions) of
- true when not MergeConflicts ->
- DocId = NewInfo#full_doc_info.id,
- send_result(Client, NewDoc, {partition_overflow, DocId}),
- OldInfoAcc;
- _ ->
- NewInfo
- end
- end,
- OldDocInfo,
- NewDocs
- ),
- NewDocInfo1 = maybe_stem_full_doc_info(NewDocInfo0, Limit),
- % When MergeConflicts is false, we updated #full_doc_info.deleted on every
- % iteration of merge_rev_tree. However, merge_rev_tree does not update
- % #full_doc_info.deleted when MergeConflicts is true, since we don't need
- % to know whether the doc is deleted between iterations. Since we still
- % need to know if the doc is deleted after the merge happens, we have to
- % set it here.
- NewDocInfo2 =
- case MergeConflicts of
- true ->
- NewDocInfo1#full_doc_info{
- deleted = couch_doc:is_deleted(NewDocInfo1)
- };
- false ->
- NewDocInfo1
- end,
- if
- NewDocInfo2 == OldDocInfo ->
- % nothing changed
- merge_rev_trees(RestDocsList, RestOldInfo, Acc);
- true ->
-            % We have updated the document, so give it a new update_seq. It's
-            % important to note that the update_seq on OldDocInfo should
-            % be identical to the value on NewDocInfo1.
- OldSeq = OldDocInfo#full_doc_info.update_seq,
- NewDocInfo3 = NewDocInfo2#full_doc_info{
- update_seq = Acc#merge_acc.cur_seq + 1
- },
- RemoveSeqs =
- case OldSeq of
- 0 -> Acc#merge_acc.rem_seqs;
- _ -> [OldSeq | Acc#merge_acc.rem_seqs]
- end,
- NewAcc = Acc#merge_acc{
- add_infos = [NewDocInfo3 | Acc#merge_acc.add_infos],
- rem_seqs = RemoveSeqs,
- cur_seq = Acc#merge_acc.cur_seq + 1
- },
- merge_rev_trees(RestDocsList, RestOldInfo, NewAcc)
- end.
-
-merge_rev_tree(OldInfo, NewDoc, Client, false) when
- OldInfo#full_doc_info.deleted
-->
- % We're recreating a document that was previously
- % deleted. To check that this is a recreation from
- % the root we assert that the new document has a
- % revision depth of 1 (this is to avoid recreating a
- % doc from a previous internal revision) and is also
- % not deleted. To avoid expanding the revision tree
- % unnecessarily we create a new revision based on
- % the winning deleted revision.
-
- {RevDepth, _} = NewDoc#doc.revs,
- NewDeleted = NewDoc#doc.deleted,
- case RevDepth == 1 andalso not NewDeleted of
- true ->
- % Update the new doc based on revisions in OldInfo
- #doc_info{revs = [WinningRev | _]} = couch_doc:to_doc_info(OldInfo),
- #rev_info{rev = {OldPos, OldRev}} = WinningRev,
- Body =
- case couch_util:get_value(comp_body, NewDoc#doc.meta) of
- CompBody when is_binary(CompBody) ->
- couch_compress:decompress(CompBody);
- _ ->
- NewDoc#doc.body
- end,
- RevIdDoc = NewDoc#doc{
- revs = {OldPos, [OldRev]},
- body = Body
- },
- NewRevId = couch_db:new_revid(RevIdDoc),
- NewDoc2 = NewDoc#doc{revs = {OldPos + 1, [NewRevId, OldRev]}},
-
- % Merge our modified new doc into the tree
- #full_doc_info{rev_tree = OldTree} = OldInfo,
- NewTree0 = couch_doc:to_path(NewDoc2),
- case couch_key_tree:merge(OldTree, NewTree0) of
- {NewTree1, new_leaf} ->
- % We changed the revision id so inform the caller
- send_result(Client, NewDoc, {ok, {OldPos + 1, NewRevId}}),
- OldInfo#full_doc_info{
- rev_tree = NewTree1,
- deleted = false
- };
- _ ->
- throw(doc_recreation_failed)
- end;
- _ ->
- send_result(Client, NewDoc, conflict),
- OldInfo
- end;
-merge_rev_tree(OldInfo, NewDoc, Client, false) ->
- % We're attempting to merge a new revision into an
- % undeleted document. To not be a conflict we require
- % that the merge results in extending a branch.
-
- OldTree = OldInfo#full_doc_info.rev_tree,
- NewTree0 = couch_doc:to_path(NewDoc),
- NewDeleted = NewDoc#doc.deleted,
- case couch_key_tree:merge(OldTree, NewTree0) of
- {NewTree, new_leaf} when not NewDeleted ->
- OldInfo#full_doc_info{
- rev_tree = NewTree,
- deleted = false
- };
- {NewTree, new_leaf} when NewDeleted ->
- % We have to check if we just deleted this
- % document completely or if it was a conflict
- % resolution.
- OldInfo#full_doc_info{
- rev_tree = NewTree,
- deleted = couch_doc:is_deleted(NewTree)
- };
- _ ->
- send_result(Client, NewDoc, conflict),
- OldInfo
- end;
-merge_rev_tree(OldInfo, NewDoc, _Client, true) ->
- % We're merging in revisions without caring about
- % conflicts. Most likely this is a replication update.
- OldTree = OldInfo#full_doc_info.rev_tree,
- NewTree0 = couch_doc:to_path(NewDoc),
- {NewTree, _} = couch_key_tree:merge(OldTree, NewTree0),
- OldInfo#full_doc_info{rev_tree = NewTree}.
-
-is_overflowed(_New, _Old, []) ->
- false;
-is_overflowed(Old, Old, _FullPartitions) ->
- false;
-is_overflowed(New, Old, FullPartitions) ->
- case New#full_doc_info.id of
- <<"_design/", _/binary>> ->
- false;
-        DocId ->
-            Partition = couch_partition:from_docid(DocId),
- case lists:member(Partition, FullPartitions) of
- true ->
- estimate_size(New) > estimate_size(Old);
- false ->
- false
- end
- end.
-
-maybe_stem_full_doc_info(#full_doc_info{rev_tree = Tree} = Info, Limit) ->
- case config:get_boolean("couchdb", "stem_interactive_updates", true) of
- true ->
- Stemmed = couch_key_tree:stem(Tree, Limit),
- Info#full_doc_info{rev_tree = Stemmed};
- false ->
- Info
- end.
-
-update_docs_int(Db, DocsList, LocalDocs, MergeConflicts) ->
- UpdateSeq = couch_db_engine:get_update_seq(Db),
- RevsLimit = couch_db_engine:get_revs_limit(Db),
-
- Ids = [Id || [{_Client, #doc{id = Id}} | _] <- DocsList],
-    % Look up the old documents, if they exist.
- OldDocLookups = couch_db_engine:open_docs(Db, Ids),
- OldDocInfos = lists:zipwith(
- fun
- (_Id, #full_doc_info{} = FDI) ->
- FDI;
- (Id, not_found) ->
- #full_doc_info{id = Id}
- end,
- Ids,
- OldDocLookups
- ),
-
- %% Get the list of full partitions
- FullPartitions =
- case couch_db:is_partitioned(Db) of
- true ->
- case max_partition_size() of
- N when N =< 0 ->
- [];
- Max ->
- Partitions = lists:usort(
- lists:flatmap(
- fun(Id) ->
- case couch_partition:extract(Id) of
- undefined -> [];
- {Partition, _} -> [Partition]
- end
- end,
- Ids
- )
- ),
- [P || P <- Partitions, partition_size(Db, P) >= Max]
- end;
- false ->
- []
- end,
-
- % Merge the new docs into the revision trees.
- AccIn = #merge_acc{
- revs_limit = RevsLimit,
- merge_conflicts = MergeConflicts,
- add_infos = [],
- rem_seqs = [],
- cur_seq = UpdateSeq,
- full_partitions = FullPartitions
- },
- {ok, AccOut} = merge_rev_trees(DocsList, OldDocInfos, AccIn),
- #merge_acc{
- add_infos = NewFullDocInfos,
- rem_seqs = RemSeqs
- } = AccOut,
-
- % Write out the document summaries (the bodies are stored in the nodes of
- % the trees, the attachments are already written to disk)
- {ok, IndexFDIs} = flush_trees(Db, NewFullDocInfos, []),
- Pairs = pair_write_info(OldDocLookups, IndexFDIs),
- LocalDocs2 = update_local_doc_revs(LocalDocs),
-
- {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, LocalDocs2),
-
- WriteCount = length(IndexFDIs),
- couch_stats:increment_counter(
- [couchdb, document_inserts],
- WriteCount - length(RemSeqs)
- ),
- couch_stats:increment_counter([couchdb, document_writes], WriteCount),
- couch_stats:increment_counter(
- [couchdb, local_document_writes],
- length(LocalDocs2)
- ),
-
- % Check if we just updated any design documents, and update the validation
- % funs if we did.
- UpdatedDDocIds = lists:flatmap(
- fun
- (<<"_design/", _/binary>> = Id) -> [Id];
- (_) -> []
- end,
- Ids
- ),
-
- {ok, commit_data(Db1), UpdatedDDocIds}.
-
-update_local_doc_revs(Docs) ->
- lists:foldl(
- fun({Client, Doc}, Acc) ->
- case increment_local_doc_revs(Doc) of
- {ok, #doc{revs = {0, [NewRev]}} = NewDoc} ->
- send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}),
- [NewDoc | Acc];
- {error, Error} ->
- send_result(Client, Doc, {error, Error}),
- Acc
- end
- end,
- [],
- Docs
- ).
-
-increment_local_doc_revs(#doc{deleted = true} = Doc) ->
- {ok, Doc#doc{revs = {0, [0]}}};
-increment_local_doc_revs(#doc{revs = {0, []}} = Doc) ->
- {ok, Doc#doc{revs = {0, [1]}}};
-increment_local_doc_revs(#doc{revs = {0, [RevStr | _]}} = Doc) ->
- try
- PrevRev = binary_to_integer(RevStr),
- {ok, Doc#doc{revs = {0, [PrevRev + 1]}}}
- catch
- error:badarg ->
- {error, <<"Invalid rev format">>}
- end;
-increment_local_doc_revs(#doc{}) ->
- {error, <<"Invalid rev format">>}.
-
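-% For illustration (a sketch; the revs shown are made up): a _local doc
-% carrying rev <<"2">> is bumped to the integer 3, and a deleted one is
-% reset to 0:
-%
-%   {ok, #doc{revs = {0, [3]}}} = increment_local_doc_revs(#doc{revs = {0, [<<"2">>]}}),
-%   {ok, #doc{revs = {0, [0]}}} = increment_local_doc_revs(#doc{deleted = true}).
-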
-max_partition_size() ->
- config:get_integer(
- "couchdb",
- "max_partition_size",
- ?DEFAULT_MAX_PARTITION_SIZE
- ).
-
-partition_size(Db, Partition) ->
- {ok, Info} = couch_db:get_partition_info(Db, Partition),
- Sizes = couch_util:get_value(sizes, Info),
- couch_util:get_value(external, Sizes).
-
-estimate_size(#full_doc_info{} = FDI) ->
- #full_doc_info{rev_tree = RevTree} = FDI,
- Fun = fun
- (_Rev, Value, leaf, SizesAcc) ->
- case Value of
- #doc{} = Doc ->
- ExternalSize = get_meta_body_size(Value#doc.meta),
- {size_info, AttSizeInfo} =
- lists:keyfind(size_info, 1, Doc#doc.meta),
- Leaf = #leaf{
- sizes = #size_info{
- external = ExternalSize
- },
- atts = AttSizeInfo
- },
- add_sizes(leaf, Leaf, SizesAcc);
- #leaf{} ->
- add_sizes(leaf, Value, SizesAcc)
- end;
- (_Rev, _Value, branch, SizesAcc) ->
- SizesAcc
- end,
- {_, FinalES, FinalAtts} = couch_key_tree:fold(Fun, {0, 0, []}, RevTree),
- TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
- FinalES + TotalAttSize.
-
-purge_docs(Db, []) ->
- {ok, Db, []};
-purge_docs(Db, PurgeReqs) ->
- Ids = lists:usort(lists:map(fun({_UUID, Id, _Revs}) -> Id end, PurgeReqs)),
- FDIs = couch_db_engine:open_docs(Db, Ids),
- USeq = couch_db_engine:get_update_seq(Db),
-
- IdFDIs = lists:zip(Ids, FDIs),
- {NewIdFDIs, Replies} = apply_purge_reqs(PurgeReqs, IdFDIs, USeq, []),
-
- Pairs = lists:flatmap(
- fun({DocId, OldFDI}) ->
- {DocId, NewFDI} = lists:keyfind(DocId, 1, NewIdFDIs),
- case {OldFDI, NewFDI} of
- {not_found, not_found} ->
- [];
- {#full_doc_info{} = A, #full_doc_info{} = A} ->
- [];
- {#full_doc_info{}, _} ->
- [{OldFDI, NewFDI}]
- end
- end,
- IdFDIs
- ),
-
- PSeq = couch_db_engine:get_purge_seq(Db),
- {RevPInfos, _} = lists:foldl(
- fun({UUID, DocId, Revs}, {PIAcc, PSeqAcc}) ->
- Info = {PSeqAcc + 1, UUID, DocId, Revs},
- {[Info | PIAcc], PSeqAcc + 1}
- end,
- {[], PSeq},
- PurgeReqs
- ),
- PInfos = lists:reverse(RevPInfos),
-
- {ok, Db1} = couch_db_engine:purge_docs(Db, Pairs, PInfos),
- Db2 = commit_data(Db1),
- ok = couch_server:db_updated(Db2),
- couch_event:notify(Db2#db.name, updated),
- {ok, Db2, Replies}.
-
-apply_purge_reqs([], IdFDIs, _USeq, Replies) ->
- {IdFDIs, lists:reverse(Replies)};
-apply_purge_reqs([Req | RestReqs], IdFDIs, USeq, Replies) ->
- {_UUID, DocId, Revs} = Req,
- {value, {_, FDI0}, RestIdFDIs} = lists:keytake(DocId, 1, IdFDIs),
- {NewFDI, RemovedRevs, NewUSeq} =
- case FDI0 of
- #full_doc_info{rev_tree = Tree} ->
- case couch_key_tree:remove_leafs(Tree, Revs) of
- {_, []} ->
- % No change
- {FDI0, [], USeq};
- {[], Removed} ->
- % Completely purged
- {not_found, Removed, USeq};
- {NewTree, Removed} ->
-                        % It's possible to purge the #leaf{} that contains
-                        % the update_seq where this doc sits in the
-                        % update_seq sequence. Rather than do a bunch of
-                        % complicated checks, we just re-label every #leaf{}
-                        % and reinsert it into the update_seq sequence.
- {NewTree2, NewUpdateSeq} = couch_key_tree:mapfold(
- fun
- (_RevId, Leaf, leaf, SeqAcc) ->
- {Leaf#leaf{seq = SeqAcc + 1}, SeqAcc + 1};
- (_RevId, Value, _Type, SeqAcc) ->
- {Value, SeqAcc}
- end,
- USeq,
- NewTree
- ),
-
- FDI1 = FDI0#full_doc_info{
- update_seq = NewUpdateSeq,
- rev_tree = NewTree2
- },
- {FDI1, Removed, NewUpdateSeq}
- end;
- not_found ->
- % Not found means nothing to change
- {not_found, [], USeq}
- end,
- NewIdFDIs = [{DocId, NewFDI} | RestIdFDIs],
- NewReplies = [{ok, RemovedRevs} | Replies],
- apply_purge_reqs(RestReqs, NewIdFDIs, NewUSeq, NewReplies).
-
-commit_data(Db) ->
- {ok, Db1} = couch_db_engine:commit_data(Db),
- Db1#db{
- committed_update_seq = couch_db_engine:get_update_seq(Db)
- }.
-
-pair_write_info(Old, New) ->
- lists:map(
- fun(FDI) ->
- case lists:keyfind(FDI#full_doc_info.id, #full_doc_info.id, Old) of
- #full_doc_info{} = OldFDI -> {OldFDI, FDI};
- false -> {not_found, FDI}
- end
- end,
- New
- ).
-
-get_meta_body_size(Meta) ->
- {ejson_size, ExternalSize} = lists:keyfind(ejson_size, 1, Meta),
- ExternalSize.
-
-default_security_object(<<"shards/", _/binary>>) ->
- case config:get("couchdb", "default_security", "admin_only") of
- "admin_only" ->
- [
- {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}},
- {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}}
- ];
- Everyone when Everyone == "everyone"; Everyone == "admin_local" ->
- []
- end;
-default_security_object(_DbName) ->
- case config:get("couchdb", "default_security", "admin_only") of
- Admin when Admin == "admin_only"; Admin == "admin_local" ->
- [
- {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}},
- {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}}
- ];
- "everyone" ->
- []
- end.
-
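-% For illustration (a sketch of the config behaviour above): with the default
-% [couchdb] default_security = admin_only, both clustered shard files and
-% node-local databases get a members/admins security object restricted to the
-% _admin role; with admin_local, shard files get an empty security object
-% while node-local databases keep the _admin-only one.
-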
-% These functions rely on using the process dictionary. This is
-% usually frowned upon; however, in this case it is done to avoid
-% changing to a different server state record. Once the PSE (Pluggable
-% Storage Engine) code lands, this should be moved to the #db{} record.
-update_idle_limit_from_config() ->
- Default = integer_to_list(?IDLE_LIMIT_DEFAULT),
- IdleLimit =
- case config:get("couchdb", "idle_check_timeout", Default) of
- "infinity" ->
- infinity;
- Milliseconds ->
- list_to_integer(Milliseconds)
- end,
- put(idle_limit, IdleLimit),
- IdleLimit.
-
-idle_limit() ->
- get(idle_limit).
-
-hibernate_if_no_idle_limit() ->
- case idle_limit() of
- infinity ->
- hibernate;
- Timeout when is_integer(Timeout) ->
- Timeout
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-update_local_doc_revs_test_() ->
- {inparallel, [
- {"Test local doc with valid rev", fun t_good_local_doc/0},
- {"Test local doc with invalid rev", fun t_bad_local_doc/0},
- {"Test deleted local doc", fun t_dead_local_doc/0}
- ]}.
-
-t_good_local_doc() ->
- Doc = #doc{
- id = <<"_local/alice">>,
- revs = {0, [<<"1">>]},
- meta = [{ref, make_ref()}]
- },
- [NewDoc] = update_local_doc_revs([{self(), Doc}]),
- ?assertEqual({0, [2]}, NewDoc#doc.revs),
- {ok, Result} = receive_result(Doc),
- ?assertEqual({ok, {0, <<"2">>}}, Result).
-
-t_bad_local_doc() ->
- lists:foreach(
- fun(BadRevs) ->
- Doc = #doc{
- id = <<"_local/alice">>,
- revs = BadRevs,
- meta = [{ref, make_ref()}]
- },
- NewDocs = update_local_doc_revs([{self(), Doc}]),
- ?assertEqual([], NewDocs),
- {ok, Result} = receive_result(Doc),
- ?assertEqual({error, <<"Invalid rev format">>}, Result)
- end,
- [{0, [<<"a">>]}, {1, [<<"1">>]}]
- ).
-
-t_dead_local_doc() ->
- Doc = #doc{
- id = <<"_local/alice">>,
- revs = {0, [<<"122">>]},
- deleted = true,
- meta = [{ref, make_ref()}]
- },
- [NewDoc] = update_local_doc_revs([{self(), Doc}]),
- ?assertEqual({0, [0]}, NewDoc#doc.revs),
- {ok, Result} = receive_result(Doc),
- ?assertEqual({ok, {0, <<"0">>}}, Result).
-
-receive_result(#doc{meta = Meta}) ->
- Ref = couch_util:get_value(ref, Meta),
- receive
- {result, _, {Ref, Result}} -> {ok, Result}
- end.
-
--endif.
diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
deleted file mode 100644
index 7e90b9b07..000000000
--- a/src/couch/src/couch_debug.erl
+++ /dev/null
@@ -1,1067 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_debug).
-
--export([
- help/0,
- help/1
-]).
-
--export([
- opened_files/0,
- opened_files_by_regexp/1,
- opened_files_contains/1
-]).
-
--export([
- process_name/1,
- get_pid/1,
- link_tree/1,
- link_tree/2,
- mapfold_tree/3,
- map_tree/2,
- fold_tree/3,
- linked_processes_info/2,
- print_linked_processes/1,
- memory_info/1,
- memory_info/2,
- resource_hoggers/2,
- resource_hoggers_snapshot/1,
- analyze_resource_hoggers/2,
- busy/2,
- busy/3,
- restart/1,
- restart_busy/2,
- restart_busy/3,
- restart_busy/4
-]).
-
--export([
- print_table/2
-]).
-
--type throw(_Reason) :: no_return().
-
--type process_name() :: atom().
--type process() :: process_name() | pid().
--type function_name() :: atom().
--type busy_properties() ::
- heap_size
- | memory
- | message_queue_len
- | reductions
- | total_heap_size.
-
--spec help() -> [function_name()].
-
-help() ->
- [
- busy,
- opened_files,
- opened_files_by_regexp,
- opened_files_contains,
- process_name,
- get_pid,
- link_tree,
- mapfold_tree,
- map_tree,
- fold_tree,
- linked_processes_info,
- print_linked_processes,
- memory_info,
- print_table,
- restart,
- restart_busy
- ].
-
--spec help(Function :: function_name()) -> ok.
-%% erlfmt-ignore
-help(busy) ->
- io:format("
- busy(ProcessList, Threshold)
- busy(ProcessList, Threshold, Property)
- --------------
-
- Iterates over the given list of named processes or pids and returns the ones
- with a Property value greater than the provided Threshold.
-
- If Property is not specified, message_queue_len is used by default.
-
- Properties which can be used are listed below
-
- - heap_size
- - memory
- - message_queue_len (default)
- - reductions
- - total_heap_size
-
- ---
- ", []);
-help(opened_files) ->
- io:format("
- opened_files()
- --------------
-
- Returns a list of currently opened files.
- It iterates through `erlang:ports()` and filters out all ports which are not efile.
- It uses `process_info(Pid, dictionary)` to get info about couch_file properties.
- ---
- ", []);
-help(opened_files_by_regexp) ->
- io:format("
- opened_files_by_regexp(FileRegExp)
- ----------------------------------
-
- Returns a list of currently opened files whose names match the provided regular expression.
- It iterates through `erlang:ports()` and filters out all ports which are not efile.
- It uses `process_info(Pid, dictionary)` to get info about couch_file properties.
- ---
- ", []);
-help(opened_files_contains) ->
- io:format("
- opened_files_contains(SubString)
- --------------------------------
-
- Returns a list of currently opened files whose names contain the provided SubString.
- It iterates through `erlang:ports()` and filters out all ports which are not efile.
- It uses `process_info(Pid, dictionary)` to get info about couch_file properties.
- ---
- ", []);
-help(process_name) ->
- io:format("
- process_name(Pid)
- -----------------
-
- Uses heuristics to figure out the process name.
- The heuristic is based on the following information about the process:
- - process_info(Pid, registered_name)
- - '$initial_call' key in process dictionary
- - process_info(Pid, initial_call)
-
- ---
- ", []);
-help(get_pid) ->
- io:format("
- get_pid(PidOrName)
- -----------------
-
- Gets the pid for a process given either a registered name or a pid. When a pid is given, it is returned as is.
- This has the same functionality as whereis/1 except it will not crash when a pid is given.
-
- ---
- ", []);
-help(restart) ->
- io:format("
- restart(ServerName)
- --------------
-
- Restarts the process registered as ServerName and waits for the
- replacement process to start.
- ---
- ", []);
-help(restart_busy) ->
- io:format("
- restart_busy(ProcessList, Threshold)
- restart_busy(ProcessList, Threshold, DelayInMsec)
- --------------
-
- Iterates over the given list of named processes and returns the ones with
- a Property value greater than the provided Threshold.
-
- Then it restarts the identified processes.
-
- If Property is not specified, message_queue_len is used by default.
-
- Properties which can be used are listed below
-
- - heap_size
- - memory
- - message_queue_len (default)
- - reductions
- - total_heap_size
-
- The restarts happen sequentially with a given DelayInMsec between them.
- If DelayInMsec is not provided the default value is one second.
- The function doesn't proceed to the next process until
- the replacement process starts.
- ---
- ", []);
-help(link_tree) ->
- io:format("
- link_tree(Pid)
- --------------
-
- Returns a tree which represents a cluster of linked processes.
- This function receives the initial Pid to start from.
- The function doesn't recurse to pids older than the initial one.
- The pids which are smaller than the initial Pid are still shown in the output.
- This is equivalent to `link_tree(RootPid, []).`
-
- link_tree(Pid, Info)
- --------------------
-
- Returns a tree which represents a cluster of linked processes.
- This function receives the initial Pid to start from.
- The function doesn't recurse to pids older than the initial one.
- The pids which are smaller than the initial Pid are still shown in the output.
- The Info argument is a list of process_info_item() as documented in
- erlang:process_info/2. No attempt is made to reject dangerous items.
- Be warned that passing some of them, such as `messages`, can be
- dangerous in a very busy system.
- ---
- ", []);
-help(mapfold_tree) ->
- io:format("
- mapfold_tree(Tree, Acc, Fun)
- -----------------------
-
- Traverses all nodes of the tree. It is a combination of a map and fold.
- It calls a user provided callback for every node of the tree.
- `Fun(Key, Value, Pos, Acc) -> {NewValue, NewAcc}`.
- Where:
- - Key of the node (usually the Pid of a process)
- - Value of the node (usually information collected by link_tree)
- - Pos - depth from the root of the tree
- - Acc - user's accumulator
-
- ---
- ", []);
-help(map_tree) ->
- io:format("
- map_tree(Tree, Fun)
- -----------------------
-
- Traverses all nodes of the tree in order to modify them.
- It calls a user provided callback
- `Fun(Key, Value, Pos) -> NewValue`
- Where:
- - Key of the node (usually the Pid of a process)
- - Value of the node (usually information collected by link_tree)
- - Pos - depth from the root of the tree
-
- ---
- ", []);
-help(fold_tree) ->
- io:format("
- fold_tree(Tree, Acc, Fun)
- Traverses all nodes of the tree in order to collect some aggregated information
- about the tree. It calls a user provided callback
- `Fun(Key, Value, Pos, Acc) -> NewAcc`
- Where:
- - Key of the node (usually the Pid of a process)
- - Value of the node (usually information collected by link_tree)
- - Pos - depth from the root of the tree
- - Acc - user's accumulator
-
- ---
- ", []);
-help(linked_processes_info) ->
- io:format("
- linked_processes_info(Pid, Info)
- --------------------------------
-
- Convenience function which reduces the amount of typing compared to direct
- use of link_tree.
- - Pid: initial Pid to start from
- - Info: a list of process_info_item() as documented
- in erlang:process_info/2.
-
- ---
- ", []);
-help(print_linked_processes) ->
- io:format("
- - print_linked_processes(Pid)
- - print_linked_processes(RegisteredName)
- - print_linked_processes(couch_index_server)
-
- ---------------------------
-
- Prints the cluster of linked processes. This function receives the
- initial Pid to start from. The function doesn't recurse to pids
- older than the initial one. The output would look similar to:
- ```
-couch_debug:print_linked_processes(whereis(couch_index_server)).
-name | reductions | message_queue_len | memory
-couch_index_server[<0.288.0>] | 478240 | 0 | 109696
- couch_index:init/1[<0.3520.22>] | 4899 | 0 | 109456
- couch_file:init/1[<0.886.22>] | 11973 | 0 | 67984
- couch_index:init/1[<0.3520.22>] | 4899 | 0 | 109456
- ```
-
- ---
- ", []);
-help(memory_info) ->
- io:format("
- - memory_info(ProcessList)
- - memory_info(ProcessList, InfoKeys)
- - memory_info(Pid, InfoKeys)
- --------------------------------
-
- Obtains the values for a set of optional InfoKeys for each process in ProcessList.
- - ProcessList: List of processes
- - InfoKeys: List of desired keys to obtain values for. The supported keys are
- [binary, dictionary, heap_size, links, memory, message_queue_len, monitored_by,
- monitors, stack_size, total_heap_size]
- - Pid: Initial Pid to start from
-
- The output is a list containing tuples of the form {Pid, ProcessName, #{InfoKey => InfoVal}}
- for each process in ProcessList.
- ", []);
-help(print_table) ->
- io:format("
- print_table(Rows, TableSpec)
- --------------------------------
-
- Prints a table of Rows according to the TableSpec.
- - Rows: List of {Id, Props} to be printed according to the TableSpec
- - TableSpec: List of either {Value} or {Width, Align, Value}
- where Align is either left/centre/right.
-
- ---
- ", []);
-help(print_tree) ->
- io:format("
- print_tree(Tree, TableSpec)
- --------------------------------
-
- Prints a tree according to the TableSpec.
- - Tree: Tree to be printed according to the TableSpec
- - TableSpec: List of either {Value} or {Width, Align, Value}
- where Align is either left/centre/right.
-
- ---
- ", []);
-help(resource_hoggers) ->
- io:format("
- resource_hoggers(MemoryInfo, InfoKey)
- --------------------------------
-
- Prints the top processes hogging resources along with the value associated with InfoKey.
- - MemoryInfo: Data map containing values for a set of InfoKeys
- (same structure returned by memory_info)
- - InfoKey: Desired key to obtain value for. The supported keys are
- binary, dictionary, heap_size, links, memory, message_queue_len, monitored_by,
- monitors, stack_size, and total_heap_size
-
- ---
- ", []);
-help(resource_hoggers_snapshot) ->
- io:format("
- resource_hoggers_snapshot(MemoryInfo)
- resource_hoggers_snapshot(PreviousSnapshot)
- --------------------------------
-
- Prints a snapshot of the top processes hogging resources.
- - MemoryInfo: Data map containing values for a set of InfoKeys
- (same structure returned by memory_info)
- - PreviousSnapshot: Previous snapshot of resource hoggers
-
- An example workflow is to call `memory_info(Pids)` and pass it as a first snapshot into
- `resource_hoggers_snapshot/1`. Then, periodically call `resource_hoggers_snapshot/1` and pass in
- the previous snapshot.
-
- Here is an example use case:
- ```
- S0 = couch_debug:memory_info(erlang:processes()).
- Summary = lists:foldl(fun(I, S) ->
- timer:sleep(1000),
- io:format(\"Snapshot ~~p/10~~n\", [I]),
- couch_debug:resource_hoggers_snapshot(S)
- end, S0, lists:seq(1, 10)).
- couch_debug:analyze_resource_hoggers(Summary, 10).
- ```
-
- ---
- ", []);
-help(analyze_resource_hoggers) ->
- io:format("
- analyze_resource_hoggers(Snapshot, TopN)
- --------------------------------
-
- Analyzes the TopN processes hogging resources along with the values associated with InfoKeys.
- - Snapshot: Snapshot of resource hoggers
- - TopN: Number of top processes to include in result
-
- An example workflow is to call `resource_hoggers_snapshot(memory_info(Pids))` and pass this to `analyze_resource_hoggers/2`
- along with the number of top processes to include in result, TopN. See `couch_debug:help(resource_hoggers_snapshot)` for an
- example and more info.
-
- ---
- ", []);
-help(Unknown) ->
- io:format("Unknown function: `~p`. Please try one of the following:~n", [Unknown]),
- [io:format(" - ~s~n", [Function]) || Function <- help()],
- io:format(" ---~n", []),
- ok.
-
--spec busy(ProcessList :: [process()], Threshold :: pos_integer()) ->
- [Name :: process_name()].
-
-busy(ProcessList, Threshold) when Threshold > 0 ->
- busy(ProcessList, Threshold, message_queue_len).
-
--spec busy(
- [process()], Threshold :: pos_integer(), Property :: busy_properties()
-) ->
- [Name :: process_name()].
-
-busy(ProcessList, Threshold, Property) when Threshold > 0 ->
- lists:filter(
- fun(Process) ->
- case (catch process_info(get_pid(Process), Property)) of
- {Property, Value} when is_integer(Value) andalso Value > Threshold ->
- true;
- _ ->
- false
- end
- end,
- ProcessList
- ).
-
--spec opened_files() ->
- [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
-
-opened_files() ->
- Info = [
- couch_file_port_info(Port)
- || Port <- erlang:ports(),
- {name, "efile"} =:= erlang:port_info(Port, name)
- ],
- [I || I <- Info, is_tuple(I)].
-
-couch_file_port_info(Port) ->
- {connected, Pid} = erlang:port_info(Port, connected),
- case couch_file:process_info(Pid) of
- {Fd, FilePath} ->
- {Port, Pid, Fd, FilePath};
- undefined ->
- undefined
- end.
-
--spec opened_files_by_regexp(FileRegExp :: iodata()) ->
- [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
-opened_files_by_regexp(FileRegExp) ->
- {ok, RegExp} = re:compile(FileRegExp),
- lists:filter(
- fun({_Port, _Pid, _Fd, Path}) ->
- re:run(Path, RegExp) =/= nomatch
- end,
- couch_debug:opened_files()
- ).
-
--spec opened_files_contains(FileNameFragment :: iodata()) ->
- [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
-opened_files_contains(FileNameFragment) ->
- lists:filter(
- fun({_Port, _Pid, _Fd, Path}) ->
- string:str(Path, FileNameFragment) > 0
- end,
- couch_debug:opened_files()
- ).
-
-process_name(Pid) when is_pid(Pid) ->
- Info = process_info(Pid, [registered_name, dictionary, initial_call]),
- case Info of
- undefined ->
- iolist_to_list(io_lib:format("[~p]", [Pid]));
- [{registered_name, Name} | _] when Name =/= [] ->
- iolist_to_list(io_lib:format("~s[~p]", [Name, Pid]));
- [_, {dictionary, Dict}, {initial_call, MFA}] ->
- {M, F, A} = proplists:get_value('$initial_call', Dict, MFA),
- iolist_to_list(io_lib:format("~p:~p/~p[~p]", [M, F, A, Pid]))
- end;
-process_name(Else) ->
- iolist_to_list(io_lib:format("~p", [Else])).
-
-get_pid(Process) when is_pid(Process) ->
- Process;
-get_pid(Process) ->
- whereis(Process).
-
-iolist_to_list(List) ->
- binary_to_list(iolist_to_binary(List)).
-
-link_tree(RootPid) ->
- link_tree(RootPid, []).
-
-link_tree(RootPid, Info) ->
- link_tree(RootPid, Info, fun(_, Props) -> Props end).
-
-link_tree(RootPid, Info, Fun) ->
- {_, Result} = link_tree(
- RootPid, [links | Info], gb_trees:empty(), 0, [RootPid], Fun
- ),
- Result.
-
-link_tree(RootPid, Info, Visited0, Pos, [Pid | Rest], Fun) ->
- case gb_trees:lookup(Pid, Visited0) of
- {value, Props} ->
- {Visited0, [{Pos, {Pid, Fun(Pid, Props), []}}]};
- none when RootPid =< Pid ->
- Props = info(Pid, Info),
- Visited1 = gb_trees:insert(Pid, Props, Visited0),
- {links, Children} = lists:keyfind(links, 1, Props),
- {Visited2, NewTree} = link_tree(
- RootPid, Info, Visited1, Pos + 1, Children, Fun
- ),
- {Visited3, Result} = link_tree(
- RootPid, Info, Visited2, Pos, Rest, Fun
- ),
- {Visited3, [{Pos, {Pid, Fun(Pid, Props), NewTree}}] ++ Result};
- none ->
- Props = info(Pid, Info),
- Visited1 = gb_trees:insert(Pid, Props, Visited0),
- {Visited2, Result} = link_tree(
- RootPid, Info, Visited1, Pos, Rest, Fun
- ),
- {Visited2, [{Pos, {Pid, Fun(Pid, Props), []}}] ++ Result}
- end;
-link_tree(_RootPid, _Info, Visited, _Pos, [], _Fun) ->
- {Visited, []}.
-
-info(Pid, Info) when is_pid(Pid) ->
- ValidProps = [
- backtrace,
- binary,
- catchlevel,
- current_function,
- current_location,
- current_stacktrace,
- dictionary,
- error_handler,
- garbage_collection,
- garbage_collection_info,
- group_leader,
- heap_size,
- initial_call,
- links,
- last_calls,
- memory,
- message_queue_len,
- messages,
- min_heap_size,
- min_bin_vheap_size,
- monitored_by,
- monitors,
- message_queue_data,
- priority,
- reductions,
- registered_name,
- sequential_trace_token,
- stack_size,
- status,
- suspending,
- total_heap_size,
- trace,
- trap_exit
- ],
- Validated = lists:filter(fun(P) -> lists:member(P, ValidProps) end, Info),
- process_info(Pid, lists:usort(Validated));
-info(Port, Info) when is_port(Port) ->
- ValidProps = [
- registered_name,
- id,
- connected,
- links,
- name,
- input,
- output,
- os_pid
- ],
- Validated = lists:filter(fun(P) -> lists:member(P, ValidProps) end, Info),
- port_info(Port, lists:usort(Validated)).
-
-port_info(Port, Items) ->
- lists:foldl(
- fun(Item, Acc) ->
- case (catch erlang:port_info(Port, Item)) of
- {Item, _Value} = Info -> [Info | Acc];
- _Else -> Acc
- end
- end,
- [],
- Items
- ).
-
-mapfold_tree([], Acc, _Fun) ->
- {[], Acc};
-mapfold_tree([{Pos, {Key, Value0, SubTree0}} | Rest0], Acc0, Fun) ->
- {Value1, Acc1} = Fun(Key, Value0, Pos, Acc0),
- {SubTree1, Acc2} = mapfold_tree(SubTree0, Acc1, Fun),
- {Rest1, Acc3} = mapfold_tree(Rest0, Acc2, Fun),
- {[{Pos, {Key, Value1, SubTree1}} | Rest1], Acc3}.
-
-map_tree(Tree, Fun) ->
- {Result, _} = mapfold_tree(Tree, nil, fun(Key, Value, Pos, Acc) ->
- {Fun(Key, Value, Pos), Acc}
- end),
- Result.
-
-fold_tree(Tree, Acc, Fun) ->
- {_, Result} = mapfold_tree(Tree, Acc, fun(Key, Value, Pos, AccIn) ->
- {Value, Fun(Key, Value, Pos, AccIn)}
- end),
- Result.
-
-linked_processes_info(Pid, Info) ->
- link_tree(Pid, Info, fun(P, Props) -> {process_name(P), Props} end).
-
-print_linked_processes(couch_index_server) ->
- print_couch_index_server_processes();
-print_linked_processes(Name) when is_atom(Name) ->
- case whereis(Name) of
- undefined -> {error, {unknown, Name}};
- Pid -> print_linked_processes(Pid)
- end;
-print_linked_processes(Pid) when is_pid(Pid) ->
- Info = [reductions, message_queue_len, memory],
- TableSpec = [
- {50, left, name},
- {12, centre, reductions},
- {19, centre, message_queue_len},
- {10, centre, memory}
- ],
- Tree = linked_processes_info(Pid, Info),
- print_tree(Tree, TableSpec).
-
-memory_info(ProcessList) ->
- InfoKeys = [
- binary,
- dictionary,
- heap_size,
- links,
- memory,
- message_queue_len,
- monitored_by,
- monitors,
- stack_size,
- total_heap_size
- ],
- memory_info(ProcessList, InfoKeys).
-
-memory_info(ProcessList, InfoKeys) when is_list(ProcessList) ->
- lists:map(
- fun(Process) ->
- memory_info(Process, InfoKeys)
- end,
- ProcessList
- );
-memory_info(Pid, InfoKeys) ->
- case process_info(Pid, InfoKeys) of
- undefined ->
- {Pid, undefined, undefined};
- Values ->
- DataMap = maps:from_list(
- lists:map(
- fun({K, _} = I) ->
- {K, info_size(I)}
- end,
- Values
- )
- ),
- {Pid, process_name(Pid), DataMap}
- end.
-
-info_size(InfoKV) ->
- case InfoKV of
- {monitors, L} -> length(L);
- {monitored_by, L} -> length(L);
- {links, L} -> length(L);
- {dictionary, L} -> length(L);
- {binary, BinInfos} -> lists:sum([S || {_, S, _} <- BinInfos]);
- {_, V} -> V
- end.
-resource_hoggers(MemoryInfo, InfoKey) ->
- KeyFun = fun
- ({_Pid, _Id, undefined}) -> undefined;
- ({_Pid, Id, DataMap}) -> {Id, [{InfoKey, maps:get(InfoKey, DataMap)}]}
- end,
- resource_hoggers(MemoryInfo, InfoKey, KeyFun).
-
-resource_hoggers(MemoryInfo, InfoKey, KeyFun) ->
- HoggersData = resource_hoggers_data(MemoryInfo, InfoKey, KeyFun),
- TableSpec = [
- {50, centre, id},
- {20, centre, InfoKey}
- ],
- print_table(HoggersData, TableSpec).
-
-resource_hoggers_data(MemoryInfo, InfoKey, KeyFun) when is_atom(InfoKey) ->
- resource_hoggers_data(MemoryInfo, InfoKey, KeyFun, 20).
-
-resource_hoggers_data(MemoryInfo, InfoKey, KeyFun, N) when is_atom(InfoKey) and is_integer(N) ->
- SortedTuples = resource_hoggers_data(MemoryInfo, InfoKey, KeyFun, undefined),
- {TopN, _} = lists:split(N, SortedTuples),
- TopN;
-resource_hoggers_data(MemoryInfo, InfoKey, KeyFun, undefined) when is_atom(InfoKey) ->
- Tuples = lists:filtermap(
- fun(Tuple) ->
- case KeyFun(Tuple) of
- undefined ->
- false;
- Value ->
- {true, Value}
- end
- end,
- MemoryInfo
- ),
- lists:reverse(
- lists:sort(
- fun({_, A}, {_, B}) ->
- lists:keyfind(InfoKey, 1, A) < lists:keyfind(InfoKey, 1, B)
- end,
- Tuples
- )
- ).
-
-resource_hoggers_snapshot({N, MemoryInfo, InfoKeys} = _Snapshot) ->
- Data = lists:filtermap(
- fun({Pid, Id, Data}) ->
- case memory_info(Pid, InfoKeys) of
- {Pid, undefined, undefined} ->
- false;
- {_, _, DataMap} ->
- {true, {Pid, Id, update_delta(Data, DataMap)}}
- end
- end,
- MemoryInfo
- ),
- {N + 1, Data, InfoKeys};
-resource_hoggers_snapshot([]) ->
- [];
-resource_hoggers_snapshot([{_Pid, _Id, Data} | _] = MemoryInfo) ->
- resource_hoggers_snapshot({0, MemoryInfo, maps:keys(Data)}).
-
-update_delta({_, InitialDataMap}, DataMap) ->
- update_delta(InitialDataMap, DataMap);
-update_delta(InitialDataMap, DataMap) ->
- Delta = maps:fold(
- fun(Key, Value, AccIn) ->
- maps:put(Key, maps:get(Key, DataMap, Value) - Value, AccIn)
- end,
- maps:new(),
- InitialDataMap
- ),
- {Delta, InitialDataMap}.
-
-analyze_resource_hoggers({N, Data, InfoKeys}, TopN) ->
- io:format("Number of snapshots: ~p~n", [N]),
- lists:map(
- fun(InfoKey) ->
- KeyFun = fun
- ({_Pid, _Id, undefined}) ->
- undefined;
- ({_Pid, Id, {Delta, DataMap}}) ->
- {Id, [
- {InfoKey, maps:get(InfoKey, DataMap)},
- {delta, maps:get(InfoKey, Delta)}
- ]}
- end,
- io:format("Top ~p by change in ~p~n", [TopN, InfoKey]),
- HoggersData = resource_hoggers_data(Data, delta, KeyFun, TopN),
- TableSpec = [
- {50, centre, id},
- {20, right, InfoKey},
- {20, right, delta}
- ],
- print_table(HoggersData, TableSpec)
- end,
- InfoKeys
- ).
-
-id("couch_file:init" ++ _, Pid, _Props) ->
- case couch_file:process_info(Pid) of
- {{file_descriptor, prim_file, {Port, Fd}}, FilePath} ->
- term2str([
- term2str(Fd),
- ":",
- term2str(Port),
- ":",
- shorten_path(FilePath)
- ]);
- undefined ->
- ""
- end;
-id(_IdStr, _Pid, _Props) ->
- "".
-
-print_couch_index_server_processes() ->
- Info = [reductions, message_queue_len, memory],
- Trees = lists:map(
- fun(Name) ->
- link_tree(whereis(Name), Info, fun(P, Props) ->
- IdStr = process_name(P),
- {IdStr, [{id, id(IdStr, P, Props)} | Props]}
- end)
- end,
- couch_index_server:names()
- ),
- TableSpec = [
- {50, left, name},
- {12, centre, reductions},
- {19, centre, message_queue_len},
- {14, centre, memory},
- {id}
- ],
- print_trees(Trees, TableSpec).
-
-shorten_path(Path) ->
- ViewDir = list_to_binary(config:get("couchdb", "view_index_dir")),
- DatabaseDir = list_to_binary(config:get("couchdb", "database_dir")),
- File = list_to_binary(Path),
- Len = max(
- binary:longest_common_prefix([File, DatabaseDir]),
- binary:longest_common_prefix([File, ViewDir])
- ),
- <<_:Len/binary, Rest/binary>> = File,
- binary_to_list(Rest).
-
--spec restart(Name :: process_name()) ->
- Pid :: pid() | timeout.
-
-restart(Name) ->
- Res = test_util:with_process_restart(Name, fun() ->
- exit(whereis(Name), kill)
- end),
- case Res of
- {Pid, true} ->
- Pid;
- timeout ->
- timeout
- end.
-
--spec restart_busy(ProcessList :: [process_name()], Threshold :: pos_integer()) ->
- throw({timeout, Name :: process_name()}).
-
-restart_busy(ProcessList, Threshold) ->
- restart_busy(ProcessList, Threshold, 1000).
-
--spec restart_busy(
- ProcessList :: [process_name()], Threshold :: pos_integer(), DelayInMsec :: pos_integer()
-) ->
- throw({timeout, Name :: process_name()}) | ok.
-
-restart_busy(ProcessList, Threshold, DelayInMsec) ->
- restart_busy(ProcessList, Threshold, DelayInMsec, message_queue_len).
-
--spec restart_busy(
- ProcessList :: [process_name()],
- Threshold :: pos_integer(),
- DelayInMsec :: pos_integer(),
- Property :: busy_properties()
-) ->
- throw({timeout, Name :: process_name()}) | ok.
-
-restart_busy(ProcessList, Threshold, DelayInMsec, Property) when
- Threshold > 0 andalso DelayInMsec > 0
-->
- lists:foreach(
- fun(Name) ->
- case restart(Name) of
- timeout ->
- throw({timeout, Name});
- _ ->
- timer:sleep(DelayInMsec)
- end
- end,
- busy(ProcessList, Threshold, Property)
- ).
-
-%% Pretty print functions
-
-%% Limitations:
-%% - The first column has to be specified as {Width, left, Something}
-%% The TableSpec is a list of either:
-%% - {Value}
-%% - {Width, Align, Value}
-%% Align is one of the following:
-%% - left
-%% - centre
-%% - right
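-%% Editor's note: an illustrative call (hypothetical Rows/Spec, not from the
-%% original source):
-%%   Rows = [{couch_server, [{memory, 372144}]}, {couch_log, [{memory, 87024}]}],
-%%   Spec = [{30, left, name}, {12, right, memory}],
-%%   couch_debug:print_table(Rows, Spec).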
-print_table(Rows, TableSpec) ->
- io:format("~s~n", [format(TableSpec)]),
- lists:foreach(
- fun({Id, Props}) ->
- io:format("~s~n", [table_row(Id, 2, Props, TableSpec)])
- end,
- Rows
- ),
- ok.
-
-print_tree(Tree, TableSpec) ->
- io:format("~s~n", [format(TableSpec)]),
- map_tree(Tree, fun(_, {Id, Props}, Pos) ->
- io:format("~s~n", [table_row(Id, Pos * 2, Props, TableSpec)])
- end),
- ok.
-
-print_trees(Trees, TableSpec) ->
- io:format("~s~n", [format(TableSpec)]),
- io:format("~s~n", [separator(TableSpec)]),
- lists:foreach(
- fun(Tree) ->
- map_tree(Tree, fun(_, {Id, Props}, Pos) ->
- io:format("~s~n", [table_row(Id, Pos * 2, Props, TableSpec)])
- end),
- io:format("~s~n", [space(TableSpec)])
- end,
- Trees
- ),
- ok.
-
-format(Spec) ->
- Fields = [format_value(Format) || Format <- Spec],
- [$| | string:join(Fields, "|")].
-
-fill(Spec, [Char]) ->
- fill(Spec, Char);
-fill(Spec, Char) when is_integer(Char) ->
- Fields = [format_value(Format) || Format <- Spec],
- Sizes = [length(F) || F <- Fields],
- [$| | [string:join([string:chars(Char, F) || F <- Sizes], "|")]].
-
-space(Spec) ->
- fill(Spec, " ").
-
-separator(Spec) ->
- fill(Spec, "-").
-
-format_value({Value}) -> term2str(Value);
-format_value({Width, Align, Value}) -> string:Align(term2str(Value), Width).
-
-bind_value({K}, Props) when is_list(Props) ->
- {element(2, lists:keyfind(K, 1, Props))};
-bind_value({Width, Align, K}, Props) when is_list(Props) ->
- {Width, Align, element(2, lists:keyfind(K, 1, Props))}.
-
-term2str(Atom) when is_atom(Atom) -> atom_to_list(Atom);
-term2str(Binary) when is_binary(Binary) -> binary_to_list(Binary);
-term2str(Integer) when is_integer(Integer) -> integer_to_list(Integer);
-term2str(Float) when is_float(Float) -> float_to_list(Float);
-term2str(String) when is_list(String) -> lists:flatten(String);
-term2str(Term) -> iolist_to_list(io_lib:format("~p", [Term])).
-
-table_row(Key, Indent, Props, [{KeyWidth, Align, _} | Spec]) ->
- Values = [bind_value(Format, Props) || Format <- Spec],
- KeyStr = string:Align(term2str(Key), KeyWidth - Indent),
- [$|, string:copies(" ", Indent), KeyStr | format(Values)].
-
--ifdef(TEST).
--include_lib("couch/include/couch_eunit.hrl").
-
-random_processes(Depth) ->
- random_processes([], Depth).
-
-random_processes(Pids, 0) ->
- lists:usort(Pids);
-random_processes(Acc, Depth) ->
- Caller = self(),
- Ref = make_ref(),
- Pid =
- case oneof([spawn_link, open_port]) of
- spawn_monitor ->
- {P, _} = spawn_monitor(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive
- looper -> ok
- end
- end),
- P;
- spawn ->
- spawn(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive
- looper -> ok
- end
- end);
- spawn_link ->
- spawn_link(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive
- looper -> ok
- end
- end);
- open_port ->
- spawn_link(fun() ->
- Port = erlang:open_port({spawn, "sleep 10"}, []),
- true = erlang:link(Port),
- Caller ! {Ref, random_processes(Depth - 1)},
- receive
- looper -> ok
- end
- end)
- end,
- receive
- {Ref, Pids} -> random_processes([Pid | Pids] ++ Acc, Depth - 1)
- end.
-
-oneof(Options) ->
- lists:nth(couch_rand:uniform(length(Options)), Options).
-
-tree() ->
- [InitialPid | _] = Processes = random_processes(5),
- {InitialPid, Processes, link_tree(InitialPid)}.
-
-setup() ->
- tree().
-
-teardown({_InitialPid, Processes, _Tree}) ->
- [exit(Pid, normal) || Pid <- Processes].
-
-link_tree_test_() ->
- {
- "link_tree tests",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_have_same_shape/1,
- fun should_include_extra_info/1
- ]
- }
- }.
-
-should_have_same_shape({InitialPid, _Processes, Tree}) ->
- ?_test(begin
- InfoTree = linked_processes_info(InitialPid, []),
- ?assert(is_equal(InfoTree, Tree)),
- ok
- end).
-
-should_include_extra_info({InitialPid, _Processes, _Tree}) ->
- Info = [reductions, message_queue_len, memory],
- ?_test(begin
- InfoTree = linked_processes_info(InitialPid, Info),
- map_tree(InfoTree, fun(Key, {_Id, Props}, _Pos) ->
- case Key of
- Pid when is_pid(Pid) ->
- ?assert(lists:keymember(reductions, 1, Props)),
- ?assert(lists:keymember(message_queue_len, 1, Props)),
- ?assert(lists:keymember(memory, 1, Props));
- _Port ->
- ok
- end,
- Props
- end),
- ok
- end).
-
-is_equal([], []) ->
- true;
-is_equal([{Pos, {Pid, _, A}} | RestA], [{Pos, {Pid, _, B}} | RestB]) ->
- case is_equal(RestA, RestB) of
- false -> false;
- true -> is_equal(A, B)
- end.
-
--endif.
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
deleted file mode 100644
index 5d44e456c..000000000
--- a/src/couch/src/couch_doc.erl
+++ /dev/null
@@ -1,588 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_doc).
-
--export([to_doc_info/1, to_doc_info_path/1, parse_rev/1, parse_revs/1, rev_to_str/1, revs_to_strs/1]).
--export([from_json_obj/1, from_json_obj_validate/1]).
--export([from_json_obj/2, from_json_obj_validate/2]).
--export([to_json_obj/2, has_stubs/1, merge_stubs/2]).
--export([validate_docid/1, validate_docid/2, get_validate_doc_fun/1]).
--export([doc_from_multi_part_stream/2, doc_from_multi_part_stream/3]).
--export([doc_from_multi_part_stream/4]).
--export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
--export([restart_open_doc_revs/3]).
--export([to_path/1]).
-
--export([with_ejson_body/1]).
--export([is_deleted/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
--spec to_path(#doc{}) -> path().
-to_path(#doc{revs = {Start, RevIds}} = Doc) ->
- [Branch] = to_branch(Doc, lists:reverse(RevIds)),
- {Start - length(RevIds) + 1, Branch}.
-
--spec to_branch(#doc{}, [RevId :: binary()]) -> [branch()].
-to_branch(Doc, [RevId]) ->
- [{RevId, Doc, []}];
-to_branch(Doc, [RevId | Rest]) ->
- [{RevId, ?REV_MISSING, to_branch(Doc, Rest)}].
-
-% helpers used by to_json_obj
-to_json_rev(0, []) ->
- [];
-to_json_rev(Start, [FirstRevId | _]) ->
- [{<<"_rev">>, ?l2b([integer_to_list(Start), "-", revid_to_str(FirstRevId)])}].
-
-to_json_body(true, {Body}) ->
- Body ++ [{<<"_deleted">>, true}];
-to_json_body(false, {Body}) ->
- Body.
-
-to_json_revisions(Options, Start, RevIds0) ->
- RevIds =
- case proplists:get_value(revs, Options) of
- true ->
- RevIds0;
- Num when is_integer(Num), Num > 0 ->
- lists:sublist(RevIds0, Num);
- _ ->
- []
- end,
- if
- RevIds == [] ->
- [];
- true ->
- [
- {<<"_revisions">>,
- {[
- {<<"start">>, Start},
- {<<"ids">>, [revid_to_str(R) || R <- RevIds]}
- ]}}
- ]
- end.
-
-revid_to_str(RevId) when size(RevId) =:= 16 ->
- ?l2b(couch_util:to_hex(RevId));
-revid_to_str(RevId) ->
- RevId.
-
-rev_to_str({Pos, RevId}) ->
- ?l2b([integer_to_list(Pos), "-", revid_to_str(RevId)]).
-
-revs_to_strs([]) ->
- [];
-revs_to_strs([{Pos, RevId} | Rest]) ->
- [rev_to_str({Pos, RevId}) | revs_to_strs(Rest)].
-
-to_json_meta(Meta) ->
- lists:flatmap(
- fun
- ({revs_info, Start, RevsInfo}) ->
- {JsonRevsInfo, _Pos} = lists:mapfoldl(
- fun({RevId, Status}, PosAcc) ->
- JsonObj =
- {[
- {<<"rev">>, rev_to_str({PosAcc, RevId})},
- {<<"status">>, ?l2b(atom_to_list(Status))}
- ]},
- {JsonObj, PosAcc - 1}
- end,
- Start,
- RevsInfo
- ),
- [{<<"_revs_info">>, JsonRevsInfo}];
- ({local_seq, Seq}) ->
- [{<<"_local_seq">>, Seq}];
- ({conflicts, Conflicts}) ->
- [{<<"_conflicts">>, revs_to_strs(Conflicts)}];
- ({deleted_conflicts, DConflicts}) ->
- [{<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}];
- (_) ->
- []
- end,
- Meta
- ).
-
-to_json_attachments(Attachments, Options) ->
- to_json_attachments(
- Attachments,
- lists:member(attachments, Options),
- lists:member(follows, Options),
- lists:member(att_encoding_info, Options)
- ).
-
-to_json_attachments([], _OutputData, _Follows, _ShowEnc) ->
- [];
-to_json_attachments(Atts, OutputData, Follows, ShowEnc) ->
- Props = [couch_att:to_json(A, OutputData, Follows, ShowEnc) || A <- Atts],
- [{<<"_attachments">>, {Props}}].
-
-to_json_obj(Doc, Options) ->
- doc_to_json_obj(with_ejson_body(Doc), Options).
-
-doc_to_json_obj(
- #doc{
- id = Id,
- deleted = Del,
- body = Body,
- revs = {Start, RevIds},
- meta = Meta
- } = Doc,
- Options
-) ->
- {
- [{<<"_id">>, Id}] ++
- to_json_rev(Start, RevIds) ++
- to_json_body(Del, Body) ++
- to_json_revisions(Options, Start, RevIds) ++
- to_json_meta(Meta) ++
- to_json_attachments(Doc#doc.atts, Options)
- }.
-
-from_json_obj_validate(EJson) ->
- from_json_obj_validate(EJson, undefined).
-
-from_json_obj_validate(EJson, DbName) ->
- MaxSize = config:get_integer("couchdb", "max_document_size", 8000000),
- Doc = from_json_obj(EJson, DbName),
- case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of
- true ->
- validate_attachment_sizes(Doc#doc.atts),
- Doc;
- false ->
- throw({request_entity_too_large, Doc#doc.id})
- end.
-
-validate_attachment_sizes([]) ->
- ok;
-validate_attachment_sizes(Atts) ->
- MaxAttSize = couch_att:max_attachment_size(),
- lists:foreach(
- fun(Att) ->
- AttName = couch_att:fetch(name, Att),
- AttSize = couch_att:fetch(att_len, Att),
- couch_att:validate_attachment_size(AttName, AttSize, MaxAttSize)
- end,
- Atts
- ).
-
-from_json_obj({Props}) ->
- from_json_obj({Props}, undefined).
-
-from_json_obj({Props}, DbName) ->
- transfer_fields(Props, #doc{body = []}, DbName);
-from_json_obj(_Other, _) ->
- throw({bad_request, "Document must be a JSON object"}).
-
-parse_revid(RevId) when size(RevId) =:= 32 ->
- RevInt = erlang:list_to_integer(?b2l(RevId), 16),
- <<RevInt:128>>;
-parse_revid(RevId) when length(RevId) =:= 32 ->
- RevInt = erlang:list_to_integer(RevId, 16),
- <<RevInt:128>>;
-parse_revid(RevId) when is_binary(RevId) ->
- RevId;
-parse_revid(RevId) when is_list(RevId) ->
- ?l2b(RevId).
-
-parse_rev(Rev) when is_binary(Rev) ->
- parse_rev(?b2l(Rev));
-parse_rev(Rev) when is_list(Rev) ->
- SplitRev = lists:splitwith(
- fun
- ($-) -> false;
- (_) -> true
- end,
- Rev
- ),
- case SplitRev of
- {Pos, [$- | RevId]} ->
- try
- IntPos = list_to_integer(Pos),
- {IntPos, parse_revid(RevId)}
- catch
- error:badarg -> throw({bad_request, <<"Invalid rev format">>})
- end;
- _Else ->
- throw({bad_request, <<"Invalid rev format">>})
- end;
-parse_rev(_BadRev) ->
- throw({bad_request, <<"Invalid rev format">>}).
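-% Editor's note, illustrative examples (not part of the original module):
-%   parse_rev(<<"1-abc">>) =:= {1, <<"abc">>},
-%   parse_rev(<<"bogus">>) throws {bad_request, <<"Invalid rev format">>}.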
-
-parse_revs([]) ->
- [];
-parse_revs([Rev | Rest]) ->
- [parse_rev(Rev) | parse_revs(Rest)];
-parse_revs(_) ->
- throw({bad_request, "Invalid list of revisions"}).
-
-validate_docid(DocId, DbName) ->
- case
- DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso
- couch_db:is_system_db_name(DocId)
- of
- true ->
- ok;
- false ->
- validate_docid(DocId)
- end.
-
-validate_docid(<<"">>) ->
- throw({illegal_docid, <<"Document id must not be empty">>});
-validate_docid(<<"_design/">>) ->
- throw({illegal_docid, <<"Illegal document id `_design/`">>});
-validate_docid(<<"_local/">>) ->
- throw({illegal_docid, <<"Illegal document id `_local/`">>});
-validate_docid(Id) when is_binary(Id) ->
- MaxLen =
- case config:get("couchdb", "max_document_id_length", "infinity") of
- "infinity" -> infinity;
- IntegerVal -> list_to_integer(IntegerVal)
- end,
- case MaxLen > 0 andalso byte_size(Id) > MaxLen of
- true -> throw({illegal_docid, <<"Document id is too long">>});
- false -> ok
- end,
- case couch_util:validate_utf8(Id) of
- false -> throw({illegal_docid, <<"Document id must be valid UTF-8">>});
- true -> ok
- end,
- case Id of
- <<"_design/", _/binary>> ->
- ok;
- <<"_local/", _/binary>> ->
- ok;
- <<"_", _/binary>> ->
- case couch_db_plugin:validate_docid(Id) of
- true ->
- ok;
- false ->
- throw(
- {illegal_docid, <<"Only reserved document ids may start with underscore.">>}
- )
- end;
- _Else ->
- ok
- end;
-validate_docid(Id) ->
- couch_log:debug("Document id is not a string: ~p", [Id]),
- throw({illegal_docid, <<"Document id must be a string">>}).
-
-transfer_fields([], #doc{body = Fields} = Doc, _) ->
- % convert fields back to json object
- Doc#doc{body = {lists:reverse(Fields)}};
-transfer_fields([{<<"_id">>, Id} | Rest], Doc, DbName) ->
- validate_docid(Id, DbName),
- transfer_fields(Rest, Doc#doc{id = Id}, DbName);
-transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs = {0, []}} = Doc, DbName) ->
- {Pos, RevId} = parse_rev(Rev),
- transfer_fields(
- Rest,
- Doc#doc{revs = {Pos, [RevId]}},
- DbName
- );
-transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc, DbName) ->
- % we already got the rev from the _revisions
- transfer_fields(Rest, Doc, DbName);
-transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc, DbName) ->
- Atts = [couch_att:from_json(Name, Props) || {Name, {Props}} <- JsonBins],
- transfer_fields(Rest, Doc#doc{atts = Atts}, DbName);
-transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc, DbName) ->
- RevIds = couch_util:get_value(<<"ids">>, Props),
- Start = couch_util:get_value(<<"start">>, Props),
- if
- not is_integer(Start) ->
- throw({doc_validation, "_revisions.start isn't an integer."});
- not is_list(RevIds) ->
- throw({doc_validation, "_revisions.ids isn't a array."});
- true ->
- ok
- end,
- RevIds2 = lists:map(
- fun(RevId) ->
- try
- parse_revid(RevId)
- catch
- error:function_clause ->
- throw({doc_validation, "RevId isn't a string"});
- error:badarg ->
- throw({doc_validation, "RevId isn't a valid hexadecimal"})
- end
- end,
- RevIds
- ),
- transfer_fields(Rest, Doc#doc{revs = {Start, RevIds2}}, DbName);
-transfer_fields([{<<"_deleted">>, B} | Rest], Doc, DbName) when is_boolean(B) ->
- transfer_fields(Rest, Doc#doc{deleted = B}, DbName);
-% ignored fields
-transfer_fields([{<<"_revs_info">>, _} | Rest], Doc, DbName) ->
- transfer_fields(Rest, Doc, DbName);
-transfer_fields([{<<"_local_seq">>, _} | Rest], Doc, DbName) ->
- transfer_fields(Rest, Doc, DbName);
-transfer_fields([{<<"_conflicts">>, _} | Rest], Doc, DbName) ->
- transfer_fields(Rest, Doc, DbName);
-transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc, DbName) ->
- transfer_fields(Rest, Doc, DbName);
-% special field for per doc access control, for future compatibility
-transfer_fields(
- [{<<"_access">>, _} = Field | Rest],
- #doc{body = Fields} = Doc,
- DbName
-) ->
- transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
-% special fields for replication documents
-transfer_fields(
- [{<<"_replication_state">>, _} = Field | Rest],
- #doc{body = Fields} = Doc,
- DbName
-) ->
- transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
-transfer_fields(
- [{<<"_replication_state_time">>, _} = Field | Rest],
- #doc{body = Fields} = Doc,
- DbName
-) ->
- transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
-transfer_fields(
- [{<<"_replication_state_reason">>, _} = Field | Rest],
- #doc{body = Fields} = Doc,
- DbName
-) ->
- transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
-transfer_fields(
- [{<<"_replication_id">>, _} = Field | Rest],
- #doc{body = Fields} = Doc,
- DbName
-) ->
- transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
-transfer_fields(
- [{<<"_replication_stats">>, _} = Field | Rest],
- #doc{body = Fields} = Doc,
- DbName
-) ->
- transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
-% unknown special field
-transfer_fields([{<<"_", Name/binary>>, _} | _], _, _) ->
- throw({doc_validation, ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
-transfer_fields([Field | Rest], #doc{body = Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName).
-
-to_doc_info(FullDocInfo) ->
- {DocInfo, _Path} = to_doc_info_path(FullDocInfo),
- DocInfo.
-
-max_seq(Tree, UpdateSeq) ->
- FoldFun = fun({_Pos, _Key}, Value, _Type, MaxOldSeq) ->
- case Value of
- {_Deleted, _DiskPos, OldTreeSeq} ->
- % Older versions didn't track data sizes.
- erlang:max(MaxOldSeq, OldTreeSeq);
- % necessary clause?
- {_Deleted, _DiskPos, OldTreeSeq, _Size} ->
- % Older versions didn't store #leaf records.
- erlang:max(MaxOldSeq, OldTreeSeq);
- #leaf{seq = OldTreeSeq} ->
- erlang:max(MaxOldSeq, OldTreeSeq);
- _ ->
- MaxOldSeq
- end
- end,
- couch_key_tree:fold(FoldFun, UpdateSeq, Tree).
-
-to_doc_info_path(#full_doc_info{id = Id, rev_tree = Tree, update_seq = FDISeq}) ->
- RevInfosAndPath = [
- {rev_info(Node), Path}
- || {_Leaf, Path} = Node <-
- couch_key_tree:get_all_leafs(Tree)
- ],
- SortedRevInfosAndPath = lists:sort(
- fun(
- {#rev_info{deleted = DeletedA, rev = RevA}, _PathA},
- {#rev_info{deleted = DeletedB, rev = RevB}, _PathB}
- ) ->
- % sort descending by {not deleted, rev}
- {not DeletedA, RevA} > {not DeletedB, RevB}
- end,
- RevInfosAndPath
- ),
- [{_RevInfo, WinPath} | _] = SortedRevInfosAndPath,
- RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath],
- {#doc_info{id = Id, high_seq = max_seq(Tree, FDISeq), revs = RevInfos}, WinPath}.
-
-rev_info({#leaf{} = Leaf, {Pos, [RevId | _]}}) ->
- #rev_info{
- deleted = Leaf#leaf.deleted,
- body_sp = Leaf#leaf.ptr,
- seq = Leaf#leaf.seq,
- rev = {Pos, RevId}
- };
-rev_info({#doc{} = Doc, {Pos, [RevId | _]}}) ->
- #rev_info{
- deleted = Doc#doc.deleted,
- body_sp = undefined,
- seq = undefined,
- rev = {Pos, RevId}
- }.
-
-is_deleted(#full_doc_info{rev_tree = Tree}) ->
- is_deleted(Tree);
-is_deleted(Tree) ->
- Leafs = couch_key_tree:get_all_leafs(Tree),
- try
- lists:foldl(
- fun
- ({#leaf{deleted = false}, _}, _) ->
- throw(not_deleted);
- ({#doc{deleted = false}, _}, _) ->
- throw(not_deleted);
- (_, Acc) ->
- Acc
- end,
- nil,
- Leafs
- ),
- true
- catch
- throw:not_deleted ->
- false
- end.
-
-get_validate_doc_fun({Props}) ->
- get_validate_doc_fun(couch_doc:from_json_obj({Props}));
-get_validate_doc_fun(#doc{body = {Props}} = DDoc) ->
- case couch_util:get_value(<<"validate_doc_update">>, Props) of
- undefined ->
- nil;
- _Else ->
- fun(EditDoc, DiskDoc, Ctx, SecObj) ->
- couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
- end
- end.
-
-has_stubs(#doc{atts = Atts}) ->
- lists:any(fun couch_att:is_stub/1, Atts);
-has_stubs(Atts) ->
- lists:any(fun couch_att:is_stub/1, Atts).
-
-merge_stubs(#doc{id = Id}, nil) ->
- throw({missing_stub, <<"Previous revision missing for document ", Id/binary>>});
-merge_stubs(#doc{id = Id, atts = MemBins} = StubsDoc, #doc{atts = DiskBins}) ->
- case couch_att:merge_stubs(MemBins, DiskBins) of
- {ok, MergedBins} ->
- StubsDoc#doc{atts = MergedBins};
- {missing, Name} ->
- throw(
- {missing_stub, <<"Invalid attachment stub in ", Id/binary, " for ", Name/binary>>}
- )
- end.
-
-len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
- AttsToInclude = lists:filter(fun(Att) -> not couch_att:is_stub(Att) end, Atts),
- AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts),
- couch_httpd_multipart:length_multipart_stream(Boundary, JsonBytes, AttsDecoded).
-
-doc_to_multi_part_stream(
- Boundary,
- JsonBytes,
- Atts,
- WriteFun,
- SendEncodedAtts
-) ->
- AttsToInclude = lists:filter(fun(Att) -> couch_att:fetch(data, Att) /= stub end, Atts),
- AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts),
- AttFun =
- case SendEncodedAtts of
- false -> fun couch_att:foldl_decode/3;
- true -> fun couch_att:foldl/3
- end,
- couch_httpd_multipart:encode_multipart_stream(
- Boundary, JsonBytes, AttsDecoded, WriteFun, AttFun
- ).
-
-decode_attributes(Atts, SendEncodedAtts) ->
- lists:map(
- fun(Att) ->
- [Name, AttLen, DiskLen, Type, Encoding] =
- couch_att:fetch([name, att_len, disk_len, type, encoding], Att),
- Len =
- case SendEncodedAtts of
- true -> AttLen;
- false -> DiskLen
- end,
- {Att, Name, Len, Type, Encoding}
- end,
- Atts
- ).
-
-doc_from_multi_part_stream(ContentType, DataFun) ->
- doc_from_multi_part_stream(ContentType, DataFun, make_ref()).
-
-doc_from_multi_part_stream(ContentType, DataFun, Ref) ->
- doc_from_multi_part_stream(ContentType, DataFun, Ref, true).
-
-doc_from_multi_part_stream(ContentType, DataFun, Ref, ValidateDocLimits) ->
- case couch_httpd_multipart:decode_multipart_stream(ContentType, DataFun, Ref) of
- {{started_open_doc_revs, NewRef}, Parser, _ParserRef} ->
- restart_open_doc_revs(Parser, Ref, NewRef);
- {{doc_bytes, Ref, DocBytes}, Parser, ParserRef} ->
- Doc =
- case ValidateDocLimits of
- true ->
- from_json_obj_validate(?JSON_DECODE(DocBytes));
- false ->
- from_json_obj(?JSON_DECODE(DocBytes))
- end,
- erlang:put(mochiweb_request_recv, true),
- % we'll send the Parser process ID to the remote nodes so they can
- % retrieve their own copies of the attachment data
- WithParser = fun
- (follows) -> {follows, Parser, Ref};
- (D) -> D
- end,
- Atts = [couch_att:transform(data, WithParser, A) || A <- Doc#doc.atts],
- WaitFun = fun() ->
- receive
- {'DOWN', ParserRef, _, _, _} -> ok
- end
- end,
- {ok, Doc#doc{atts = Atts}, WaitFun, Parser};
- ok ->
- ok
- end.
-
-restart_open_doc_revs(Parser, Ref, NewRef) ->
- unlink(Parser),
- exit(Parser, kill),
- flush_parser_messages(Ref),
- erlang:error({restart_open_doc_revs, NewRef}).
-
-flush_parser_messages(Ref) ->
- receive
- {headers, Ref, _} ->
- flush_parser_messages(Ref);
- {body_bytes, Ref, _} ->
- flush_parser_messages(Ref);
- {body_done, Ref} ->
- flush_parser_messages(Ref);
- {done, Ref} ->
- flush_parser_messages(Ref)
- after 0 ->
- ok
- end.
-
-with_ejson_body(#doc{body = Body} = Doc) when is_binary(Body) ->
- Doc#doc{body = couch_compress:decompress(Body)};
-with_ejson_body(#doc{body = {_}} = Doc) ->
- Doc.
diff --git a/src/couch/src/couch_ejson_compare.erl b/src/couch/src/couch_ejson_compare.erl
deleted file mode 100644
index 669f41364..000000000
--- a/src/couch/src/couch_ejson_compare.erl
+++ /dev/null
@@ -1,128 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_ejson_compare).
-
--export([
- less/2,
- less_json_ids/2,
- less_json/2,
- get_icu_version/0,
- get_uca_version/0,
- get_collator_version/0
-]).
-
-% For testing
--export([
- less_nif/2,
- less_erl/2,
- compare_strings_nif/2
-]).
-
--on_load(init/0).
-
-init() ->
- NumScheds = erlang:system_info(schedulers),
- Dir = code:priv_dir(couch),
- ok = erlang:load_nif(filename:join(Dir, ?MODULE), NumScheds).
-
-% partitioned row comparison
-less({p, PA, A}, {p, PB, B}) ->
- less([PA, A], [PB, B]);
-less(A, B) ->
- try
- less_nif(A, B)
- catch
- error:max_depth_error ->
- % The EJSON structure is too deep, fall back to Erlang land.
- less_erl(A, B)
- end.
-
-less_json_ids({JsonA, IdA}, {JsonB, IdB}) ->
- case less(JsonA, JsonB) of
- 0 ->
- IdA < IdB;
- Result ->
- Result < 0
- end.
-
-less_json(A, B) ->
- less(A, B) < 0.
-
-get_icu_version() ->
- erlang:nif_error(get_icu_version).
-
-get_uca_version() ->
- erlang:nif_error(get_uca_version).
-
-get_collator_version() ->
- erlang:nif_error(get_collator_version).
-
-less_nif(A, B) ->
- erlang:nif_error(less_nif_load_error, [A, B]).
-
-compare_strings_nif(A, B) ->
- erlang:nif_error(compare_string_nif, [A, B]).
-
-less_erl(A, A) -> 0;
-less_erl(A, B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B);
-less_erl(A, _) when is_atom(A) -> -1;
-less_erl(_, B) when is_atom(B) -> 1;
-less_erl(A, B) when is_number(A), is_number(B) -> A - B;
-less_erl(A, _) when is_number(A) -> -1;
-less_erl(_, B) when is_number(B) -> 1;
-less_erl(A, B) when is_binary(A), is_binary(B) -> compare_strings_nif(A, B);
-less_erl(A, _) when is_binary(A) -> -1;
-less_erl(_, B) when is_binary(B) -> 1;
-less_erl(A, B) when is_list(A), is_list(B) -> less_list(A, B);
-less_erl(A, _) when is_list(A) -> -1;
-less_erl(_, B) when is_list(B) -> 1;
-less_erl({A}, {B}) when is_list(A), is_list(B) -> less_props(A, B);
-less_erl({A}, _) when is_list(A) -> -1;
-less_erl(_, {B}) when is_list(B) -> 1.
-
-atom_sort(null) -> 1;
-atom_sort(false) -> 2;
-atom_sort(true) -> 3.
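-% Editor's note (illustrative): the clause ordering above implements the view
-% collation order null < false < true < numbers < strings < arrays < objects,
-% e.g. less_erl(null, 1) < 0 and less_erl(<<"a">>, []) < 0.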
-
-less_props([], []) ->
- 0;
-less_props([], [_ | _]) ->
- -1;
-less_props(_, []) ->
- 1;
-less_props([{AKey, AValue} | RestA], [{BKey, BValue} | RestB]) ->
- case compare_strings_nif(AKey, BKey) of
- 0 ->
- case less_erl(AValue, BValue) of
- 0 ->
- less_props(RestA, RestB);
- Result ->
- Result
- end;
- Result ->
- Result
- end.
-
-less_list([], []) ->
- 0;
-less_list([], [_ | _]) ->
- -1;
-less_list(_, []) ->
- 1;
-less_list([A | RestA], [B | RestB]) ->
- case less_erl(A, B) of
- 0 ->
- less_list(RestA, RestB);
- Result ->
- Result
- end.
diff --git a/src/couch/src/couch_ejson_size.erl b/src/couch/src/couch_ejson_size.erl
deleted file mode 100644
index 54a7094ff..000000000
--- a/src/couch/src/couch_ejson_size.erl
+++ /dev/null
@@ -1,92 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_ejson_size).
-
--export([encoded_size/1]).
-
-%% Compound objects
-
-encoded_size({[]}) ->
- % opening { and closing }
- 2;
-encoded_size({KVs}) ->
- % Would add 2 for the opening { and closing }, but the list comprehension
- % below accumulates one extra , at the end, so use 2 - 1 = 1
- 1 + lists:sum([encoded_size(K) + encoded_size(V) + 2 || {K, V} <- KVs]);
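-% Editor's note, a worked example (illustrative): encoded_size({[{<<"a">>, 1}]})
-% = 1 + (encoded_size(<<"a">>) + encoded_size(1) + 2) = 1 + (3 + 1 + 2) = 7,
-% which matches byte_size(<<"{\"a\":1}">>).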
-encoded_size([]) ->
- % opening [ and closing ]
- 2;
-encoded_size(List) when is_list(List) ->
- % 2 is for [ and ], but the list comprehension accumulates an extra , at
- % the end, so use 2 - 1 = 1
- 1 + lists:sum([encoded_size(V) + 1 || V <- List]);
-%% Floats.
-
-encoded_size(0.0) ->
- 3;
-encoded_size(1.0) ->
- 3;
-encoded_size(Float) when is_float(Float), Float < 0.0 ->
- encoded_size(-Float) + 1;
-encoded_size(Float) when is_float(Float), Float < 1.0 ->
- if
- % close enough to 0.0
- Float =< 1.0e-300 -> 3;
- % Xe-YYY
- Float =< 1.0e-100 -> 6;
- % Xe-YY
- Float =< 1.0e-10 -> 5;
- % Xe-Y, 0.0X
- Float =< 0.01 -> 4;
- % 0.X
- true -> 3
- end;
-encoded_size(Float) when is_float(Float) ->
- if
- % XeYYY
- Float >= 1.0e100 -> 5;
- % XeYY
- Float >= 1.0e10 -> 4;
- % XeY, X.Y
- true -> 3
- end;
-%% Integers
-
-encoded_size(0) ->
- 1;
-encoded_size(Integer) when is_integer(Integer), Integer < 0 ->
- encoded_size(-Integer) + 1;
-encoded_size(Integer) when is_integer(Integer) ->
- if
- Integer < 10 -> 1;
- Integer < 100 -> 2;
- Integer < 1000 -> 3;
- Integer < 10000 -> 4;
- true -> trunc(math:log10(Integer)) + 1
- end;
-%% Strings
-
-encoded_size(Binary) when is_binary(Binary) ->
- 2 + byte_size(Binary);
-%% Special terminal symbols as atoms
-
-encoded_size(null) ->
- 4;
-encoded_size(true) ->
- 4;
-encoded_size(false) ->
- 5;
-%% Other atoms
-
-encoded_size(Atom) when is_atom(Atom) ->
- encoded_size(atom_to_binary(Atom, utf8)).
diff --git a/src/couch/src/couch_emsort.erl b/src/couch/src/couch_emsort.erl
deleted file mode 100644
index 9dcc08d67..000000000
--- a/src/couch/src/couch_emsort.erl
+++ /dev/null
@@ -1,347 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_emsort).
-
-% This is an implementation of an external N-way merge sort. Its primary
-% purpose is to be used during database compaction as an optimization for
-% managing the docid btree.
-%
-% Trunk currently writes the docid btree as it's compacting the database, but
-% this is quite inefficient as it's written out of order in the general case
-% because writes are ordered by update_seq.
-%
-% The general design of this module is a very standard merge sort with one
-% caveat due to append only files. This is described in more detail in the
-% sorting phase.
-%
-% The basic algorithm is in two halves. The first half stores KV pairs to disk
-% which is then followed by the actual sorting phase that streams KV's back
-% to the client using a fold-like function. After some basic definitions we'll
-% describe both phases.
-%
-% Key/Value pairs (aka, KV pairs, or KVs) are simply lists of two-tuples with
-% a key as the first element and an arbitrary value as the second. The key of
-% this pair is what is used to determine the sort order, based on native Erlang
-% term comparison.
-%
-% Internally, KVs are stored as lists with a max size defined by
-% #ems.chain_chunk. These lists are then chained together on disk using disk
-% offsets as a poor man's linked list. The basic format of a list looks like
-% {KVs, DiskOffset} where DiskOffset is either the atom nil which means "end
-% of the list" or an integer that is a file position offset that is the
-% location of another {KVs, DiskOffset} term. The head of each list is
-% referred to with a single DiskOffset. The set of terms that extend from
-% this initial DiskOffset to the last {KVs, nil} term is referred to in the
-% code as a chain. Two important facts are that one call to couch_emsort:add/2
-% creates a single chain, and that a chain is always sorted on disk (though it's
-% possible for it to be sorted in descending order, which will be discussed later).
-%
-% The second major internal structure is the back bone. This is a list of
-% chains that has a structure quite similar to a chain but contains different
-% data types and has no guarantee on ordering. The back bone is merely the
-% list of all head DiskOffsets. It has the same {DiskOffsets, DiskOffset}
-% structure that we use for chains, except that DiskOffsets is
-% a list of integers that refer to the heads of chains. The maximum size of
-% DiskOffsets is defined by #ems.bb_chunk. It is important to note that the
-% backbone has no defined ordering. The other thing of note is that the RAM
-% bounds are loosely defined as:
-%
-% #ems.bb_chunk * #ems.chain_chunk * avg_size(KV).
-%
-% Build Phase
-% -----------
-%
-% As mentioned, each call to couch_emsort:add/2 creates a chain from the
-% list of KVs that are passed in. This list is first sorted and then the
-% chain is created by foldr-ing (note: r) across the list to build the
-% chain on disk. It is important to note that the final chain is then
-% sorted in ascending order on disk.
-%
-%
-% Sort Phase
-% ----------
-%
-% The sort phase is where the merge sort kicks in. This is generally your
-% average merge sort with a caveat for append only storage. First the
-% general outline.
-%
-% The general outline for this sort is that it iteratively merges chains
-% in the backbone until less than #ems.bb_chunk chains exist. At this
-% point it switches to the last merge sort phase where it just streams
-% the sorted KVs back to the client using a fold function.
-%
-% The general chain merging is a pretty standard merge sort. You load up
-% the initial KVs from each chain, pick the next one in sort order and
-% then when you run out of KVs you're left with a single DiskOffset for
-% the head of a single chain that represents the merge. These new
-% DiskOffsets are used to build the new back bone.
-%
-% The one caveat here is that we're using append only storage. This is
-% important because once we make a pass we've effectively reversed the
-% sort order of each chain. I.e., the first merge results in chains that
-% are ordered in descending order. Since one pass reverses the order,
-% the trick is that each phase does two passes. The first pass picks
-% the smallest KV to write next and the second pass picks the largest.
-% In this manner each time we do a back bone merge we end up with chains
-% that are always sorted in an ascending order.
-%
-% The one downfall is that in the interest of simplicity the sorting is
-% restricted to Erlang's native term sorting. A possible extension would
-% be to allow two comparison functions to be used, but this module is
-% currently only used for docid sorting which is hardcoded to be raw
-% Erlang ordering.
-%
-% Diagram
-% -------
-%
-% If it helps, this is a general diagram of the internal structures. A
-% couple of points to note since this is ASCII art. The BB pointers across
-% the top are lists of chains going down. Each BBN item is one of the
-% {DiskOffsets, DiskOffset} structures discussed earlier. Going down, the
-% CxN nodes actually represent #ems.bb_chunk chains in parallel going off
-% the back bone. It is important, and not represented in this diagram, that
-% within these groups the chains don't have to be the same length. That's
-% just a limitation of my ASCII artistic abilities.
-%
-% The BBN* node is marked with a * to denote that it is the only state
-% that we store when writing headers to disk, as it has pointers that
-% lead us to all data in the tree.
-%
-% BB1 <- BB2 <- BB3 <- BBN*
-% | | | |
-% v v v v
-% CA1 CB1 CC1 CD1
-% | | |
-% v v v
-% CA2 CC2 CD2
-% | |
-% v v
-% CA3 CD3
-%
-
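-% A minimal usage sketch (not part of the original documentation; the file
-% name and KV data are made up):
-%
-%     {ok, Fd} = couch_file:open("sort.dat", [create, overwrite]),
-%     {ok, Ems0} = couch_emsort:open(Fd),
-%     {ok, Ems1} = couch_emsort:add(Ems0, [{3, c}, {1, a}]),
-%     {ok, Ems2} = couch_emsort:add(Ems1, [{2, b}]),
-%     {ok, Iter0} = couch_emsort:sort(Ems2),
-%     {ok, {1, a}, Iter1} = couch_emsort:next(Iter0),
-%     %% ... keep calling next/1 until it returns `finished`.
-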
--export([open/1, open/2, get_fd/1, get_state/1]).
--export([add/2, merge/1, merge/2, sort/1, iter/1, next/1]).
--export([num_kvs/1, num_merges/1]).
-
--record(ems, {
- fd,
- root,
- bb_chunk = 10,
- chain_chunk = 100,
- num_kvs = 0,
- num_bb = 0
-}).
-
--define(REPORT_INTERVAL, 1000).
-
-open(Fd) ->
- {ok, #ems{fd = Fd}}.
-
-open(Fd, Options) ->
- {ok, set_options(#ems{fd = Fd}, Options)}.
-
-set_options(Ems, []) ->
- Ems;
-set_options(Ems, [{root, Root} | Rest]) ->
- set_options(Ems#ems{root = Root}, Rest);
-set_options(Ems, [{chain_chunk, Count} | Rest]) when is_integer(Count) ->
- set_options(Ems#ems{chain_chunk = Count}, Rest);
-set_options(Ems, [{back_bone_chunk, Count} | Rest]) when is_integer(Count) ->
- set_options(Ems#ems{bb_chunk = Count}, Rest);
-set_options(Ems, [{num_kvs, NumKVs} | Rest]) when is_integer(NumKVs) ->
- set_options(Ems#ems{num_kvs = NumKVs}, Rest);
-set_options(Ems, [{num_bb, NumBB} | Rest]) when is_integer(NumBB) ->
- set_options(Ems#ems{num_bb = NumBB}, Rest).
-
-get_fd(#ems{fd = Fd}) ->
- Fd.
-
-get_state(#ems{} = Ems) ->
- #ems{
- root = Root,
- num_kvs = NumKVs,
- num_bb = NumBB
- } = Ems,
- [
- {root, Root},
- {num_kvs, NumKVs},
- {num_bb, NumBB}
- ].
-
-add(Ems, []) ->
- {ok, Ems};
-add(Ems, KVs) ->
- Pos = write_kvs(Ems, KVs),
- NewEms = add_bb_pos(Ems, Pos),
- {ok, NewEms#ems{
- num_kvs = Ems#ems.num_kvs + length(KVs),
- num_bb = Ems#ems.num_bb + 1
- }}.
-
-sort(#ems{} = Ems) ->
- {ok, Ems1} = merge(Ems),
- iter(Ems1).
-
-merge(Ems) ->
- merge(Ems, fun(_) -> ok end).
-
-merge(#ems{root = undefined} = Ems, _Reporter) ->
- {ok, Ems};
-merge(#ems{} = Ems, Reporter) ->
- {ok, decimate(Ems, Reporter)}.
-
-iter(#ems{root = undefined} = Ems) ->
- {ok, {Ems, []}};
-iter(#ems{root = {BB, nil}} = Ems) ->
- Chains = init_chains(Ems, small, BB),
- {ok, {Ems, Chains}};
-iter(#ems{root = {_, _}}) ->
- {error, not_merged}.
-
-next({_Ems, []}) ->
- finished;
-next({Ems, Chains}) ->
- {KV, RestChains} = choose_kv(small, Ems, Chains),
- {ok, KV, {Ems, RestChains}}.
-
-num_kvs(#ems{num_kvs = NumKVs}) ->
- NumKVs.
-
-num_merges(#ems{bb_chunk = BBChunk, num_bb = NumBB}) ->
- num_merges(BBChunk, NumBB).
-
-add_bb_pos(#ems{root = undefined} = Ems, Pos) ->
- Ems#ems{root = {[Pos], nil}};
-add_bb_pos(#ems{root = {BB, Prev}} = Ems, Pos) ->
- {NewBB, NewPrev} = append_item(Ems, {BB, Prev}, Pos, Ems#ems.bb_chunk),
- Ems#ems{root = {NewBB, NewPrev}}.
-
-write_kvs(Ems, KVs) ->
-    % Write the list of KVs to disk in sorted order, in chunks of
-    % chain_chunk (100 by default). Also make sure that the order is
-    % such that they can be streamed back in ascending order.
- {LastKVs, LastPos} =
- lists:foldr(
- fun(KV, Acc) ->
- append_item(Ems, Acc, KV, Ems#ems.chain_chunk)
- end,
- {[], nil},
- lists:sort(KVs)
- ),
- {ok, Final, _} = couch_file:append_term(Ems#ems.fd, {LastKVs, LastPos}),
- Final.
-
-decimate(#ems{root = {_BB, nil}} = Ems, _Reporter) ->
- % We have less than bb_chunk backbone pointers so we're
- % good to start streaming KV's back to the client.
- Ems;
-decimate(#ems{root = {BB, NextBB}} = Ems, Reporter) ->
- % To make sure we have a bounded amount of data in RAM
- % at any given point we first need to decimate the data
- % by performing the first couple iterations of a merge
- % sort writing the intermediate results back to disk.
-
- % The first pass gives us a sort with pointers linked from
- % largest to smallest.
- {RevBB, RevNextBB} = merge_back_bone(Ems, small, BB, NextBB, Reporter),
-
- % We have to run a second pass so that links are pointed
- % back from smallest to largest.
- {FwdBB, FwdNextBB} = merge_back_bone(Ems, big, RevBB, RevNextBB, Reporter),
-
-    % Continue decimating until we have an acceptable bound on
- % the number of keys to use.
- decimate(Ems#ems{root = {FwdBB, FwdNextBB}}, Reporter).
-
-merge_back_bone(Ems, Choose, BB, NextBB, Reporter) ->
- BBPos = merge_chains(Ems, Choose, BB, Reporter),
- Reporter(length(BB)),
- merge_rest_back_bone(Ems, Choose, NextBB, {[BBPos], nil}, Reporter).
-
-merge_rest_back_bone(_Ems, _Choose, nil, Acc, _Reporter) ->
- Acc;
-merge_rest_back_bone(Ems, Choose, BBPos, Acc, Reporter) ->
- {ok, {BB, NextBB}} = couch_file:pread_term(Ems#ems.fd, BBPos),
- NewPos = merge_chains(Ems, Choose, BB, Reporter),
- {NewBB, NewPrev} = append_item(Ems, Acc, NewPos, Ems#ems.bb_chunk),
- merge_rest_back_bone(Ems, Choose, NextBB, {NewBB, NewPrev}, Reporter).
-
-merge_chains(Ems, Choose, BB, Reporter) ->
- Chains = init_chains(Ems, Choose, BB),
- merge_chains(Ems, Choose, Chains, {[], nil}, Reporter, 0).
-
-merge_chains(Ems, _Choose, [], ChainAcc, _Reporter, _Count) ->
- {ok, CPos, _} = couch_file:append_term(Ems#ems.fd, ChainAcc),
- CPos;
-merge_chains(#ems{chain_chunk = CC} = Ems, Choose, Chains, Acc, Reporter, Count0) ->
- {KV, RestChains} = choose_kv(Choose, Ems, Chains),
- {NewKVs, NewPrev} = append_item(Ems, Acc, KV, CC),
- Count1 =
- case (Count0 + 1) rem ?REPORT_INTERVAL of
- 0 ->
- Reporter(Count0),
- 0;
- _ ->
- Count0 + 1
- end,
- merge_chains(Ems, Choose, RestChains, {NewKVs, NewPrev}, Reporter, Count1).
-
-init_chains(Ems, Choose, BB) ->
- Chains = lists:map(
- fun(CPos) ->
- {ok, {KVs, NextKVs}} = couch_file:pread_term(Ems#ems.fd, CPos),
- {KVs, NextKVs}
- end,
- BB
- ),
- order_chains(Choose, Chains).
-
-order_chains(small, Chains) -> lists:sort(Chains);
-order_chains(big, Chains) -> lists:reverse(lists:sort(Chains)).
-
-choose_kv(_Choose, _Ems, [{[KV], nil} | Rest]) ->
- {KV, Rest};
-choose_kv(Choose, Ems, [{[KV], Pos} | RestChains]) ->
- {ok, Chain} = couch_file:pread_term(Ems#ems.fd, Pos),
- case Choose of
- small -> {KV, ins_small_chain(RestChains, Chain, [])};
- big -> {KV, ins_big_chain(RestChains, Chain, [])}
- end;
-choose_kv(Choose, _Ems, [{[KV | RestKVs], Prev} | RestChains]) ->
- case Choose of
- small -> {KV, ins_small_chain(RestChains, {RestKVs, Prev}, [])};
- big -> {KV, ins_big_chain(RestChains, {RestKVs, Prev}, [])}
- end.
-
-ins_small_chain([{[{K1, _} | _], _} = C1 | Rest], {[{K2, _} | _], _} = C2, Acc) when K1 < K2 ->
- ins_small_chain(Rest, C2, [C1 | Acc]);
-ins_small_chain(Rest, Chain, Acc) ->
- lists:reverse(Acc, [Chain | Rest]).
-
-ins_big_chain([{[{K1, _} | _], _} = C1 | Rest], {[{K2, _} | _], _} = C2, Acc) when K1 > K2 ->
- ins_big_chain(Rest, C2, [C1 | Acc]);
-ins_big_chain(Rest, Chain, Acc) ->
- lists:reverse(Acc, [Chain | Rest]).
-
-append_item(Ems, {List, Prev}, Pos, Size) when length(List) >= Size ->
- {ok, PrevList, _} = couch_file:append_term(Ems#ems.fd, {List, Prev}),
- {[Pos], PrevList};
-append_item(_Ems, {List, Prev}, Pos, _Size) ->
- {[Pos | List], Prev}.
-
-num_merges(BBChunk, NumBB) when NumBB =< BBChunk ->
- 0;
-num_merges(BBChunk, NumBB) when NumBB > BBChunk ->
- RevNumBB = ceil(NumBB / BBChunk),
- FwdNumBB = ceil(RevNumBB / BBChunk),
- 2 + num_merges(BBChunk, FwdNumBB).
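-
-% A worked example (not original documentation): with the default bb_chunk of
-% 10 and 1,000 initial chains, the first decimation pass leaves ceil(1000/10)
-% = 100 descending chains, the second pass leaves ceil(100/10) = 10 ascending
-% chains, which fit in a single backbone term, so num_merges(10, 1000) =:= 2.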
diff --git a/src/couch/src/couch_event_sup.erl b/src/couch/src/couch_event_sup.erl
deleted file mode 100644
index e9fc2e5db..000000000
--- a/src/couch/src/couch_event_sup.erl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% The purpose of this module is to allow event handlers to participate in Erlang
-%% supervisor trees. It provides a monitorable process that crashes if the event
-%% handler fails. The process, when shut down, deregisters the event handler.
-
--module(couch_event_sup).
--behaviour(gen_server).
--vsn(1).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([start_link/3, start_link/4, stop/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-
-%
-% Instead of calling
-% ok = gen_event:add_sup_handler(error_logger, my_log, Args)
-%
-% do this:
-% {ok, LinkedPid} = couch_event_sup:start_link(error_logger, my_log, Args)
-%
-% The benefit is that the event handler is now part of the process tree, and
-% can be started, restarted and shut down consistently, like the rest of the
-% server components.
-%
-% And now if the "event" crashes, the supervisor is notified and can restart
-% the event handler.
-%
-% Use this form to create a named process:
-% {ok, LinkedPid} = couch_event_sup:start_link({local, my_log}, error_logger, my_log, Args)
-%
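-% As an illustration (not from the original docs; my_log and error_logger are
-% just the example names above), the corresponding supervisor child spec could
-% look like:
-%
-%   {my_log_sup, {couch_event_sup, start_link,
-%       [{local, my_log}, error_logger, my_log, []]},
-%       permanent, brutal_kill, worker, [couch_event_sup]}
-%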
-
-start_link(EventMgr, EventHandler, Args) ->
- gen_server:start_link(couch_event_sup, {EventMgr, EventHandler, Args}, []).
-
-start_link(ServerName, EventMgr, EventHandler, Args) ->
- gen_server:start_link(ServerName, couch_event_sup, {EventMgr, EventHandler, Args}, []).
-
-stop(Pid) ->
- gen_server:call(Pid, stop).
-
-init({EventMgr, EventHandler, Args}) ->
- case gen_event:add_sup_handler(EventMgr, EventHandler, Args) of
- ok ->
- {ok, {EventMgr, EventHandler}};
- {stop, Error} ->
- {stop, Error}
- end.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({gen_event_EXIT, _Handler, Reason}, State) ->
- {stop, Reason, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl
deleted file mode 100644
index ba8d9c42f..000000000
--- a/src/couch/src/couch_file.erl
+++ /dev/null
@@ -1,956 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_file).
--behaviour(gen_server).
--vsn(2).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(INITIAL_WAIT, 60000).
--define(MONITOR_CHECK, 10000).
-% 4 KiB
--define(SIZE_BLOCK, 16#1000).
--define(IS_OLD_STATE(S), is_pid(S#file.db_monitor)).
--define(PREFIX_SIZE, 5).
--define(DEFAULT_READ_COUNT, 1024).
-
--type block_id() :: non_neg_integer().
--type location() :: non_neg_integer().
--type header_size() :: non_neg_integer().
-
--record(file, {
- fd,
- is_sys,
- eof = 0,
- db_monitor,
- pread_limit = 0
-}).
-
-% public API
--export([open/1, open/2, close/1, bytes/1, sync/1, truncate/2, set_db_pid/2]).
--export([pread_term/2, pread_iolist/2, pread_binary/2]).
--export([append_binary/2]).
--export([append_raw_chunk/2, assemble_file_chunk/2]).
--export([append_term/2, append_term/3]).
--export([pread_terms/2]).
--export([append_terms/2, append_terms/3]).
--export([write_header/2, read_header/1]).
--export([delete/2, delete/3, nuke_dir/2, init_delete_dir/1]).
--export([last_read/1]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3, format_status/2]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-%% helper functions
--export([process_info/1]).
-
-%%----------------------------------------------------------------------
-%% Args: Valid Options are [create] and [create,overwrite].
-%% Files are opened in read/write mode.
-%% Returns: On success, {ok, Fd}
-%% or {error, Reason} if the file could not be opened.
-%%----------------------------------------------------------------------
-
-open(Filepath) ->
- open(Filepath, []).
-
-open(Filepath, Options) ->
- case
- gen_server:start_link(
- couch_file,
- {Filepath, Options, self(), Ref = make_ref()},
- []
- )
- of
- {ok, Fd} ->
- {ok, Fd};
- ignore ->
- % get the error
- receive
- {Ref, Pid, {error, Reason} = Error} ->
- case process_info(self(), trap_exit) of
- {trap_exit, true} ->
- receive
- {'EXIT', Pid, _} -> ok
- end;
- {trap_exit, false} ->
- ok
- end,
- case {lists:member(nologifmissing, Options), Reason} of
- {true, enoent} ->
- ok;
- _ ->
- couch_log:error(
- "Could not open file ~s: ~s",
- [Filepath, file:format_error(Reason)]
- )
- end,
- Error
- end;
- Error ->
- % We can't say much here, because it could be any kind of error.
- % Just let it bubble and an encapsulating subcomponent can perhaps
- % be more informative. It will likely appear in the SASL log, anyway.
- Error
- end.
-
-set_db_pid(Fd, Pid) ->
- gen_server:call(Fd, {set_db_pid, Pid}).
-
-%%----------------------------------------------------------------------
-%% Purpose: To append an Erlang term to the end of the file.
-%% Args: Erlang term to serialize and append to the file.
-%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to
-%% the beginning of the serialized term. Use pread_term to read the term
-%% back.
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-append_term(Fd, Term) ->
- append_term(Fd, Term, []).
-
-append_term(Fd, Term, Options) ->
- Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
- append_binary(Fd, couch_compress:compress(Term, Comp)).
-
-%%----------------------------------------------------------------------
-%% Purpose: To append an Erlang binary to the end of the file.
-%% Args: Erlang binary to append to the file.
-%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to the
-%% beginning of the data. Use pread_binary to read it back.
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-append_binary(Fd, Bin) ->
- ioq:call(Fd, {append_bin, assemble_file_chunk(Bin)}, erlang:get(io_priority)).
-
-append_raw_chunk(Fd, Chunk) ->
- ioq:call(Fd, {append_bin, Chunk}, erlang:get(io_priority)).
-
-assemble_file_chunk(Bin) ->
- [<<0:1/integer, (iolist_size(Bin)):31/integer>>, Bin].
-
-assemble_file_chunk(Bin, Md5) ->
- [<<1:1/integer, (iolist_size(Bin)):31/integer>>, Md5, Bin].
-
-%%----------------------------------------------------------------------
-%% Purpose: Reads a term from a file that was written with append_term
-%% Args: Pos, the offset into the file where the term is serialized.
-%% Returns: {ok, Term}
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-pread_term(Fd, Pos) ->
- {ok, Bin} = pread_binary(Fd, Pos),
- {ok, couch_compress:decompress(Bin)}.
-
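-% A round-trip sketch (illustrative only; the path and term are made up):
-%
-%     {ok, Fd} = couch_file:open("/tmp/terms.couch", [create, overwrite]),
-%     {ok, Pos, _Len} = couch_file:append_term(Fd, {doc, <<"id">>, 1}),
-%     {ok, {doc, <<"id">>, 1}} = couch_file:pread_term(Fd, Pos),
-%     ok = couch_file:close(Fd).
-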
-%%----------------------------------------------------------------------
-%% Purpose: Reads a binary from a file that was written with append_binary
-%% Args: Pos, the offset into the file where the data was written.
-%% Returns: {ok, Binary}
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-pread_binary(Fd, Pos) ->
- {ok, L} = pread_iolist(Fd, Pos),
- {ok, iolist_to_binary(L)}.
-
-pread_iolist(Fd, Pos) ->
- case ioq:call(Fd, {pread_iolist, Pos}, erlang:get(io_priority)) of
- {ok, IoList, Md5} ->
- {ok, verify_md5(Fd, Pos, IoList, Md5)};
- Error ->
- Error
- end.
-
-pread_terms(Fd, PosList) ->
- {ok, Bins} = pread_binaries(Fd, PosList),
- Terms = lists:map(
- fun(Bin) ->
- couch_compress:decompress(Bin)
- end,
- Bins
- ),
- {ok, Terms}.
-
-pread_binaries(Fd, PosList) ->
- {ok, Data} = pread_iolists(Fd, PosList),
- {ok, lists:map(fun erlang:iolist_to_binary/1, Data)}.
-
-pread_iolists(Fd, PosList) ->
- case ioq:call(Fd, {pread_iolists, PosList}, erlang:get(io_priority)) of
- {ok, DataMd5s} ->
- Data = lists:zipwith(
- fun(Pos, {IoList, Md5}) ->
- verify_md5(Fd, Pos, IoList, Md5)
- end,
- PosList,
- DataMd5s
- ),
- {ok, Data};
- Error ->
- Error
- end.
-
-append_terms(Fd, Terms) ->
- append_terms(Fd, Terms, []).
-
-append_terms(Fd, Terms, Options) ->
- Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
- Bins = lists:map(
- fun(Term) ->
- couch_compress:compress(Term, Comp)
- end,
- Terms
- ),
- append_binaries(Fd, Bins).
-
-append_binaries(Fd, Bins) ->
- WriteBins = lists:map(fun assemble_file_chunk/1, Bins),
- ioq:call(Fd, {append_bins, WriteBins}, erlang:get(io_priority)).
-
-%%----------------------------------------------------------------------
-%% Purpose: The length of a file, in bytes.
-%% Returns: {ok, Bytes}
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-% length in bytes
-bytes(Fd) ->
- gen_server:call(Fd, bytes, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Truncate a file to the given position, in bytes.
-%% Returns: ok
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-truncate(Fd, Pos) ->
- gen_server:call(Fd, {truncate, Pos}, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Ensure all bytes written to the file are flushed to disk.
-%% Returns: ok
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-sync(Filepath) when is_list(Filepath) ->
- case file:open(Filepath, [append, raw]) of
- {ok, Fd} ->
- try
- case file:sync(Fd) of
- ok ->
- ok;
- {error, Reason} ->
- erlang:error({fsync_error, Reason})
- end
- after
- ok = file:close(Fd)
- end;
- {error, Error} ->
- erlang:error(Error)
- end;
-sync(Fd) ->
- case gen_server:call(Fd, sync, infinity) of
- ok ->
- ok;
- {error, Reason} ->
- erlang:error({fsync_error, Reason})
- end.
-
-%%----------------------------------------------------------------------
-%% Purpose: Close the file.
-%% Returns: ok
-%%----------------------------------------------------------------------
-close(Fd) ->
- gen_server:call(Fd, close, infinity).
-
-delete(RootDir, Filepath) ->
- delete(RootDir, Filepath, []).
-
-delete(RootDir, FullFilePath, Options) ->
- EnableRecovery = config:get_boolean(
- "couchdb",
- "enable_database_recovery",
- false
- ),
- Async = not lists:member(sync, Options),
- Context = couch_util:get_value(context, Options, compaction),
- case Context =:= delete andalso EnableRecovery of
- true ->
- rename_file(FullFilePath);
- false ->
- DeleteAfterRename = config:get_boolean(
- "couchdb",
- "delete_after_rename",
- true
- ),
- delete_file(RootDir, FullFilePath, Async, DeleteAfterRename)
- end.
-
-delete_file(RootDir, Filepath, Async, DeleteAfterRename) ->
- DelFile = filename:join([RootDir, ".delete", ?b2l(couch_uuids:random())]),
- case file:rename(Filepath, DelFile) of
- ok when DeleteAfterRename ->
- if
- (Async) ->
- spawn(file, delete, [DelFile]),
- ok;
- true ->
- file:delete(DelFile)
- end;
- Else ->
- Else
- end.
-
-rename_file(Original) ->
- DeletedFileName = deleted_filename(Original),
- Now = calendar:local_time(),
- case file:rename(Original, DeletedFileName) of
- ok -> file:change_time(DeletedFileName, Now);
- Else -> Else
- end.
-
-deleted_filename(Original) ->
- {{Y, Mon, D}, {H, Min, S}} = calendar:universal_time(),
- Suffix = lists:flatten(
- io_lib:format(
- ".~w~2.10.0B~2.10.0B." ++
- "~2.10.0B~2.10.0B~2.10.0B.deleted" ++
- filename:extension(Original),
- [Y, Mon, D, H, Min, S]
- )
- ),
- filename:rootname(Original) ++ Suffix.
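-
-% For example (illustrative timestamp), deleted_filename("/srv/data/db1.couch")
-% would return "/srv/data/db1.20220607.163752.deleted.couch".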
-
-nuke_dir(RootDelDir, Dir) ->
- EnableRecovery = config:get_boolean(
- "couchdb",
- "enable_database_recovery",
- false
- ),
- case EnableRecovery of
- true ->
- rename_file(Dir);
- false ->
- delete_dir(RootDelDir, Dir)
- end.
-
-delete_dir(RootDelDir, Dir) ->
- DeleteAfterRename = config:get_boolean(
- "couchdb",
- "delete_after_rename",
- true
- ),
- FoldFun = fun(File) ->
- Path = Dir ++ "/" ++ File,
- case filelib:is_dir(Path) of
- true ->
- ok = nuke_dir(RootDelDir, Path),
- file:del_dir(Path);
- false ->
- delete_file(RootDelDir, Path, false, DeleteAfterRename)
- end
- end,
- case file:list_dir(Dir) of
- {ok, Files} ->
- lists:foreach(FoldFun, Files),
- ok = file:del_dir(Dir);
- {error, enoent} ->
- ok
- end.
-
-init_delete_dir(RootDir) ->
- Dir = filename:join(RootDir, ".delete"),
-    % note: ensure_dir requires an actual filename component, which is the
- % reason for "foo".
- filelib:ensure_dir(filename:join(Dir, "foo")),
- spawn(fun() ->
- filelib:fold_files(
- Dir,
- ".*",
- true,
- fun(Filename, _) ->
- ok = file:delete(Filename)
- end,
- ok
- )
- end),
- ok.
-
-read_header(Fd) ->
- case ioq:call(Fd, find_header, erlang:get(io_priority)) of
- {ok, Bin} ->
- {ok, binary_to_term(Bin)};
- Else ->
- Else
- end.
-
-write_header(Fd, Data) ->
- Bin = term_to_binary(Data),
- Md5 = couch_hash:md5_hash(Bin),
- % now we assemble the final header binary and write to disk
- FinalBin = <<Md5/binary, Bin/binary>>,
- ioq:call(Fd, {write_header, FinalBin}, erlang:get(io_priority)).
-
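-% A header round-trip sketch (illustrative; the header term is made up):
-%
-%     ok = couch_file:write_header(Fd, {db_header, 7, []}),
-%     {ok, {db_header, 7, []}} = couch_file:read_header(Fd).
-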
-init_status_error(ReturnPid, Ref, Error) ->
- ReturnPid ! {Ref, self(), Error},
- ignore.
-
-last_read(Fd) when is_pid(Fd) ->
- Now = os:timestamp(),
- couch_util:process_dict_get(Fd, read_timestamp, Now).
-
-% server functions
-
-init({Filepath, Options, ReturnPid, Ref}) ->
- OpenOptions = file_open_options(Options),
- Limit = get_pread_limit(),
- IsSys = lists:member(sys_db, Options),
- update_read_timestamp(),
- case lists:member(create, Options) of
- true ->
- filelib:ensure_dir(Filepath),
- case file:open(Filepath, OpenOptions) of
- {ok, Fd} ->
- %% Save Fd in process dictionary for debugging purposes
- put(couch_file_fd, {Fd, Filepath}),
- {ok, Length} = file:position(Fd, eof),
- case Length > 0 of
- true ->
- % this means the file already exists and has data.
-                    % FYI: We don't differentiate between empty files and non-existent
- % files here.
- case lists:member(overwrite, Options) of
- true ->
- {ok, 0} = file:position(Fd, 0),
- ok = file:truncate(Fd),
- ok = file:sync(Fd),
- maybe_track_open_os_files(Options),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd = Fd, is_sys = IsSys, pread_limit = Limit}};
- false ->
- ok = file:close(Fd),
- init_status_error(ReturnPid, Ref, {error, eexist})
- end;
- false ->
- maybe_track_open_os_files(Options),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd = Fd, is_sys = IsSys, pread_limit = Limit}}
- end;
- Error ->
- init_status_error(ReturnPid, Ref, Error)
- end;
- false ->
- % open in read mode first, so we don't create the file if it doesn't exist.
- case file:open(Filepath, [read, raw]) of
- {ok, Fd_Read} ->
- case file:open(Filepath, OpenOptions) of
- {ok, Fd} ->
- %% Save Fd in process dictionary for debugging purposes
- put(couch_file_fd, {Fd, Filepath}),
- ok = file:close(Fd_Read),
- maybe_track_open_os_files(Options),
- {ok, Eof} = file:position(Fd, eof),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd = Fd, eof = Eof, is_sys = IsSys, pread_limit = Limit}};
- Error ->
- init_status_error(ReturnPid, Ref, Error)
- end;
- Error ->
- init_status_error(ReturnPid, Ref, Error)
- end
- end.
-
-file_open_options(Options) ->
- [read, raw, binary] ++
- case lists:member(read_only, Options) of
- true ->
- [];
- false ->
- [append]
- end.
-
-maybe_track_open_os_files(Options) ->
- case not lists:member(sys_db, Options) of
- true ->
- couch_stats_process_tracker:track([couchdb, open_os_files]);
- false ->
- ok
- end.
-
-terminate(_Reason, #file{fd = nil}) ->
- ok;
-terminate(_Reason, #file{fd = Fd}) ->
- ok = file:close(Fd).
-
-handle_call(Msg, From, File) when ?IS_OLD_STATE(File) ->
- handle_call(Msg, From, upgrade_state(File));
-handle_call(close, _From, #file{fd = Fd} = File) ->
- {stop, normal, file:close(Fd), File#file{fd = nil}};
-handle_call({pread_iolist, Pos}, _From, File) ->
- update_read_timestamp(),
- {LenIolist, NextPos} = read_raw_iolist_int(File, Pos, 4),
- case iolist_to_binary(LenIolist) of
- % an MD5-prefixed term
- <<1:1/integer, Len:31/integer>> ->
- {Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len + 16),
- {Md5, IoList} = extract_md5(Md5AndIoList),
- {reply, {ok, IoList, Md5}, File};
- <<0:1/integer, Len:31/integer>> ->
- {Iolist, _} = read_raw_iolist_int(File, NextPos, Len),
- {reply, {ok, Iolist, <<>>}, File}
- end;
-handle_call({pread_iolists, PosL}, _From, File) ->
- update_read_timestamp(),
- LocNums1 = [{Pos, 4} || Pos <- PosL],
- DataSizes = read_multi_raw_iolists_int(File, LocNums1),
- LocNums2 = lists:map(
- fun({LenIoList, NextPos}) ->
- case iolist_to_binary(LenIoList) of
- % an MD5-prefixed term
- <<1:1/integer, Len:31/integer>> ->
- {NextPos, Len + 16};
- <<0:1/integer, Len:31/integer>> ->
- {NextPos, Len}
- end
- end,
- DataSizes
- ),
- Resps = read_multi_raw_iolists_int(File, LocNums2),
- Extracted = lists:zipwith(
- fun({LenIoList, _}, {IoList, _}) ->
- case iolist_to_binary(LenIoList) of
- <<1:1/integer, _:31/integer>> ->
- {Md5, IoList} = extract_md5(IoList),
- {IoList, Md5};
- <<0:1/integer, _:31/integer>> ->
- {IoList, <<>>}
- end
- end,
- DataSizes,
- Resps
- ),
- {reply, {ok, Extracted}, File};
-handle_call(bytes, _From, #file{fd = Fd} = File) ->
- {reply, file:position(Fd, eof), File};
-handle_call({set_db_pid, Pid}, _From, #file{db_monitor = OldRef} = File) ->
- case is_reference(OldRef) of
- true -> demonitor(OldRef, [flush]);
- false -> ok
- end,
- Ref = monitor(process, Pid),
- {reply, ok, File#file{db_monitor = Ref}};
-handle_call(sync, _From, #file{fd = Fd} = File) ->
- case file:sync(Fd) of
- ok ->
- {reply, ok, File};
- {error, _} = Error ->
- % We're intentionally dropping all knowledge
- % of this Fd so that we don't accidentally
- % recover in some whacky edge case that I
- % can't fathom.
- {stop, Error, Error, #file{fd = nil}}
- end;
-handle_call({truncate, Pos}, _From, #file{fd = Fd} = File) ->
- {ok, Pos} = file:position(Fd, Pos),
- case file:truncate(Fd) of
- ok ->
- {reply, ok, File#file{eof = Pos}};
- Error ->
- {reply, Error, File}
- end;
-handle_call({append_bin, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
- Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin),
- Size = iolist_size(Blocks),
- case file:write(Fd, Blocks) of
- ok ->
- {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}};
- Error ->
- {reply, Error, reset_eof(File)}
- end;
-handle_call({append_bins, Bins}, _From, #file{fd = Fd, eof = Pos} = File) ->
- {BlockResps, FinalPos} = lists:mapfoldl(
- fun(Bin, PosAcc) ->
- Blocks = make_blocks(PosAcc rem ?SIZE_BLOCK, Bin),
- Size = iolist_size(Blocks),
- {{Blocks, {PosAcc, Size}}, PosAcc + Size}
- end,
- Pos,
- Bins
- ),
- {AllBlocks, Resps} = lists:unzip(BlockResps),
- case file:write(Fd, AllBlocks) of
- ok ->
- {reply, {ok, Resps}, File#file{eof = FinalPos}};
- Error ->
- {reply, Error, reset_eof(File)}
- end;
-handle_call({write_header, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
- BinSize = byte_size(Bin),
- case Pos rem ?SIZE_BLOCK of
- 0 ->
- Padding = <<>>;
- BlockOffset ->
- Padding = <<0:(8 * (?SIZE_BLOCK - BlockOffset))>>
- end,
- FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])],
- case file:write(Fd, FinalBin) of
- ok ->
- {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}};
- Error ->
- {reply, Error, reset_eof(File)}
- end;
-handle_call(find_header, _From, #file{fd = Fd, eof = Pos} = File) ->
- {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
-
-handle_cast(close, Fd) ->
- {stop, normal, Fd}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_info(Msg, File) when ?IS_OLD_STATE(File) ->
- handle_info(Msg, upgrade_state(File));
-handle_info(maybe_close, File) ->
- case is_idle(File) of
- true ->
- {stop, normal, File};
- false ->
- erlang:send_after(?MONITOR_CHECK, self(), maybe_close),
- {noreply, File}
- end;
-handle_info({'DOWN', Ref, process, _Pid, _Info}, #file{db_monitor = Ref} = File) ->
- case is_idle(File) of
- true -> {stop, normal, File};
- false -> {noreply, File}
- end.
-
-format_status(_Opt, [PDict, #file{} = File]) ->
- {_Fd, FilePath} = couch_util:get_value(couch_file_fd, PDict),
- [{data, [{"State", File}, {"InitialFilePath", FilePath}]}].
-
-find_header(Fd, Block) ->
- case (catch load_header(Fd, Block)) of
- {ok, Bin} ->
- {ok, Bin};
- _Error ->
- ReadCount = config:get_integer(
- "couchdb", "find_header_read_count", ?DEFAULT_READ_COUNT
- ),
- find_header(Fd, Block - 1, ReadCount)
- end.
-
-load_header(Fd, Block) ->
- {ok, <<1, HeaderLen:32/integer, RestBlock/binary>>} =
- file:pread(Fd, Block * ?SIZE_BLOCK, ?SIZE_BLOCK),
- load_header(Fd, Block * ?SIZE_BLOCK, HeaderLen, RestBlock).
-
-load_header(Fd, Pos, HeaderLen) ->
- load_header(Fd, Pos, HeaderLen, <<>>).
-
-load_header(Fd, Pos, HeaderLen, RestBlock) ->
- TotalBytes = calculate_total_read_len(?PREFIX_SIZE, HeaderLen),
- RawBin =
- case TotalBytes =< byte_size(RestBlock) of
- true ->
- <<RawBin0:TotalBytes/binary, _/binary>> = RestBlock,
- RawBin0;
- false ->
- ReadStart = Pos + ?PREFIX_SIZE + byte_size(RestBlock),
- ReadLen = TotalBytes - byte_size(RestBlock),
- {ok, Missing} = file:pread(Fd, ReadStart, ReadLen),
- <<RestBlock/binary, Missing/binary>>
- end,
- <<Md5Sig:16/binary, HeaderBin/binary>> =
- iolist_to_binary(remove_block_prefixes(?PREFIX_SIZE, RawBin)),
- Md5Sig = couch_hash:md5_hash(HeaderBin),
- {ok, HeaderBin}.
-
-%% Read multiple block locations using a single file:pread/2.
--spec find_header(file:fd(), block_id(), non_neg_integer()) ->
- {ok, binary()} | no_valid_header.
-find_header(_Fd, Block, _ReadCount) when Block < 0 ->
- no_valid_header;
-find_header(Fd, Block, ReadCount) ->
- FirstBlock = max(0, Block - ReadCount + 1),
- BlockLocations = [?SIZE_BLOCK * B || B <- lists:seq(FirstBlock, Block)],
- {ok, DataL} = file:pread(Fd, [{L, ?PREFIX_SIZE} || L <- BlockLocations]),
- %% Since BlockLocations are ordered from oldest to newest, we rely
- %% on lists:foldl/3 to reverse the order, making HeaderLocations
- %% correctly ordered from newest to oldest.
- HeaderLocations = lists:foldl(
- fun
- ({Loc, <<1, HeaderSize:32/integer>>}, Acc) ->
- [{Loc, HeaderSize} | Acc];
- (_, Acc) ->
- Acc
- end,
- [],
- lists:zip(BlockLocations, DataL)
- ),
- case find_newest_header(Fd, HeaderLocations) of
- {ok, _Location, HeaderBin} ->
- {ok, HeaderBin};
- _ ->
- ok = file:advise(
- Fd, hd(BlockLocations), ReadCount * ?SIZE_BLOCK, dont_need
- ),
- NextBlock = hd(BlockLocations) div ?SIZE_BLOCK - 1,
- find_header(Fd, NextBlock, ReadCount)
- end.
-
--spec find_newest_header(file:fd(), [{location(), header_size()}]) ->
- {ok, location(), binary()} | not_found.
-find_newest_header(_Fd, []) ->
- not_found;
-find_newest_header(Fd, [{Location, Size} | LocationSizes]) ->
- case (catch load_header(Fd, Location, Size)) of
- {ok, HeaderBin} ->
- {ok, Location, HeaderBin};
- _Error ->
- find_newest_header(Fd, LocationSizes)
- end.
-
--spec read_raw_iolist_int(#file{}, Pos :: non_neg_integer(), Len :: non_neg_integer()) ->
- {Data :: iolist(), CurPos :: non_neg_integer()}.
-% 0110 UPGRADE CODE
-read_raw_iolist_int(Fd, {Pos, _Size}, Len) ->
- read_raw_iolist_int(Fd, Pos, Len);
-read_raw_iolist_int(#file{fd = Fd} = File, Pos, Len) ->
- {Pos, TotalBytes} = get_pread_locnum(File, Pos, Len),
- case catch file:pread(Fd, Pos, TotalBytes) of
- {ok, <<RawBin:TotalBytes/binary>>} ->
- {remove_block_prefixes(Pos rem ?SIZE_BLOCK, RawBin), Pos + TotalBytes};
- Else ->
- % This clause matches when the file we are working with got truncated
- % outside of CouchDB after we opened it. To find affected files, we
- % need to log the file path.
- %
- % Technically, this should also go into read_multi_raw_iolists_int/2,
- % but that doesn’t seem to be in use anywhere.
- {_Fd, Filepath} = get(couch_file_fd),
- throw({file_truncate_error, Else, Filepath})
- end.
-
-% TODO: check if this is really unused
-read_multi_raw_iolists_int(#file{fd = Fd} = File, PosLens) ->
- LocNums = lists:map(
- fun({Pos, Len}) ->
- get_pread_locnum(File, Pos, Len)
- end,
- PosLens
- ),
- {ok, Bins} = file:pread(Fd, LocNums),
- lists:zipwith(
- fun({Pos, TotalBytes}, Bin) ->
- <<RawBin:TotalBytes/binary>> = Bin,
- {remove_block_prefixes(Pos rem ?SIZE_BLOCK, RawBin), Pos + TotalBytes}
- end,
- LocNums,
- Bins
- ).
-
-get_pread_locnum(File, Pos, Len) ->
- BlockOffset = Pos rem ?SIZE_BLOCK,
- TotalBytes = calculate_total_read_len(BlockOffset, Len),
- case Pos + TotalBytes of
- Size when Size > File#file.eof ->
- couch_stats:increment_counter([pread, exceed_eof]),
- {_Fd, Filepath} = get(couch_file_fd),
- throw({read_beyond_eof, Filepath});
- Size when Size > File#file.pread_limit ->
- couch_stats:increment_counter([pread, exceed_limit]),
- {_Fd, Filepath} = get(couch_file_fd),
- throw({exceed_pread_limit, Filepath, File#file.pread_limit});
- _ ->
- {Pos, TotalBytes}
- end.
-
--spec extract_md5(iolist()) -> {binary(), iolist()}.
-extract_md5(FullIoList) ->
- {Md5List, IoList} = split_iolist(FullIoList, 16, []),
- {iolist_to_binary(Md5List), IoList}.
-
-calculate_total_read_len(0, FinalLen) ->
- calculate_total_read_len(1, FinalLen) + 1;
-calculate_total_read_len(BlockOffset, FinalLen) ->
- case ?SIZE_BLOCK - BlockOffset of
- BlockLeft when BlockLeft >= FinalLen ->
- FinalLen;
- BlockLeft ->
- FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK - 1)) +
- if
- ((FinalLen - BlockLeft) rem (?SIZE_BLOCK - 1)) =:= 0 -> 0;
- true -> 1
- end
- end.
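-
-% A worked example (not from the original comments): with ?SIZE_BLOCK = 4096,
-% reading a 5000-byte term that starts exactly on a block boundary crosses two
-% block-prefix bytes, so calculate_total_read_len(0, 5000) =:= 5002.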
-
-remove_block_prefixes(_BlockOffset, <<>>) ->
- [];
-remove_block_prefixes(0, <<_BlockPrefix, Rest/binary>>) ->
- remove_block_prefixes(1, Rest);
-remove_block_prefixes(BlockOffset, Bin) ->
- BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset,
- case size(Bin) of
- Size when Size > BlockBytesAvailable ->
- <<DataBlock:BlockBytesAvailable/binary, Rest/binary>> = Bin,
- [DataBlock | remove_block_prefixes(0, Rest)];
- _Size ->
- [Bin]
- end.
-
-make_blocks(_BlockOffset, []) ->
- [];
-make_blocks(0, IoList) ->
- [<<0>> | make_blocks(1, IoList)];
-make_blocks(BlockOffset, IoList) ->
- case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of
- {Begin, End} ->
- [Begin | make_blocks(0, End)];
- _SplitRemaining ->
- IoList
- end.
-
-%% @doc Returns a tuple where the first element contains the leading SplitAt
-%% bytes of the original iolist, and the 2nd element is the tail. If SplitAt
-%% is larger than byte_size(IoList), return the difference (the number of
-%% bytes still needed).
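-%% Examples (illustrative, not part of the original doc):
-%%   split_iolist([<<"abcde">>], 3, []) =:= {[<<"abc">>], [<<"de">>]}
-%%   split_iolist([<<"ab">>], 5, []) =:= 3   (still 3 bytes short of SplitAt)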
--spec split_iolist(IoList :: iolist(), SplitAt :: non_neg_integer(), Acc :: list()) ->
- {iolist(), iolist()} | non_neg_integer().
-split_iolist(List, 0, BeginAcc) ->
- {lists:reverse(BeginAcc), List};
-split_iolist([], SplitAt, _BeginAcc) ->
- SplitAt;
-split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) ->
- split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]);
-split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) ->
- <<Begin:SplitAt/binary, End/binary>> = Bin,
- split_iolist([End | Rest], 0, [Begin | BeginAcc]);
-split_iolist([Sublist | Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
- case split_iolist(Sublist, SplitAt, BeginAcc) of
- {Begin, End} ->
- {Begin, [End | Rest]};
- SplitRemaining ->
- split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc])
- end;
-split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) ->
- split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]).
-
-monitored_by_pids() ->
- {monitored_by, PidsAndRefs} = process_info(self(), monitored_by),
- lists:filter(fun is_pid/1, PidsAndRefs).
-
-verify_md5(_Fd, _Pos, IoList, <<>>) ->
- IoList;
-verify_md5(Fd, Pos, IoList, Md5) ->
- case couch_hash:md5_hash(IoList) of
- Md5 -> IoList;
- _ -> report_md5_error(Fd, Pos)
- end.
-
-report_md5_error(Fd, Pos) ->
- couch_log:emergency("File corruption in ~p at position ~B", [Fd, Pos]),
- exit({file_corruption, <<"file corruption">>}).
-
-% System dbs aren't monitored by couch_stats_process_tracker
-is_idle(#file{is_sys = true}) ->
- case monitored_by_pids() of
- [] -> true;
- _ -> false
- end;
-is_idle(#file{is_sys = false}) ->
- Tracker = whereis(couch_stats_process_tracker),
- case monitored_by_pids() of
- [] -> true;
- [Tracker] -> true;
- [_] -> exit(tracker_monitoring_failed);
- _ -> false
- end.
-
--spec process_info(CouchFilePid :: pid()) ->
- {Fd :: pid() | tuple(), FilePath :: string()} | undefined.
-
-process_info(Pid) ->
- couch_util:process_dict_get(Pid, couch_file_fd).
-
-update_read_timestamp() ->
- put(read_timestamp, os:timestamp()).
-
-upgrade_state(#file{db_monitor = DbPid} = File) when is_pid(DbPid) ->
- unlink(DbPid),
- Ref = monitor(process, DbPid),
- File#file{db_monitor = Ref};
-upgrade_state(State) ->
- State.
-
-get_pread_limit() ->
- case config:get_integer("couchdb", "max_pread_size", 0) of
- N when N > 0 -> N;
- _ -> infinity
- end.
-
-%% Reset the recorded eof in the event of a partially successful write.
-reset_eof(#file{} = File) ->
- {ok, Eof} = file:position(File#file.fd, eof),
- File#file{eof = Eof}.
-
--ifdef(TEST).
--include_lib("couch/include/couch_eunit.hrl").
-
-deleted_filename_test_() ->
- DbNames = ["dbname", "db.name", "user/dbname"],
- Fixtures = make_filename_fixtures(DbNames),
- lists:map(
- fun(Fixture) ->
- should_create_proper_deleted_filename(Fixture)
- end,
- Fixtures
- ).
-
-should_create_proper_deleted_filename(Before) ->
- {Before,
- ?_test(begin
- BeforeExtension = filename:extension(Before),
- BeforeBasename = filename:basename(Before, BeforeExtension),
- Re = "^" ++ BeforeBasename ++ "\.[0-9]{8}\.[0-9]{6}\.deleted\..*$",
- After = deleted_filename(Before),
- ?assertEqual(
- match,
- re:run(filename:basename(After), Re, [{capture, none}])
- ),
- ?assertEqual(BeforeExtension, filename:extension(After))
- end)}.
-
-make_filename_fixtures(DbNames) ->
- Formats = [
- "~s.couch",
- ".~s_design/mrview/3133e28517e89a3e11435dd5ac4ad85a.view",
- "shards/00000000-1fffffff/~s.1458336317.couch",
- ".shards/00000000-1fffffff/~s.1458336317_design",
- ".shards/00000000-1fffffff/~s.1458336317_design"
- "/mrview/3133e28517e89a3e11435dd5ac4ad85a.view"
- ],
- lists:flatmap(
- fun(DbName) ->
- lists:map(
- fun(Format) ->
- filename:join("/srv/data", io_lib:format(Format, [DbName]))
- end,
- Formats
- )
- end,
- DbNames
- ).
-
--endif.
diff --git a/src/couch/src/couch_flags.erl b/src/couch/src/couch_flags.erl
deleted file mode 100644
index 42d585f2e..000000000
--- a/src/couch/src/couch_flags.erl
+++ /dev/null
@@ -1,138 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% This module serves two functions:
-% - provides a public API to get the value of a given feature flag for a subject
-% - implements the {feature_flags, couch_flags} service
-
-% The module relies on couch_epi_data_gen which uses the data returned by
-% `couch_flags_config:data()` to generate callback module `couch_epi_data_gen_flags_config`.
-% The generated module shouldn't be used directly. We use the following APIs:
-% - `couch_epi:get_handle({flags, config})` - to get the handle (the name of the generated module)
-% - `couch_epi:get_value(Handle, Key)` - to do efficient matching
-%
-% The generated module implements clauses like the following
-% - get(couch, {binary_match_rule()}) ->
-% {matched_pattern(), size(matched_pattern()), [flag()]} | undefined
-% For example
-% - get(couch, {<<"/shards/test/exact">>}) ->
-% {<<"/shards/test/exact">>,18,[baz,flag_bar,flag_foo]};
-% - get(couch, {<<"/shards/test", _/binary>>}) ->
-% {<<"/shards/test*">>,13,[baz,flag_bar,flag_foo]};
-% - get(couch, {<<"/shards/exact">>}) ->
-% {<<"/shards/exact">>,13,[flag_bar,flag_foo]};
-% - get(couch, {<<"/shards/blacklist", _/binary>>}) ->
-% {<<"/shards/blacklist*">>,18,[]};
-% - get(couch, {<<"/", _/binary>>}) ->
-% {<<"/*">>,2,[flag_foo]};
-% - get(_, _) -> undefined.
-%
-% The `couch_epi:get/2` uses the Handler module to implement efficient matching.
-
-% In order to distinguish between shards and clustered dbs, the following
-% convention is used:
-% - it is a shard if the pattern starts with `/`
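-%
-% For example, given the sketched clauses above (hypothetical flags and shard
-% names), one would expect:
-%   couch_flags:is_enabled(flag_foo, <<"shards/test/exact">>) =:= true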
-
--module(couch_flags).
-
-%% Public API
--export([
- enabled/1,
- is_enabled/2
-]).
-
-%% For internal use
--export([
- rules/0
-]).
-
-%% For use from plugin
--export([
- subject_key/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include("couch_db_int.hrl").
-
--type subject() ::
- #db{}
- | #httpd{}
- | #shard{}
- | #ordered_shard{}
- | string()
- | binary().
-
--define(SERVICE_ID, feature_flags).
-
--spec enabled(subject()) -> [atom()].
-
-enabled(Subject) ->
- Key = maybe_handle(subject_key, [Subject], fun subject_key/1),
- Handle = couch_epi:get_handle({flags, config}),
- lists:usort(
- enabled(Handle, {<<"/", Key/binary>>}) ++
- enabled(Handle, {couch_db:normalize_dbname(Key)})
- ).
-
--spec is_enabled(FlagId :: atom(), subject()) -> boolean().
-
-is_enabled(FlagId, Subject) ->
- lists:member(FlagId, enabled(Subject)).
-
--spec rules() ->
- [{Key :: string(), Value :: string()}].
-
-rules() ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- lists:flatten(couch_epi:apply(Handle, ?SERVICE_ID, rules, [], [])).
-
--spec enabled(Handle :: couch_epi:handle(), Key :: {binary()}) -> [atom()].
-
-enabled(Handle, Key) ->
- case couch_epi:get_value(Handle, couch, Key) of
- {_, _, Flags} -> Flags;
- undefined -> []
- end.
-
--spec subject_key(subject()) -> binary().
-
-subject_key(#db{name = Name}) ->
- subject_key(Name);
-subject_key(#httpd{path_parts = [Name | _Rest]}) ->
- subject_key(Name);
-subject_key(#httpd{path_parts = []}) ->
- <<>>;
-subject_key(#shard{name = Name}) ->
- subject_key(Name);
-subject_key(#ordered_shard{name = Name}) ->
- subject_key(Name);
-subject_key(Name) when is_list(Name) ->
- subject_key(list_to_binary(Name));
-subject_key(Name) when is_binary(Name) ->
- Name.
-
--spec maybe_handle(
- Function :: atom(),
- Args :: [term()],
- Default :: fun((Args :: [term()]) -> term())
-) ->
- term().
-
-maybe_handle(Func, Args, Default) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
- no_decision when is_function(Default) ->
- apply(Default, Args);
- {decided, Result} ->
- Result
- end.
diff --git a/src/couch/src/couch_flags_config.erl b/src/couch/src/couch_flags_config.erl
deleted file mode 100644
index a50f4411f..000000000
--- a/src/couch/src/couch_flags_config.erl
+++ /dev/null
@@ -1,309 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% This module implements {flags, config} data provider
--module(couch_flags_config).
-
--export([
- enable/2,
- data/0,
- data/1,
- data_provider/0
-]).
-
-%% for test suite only
--export([
- parse_flags_term/1
-]).
-
--define(DATA_INTERVAL, 1000).
--define(MAX_FLAG_NAME_LENGTH, 256).
-
--type pattern() ::
-    %% non-empty binary which can optionally end with a *
- binary().
-
--type flag_id() :: atom().
-
--type flags() :: list(flag_id()).
-
--type parse_pattern() ::
- {
-        %% pattern without trailing * if it is present
- binary(),
- pattern(),
-        %% true if the pattern has a trailing *
- IsWildCard :: boolean(),
- PatternSize :: pos_integer()
- }.
-
--type rule() ::
- {
- parse_pattern(),
- EnabledFlags :: flags(),
- DisabledFlags :: flags()
- }.
-
-data_provider() ->
- {
- {flags, config},
- {callback_module, ?MODULE},
- [{interval, ?DATA_INTERVAL}]
- }.
-
--spec enable(FlagId :: atom(), Pattern :: string()) ->
- ok | {error, Reason :: term()}.
-
-enable(FlagId, Pattern) ->
- Key = atom_to_list(FlagId) ++ "||" ++ Pattern,
- config:set("feature_flags", Key, "true", false).
-
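-% For example (hypothetical flag and pattern), enable(flag_foo, "/shards/test*")
-% sets the key "flag_foo||/shards/test*" to "true" in the [feature_flags]
-% config section.
-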
--spec data() ->
- [{{pattern()}, {pattern(), PatternSize :: pos_integer(), flags()}}].
-
-data() ->
- data(get_config_section("feature_flags") ++ couch_flags:rules()).
-
--spec data(Rules :: [{Key :: string(), Value :: string()}]) ->
- [{{pattern()}, {pattern(), PatternSize :: pos_integer(), flags()}}].
-
-data(Config) ->
- ByPattern = collect_rules(Config),
- lists:reverse([{{P}, {P, size(P), E -- D}} || {P, {_, E, D}} <- ByPattern]).
-
--spec parse_rules([{Key :: string(), Value :: string()}]) -> [rule()].
-
-parse_rules(Config) ->
- lists:filtermap(
- fun({K, V}) ->
- case parse_rule(K, V) of
- {error, {Format, Args}} ->
- couch_log:error(Format, Args),
- false;
- Rule ->
- {true, Rule}
- end
- end,
- Config
- ).
-
--spec parse_rule(Key :: string(), Value :: string()) ->
- rule()
- | {error, Reason :: term()}.
-
-parse_rule(Key, "true") ->
- parse_flags(binary:split(list_to_binary(Key), <<"||">>), true);
-parse_rule(Key, "false") ->
- parse_flags(binary:split(list_to_binary(Key), <<"||">>), false);
-parse_rule(Key, Value) ->
- Reason = {
- "Expected value for the `~p` either `true` or `false`, (got ~p)",
- [Key, Value]
- },
- {error, Reason}.
-
--spec parse_flags([binary()], Value :: boolean()) ->
- rule() | {error, Reason :: term()}.
-
-parse_flags([FlagsBin, PatternBin], Value) ->
- case {parse_flags_term(FlagsBin), Value} of
- {{error, _} = Error, _} ->
- Error;
- {Flags, true} ->
- {parse_pattern(PatternBin), Flags, []};
- {Flags, false} ->
- {parse_pattern(PatternBin), [], Flags}
- end;
-parse_flags(_Tokens, _) ->
- couch_log:error(
- "Key should be in the form of `[flags]||pattern` (got ~s)", []
- ),
- false.
-
--spec parse_flags_term(Flags :: binary()) ->
- [flag_id()] | {error, Reason :: term()}.
-
-parse_flags_term(FlagsBin) ->
- {Flags, Errors} = lists:splitwith(
- fun erlang:is_atom/1,
- [parse_flag(F) || F <- split_by_comma(FlagsBin)]
- ),
- case Errors of
- [] ->
- lists:usort(Flags);
- _ ->
- {error, {
- "Cannot parse list of tags: ~n~p",
- Errors
- }}
- end.
-
-split_by_comma(Binary) ->
- case binary:split(Binary, <<",">>, [global]) of
- [<<>>] -> [];
- Tokens -> Tokens
- end.
-
-parse_flag(FlagName) when size(FlagName) > ?MAX_FLAG_NAME_LENGTH ->
- {too_long, FlagName};
-parse_flag(FlagName) ->
- FlagNameS = string:strip(binary_to_list(FlagName)),
- try
- list_to_existing_atom(FlagNameS)
- catch
- _:_ -> {invalid_flag, FlagName}
- end.
-
--spec parse_pattern(Pattern :: binary()) -> parse_pattern().
-
-parse_pattern(PatternBin) ->
- PatternSize = size(PatternBin),
- case binary:last(PatternBin) of
- $* ->
- PrefixBin = binary:part(PatternBin, 0, PatternSize - 1),
- {PrefixBin, PatternBin, true, PatternSize - 1};
- _ ->
- {PatternBin, PatternBin, false, PatternSize}
- end.
-
--spec collect_rules([{ConfigurationKey :: string(), ConfigurationValue :: string()}]) ->
- [{pattern(), rule()}].
-
-collect_rules(ConfigData) ->
- ByKey = by_key(parse_rules(ConfigData)),
- Keys = lists:sort(fun sort_by_length/2, gb_trees:keys(ByKey)),
- FuzzyKeys = lists:sort(
- fun sort_by_length/2,
- [K || {K, {{_, _, true, _}, _, _}} <- gb_trees:to_list(ByKey)]
- ),
- Rules = collect_rules(lists:reverse(Keys), FuzzyKeys, ByKey),
- gb_trees:to_list(Rules).
-
--spec sort_by_length(A :: binary(), B :: binary()) -> boolean().
-
-sort_by_length(A, B) ->
- size(A) =< size(B).
-
--spec by_key(Items :: [rule()]) -> Dictionary :: gb_trees:tree().
-
-by_key(Items) ->
- lists:foldl(
- fun({{_, K, _, _}, _, _} = Item, Acc) ->
- update_element(Acc, K, Item, fun(Value) ->
- update_flags(Value, Item)
- end)
- end,
- gb_trees:empty(),
- Items
- ).
-
--spec update_element(
- Tree :: gb_trees:tree(),
- Key :: pattern(),
- Default :: rule(),
- Fun :: fun((Item :: rule()) -> rule())
-) ->
- gb_trees:tree().
-
-update_element(Tree, Key, Default, Fun) ->
- case gb_trees:lookup(Key, Tree) of
- none ->
- gb_trees:insert(Key, Default, Tree);
- {value, Value} ->
- gb_trees:update(Key, Fun(Value), Tree)
- end.
-
--spec collect_rules(
- Keys :: [pattern()],
- FuzzyKeys :: [pattern()],
- ByKey :: gb_trees:tree()
-) ->
- gb_trees:tree().
-
-collect_rules([], _, Acc) ->
- Acc;
-collect_rules([Current | Rest], Items, Acc) ->
- collect_rules(Rest, Items -- [Current], inherit_flags(Current, Items, Acc)).
-
--spec inherit_flags(
- Current :: pattern(),
- FuzzyKeys :: [pattern()],
- ByKey :: gb_trees:tree()
-) ->
- gb_trees:tree().
-
-inherit_flags(_Current, [], Acc) ->
- Acc;
-inherit_flags(Current, [Item | Items], Acc) ->
- case match_prefix(Current, Item, Acc) of
- true ->
- inherit_flags(Current, Items, update_flags(Current, Item, Acc));
- false ->
- inherit_flags(Current, Items, Acc)
- end.
-
--spec match_prefix(
- AKey :: pattern(),
- BKey :: pattern(),
- ByKey :: gb_trees:tree()
-) ->
- boolean().
-
-match_prefix(AKey, BKey, Acc) ->
- {value, A} = gb_trees:lookup(AKey, Acc),
- {value, B} = gb_trees:lookup(BKey, Acc),
- match_prefix(A, B).
-
--spec match_prefix(A :: rule(), B :: rule()) -> boolean().
-
-match_prefix({{_, _, _, _}, _, _}, {{_, _, false, _}, _, _}) ->
- false;
-match_prefix({{Key, _, _, _}, _, _}, {{Key, _, true, _}, _, _}) ->
- true;
-match_prefix({{Key0, _, _, _}, _, _}, {{Key1, _, true, S1}, _, _}) ->
- case Key0 of
- <<Key1:S1/binary, _/binary>> -> true;
- _ -> false
- end.
-
--spec update_flags(
- AKey :: pattern(),
- BKey :: pattern(),
- ByKey :: gb_trees:tree()
-) ->
- gb_trees:tree().
-
-update_flags(AKey, BKey, Acc) ->
- {value, A} = gb_trees:lookup(AKey, Acc),
- {value, B} = gb_trees:lookup(BKey, Acc),
- gb_trees:update(AKey, update_flags(A, B), Acc).
-
--spec update_flags(A :: rule(), B :: rule()) -> rule().
-
-update_flags({Pattern, E0, D0}, {_, E1, D1}) ->
- DisabledByParent = lists:usort(D1 -- E0),
- E = lists:usort(lists:usort(E0 ++ E1) -- D0),
- D = lists:usort(D0 ++ DisabledByParent),
- {Pattern, E, D}.
-
--spec get_config_section(Section :: string()) ->
- [{Key :: string(), Value :: string()}].
-
-%% When we start couch_epi, the config application might not be started yet,
-%% so we would get `badarg` for some time.
-get_config_section(Section) ->
- try
- config:get(Section)
- catch
- error:badarg ->
- []
- end.
diff --git a/src/couch/src/couch_hash.erl b/src/couch/src/couch_hash.erl
deleted file mode 100644
index 842b37423..000000000
--- a/src/couch/src/couch_hash.erl
+++ /dev/null
@@ -1,45 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_hash).
-
--export([md5_hash/1, md5_hash_final/1, md5_hash_init/0, md5_hash_update/2]).
-
--ifdef(ERLANG_MD5).
-
-md5_hash(Data) ->
- erlang:md5(Data).
-
-md5_hash_final(Context) ->
- erlang:md5_final(Context).
-
-md5_hash_init() ->
- erlang:md5_init().
-
-md5_hash_update(Context, Data) ->
- erlang:md5_update(Context, Data).
-
--else.
-
-md5_hash(Data) ->
- crypto:hash(md5, Data).
-
-md5_hash_final(Context) ->
- crypto:hash_final(Context).
-
-md5_hash_init() ->
- crypto:hash_init(md5).
-
-md5_hash_update(Context, Data) ->
- crypto:hash_update(Context, Data).
-
--endif.
diff --git a/src/couch/src/couch_hotp.erl b/src/couch/src/couch_hotp.erl
deleted file mode 100644
index cdb8291f3..000000000
--- a/src/couch/src/couch_hotp.erl
+++ /dev/null
@@ -1,31 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_hotp).
-
--export([generate/4]).
-
-generate(Alg, Key, Counter, OutputLen) when
- is_atom(Alg), is_binary(Key), is_integer(Counter), is_integer(OutputLen)
-->
- Hmac = couch_util:hmac(Alg, Key, <<Counter:64>>),
- Offset = binary:last(Hmac) band 16#f,
- Code =
- ((binary:at(Hmac, Offset) band 16#7f) bsl 24) +
- ((binary:at(Hmac, Offset + 1) band 16#ff) bsl 16) +
- ((binary:at(Hmac, Offset + 2) band 16#ff) bsl 8) +
- (binary:at(Hmac, Offset + 3) band 16#ff),
- case OutputLen of
- 6 -> Code rem 1000000;
- 7 -> Code rem 10000000;
- 8 -> Code rem 100000000
- end.
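-
-%% A quick sanity check (not part of the original module), assuming the `sha`
-%% algorithm selects HMAC-SHA-1 in couch_util:hmac/3: with the RFC 4226 test
-%% secret, generate(sha, <<"12345678901234567890">>, 0, 6) =:= 755224 and
-%% generate(sha, <<"12345678901234567890">>, 1, 6) =:= 287082.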
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
deleted file mode 100644
index 629cbbdcc..000000000
--- a/src/couch/src/couch_httpd.erl
+++ /dev/null
@@ -1,1492 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([start_link/0, start_link/1, stop/0, handle_request/5]).
-
--export([header_value/2, header_value/3, qs_value/2, qs_value/3, qs/1, qs_json_value/3]).
--export([path/1, absolute_uri/2, body_length/1]).
--export([verify_is_server_admin/1, unquote/1, quote/1, recv/2, recv_chunked/4, error_info/1]).
--export([make_fun_spec_strs/1]).
--export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]).
--export([parse_form/1, json_body/1, json_body_obj/1, body/1]).
--export([doc_etag/1, doc_etag/3, make_etag/1, etag_match/2, etag_respond/3, etag_maybe/2]).
--export([primary_header_value/2, partition/1, serve_file/3, serve_file/4, server_header/0]).
--export([start_chunked_response/3, send_chunk/2, log_request/2]).
--export([start_response_length/4, start_response/3, send/2]).
--export([start_json_response/2, start_json_response/3, end_json_response/1]).
--export([
- send_response/4,
- send_response_no_cors/4,
- send_method_not_allowed/2,
- send_error/2, send_error/4,
- send_redirect/2,
- send_chunked_error/2
-]).
--export([send_json/2, send_json/3, send_json/4, last_chunk/1, parse_multipart_request/3]).
--export([accepted_encodings/1, handle_request_int/5, validate_referer/1, validate_ctype/2]).
--export([http_1_0_keep_alive/2]).
--export([validate_host/1]).
--export([validate_bind_address/1]).
--export([check_max_request_length/1]).
--export([handle_request/1]).
--export([set_auth_handlers/0]).
--export([maybe_decompress/2]).
-
--define(HANDLER_NAME_IN_MODULE_POS, 6).
--define(MAX_DRAIN_BYTES, 1048576).
--define(MAX_DRAIN_TIME_MSEC, 1000).
--define(DEFAULT_SOCKET_OPTIONS, "[{sndbuf, 262144}]").
--define(DEFAULT_AUTHENTICATION_HANDLERS,
- "{couch_httpd_auth, cookie_authentication_handler}, "
- "{couch_httpd_auth, default_authentication_handler}"
-).
-
-start_link() ->
- start_link(http).
-start_link(http) ->
- Port = config:get("httpd", "port", "5984"),
- start_link(?MODULE, [{port, Port}]);
-start_link(https) ->
- Port = config:get("ssl", "port", "6984"),
- {ok, Ciphers} = couch_util:parse_term(config:get("ssl", "ciphers", undefined)),
- {ok, Versions} = couch_util:parse_term(config:get("ssl", "tls_versions", undefined)),
- {ok, SecureRenegotiate} = couch_util:parse_term(
- config:get("ssl", "secure_renegotiate", undefined)
- ),
- ServerOpts0 =
- [
- {cacertfile, config:get("ssl", "cacert_file", undefined)},
- {keyfile, config:get("ssl", "key_file", undefined)},
- {certfile, config:get("ssl", "cert_file", undefined)},
- {password, config:get("ssl", "password", undefined)},
- {secure_renegotiate, SecureRenegotiate},
- {versions, Versions},
- {ciphers, Ciphers}
- ],
-
- case
- (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
- couch_util:get_value(certfile, ServerOpts0) == undefined)
- of
- true ->
- couch_log:error("SSL enabled but PEM certificates are missing", []),
- throw({error, missing_certs});
- false ->
- ok
- end,
-
- ServerOpts = [Opt || {_, V} = Opt <- ServerOpts0, V /= undefined],
-
- ClientOpts =
- case config:get("ssl", "verify_ssl_certificates", "false") of
- "false" ->
- [];
- "true" ->
- FailIfNoPeerCert =
- case config:get("ssl", "fail_if_no_peer_cert", "false") of
- "false" -> false;
- "true" -> true
- end,
- [
- {depth,
- list_to_integer(
- config:get(
- "ssl",
- "ssl_certificate_max_depth",
- "1"
- )
- )},
- {fail_if_no_peer_cert, FailIfNoPeerCert},
- {verify, verify_peer}
- ] ++
- case config:get("ssl", "verify_fun", undefined) of
- undefined -> [];
- SpecStr -> [{verify_fun, make_arity_3_fun(SpecStr)}]
- end
- end,
- SslOpts = ServerOpts ++ ClientOpts,
-
- Options =
- [
- {port, Port},
- {ssl, true},
- {ssl_opts, SslOpts}
- ],
- start_link(https, Options).
-start_link(Name, Options) ->
- BindAddress =
- case config:get("httpd", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
- ok = validate_bind_address(BindAddress),
-
- {ok, ServerOptions} = couch_util:parse_term(
- config:get("httpd", "server_options", "[]")
- ),
- {ok, SocketOptions} = couch_util:parse_term(
- config:get("httpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS)
- ),
-
- set_auth_handlers(),
- Handlers = get_httpd_handlers(),
-
- % ensure uuid is set so that concurrent replications
- % get the same value.
- couch_server:get_uuid(),
-
- Loop = fun(Req) ->
- case SocketOptions of
- [] ->
- ok;
- _ ->
- ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
- end,
- apply(?MODULE, handle_request, [Req | Handlers])
- end,
-
- % set mochiweb options
- FinalOptions = lists:append([
- Options,
- ServerOptions,
- [
- {loop, Loop},
- {name, Name},
- {ip, BindAddress}
- ]
- ]),
-
- % launch mochiweb
- case mochiweb_http:start(FinalOptions) of
- {ok, MochiPid} ->
- {ok, MochiPid};
- {error, Reason} ->
- couch_log:error("Failure to start Mochiweb: ~s~n", [Reason]),
- throw({error, Reason})
- end.
-
-stop() ->
- mochiweb_http:stop(couch_httpd),
- catch mochiweb_http:stop(https).
-
-set_auth_handlers() ->
- AuthenticationSrcs = make_fun_spec_strs(
- config:get(
- "httpd",
- "authentication_handlers",
- ?DEFAULT_AUTHENTICATION_HANDLERS
- )
- ),
- AuthHandlers = lists:map(
- fun(A) -> {auth_handler_name(A), make_arity_1_fun(A)} end, AuthenticationSrcs
- ),
- AuthenticationFuns =
- AuthHandlers ++
- [
- %% must be last
- fun couch_httpd_auth:party_mode_handler/1
- ],
- ok = application:set_env(couch, auth_handlers, AuthenticationFuns).
-
-auth_handler_name(SpecStr) ->
- lists:nth(?HANDLER_NAME_IN_MODULE_POS, re:split(SpecStr, "[\\W_]", [])).
-
-get_httpd_handlers() ->
- {ok, HttpdGlobalHandlers} = application:get_env(couch, httpd_global_handlers),
-
- UrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
- end,
- HttpdGlobalHandlers
- ),
-
- {ok, HttpdDbHandlers} = application:get_env(couch, httpd_db_handlers),
-
- DbUrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
- end,
- HttpdDbHandlers
- ),
-
- {ok, HttpdDesignHandlers} = application:get_env(couch, httpd_design_handlers),
-
- DesignUrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
- end,
- HttpdDesignHandlers
- ),
-
- UrlHandlers = dict:from_list(UrlHandlersList),
- DbUrlHandlers = dict:from_list(DbUrlHandlersList),
- DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
- DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
- [DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers].
-
-% SpecStr is a string like "{my_module, my_fun}"
-% or "{my_module, my_fun, <<"my_arg">>}"
-make_arity_1_fun(SpecStr) ->
- case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg) -> Mod:Fun(Arg) end
- end.
-
-make_arity_2_fun(SpecStr) ->
- case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
- end.
-
-make_arity_3_fun(SpecStr) ->
- case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
- end.
-
-% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}"
-make_fun_spec_strs(SpecStr) ->
- re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
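-
-% Illustrative sketch (not part of the original source; mod_a, mod_b, fun_a
-% and fun_b are placeholder names): how the spec-string helpers above are
-% typically combined when turning ini handler strings into funs.
-%
-%   Specs = make_fun_spec_strs("{mod_a, fun_a}, {mod_b, fun_b, <<\"x\">>}"),
-%   Funs = [make_arity_1_fun(S) || S <- Specs],
-%   % Funs now holds fun(Arg) -> mod_a:fun_a(Arg) end and
-%   % fun(Arg) -> mod_b:fun_b(Arg, <<"x">>) end.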
-
-handle_request(MochiReq) ->
- Body = proplists:get_value(body, MochiReq:get(opts)),
- erlang:put(mochiweb_request_body, Body),
- apply(?MODULE, handle_request, [MochiReq | get_httpd_handlers()]).
-
-handle_request(
- MochiReq,
- DefaultFun,
- UrlHandlers,
- DbUrlHandlers,
- DesignUrlHandlers
-) ->
- %% reset rewrite count for new request
- erlang:put(?REWRITE_COUNT, 0),
-
- MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq),
-
- handle_request_int(
- MochiReq1,
- DefaultFun,
- UrlHandlers,
- DbUrlHandlers,
- DesignUrlHandlers
- ).
-
-handle_request_int(MochiReq, DefaultFun,
- UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
- Begin = os:timestamp(),
- % for the path, use the raw path with the query string and fragment
- % removed, but URL quoting left intact
- RawUri = MochiReq:get(raw_path),
- {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
- % get requested path
- RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
- undefined ->
- case MochiReq:get_header_value("x-couchdb-requested-path") of
- undefined -> RawUri;
- R -> R
- end;
- P -> P
- end,
-
- HandlerKey =
- case mochiweb_util:partition(Path, "/") of
- {"", "", ""} ->
- <<"/">>; % Special case the root url handler
- {FirstPart, _, _} ->
- list_to_binary(FirstPart)
- end,
- couch_log:debug("~p ~s ~p from ~p~nHeaders: ~p", [
- MochiReq:get(method),
- RawUri,
- MochiReq:get(version),
- peer(MochiReq),
- mochiweb_headers:to_list(MochiReq:get(headers))
- ]),
-
- Method1 =
- case MochiReq:get(method) of
- % already an atom
- Meth when is_atom(Meth) -> Meth;
-
-            % Non-standard HTTP verbs aren't atoms (COPY, MOVE etc.) so convert when
-            % possible (if any loaded module references the atom, it already exists).
- Meth -> couch_util:to_existing_atom(Meth)
- end,
- increment_method_stats(Method1),
-
- % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
- MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
- Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST",
- "PUT", "DELETE",
- "TRACE", "CONNECT",
- "COPY"]) of
- true ->
- couch_log:info("MethodOverride: ~s (real method was ~s)",
- [MethodOverride, Method1]),
- case Method1 of
- 'POST' -> couch_util:to_existing_atom(MethodOverride);
- _ ->
- % Ignore X-HTTP-Method-Override when the original verb isn't POST.
- % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
- % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
- Method1
- end;
- _ -> Method1
- end,
-
- % alias HEAD to GET as mochiweb takes care of stripping the body
- Method = case Method2 of
- 'HEAD' -> 'GET';
- Other -> Other
- end,
-
- HttpReq = #httpd{
- mochi_req = MochiReq,
- peer = peer(MochiReq),
- method = Method,
- requested_path_parts =
- [?l2b(unquote(Part)) || Part <- string:tokens(RequestedPath, "/")],
- path_parts = [?l2b(unquote(Part)) || Part <- string:tokens(Path, "/")],
- db_url_handlers = DbUrlHandlers,
- design_url_handlers = DesignUrlHandlers,
- default_fun = DefaultFun,
- url_handlers = UrlHandlers,
- user_ctx = erlang:erase(pre_rewrite_user_ctx),
- auth = erlang:erase(pre_rewrite_auth)
- },
-
- HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun),
-
- {ok, Resp} =
- try
- validate_host(HttpReq),
- check_request_uri_length(RawUri),
- case chttpd_cors:maybe_handle_preflight_request(HttpReq) of
- not_preflight ->
- case authenticate_request(HttpReq) of
- #httpd{} = Req ->
- HandlerFun(Req);
- Response ->
- Response
- end;
- Response ->
- Response
- end
- catch
- throw:{http_head_abort, Resp0} ->
- {ok, Resp0};
- throw:{invalid_json, S} ->
- couch_log:error("attempted upload of invalid JSON"
- " (set log_level to debug to log it)", []),
- couch_log:debug("Invalid JSON: ~p",[S]),
- send_error(HttpReq, {bad_request, invalid_json});
- throw:unacceptable_encoding ->
- couch_log:error("unsupported encoding method for the response", []),
- send_error(HttpReq, {not_acceptable, "unsupported encoding"});
- throw:bad_accept_encoding_value ->
- couch_log:error("received invalid Accept-Encoding header", []),
- send_error(HttpReq, bad_request);
- exit:{shutdown, Error} ->
- exit({shutdown, Error});
- exit:normal ->
- exit(normal);
- exit:snappy_nif_not_loaded ->
- ErrorReason = "To access the database or view index, Apache CouchDB"
- " must be built with Erlang OTP R13B04 or higher.",
- couch_log:error("~s", [ErrorReason]),
- send_error(HttpReq, {bad_otp_release, ErrorReason});
- exit:{body_too_large, _} ->
- send_error(HttpReq, request_entity_too_large);
- exit:{uri_too_long, _} ->
- send_error(HttpReq, request_uri_too_long);
- ?STACKTRACE(throw, Error, Stack)
- couch_log:debug("Minor error in HTTP request: ~p",[Error]),
- couch_log:debug("Stacktrace: ~p",[Stack]),
- send_error(HttpReq, Error);
- ?STACKTRACE(error, badarg, Stack)
- couch_log:error("Badarg error in HTTP request",[]),
- couch_log:info("Stacktrace: ~p",[Stack]),
- send_error(HttpReq, badarg);
- ?STACKTRACE(error, function_clause, Stack)
- couch_log:error("function_clause error in HTTP request",[]),
- couch_log:info("Stacktrace: ~p",[Stack]),
- send_error(HttpReq, function_clause);
- ?STACKTRACE(ErrorType, Error, Stack)
- couch_log:error("Uncaught error in HTTP request: ~p",
- [{ErrorType, Error}]),
- couch_log:info("Stacktrace: ~p",[Stack]),
- send_error(HttpReq, Error)
- end,
- RequestTime = round(timer:now_diff(os:timestamp(), Begin)/1000),
- couch_stats:update_histogram([couchdb, request_time], RequestTime),
- couch_stats:increment_counter([couchdb, httpd, requests]),
- {ok, Resp}.
-
-validate_host(#httpd{} = Req) ->
- case chttpd_util:get_chttpd_config_boolean("validate_host", false) of
- true ->
- Host = hostname(Req),
- ValidHosts = valid_hosts(),
- case lists:member(Host, ValidHosts) of
- true ->
- ok;
- false ->
- throw({bad_request, <<"Invalid host header">>})
- end;
- false ->
- ok
- end.
-
-hostname(#httpd{} = Req) ->
- case header_value(Req, "Host") of
- undefined ->
- undefined;
- Host ->
- [Name | _] = re:split(Host, ":[0-9]+$", [{parts, 2}, {return, list}]),
- Name
- end.
-
-valid_hosts() ->
- List = chttpd_util:get_chttpd_config("valid_hosts", ""),
- re:split(List, ",", [{return, list}]).
-
-check_request_uri_length(Uri) ->
- check_request_uri_length(
- Uri,
- chttpd_util:get_chttpd_config("max_uri_length")
- ).
-
-check_request_uri_length(_Uri, undefined) ->
- ok;
-check_request_uri_length(Uri, MaxUriLen) when is_list(MaxUriLen) ->
- case length(Uri) > list_to_integer(MaxUriLen) of
- true ->
- throw(request_uri_too_long);
- false ->
- ok
- end.
-
-authenticate_request(Req) ->
- {ok, AuthenticationFuns} = application:get_env(couch, auth_handlers),
- chttpd:authenticate_request(Req, couch_auth_cache, AuthenticationFuns).
-
-increment_method_stats(Method) ->
- couch_stats:increment_counter([couchdb, httpd_request_methods, Method]).
-
-validate_referer(Req) ->
- Host = host_for_request(Req),
- Referer = header_value(Req, "Referer", fail),
- case Referer of
- fail ->
- throw({bad_request, <<"Referer header required.">>});
- Referer ->
- {_, RefererHost, _, _, _} = mochiweb_util:urlsplit(Referer),
- if
- RefererHost =:= Host -> ok;
- true -> throw({bad_request, <<"Referer header must match host.">>})
- end
- end.
-
-validate_ctype(Req, Ctype) ->
- case header_value(Req, "Content-Type") of
- undefined ->
- throw({bad_ctype, "Content-Type must be " ++ Ctype});
- ReqCtype ->
- case string:tokens(ReqCtype, ";") of
- [Ctype] -> ok;
- [Ctype | _Rest] -> ok;
- _Else -> throw({bad_ctype, "Content-Type must be " ++ Ctype})
- end
- end.
-
-check_max_request_length(Req) ->
- Len = list_to_integer(header_value(Req, "Content-Length", "0")),
- MaxLen = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296
- ),
- case Len > MaxLen of
- true ->
- exit({body_too_large, Len});
- false ->
- ok
- end.
-
-% Utilities
-
-partition(Path) ->
- mochiweb_util:partition(Path, "/").
-
-header_value(#httpd{mochi_req = MochiReq}, Key) ->
- MochiReq:get_header_value(Key).
-
-header_value(#httpd{mochi_req = MochiReq}, Key, Default) ->
- case MochiReq:get_header_value(Key) of
- undefined -> Default;
- Value -> Value
- end.
-
-primary_header_value(#httpd{mochi_req = MochiReq}, Key) ->
- MochiReq:get_primary_header_value(Key).
-
-accepted_encodings(#httpd{mochi_req = MochiReq}) ->
- case MochiReq:accepted_encodings(["gzip", "identity"]) of
- bad_accept_encoding_value ->
- throw(bad_accept_encoding_value);
- [] ->
- throw(unacceptable_encoding);
- EncList ->
- EncList
- end.
-
-serve_file(Req, RelativePath, DocumentRoot) ->
- serve_file(Req, RelativePath, DocumentRoot, []).
-
-serve_file(Req0, RelativePath0, DocumentRoot0, ExtraHeaders) ->
- Headers0 = basic_headers(Req0, ExtraHeaders),
- {ok, {Req1, Code1, Headers1, RelativePath1, DocumentRoot1}} =
- chttpd_plugin:before_serve_file(
- Req0, 200, Headers0, RelativePath0, DocumentRoot0
- ),
- log_request(Req1, Code1),
- #httpd{mochi_req = MochiReq} = Req1,
- {ok, MochiReq:serve_file(RelativePath1, DocumentRoot1, Headers1)}.
-
-qs_value(Req, Key) ->
- qs_value(Req, Key, undefined).
-
-qs_value(Req, Key, Default) ->
- couch_util:get_value(Key, qs(Req), Default).
-
-qs_json_value(Req, Key, Default) ->
- case qs_value(Req, Key, Default) of
- Default ->
- Default;
- Result ->
- ?JSON_DECODE(Result)
- end.
-
-qs(#httpd{mochi_req = MochiReq}) ->
- MochiReq:parse_qs().
-
-path(#httpd{mochi_req = MochiReq}) ->
- MochiReq:get(path).
-
-host_for_request(#httpd{mochi_req = MochiReq}) ->
- XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"
- ),
- case MochiReq:get_header_value(XHost) of
- undefined ->
- case MochiReq:get_header_value("Host") of
- undefined ->
- {ok, {Address, Port}} =
- case MochiReq:get(socket) of
- {ssl, SslSocket} -> ssl:sockname(SslSocket);
- Socket -> inet:sockname(Socket)
- end,
- inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
- Value1 ->
- Value1
- end;
- Value ->
- Value
- end.
-
-absolute_uri(#httpd{mochi_req = MochiReq} = Req, [$/ | _] = Path) ->
- Host = host_for_request(Req),
- XSsl = chttpd_util:get_chttpd_config("x_forwarded_ssl", "X-Forwarded-Ssl"),
- Scheme =
- case MochiReq:get_header_value(XSsl) of
- "on" ->
- "https";
- _ ->
- XProto = chttpd_util:get_chttpd_config(
- "x_forwarded_proto", "X-Forwarded-Proto"
- ),
- case MochiReq:get_header_value(XProto) of
- %% Restrict to "https" and "http" schemes only
- "https" ->
- "https";
- _ ->
- case MochiReq:get(scheme) of
- https -> "https";
- http -> "http"
- end
- end
- end,
- Scheme ++ "://" ++ Host ++ Path;
-absolute_uri(_Req, _Path) ->
- throw({bad_request, "path must begin with a /."}).
-
-unquote(UrlEncodedString) ->
- chttpd:unquote(UrlEncodedString).
-
-quote(UrlDecodedString) ->
- mochiweb_util:quote_plus(UrlDecodedString).
-
-parse_form(#httpd{mochi_req = MochiReq}) ->
- mochiweb_multipart:parse_form(MochiReq).
-
-recv(#httpd{mochi_req = MochiReq}, Len) ->
- MochiReq:recv(Len).
-
-recv_chunked(#httpd{mochi_req = MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
-    % ChunkFun is called once per chunk as ChunkFun({Length, Binary}, State),
-    % and is called with Length == 0 for the final chunk.
- MochiReq:stream_body(
- MaxChunkSize,
- ChunkFun,
- InitState,
- chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296
- )
- ).
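-
-% Hedged example (illustration only, assuming stream_body/4 returns the final
-% fold state): a ChunkFun that accumulates the request body, following the
-% contract noted above where Length == 0 marks the final chunk.
-%
-%   CollectFun = fun({0, _}, Acc) -> iolist_to_binary(lists:reverse(Acc));
-%                   ({_Len, Bin}, Acc) -> [Bin | Acc]
-%                end,
-%   Body = recv_chunked(Req, 65536, CollectFun, []).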
-
-body_length(#httpd{mochi_req = MochiReq}) ->
- MochiReq:get(body_length).
-
-body(#httpd{mochi_req = MochiReq, req_body = undefined}) ->
- MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296
- ),
- MochiReq:recv_body(MaxSize);
-body(#httpd{req_body = ReqBody}) ->
- ReqBody.
-
-json_body(#httpd{req_body = undefined} = Httpd) ->
- case body(Httpd) of
- undefined ->
- throw({bad_request, "Missing request body"});
- Body ->
- ?JSON_DECODE(maybe_decompress(Httpd, Body))
- end;
-json_body(#httpd{req_body = ReqBody}) ->
- ReqBody.
-
-json_body_obj(Httpd) ->
- case json_body(Httpd) of
- {Props} -> {Props};
- _Else -> throw({bad_request, "Request body must be a JSON object"})
- end.
-
-maybe_decompress(Httpd, Body) ->
- case header_value(Httpd, "Content-Encoding", "identity") of
- "gzip" ->
- zlib:gunzip(Body);
- "identity" ->
- Body;
- Else ->
- throw({bad_ctype, [Else, " is not a supported content encoding."]})
- end.
-
-doc_etag(#doc{id = Id, body = Body, revs = {Start, [DiskRev | _]}}) ->
- doc_etag(Id, Body, {Start, DiskRev}).
-
-doc_etag(<<"_local/", _/binary>>, Body, {Start, DiskRev}) ->
- make_etag({Start, DiskRev, Body});
-doc_etag(_Id, _Body, {Start, DiskRev}) ->
- rev_etag({Start, DiskRev}).
-
-rev_etag({Start, DiskRev}) ->
- Rev = couch_doc:rev_to_str({Start, DiskRev}),
- <<$", Rev/binary, $">>.
-
-make_etag(Term) ->
- <<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
- iolist_to_binary([$", io_lib:format("~.36B", [SigInt]), $"]).
-
-etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
- etag_match(Req, binary_to_list(CurrentEtag));
-etag_match(Req, CurrentEtag) ->
- EtagsToMatch = string:tokens(
- header_value(Req, "If-None-Match", ""), ", "
- ),
- lists:member(CurrentEtag, EtagsToMatch).
-
-etag_respond(Req, CurrentEtag, RespFun) ->
- case etag_match(Req, CurrentEtag) of
- true ->
- % the client has this in their cache.
- send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
- false ->
- % Run the function.
- RespFun()
- end.
-
-etag_maybe(Req, RespFun) ->
- try
- RespFun()
- catch
- throw:{etag_match, ETag} ->
- send_response(Req, 304, [{"ETag", ETag}], <<>>)
- end.
-
-verify_is_server_admin(#httpd{user_ctx = UserCtx}) ->
- verify_is_server_admin(UserCtx);
-verify_is_server_admin(#user_ctx{roles = Roles}) ->
- case lists:member(<<"_admin">>, Roles) of
- true -> ok;
- false -> throw({unauthorized, <<"You are not a server admin.">>})
- end.
-
-log_request(#httpd{mochi_req = MochiReq, peer = Peer} = Req, Code) ->
- case erlang:get(dont_log_request) of
- true ->
- ok;
- _ ->
- couch_log:notice("~s - - ~s ~s ~B", [
- Peer,
- MochiReq:get(method),
- MochiReq:get(raw_path),
- Code
- ]),
- gen_event:notify(couch_plugin, {log_request, Req, Code})
- end.
-
-log_response(Code, _) when Code < 400 ->
- ok;
-log_response(Code, Body) ->
- case {erlang:get(dont_log_response), Body} of
- {true, _} ->
- ok;
- {_, {json, JsonObj}} ->
- ErrorMsg = couch_util:json_encode(JsonObj),
- couch_log:error("httpd ~p error response:~n ~s", [Code, ErrorMsg]);
- _ ->
- couch_log:error("httpd ~p error response:~n ~s", [Code, Body])
- end.
-
-start_response_length(#httpd{mochi_req = MochiReq} = Req, Code, Headers0, Length) ->
- Headers1 = basic_headers(Req, Headers0),
- Resp = handle_response(Req, Code, Headers1, Length, start_response_length),
- case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
- end,
- {ok, Resp}.
-
-start_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
- Headers1 = basic_headers(Req, Headers0),
- Resp = handle_response(Req, Code, Headers1, undefined, start_response),
- case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
- end,
- {ok, Resp}.
-
-send({remote, Pid, Ref} = Resp, Data) ->
- Pid ! {Ref, send, Data},
- {ok, Resp};
-send(Resp, Data) ->
- Resp:send(Data),
- {ok, Resp}.
-
-no_resp_conn_header([]) ->
- true;
-no_resp_conn_header([{Hdr, V} | Rest]) when is_binary(Hdr) ->
- no_resp_conn_header([{?b2l(Hdr), V} | Rest]);
-no_resp_conn_header([{Hdr, _} | Rest]) when is_list(Hdr) ->
- case string:to_lower(Hdr) of
- "connection" -> false;
- _ -> no_resp_conn_header(Rest)
- end.
-
-http_1_0_keep_alive(#httpd{mochi_req = MochiReq}, Headers) ->
- http_1_0_keep_alive(MochiReq, Headers);
-http_1_0_keep_alive(Req, Headers) ->
- KeepOpen = Req:should_close() == false,
- IsHttp10 = Req:get(version) == {1, 0},
- NoRespHeader = no_resp_conn_header(Headers),
- case KeepOpen andalso IsHttp10 andalso NoRespHeader of
- true -> [{"Connection", "Keep-Alive"} | Headers];
- false -> Headers
- end.
-
-start_chunked_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
- Headers1 = add_headers(Req, Headers0),
- Resp = handle_response(Req, Code, Headers1, chunked, respond),
- case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
- end,
- {ok, Resp}.
-
-send_chunk({remote, Pid, Ref} = Resp, Data) ->
- Pid ! {Ref, chunk, Data},
- {ok, Resp};
-send_chunk(Resp, Data) ->
- case iolist_size(Data) of
- % do nothing
- 0 -> ok;
- _ -> Resp:write_chunk(Data)
- end,
- {ok, Resp}.
-
-last_chunk({remote, Pid, Ref} = Resp) ->
- Pid ! {Ref, chunk, <<>>},
- {ok, Resp};
-last_chunk(Resp) ->
- Resp:write_chunk([]),
- {ok, Resp}.
-
-send_response(Req, Code, Headers0, Body) ->
- Headers1 = chttpd_cors:headers(Req, Headers0),
- send_response_no_cors(Req, Code, Headers1, Body).
-
-send_response_no_cors(#httpd{mochi_req = MochiReq} = Req, Code, Headers, Body) ->
- Headers1 = http_1_0_keep_alive(MochiReq, Headers),
- Headers2 = basic_headers_no_cors(Req, Headers1),
- Headers3 = chttpd_xframe_options:header(Req, Headers2),
- Headers4 = chttpd_prefer_header:maybe_return_minimal(Req, Headers3),
- Resp = handle_response(Req, Code, Headers4, Body, respond),
- log_response(Code, Body),
- {ok, Resp}.
-
-send_method_not_allowed(Req, Methods) ->
- send_error(
- Req,
- 405,
- [{"Allow", Methods}],
- <<"method_not_allowed">>,
- ?l2b("Only " ++ Methods ++ " allowed")
- ).
-
-send_json(Req, Value) ->
- send_json(Req, 200, Value).
-
-send_json(Req, Code, Value) ->
- send_json(Req, Code, [], Value).
-
-send_json(Req, Code, Headers, Value) ->
- initialize_jsonp(Req),
- AllHeaders = maybe_add_default_headers(Req, Headers),
- send_response(Req, Code, AllHeaders, {json, Value}).
-
-start_json_response(Req, Code) ->
- start_json_response(Req, Code, []).
-
-start_json_response(Req, Code, Headers) ->
- initialize_jsonp(Req),
- AllHeaders = maybe_add_default_headers(Req, Headers),
- {ok, Resp} = start_chunked_response(Req, Code, AllHeaders),
- case start_jsonp() of
- [] -> ok;
- Start -> send_chunk(Resp, Start)
- end,
- {ok, Resp}.
-
-end_json_response(Resp) ->
- send_chunk(Resp, end_jsonp() ++ [$\n]),
- last_chunk(Resp).
-
-maybe_add_default_headers(ForRequest, ToHeaders) ->
- DefaultHeaders = [
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", negotiate_content_type(ForRequest)}
- ],
- lists:ukeymerge(1, lists:keysort(1, ToHeaders), DefaultHeaders).
-
-initialize_jsonp(Req) ->
- case get(jsonp) of
- undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp));
- _ -> ok
- end,
- case get(jsonp) of
- no_jsonp ->
- [];
- [] ->
- [];
- CallBack ->
- try
- % make sure jsonp is configured on (default off)
- case
- chttpd_util:get_chttpd_config_boolean(
- "allow_jsonp", false
- )
- of
- true ->
- validate_callback(CallBack);
- false ->
- put(jsonp, no_jsonp)
- end
- catch
- Error ->
- put(jsonp, no_jsonp),
- throw(Error)
- end
- end.
-
-start_jsonp() ->
- case get(jsonp) of
- no_jsonp -> [];
- [] -> [];
- CallBack -> ["/* CouchDB */", CallBack, "("]
- end.
-
-end_jsonp() ->
- case erlang:erase(jsonp) of
- no_jsonp -> [];
- [] -> [];
- _ -> ");"
- end.
-
-validate_callback(CallBack) when is_binary(CallBack) ->
- validate_callback(binary_to_list(CallBack));
-validate_callback([]) ->
- ok;
-validate_callback([Char | Rest]) ->
- case Char of
- _ when Char >= $a andalso Char =< $z -> ok;
- _ when Char >= $A andalso Char =< $Z -> ok;
- _ when Char >= $0 andalso Char =< $9 -> ok;
- _ when Char == $. -> ok;
- _ when Char == $_ -> ok;
- _ when Char == $[ -> ok;
- _ when Char == $] -> ok;
- _ -> throw({bad_request, invalid_callback})
- end,
- validate_callback(Rest).
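-
-% Hedged examples (illustration only): validate_callback/1 accepts letters,
-% digits, '_', '.', '[' and ']' and throws on anything else.
-%
-%   ok = validate_callback(<<"jQuery.cb_123">>),
-%   % validate_callback(<<"alert(1)">>) throws {bad_request, invalid_callback}.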
-
-error_info({Error, Reason}) when is_list(Reason) ->
- error_info({Error, ?l2b(Reason)});
-error_info(bad_request) ->
- {400, <<"bad_request">>, <<>>};
-error_info({bad_request, Reason}) ->
- {400, <<"bad_request">>, Reason};
-error_info({query_parse_error, Reason}) ->
- {400, <<"query_parse_error">>, Reason};
-% Prior art for md5 mismatch resulting in a 400 is from AWS S3
-error_info(md5_mismatch) ->
- {400, <<"content_md5_mismatch">>, <<"Possible message corruption.">>};
-error_info({illegal_docid, Reason}) ->
- {400, <<"illegal_docid">>, Reason};
-error_info({illegal_partition, Reason}) ->
- {400, <<"illegal_partition">>, Reason};
-error_info(not_found) ->
- {404, <<"not_found">>, <<"missing">>};
-error_info({not_found, Reason}) ->
- {404, <<"not_found">>, Reason};
-error_info({not_acceptable, Reason}) ->
- {406, <<"not_acceptable">>, Reason};
-error_info(conflict) ->
- {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({forbidden, Msg}) ->
- {403, <<"forbidden">>, Msg};
-error_info({unauthorized, Msg}) ->
- {401, <<"unauthorized">>, Msg};
-error_info(file_exists) ->
- {412, <<"file_exists">>, <<
- "The database could not be "
- "created, the file already exists."
- >>};
-error_info(request_entity_too_large) ->
- {413, <<"too_large">>, <<"the request entity is too large">>};
-error_info({request_entity_too_large, {attachment, AttName}}) ->
- {413, <<"attachment_too_large">>, AttName};
-error_info({request_entity_too_large, DocID}) ->
- {413, <<"document_too_large">>, DocID};
-error_info(request_uri_too_long) ->
- {414, <<"too_long">>, <<"the request uri is too long">>};
-error_info({bad_ctype, Reason}) ->
- {415, <<"bad_content_type">>, Reason};
-error_info(requested_range_not_satisfiable) ->
- {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
-error_info({error, {illegal_database_name, Name}}) ->
- Message =
- <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
- "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
- "are allowed. Must begin with a letter.">>,
- {400, <<"illegal_database_name">>, Message};
-error_info({missing_stub, Reason}) ->
- {412, <<"missing_stub">>, Reason};
-error_info({misconfigured_server, Reason}) ->
- {500, <<"misconfigured_server">>, couch_util:to_binary(Reason)};
-error_info({Error, Reason}) ->
- {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
-error_info(Error) ->
- {500, <<"unknown_error">>, couch_util:to_binary(Error)}.
-
-error_headers(#httpd{mochi_req = MochiReq} = Req, Code, ErrorStr, ReasonStr) ->
- if
- Code == 401 ->
- % this is where the basic auth popup is triggered
- case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
- undefined ->
- case chttpd_util:get_chttpd_config("WWW-Authenticate") of
- undefined ->
- % If the client is a browser and the basic auth popup isn't turned on
- % redirect to the session page.
- case ErrorStr of
- <<"unauthorized">> ->
- case
- chttpd_util:get_chttpd_auth_config(
- "authentication_redirect", "/_utils/session.html"
- )
- of
- undefined ->
- {Code, []};
- AuthRedirect ->
- case
- chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false
- )
- of
- true ->
- % send the browser popup header no matter what if we are require_valid_user
- {Code, [
- {"WWW-Authenticate",
- "Basic realm=\"server\""}
- ]};
- false ->
- case
- MochiReq:accepts_content_type(
- "application/json"
- )
- of
- true ->
- {Code, []};
- false ->
- case
- MochiReq:accepts_content_type(
- "text/html"
- )
- of
- true ->
- % Redirect to the path the user requested, not
- % the one that is used internally.
- UrlReturnRaw =
- case
- MochiReq:get_header_value(
- "x-couchdb-vhost-path"
- )
- of
- undefined ->
- MochiReq:get(path);
- VHostPath ->
- VHostPath
- end,
- RedirectLocation = lists:flatten(
- [
- AuthRedirect,
- "?return=",
- couch_util:url_encode(
- UrlReturnRaw
- ),
- "&reason=",
- couch_util:url_encode(
- ReasonStr
- )
- ]
- ),
- {302, [
- {"Location",
- absolute_uri(
- Req,
- RedirectLocation
- )}
- ]};
- false ->
- {Code, []}
- end
- end
- end
- end;
- _Else ->
- {Code, []}
- end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
- end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
- end;
- true ->
- {Code, []}
- end.
-
-send_error(Req, Error) ->
- {Code, ErrorStr, ReasonStr} = error_info(Error),
- {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
- send_error(Req, Code1, Headers, ErrorStr, ReasonStr).
-
-send_error(Req, Code, ErrorStr, ReasonStr) ->
- send_error(Req, Code, [], ErrorStr, ReasonStr).
-
-send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
- send_json(
- Req,
- Code,
- Headers,
- {[
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}
- ]}
- ).
-
-% give the option for list functions to output html or other raw errors
-send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
- send_chunk(Resp, Reason),
- last_chunk(Resp);
-send_chunked_error(Resp, Error) ->
- {Code, ErrorStr, ReasonStr} = error_info(Error),
- JsonError =
- {[
- {<<"code">>, Code},
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}
- ]},
- send_chunk(Resp, ?l2b([$\n, ?JSON_ENCODE(JsonError), $\n])),
- last_chunk(Resp).
-
-send_redirect(Req, Path) ->
- send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
-
-negotiate_content_type(_Req) ->
- case get(jsonp) of
- no_jsonp -> "application/json";
- [] -> "application/json";
- _Callback -> "application/javascript"
- end.
-
-server_header() ->
- [
- {"Server",
- "CouchDB/" ++ couch_server:get_version() ++
- " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}
- ].
-
--record(mp, {boundary, buffer, data_fun, callback}).
-
-parse_multipart_request(ContentType, DataFun, Callback) ->
- Boundary0 = iolist_to_binary(get_boundary(ContentType)),
- Boundary = <<"\r\n--", Boundary0/binary>>,
- Mp = #mp{
- boundary = Boundary,
- buffer = <<>>,
- data_fun = DataFun,
- callback = Callback
- },
- {Mp2, _NilCallback} = read_until(
- Mp,
- <<"--", Boundary0/binary>>,
- fun nil_callback/1
- ),
- #mp{buffer = Buffer, data_fun = DataFun2, callback = Callback2} =
- parse_part_header(Mp2),
- {Buffer, DataFun2, Callback2}.
-
-nil_callback(_Data) ->
- fun nil_callback/1.
-
-get_boundary({"multipart/" ++ _, Opts}) ->
- case couch_util:get_value("boundary", Opts) of
- S when is_list(S) ->
- S
- end;
-get_boundary(ContentType) ->
- {"multipart/" ++ _, Opts} = mochiweb_util:parse_header(ContentType),
- get_boundary({"multipart/", Opts}).
-
-split_header(<<>>) ->
- [];
-split_header(Line) ->
- {Name, Rest} = lists:splitwith(
- fun(C) -> C =/= $: end,
- binary_to_list(Line)
- ),
- [$: | Value] =
- case Rest of
- [] ->
- throw({bad_request, <<"bad part header">>});
- Res ->
- Res
- end,
- [{string:to_lower(string:strip(Name)), mochiweb_util:parse_header(Value)}].
-
-read_until(#mp{data_fun = DataFun, buffer = Buffer} = Mp, Pattern, Callback) ->
- case couch_util:find_in_binary(Pattern, Buffer) of
- not_found ->
- Callback2 = Callback(Buffer),
- {Buffer2, DataFun2} = DataFun(),
- Buffer3 = iolist_to_binary(Buffer2),
- read_until(Mp#mp{data_fun = DataFun2, buffer = Buffer3}, Pattern, Callback2);
- {partial, 0} ->
- {NewData, DataFun2} = DataFun(),
- read_until(
- Mp#mp{
- data_fun = DataFun2,
- buffer = iolist_to_binary([Buffer, NewData])
- },
- Pattern,
- Callback
- );
- {partial, Skip} ->
- <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
- Callback2 = Callback(DataChunk),
- {NewData, DataFun2} = DataFun(),
- read_until(
- Mp#mp{
- data_fun = DataFun2,
- buffer = iolist_to_binary([Rest | NewData])
- },
- Pattern,
- Callback2
- );
- {exact, 0} ->
- PatternLen = size(Pattern),
- <<_:PatternLen/binary, Rest/binary>> = Buffer,
- {Mp#mp{buffer = Rest}, Callback};
- {exact, Skip} ->
- PatternLen = size(Pattern),
- <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
- Callback2 = Callback(DataChunk),
- {Mp#mp{buffer = Rest}, Callback2}
- end.
-
-parse_part_header(#mp{callback = UserCallBack} = Mp) ->
- {Mp2, AccCallback} = read_until(
- Mp,
- <<"\r\n\r\n">>,
- fun(Next) -> acc_callback(Next, []) end
- ),
- HeaderData = AccCallback(get_data),
-
- Headers =
- lists:foldl(
- fun(Line, Acc) ->
- split_header(Line) ++ Acc
- end,
- [],
- re:split(HeaderData, <<"\r\n">>, [])
- ),
- NextCallback = UserCallBack({headers, Headers}),
- parse_part_body(Mp2#mp{callback = NextCallback}).
-
-parse_part_body(#mp{boundary = Prefix, callback = Callback} = Mp) ->
- {Mp2, WrappedCallback} = read_until(
- Mp,
- Prefix,
- fun(Data) -> body_callback_wrapper(Data, Callback) end
- ),
- Callback2 = WrappedCallback(get_callback),
- Callback3 = Callback2(body_end),
- case check_for_last(Mp2#mp{callback = Callback3}) of
- {last, #mp{callback = Callback3} = Mp3} ->
- Mp3#mp{callback = Callback3(eof)};
- {more, Mp3} ->
- parse_part_header(Mp3)
- end.
-
-acc_callback(get_data, Acc) ->
- iolist_to_binary(lists:reverse(Acc));
-acc_callback(Data, Acc) ->
- fun(Next) -> acc_callback(Next, [Data | Acc]) end.
-
-body_callback_wrapper(get_callback, Callback) ->
- Callback;
-body_callback_wrapper(Data, Callback) ->
- Callback2 = Callback({body, Data}),
- fun(Next) -> body_callback_wrapper(Next, Callback2) end.
-
-check_for_last(#mp{buffer = Buffer, data_fun = DataFun} = Mp) ->
- case Buffer of
- <<"--", _/binary>> ->
- {last, Mp};
- <<_, _, _/binary>> ->
- {more, Mp};
- % not long enough
- _ ->
- {Data, DataFun2} = DataFun(),
- check_for_last(Mp#mp{
- buffer = <<Buffer/binary, Data/binary>>,
- data_fun = DataFun2
- })
- end.
-
-validate_bind_address(any) ->
- ok;
-validate_bind_address(Address) ->
- case inet_parse:address(Address) of
- {ok, _} -> ok;
- _ -> throw({error, invalid_bind_address})
- end.
-
-add_headers(Req, Headers0) ->
- Headers = basic_headers(Req, Headers0),
- Headers1 = http_1_0_keep_alive(Req, Headers),
- chttpd_prefer_header:maybe_return_minimal(Req, Headers1).
-
-basic_headers(Req, Headers0) ->
- Headers1 = basic_headers_no_cors(Req, Headers0),
- Headers2 = chttpd_xframe_options:header(Req, Headers1),
- chttpd_cors:headers(Req, Headers2).
-
-basic_headers_no_cors(Req, Headers) ->
- Headers ++
- server_header() ++
- couch_httpd_auth:cookie_auth_header(Req, Headers).
-
-handle_response(Req0, Code0, Headers0, Args0, Type) ->
- {ok, {Req1, Code1, Headers1, Args1}} = before_response(Req0, Code0, Headers0, Args0),
- couch_stats:increment_counter([couchdb, httpd_status_codes, Code1]),
- log_request(Req0, Code1),
- respond_(Req1, Code1, Headers1, Args1, Type).
-
-before_response(Req0, Code0, Headers0, {json, JsonObj}) ->
- {ok, {Req1, Code1, Headers1, Body1}} =
- chttpd_plugin:before_response(Req0, Code0, Headers0, JsonObj),
- Body2 = [start_jsonp(), ?JSON_ENCODE(Body1), end_jsonp(), $\n],
- {ok, {Req1, Code1, Headers1, Body2}};
-before_response(Req0, Code0, Headers0, Args0) ->
- chttpd_plugin:before_response(Req0, Code0, Headers0, Args0).
-
-respond_(#httpd{mochi_req = MochiReq} = Req, Code, Headers, Args, Type) ->
- case MochiReq:get(socket) of
- {remote, Pid, Ref} ->
- Pid ! {Ref, Code, Headers, Args, Type},
- {remote, Pid, Ref};
- _Else ->
- http_respond_(Req, Code, Headers, Args, Type)
- end.
-
-http_respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response) ->
- MochiReq:start_response({Code, Headers});
-http_respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
- % Special handling for the 413 response. Make sure the socket is closed as
- % we don't know how much data was read before the error was thrown. Also
-    % drain all the data in the receive buffer to avoid the connection being
-    % reset before the 413 response is parsed by the client. This is still racy;
-    % it just increases the chances of the 413 being detected correctly by the client
- % (rather than getting a brutal TCP reset).
- erlang:put(mochiweb_request_force_close, true),
- Result = MochiReq:Type({413, Headers, Args}),
- Socket = MochiReq:get(socket),
- mochiweb_socket:recv(Socket, ?MAX_DRAIN_BYTES, ?MAX_DRAIN_TIME_MSEC),
- Result;
-http_respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
- MochiReq:Type({Code, Headers, Args}).
-
-peer(MochiReq) ->
- case MochiReq:get(socket) of
- {remote, Pid, _} ->
- node(Pid);
- _ ->
- MochiReq:get(peer)
- end.
-
-%%%%%%%% module tests below %%%%%%%%
-
--ifdef(TEST).
--include_lib("couch/include/couch_eunit.hrl").
-
-maybe_add_default_headers_test_() ->
- DummyRequest = [],
- NoCache = {"Cache-Control", "no-cache"},
- ApplicationJson = {"Content-Type", "application/json"},
-    % couch_httpd uses the process dictionary to check whether it is currently in
-    % a JSON-serving method. Defaults to 'application/javascript' otherwise.
- % Therefore must-revalidate and application/javascript should be added
- % by chttpd if such headers are not present
- MustRevalidate = {"Cache-Control", "must-revalidate"},
- ApplicationJavascript = {"Content-Type", "application/javascript"},
- Cases = [
- {
- [],
- [MustRevalidate, ApplicationJavascript],
- "Should add Content-Type and Cache-Control to empty heaeders"
- },
-
- {
- [NoCache],
- [NoCache, ApplicationJavascript],
- "Should add Content-Type only if Cache-Control is present"
- },
-
- {
- [ApplicationJson],
- [MustRevalidate, ApplicationJson],
- "Should add Cache-Control if Content-Type is present"
- },
-
- {
- [NoCache, ApplicationJson],
- [NoCache, ApplicationJson],
- "Should not add headers if Cache-Control and Content-Type are there"
- }
- ],
- Tests = lists:map(
- fun({InitialHeaders, ProperResult, Desc}) ->
- {Desc,
- ?_assertEqual(
- ProperResult,
- maybe_add_default_headers(DummyRequest, InitialHeaders)
- )}
- end,
- Cases
- ),
- {"Tests adding default headers", Tests}.
-
-log_request_test_() ->
- {setup,
- fun() ->
- ok = meck:new([couch_log]),
- ok = meck:expect(couch_log, error, fun(Fmt, Args) ->
- case catch io_lib_format:fwrite(Fmt, Args) of
- {'EXIT', Error} -> Error;
- _ -> ok
- end
- end)
- end,
- fun(_) ->
- meck:unload()
- end,
- [
- fun() -> should_accept_code_and_message(true) end,
- fun() -> should_accept_code_and_message(false) end
- ]}.
-
-should_accept_code_and_message(DontLogFlag) ->
- erlang:put(dont_log_response, DontLogFlag),
- {"with dont_log_response = " ++ atom_to_list(DontLogFlag), [
- {"Should accept code 200 and string message", ?_assertEqual(ok, log_response(200, "OK"))},
- {"Should accept code 200 and JSON message",
- ?_assertEqual(ok, log_response(200, {json, {[{ok, true}]}}))},
- {"Should accept code >= 400 and string error",
- ?_assertEqual(ok, log_response(405, method_not_allowed))},
- {"Should accept code >= 400 and JSON error",
- ?_assertEqual(
- ok,
- log_response(405, {json, {[{error, method_not_allowed}]}})
- )},
- {"Should accept code >= 500 and string error", ?_assertEqual(ok, log_response(500, undef))},
- {"Should accept code >= 500 and JSON error",
- ?_assertEqual(ok, log_response(500, {json, {[{error, undef}]}}))}
- ]}.
-
--endif.
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
deleted file mode 100644
index 24a0c15ed..000000000
--- a/src/couch/src/couch_httpd_auth.erl
+++ /dev/null
@@ -1,697 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_auth).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([party_mode_handler/1]).
-
--export([
- default_authentication_handler/1, default_authentication_handler/2,
- special_test_authentication_handler/1
-]).
--export([cookie_authentication_handler/1, cookie_authentication_handler/2]).
--export([null_authentication_handler/1]).
--export([proxy_authentication_handler/1, proxy_authentification_handler/1]).
--export([cookie_auth_header/2]).
--export([handle_session_req/1, handle_session_req/2]).
-
--export([authenticate/2, verify_totp/2]).
--export([ensure_cookie_auth_secret/0, make_cookie_time/0]).
--export([cookie_auth_cookie/4, cookie_scheme/1]).
--export([maybe_value/3]).
-
--export([jwt_authentication_handler/1]).
-
--import(couch_httpd, [
- header_value/2, send_json/2, send_json/4, send_method_not_allowed/2, maybe_decompress/2
-]).
-
--compile({no_auto_import, [integer_to_binary/1, integer_to_binary/2]}).
-
-party_mode_handler(Req) ->
- case
- chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false
- )
- of
- true ->
- throw({unauthorized, <<"Authentication required.">>});
- false ->
- Req#httpd{user_ctx = #user_ctx{}}
- end.
-
-special_test_authentication_handler(Req) ->
- case header_value(Req, "WWW-Authenticate") of
- "X-Couch-Test-Auth " ++ NamePass ->
- % NamePass is a colon separated string: "joe schmoe:a password".
- [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
- case {Name, Pass} of
- {"Jan Lehnardt", "apple"} -> ok;
- {"Christopher Lenz", "dog food"} -> ok;
- {"Noah Slater", "biggiesmalls endian"} -> ok;
- {"Chris Anderson", "mp3"} -> ok;
- {"Damien Katz", "pecan pie"} -> ok;
- {_, _} -> throw({unauthorized, <<"Name or password is incorrect.">>})
- end,
- Req#httpd{user_ctx = #user_ctx{name = ?l2b(Name)}};
- _ ->
- % No X-Couch-Test-Auth credentials sent, give admin access so the
- % previous authentication can be restored after the test
- Req#httpd{user_ctx = ?ADMIN_USER}
- end.
-
-basic_name_pw(Req) ->
- AuthorizationHeader = header_value(Req, "Authorization"),
- case AuthorizationHeader of
- "Basic " ++ Base64Value ->
- try
- re:split(
- base64:decode(Base64Value),
- ":",
- [{return, list}, {parts, 2}]
- )
- of
- ["_", "_"] ->
- % special name and pass to be logged out
- nil;
- [User, Pass] ->
- {User, Pass};
- _ ->
- nil
- catch
- error:function_clause ->
- throw({bad_request, "Authorization header has invalid base64 value"})
- end;
- _ ->
- nil
- end.
-
-default_authentication_handler(Req) ->
- default_authentication_handler(Req, couch_auth_cache).
-
-default_authentication_handler(Req, AuthModule) ->
- case basic_name_pw(Req) of
- {User, Pass} ->
- case AuthModule:get_user_creds(Req, User) of
- nil ->
- throw({unauthorized, <<"Name or password is incorrect.">>});
- {ok, UserProps, _AuthCtx} ->
- reject_if_totp(UserProps),
- UserName = ?l2b(User),
- Password = ?l2b(Pass),
- case authenticate(Password, UserProps) of
- true ->
- Req#httpd{
- user_ctx = #user_ctx{
- name = UserName,
- roles = couch_util:get_value(<<"roles">>, UserProps, [])
- }
- };
- false ->
- authentication_warning(Req, UserName),
- throw({unauthorized, <<"Name or password is incorrect.">>})
- end
- end;
- nil ->
- case couch_server:has_admins() of
- true ->
- Req;
- false ->
- case
- chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false
- )
- of
- true -> Req;
- % If no admins, and no user required, then everyone is admin!
- % Yay, admin party!
- false -> Req#httpd{user_ctx = ?ADMIN_USER}
- end
- end
- end.
-
-null_authentication_handler(Req) ->
- Req#httpd{user_ctx = ?ADMIN_USER}.
-
-%% @doc proxy auth handler.
-%
-% This handler allows creation of a userCtx object for a user authenticated remotely.
-% The client simply passes specific headers to CouchDB and the handler creates the
-% userCtx. The header names can be defined in local.ini. By default they are:
-%
-% * X-Auth-CouchDB-UserName : contains the username (x_auth_username in the
-%   couch_httpd_auth section)
-% * X-Auth-CouchDB-Roles : contains the user roles, a comma-separated list of
-%   roles (x_auth_roles in the couch_httpd_auth section)
-% * X-Auth-CouchDB-Token : token used to authenticate the request (x_auth_token
-%   in the couch_httpd_auth section). This token is an hmac-sha1 built from the
-%   secret key and the username; the secret key (the "secret" option in the
-%   couch_httpd_auth section of the ini) must be the same on the client and the
-%   couchdb node. The token is optional: it is only required when
-%   proxy_use_secret is set to true in the couch_httpd_auth section of the ini.
-%
-proxy_authentication_handler(Req) ->
- case proxy_auth_user(Req) of
- nil -> Req;
- Req2 -> Req2
- end.
-
-%% @deprecated
-proxy_authentification_handler(Req) ->
- proxy_authentication_handler(Req).
-
-proxy_auth_user(Req) ->
- XHeaderUserName = chttpd_util:get_chttpd_auth_config(
- "x_auth_username", "X-Auth-CouchDB-UserName"
- ),
- XHeaderRoles = chttpd_util:get_chttpd_auth_config(
- "x_auth_roles", "X-Auth-CouchDB-Roles"
- ),
- XHeaderToken = chttpd_util:get_chttpd_auth_config(
- "x_auth_token", "X-Auth-CouchDB-Token"
- ),
- case header_value(Req, XHeaderUserName) of
- undefined ->
- nil;
- UserName ->
- Roles =
- case header_value(Req, XHeaderRoles) of
- undefined -> [];
- Else -> [?l2b(R) || R <- string:tokens(Else, ",")]
- end,
- case
- chttpd_util:get_chttpd_auth_config_boolean(
- "proxy_use_secret", false
- )
- of
- true ->
- case chttpd_util:get_chttpd_auth_config("secret") of
- undefined ->
- Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}};
- Secret ->
- ExpectedToken = couch_util:to_hex(
- couch_util:hmac(sha, Secret, UserName)
- ),
- case header_value(Req, XHeaderToken) of
- Token when Token == ExpectedToken ->
- Req#httpd{
- user_ctx = #user_ctx{
- name = ?l2b(UserName),
- roles = Roles
- }
- };
- _ ->
- nil
- end
- end;
- false ->
- Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}}
- end
- end.
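-
-% Hedged client-side sketch (illustration only; the secret and user name below
-% are made-up values): computing X-Auth-CouchDB-Token when proxy_use_secret is
-% enabled, mirroring the comparison performed above.
-%
-%   Secret = "92de07df7e7a3fe14808cef90a7cc0d91",
-%   Token = couch_util:to_hex(couch_util:hmac(sha, Secret, "jan")),
-%   % then send:  X-Auth-CouchDB-UserName: jan
-%   %             X-Auth-CouchDB-Roles: admins
-%   %             X-Auth-CouchDB-Token: <Token>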
-
-jwt_authentication_handler(Req) ->
- case header_value(Req, "Authorization") of
- "Bearer " ++ Jwt ->
- RequiredClaims = get_configured_claims(),
- case jwtf:decode(?l2b(Jwt), [alg | RequiredClaims], fun jwtf_keystore:get/2) of
- {ok, {Claims}} ->
- case lists:keyfind(<<"sub">>, 1, Claims) of
- false ->
- throw({unauthorized, <<"Token missing sub claim.">>});
- {_, User} ->
- Req#httpd{
- user_ctx = #user_ctx{
- name = User,
- roles = couch_util:get_value(
- ?l2b(
- config:get(
- "jwt_auth", "roles_claim_name", "_couchdb.roles"
- )
- ),
- Claims,
- []
- )
- }
- }
- end;
- {error, Reason} ->
- throw(Reason)
- end;
- _ ->
- Req
- end.
-
-get_configured_claims() ->
- Claims = config:get("jwt_auth", "required_claims", ""),
- Re = "((?<key1>[a-z]+)|{(?<key2>[a-z]+)\s*,\s*\"(?<val>[^\"]+)\"})",
- case re:run(Claims, Re, [global, {capture, [key1, key2, val], binary}]) of
- nomatch when Claims /= "" ->
- couch_log:error("[jwt_auth] required_claims is set to an invalid value.", []),
- throw({misconfigured_server, <<"JWT is not configured correctly">>});
- nomatch ->
- [];
- {match, Matches} ->
- lists:map(fun to_claim/1, Matches)
- end.
-
-to_claim([Key, <<>>, <<>>]) ->
- binary_to_atom(Key, latin1);
-to_claim([<<>>, Key, Value]) ->
- {binary_to_atom(Key, latin1), Value}.
-
-cookie_authentication_handler(Req) ->
- cookie_authentication_handler(Req, couch_auth_cache).
-
-cookie_authentication_handler(#httpd{mochi_req = MochiReq} = Req, AuthModule) ->
- case MochiReq:get_cookie_value("AuthSession") of
- undefined ->
- Req;
- [] ->
- Req;
- Cookie ->
- [User, TimeStr, HashStr] =
- try
- AuthSession = couch_util:decodeBase64Url(Cookie),
- [_A, _B, _Cs] = re:split(
- ?b2l(AuthSession),
- ":",
- [{return, list}, {parts, 3}]
- )
- catch
- _:_Error ->
- Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
- throw({bad_request, Reason})
- end,
- % Verify expiry and hash
- CurrentTime = make_cookie_time(),
- case chttpd_util:get_chttpd_auth_config("secret") of
- undefined ->
- couch_log:debug("cookie auth secret is not set", []),
- Req;
- SecretStr ->
- Secret = ?l2b(SecretStr),
- case AuthModule:get_user_creds(Req, User) of
- nil ->
- Req;
- {ok, UserProps, _AuthCtx} ->
- UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
- FullSecret = <<Secret/binary, UserSalt/binary>>,
- ExpectedHash = couch_util:hmac(sha, FullSecret, User ++ ":" ++ TimeStr),
- Hash = ?l2b(HashStr),
- Timeout = chttpd_util:get_chttpd_auth_config_integer(
- "timeout", 600
- ),
- couch_log:debug("timeout ~p", [Timeout]),
- case (catch erlang:list_to_integer(TimeStr, 16)) of
- TimeStamp when CurrentTime < TimeStamp + Timeout ->
- case couch_passwords:verify(ExpectedHash, Hash) of
- true ->
- TimeLeft = TimeStamp + Timeout - CurrentTime,
- couch_log:debug(
- "Successful cookie auth as: ~p",
- [User]
- ),
- Req#httpd{
- user_ctx = #user_ctx{
- name = ?l2b(User),
- roles = couch_util:get_value(
- <<"roles">>, UserProps, []
- )
- },
- auth = {FullSecret, TimeLeft < Timeout * 0.9}
- };
- _Else ->
- Req
- end;
- _Else ->
- Req
- end
- end
- end
- end.
-
-cookie_auth_header(#httpd{user_ctx = #user_ctx{name = null}}, _Headers) ->
- [];
-cookie_auth_header(#httpd{user_ctx = #user_ctx{name = User}, auth = {Secret, true}} = Req, Headers) ->
- % Note: we only set the AuthSession cookie if:
- % * a valid AuthSession cookie has been received
- % * we are outside a 10% timeout window
- % * and if an AuthSession cookie hasn't already been set e.g. by a login
- % or logout handler.
- % The login and logout handlers need to set the AuthSession cookie
- % themselves.
- CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
- Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
- AuthSession = couch_util:get_value("AuthSession", Cookies),
- if
- AuthSession == undefined ->
- TimeStamp = make_cookie_time(),
- [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
- true ->
- []
- end;
-cookie_auth_header(_Req, _Headers) ->
- [].
-
-cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
- SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
- Hash = couch_util:hmac(sha, Secret, SessionData),
- mochiweb_cookies:cookie(
- "AuthSession",
- couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
- cookie_attributes(Req)
- ).
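-
-% Illustrative note (not part of the original file): the AuthSession cookie value
-% is base64url("User:TimeHex:Hash"), where Hash is an HMAC-SHA1 of "User:TimeHex"
-% keyed with the server secret concatenated with the user's salt. A rough sketch
-% with a made-up timestamp:
-%
-%   SessionData = "jan" ++ ":" ++ erlang:integer_to_list(16#5F000000, 16),
-%   Hash = couch_util:hmac(sha, FullSecret, SessionData),
-%   Value = couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)).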
-
-clear_auth_cookie(Req) ->
- mochiweb_cookies:cookie(
- "AuthSession", "", cookie_attributes(Req)
- ).
-
-cookie_attributes(Req) ->
- Attributes = [path(), http_only(), max_age(), cookie_scheme(Req), cookie_domain(), same_site()],
- lists:flatten(Attributes).
-
-ensure_cookie_auth_secret() ->
- case chttpd_util:get_chttpd_auth_config("secret") of
- undefined ->
- NewSecret = ?b2l(couch_uuids:random()),
- config:set("chttpd_auth", "secret", NewSecret),
- NewSecret;
- Secret ->
- Secret
- end.
-
-% session handlers
-% Login handler with user db
-handle_session_req(Req) ->
- handle_session_req(Req, couch_auth_cache).
-
-handle_session_req(#httpd{method = 'POST', mochi_req = MochiReq} = Req, AuthModule) ->
- ReqBody = MochiReq:recv_body(),
- Form =
- case MochiReq:get_primary_header_value("content-type") of
- % content type should be json
- "application/x-www-form-urlencoded" ++ _ ->
- mochiweb_util:parse_qs(ReqBody);
- "application/json" ++ _ ->
- {Pairs} = ?JSON_DECODE(maybe_decompress(Req, ReqBody)),
- lists:map(
- fun({Key, Value}) ->
- {?b2l(Key), ?b2l(Value)}
- end,
- Pairs
- );
- _ ->
- []
- end,
- UserName = ?l2b(extract_username(Form)),
- Password = ?l2b(couch_util:get_value("password", Form, "")),
- couch_log:debug("Attempt Login: ~s", [UserName]),
- {ok, UserProps, _AuthCtx} =
- case AuthModule:get_user_creds(Req, UserName) of
- nil -> {ok, [], nil};
- Result -> Result
- end,
- case authenticate(Password, UserProps) of
- true ->
- verify_totp(UserProps, Form),
- % setup the session cookie
- Secret = ?l2b(ensure_cookie_auth_secret()),
- UserSalt = couch_util:get_value(<<"salt">>, UserProps),
- CurrentTime = make_cookie_time(),
- Cookie = cookie_auth_cookie(
- Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime
- ),
- % TODO document the "next" feature in Futon
- {Code, Headers} =
- case couch_httpd:qs_value(Req, "next", nil) of
- nil ->
- {200, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
- send_json(
- Req#httpd{req_body = ReqBody},
- Code,
- Headers,
- {[
- {ok, true},
- {name, UserName},
- {roles, couch_util:get_value(<<"roles">>, UserProps, [])}
- ]}
- );
- false ->
- authentication_warning(Req, UserName),
- % clear the session
- Cookie = clear_auth_cookie(Req),
- {Code, Headers} =
- case couch_httpd:qs_value(Req, "fail", nil) of
- nil ->
- {401, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
- send_json(
- Req,
- Code,
- Headers,
- {[{error, <<"unauthorized">>}, {reason, <<"Name or password is incorrect.">>}]}
- )
- end;
-% get user info
-% GET /_session
-handle_session_req(#httpd{method = 'GET', user_ctx = UserCtx} = Req, _AuthModule) ->
- Name = UserCtx#user_ctx.name,
- ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
- case {Name, ForceLogin} of
- {null, "true"} ->
- throw({unauthorized, <<"Please login.">>});
- {Name, _} ->
- send_json(
- Req,
- {[
- % remove this ok
- {ok, true},
- {<<"userCtx">>,
- {[
- {name, Name},
- {roles, UserCtx#user_ctx.roles}
- ]}},
- {info, {
- [
- {authentication_handlers, [
- N
- || {N, _Fun} <- Req#httpd.authentication_handlers
- ]}
- ] ++
- maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
- Handler
- end) ++
- maybe_value(
- authentication_db,
- config:get("chttpd_auth", "authentication_db"),
- fun(Val) ->
- ?l2b(Val)
- end
- )
- }}
- ]}
- )
- end;
-% logout by deleting the session
-handle_session_req(#httpd{method = 'DELETE'} = Req, _AuthModule) ->
- Cookie = clear_auth_cookie(Req),
- {Code, Headers} =
- case couch_httpd:qs_value(Req, "next", nil) of
- nil ->
- {200, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
- send_json(Req, Code, Headers, {[{ok, true}]});
-handle_session_req(Req, _AuthModule) ->
- send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
-
-extract_username(Form) ->
- CouchFormat = couch_util:get_value("name", Form),
- case couch_util:get_value("username", Form, CouchFormat) of
- undefined ->
- throw({bad_request, <<"request body must contain a username">>});
- CouchFormat ->
- CouchFormat;
- Else1 when CouchFormat == undefined ->
- Else1;
- _Else2 ->
- throw({bad_request, <<"request body contains different usernames">>})
- end.
-
-maybe_value(_Key, undefined, _Fun) -> [];
-maybe_value(Key, Else, Fun) -> [{Key, Fun(Else)}].
-
-authenticate(Pass, UserProps) ->
- UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
- {PasswordHash, ExpectedHash} =
- case couch_util:get_value(<<"password_scheme">>, UserProps, <<"simple">>) of
- <<"simple">> ->
- {
- couch_passwords:simple(Pass, UserSalt),
- couch_util:get_value(<<"password_sha">>, UserProps, nil)
- };
- <<"pbkdf2">> ->
- Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
- verify_iterations(Iterations),
- {
- couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
- couch_util:get_value(<<"derived_key">>, UserProps, nil)
- }
- end,
- couch_passwords:verify(PasswordHash, ExpectedHash).
-
-verify_iterations(Iterations) when is_integer(Iterations) ->
- Min = chttpd_util:get_chttpd_auth_config_integer("min_iterations", 1),
- Max = chttpd_util:get_chttpd_auth_config_integer("max_iterations", 1000000000),
- case Iterations < Min of
- true ->
- throw({forbidden, <<"Iteration count is too low for this server">>});
- false ->
- ok
- end,
- case Iterations > Max of
- true ->
- throw({forbidden, <<"Iteration count is too high for this server">>});
- false ->
- ok
- end.
-
-make_cookie_time() ->
- {NowMS, NowS, _} = os:timestamp(),
- NowMS * 1000000 + NowS.
-
-path() ->
- {path, "/"}.
-
-http_only() ->
- {http_only, true}.
-
-cookie_scheme(#httpd{mochi_req = MochiReq}) ->
- case MochiReq:get(scheme) of
- http -> [];
- https -> [{secure, true}]
- end.
-
-max_age() ->
- case
- chttpd_util:get_chttpd_auth_config_boolean(
- "allow_persistent_cookies", true
- )
- of
- false ->
- [];
- true ->
- Timeout = chttpd_util:get_chttpd_auth_config_integer(
- "timeout", 600
- ),
- [{max_age, Timeout}]
- end.
-
-cookie_domain() ->
- Domain = chttpd_util:get_chttpd_auth_config("cookie_domain", ""),
- case Domain of
- "" -> [];
- _ -> [{domain, Domain}]
- end.
-
-same_site() ->
- SameSite = chttpd_util:get_chttpd_auth_config("same_site", ""),
- case string:to_lower(SameSite) of
- "" ->
- [];
- "none" ->
- [{same_site, none}];
- "lax" ->
- [{same_site, lax}];
- "strict" ->
- [{same_site, strict}];
- _ ->
- couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ", [SameSite]),
- []
- end.
-
-reject_if_totp(User) ->
- case get_totp_config(User) of
- undefined ->
- ok;
- _ ->
- throw({unauthorized, <<"Name or password is incorrect.">>})
- end.
-
-verify_totp(User, Form) ->
- case get_totp_config(User) of
- undefined ->
- ok;
- {Props} ->
- Key = couch_base32:decode(couch_util:get_value(<<"key">>, Props)),
- Alg = couch_util:to_existing_atom(
- couch_util:get_value(<<"algorithm">>, Props, <<"sha">>)
- ),
- Len = couch_util:get_value(<<"length">>, Props, 6),
- Token = ?l2b(couch_util:get_value("token", Form, "")),
- verify_token(Alg, Key, Len, Token)
- end.
-
-get_totp_config(User) ->
- couch_util:get_value(<<"totp">>, User).
-
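-% Accept a token generated for the previous, current or next 30-second window
-% to tolerate a small amount of clock skew between client and server.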
-verify_token(Alg, Key, Len, Token) ->
- Now = make_cookie_time(),
- Tokens = [
- generate_token(Alg, Key, Len, Now - 30),
- generate_token(Alg, Key, Len, Now),
- generate_token(Alg, Key, Len, Now + 30)
- ],
- %% evaluate all tokens in constant time
- Match = lists:foldl(
- fun(T, Acc) -> couch_util:verify(T, Token) or Acc end,
- false,
- Tokens
- ),
- case Match of
- true ->
- ok;
- _ ->
- throw({unauthorized, <<"Name or password is incorrect.">>})
- end.
-
-generate_token(Alg, Key, Len, Timestamp) ->
- integer_to_binary(couch_totp:generate(Alg, Key, Timestamp, 30, Len), Len).
-
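-% Left-pad the token with zeros and keep the trailing Len digits, so that e.g.
-% 42 with Len = 6 is rendered as <<"000042">>.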
-integer_to_binary(Int, Len) when is_integer(Int), is_integer(Len) ->
- Unpadded =
- case erlang:function_exported(erlang, integer_to_binary, 1) of
- true ->
- erlang:integer_to_binary(Int);
- false ->
- ?l2b(integer_to_list(Int))
- end,
- Padding = binary:copy(<<"0">>, Len),
- Padded = <<Padding/binary, Unpadded/binary>>,
- binary:part(Padded, byte_size(Padded), -Len).
-
-authentication_warning(#httpd{mochi_req = Req}, User) ->
- Peer = Req:get(peer),
- couch_log:warning(
- "~p: Authentication failed for user ~s from ~s",
- [?MODULE, User, Peer]
- ).
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl
deleted file mode 100644
index e33a52b36..000000000
--- a/src/couch/src/couch_httpd_db.erl
+++ /dev/null
@@ -1,1449 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_db).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([
- handle_request/1,
- handle_compact_req/2,
- handle_design_req/2,
- db_req/2,
- couch_doc_open/4,
- handle_db_changes_req/2,
- update_doc_result_to_json/1, update_doc_result_to_json/2,
- handle_design_info_req/3,
- parse_copy_destination_header/1,
- parse_changes_query/2,
- handle_changes_req/4
-]).
-
--import(
- couch_httpd,
- [
- send_json/2, send_json/3, send_json/4,
- send_method_not_allowed/2,
- start_json_response/2,
- send_chunk/2,
- last_chunk/1,
- end_json_response/1,
- start_chunked_response/3,
- absolute_uri/2,
- send/2,
- start_response_length/4,
- send_error/4
- ]
-).
-
--record(doc_query_args, {
- options = [],
- rev = nil,
- open_revs = [],
- update_type = interactive_edit,
- atts_since = nil
-}).
-
-% Database request handlers
-handle_request(
- #httpd{
- path_parts = [DbName | RestParts],
- method = Method,
- db_url_handlers = DbUrlHandlers
- } = Req
-) ->
- case {Method, RestParts} of
- {'PUT', []} ->
- create_db_req(Req, DbName);
- {'DELETE', []} ->
- % if we get ?rev=... the user is using a faulty script where the
- % document id is empty by accident. Let them recover safely.
- case couch_httpd:qs_value(Req, "rev", false) of
- false ->
- delete_db_req(Req, DbName);
- _Rev ->
- throw(
- {bad_request,
- "You tried to DELETE a database with a ?rev= parameter. " ++
- "Did you mean to DELETE a document instead?"}
- )
- end;
- {_, []} ->
- do_db_req(Req, fun db_req/2);
- {_, [SecondPart | _]} ->
- Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
- do_db_req(Req, Handler)
- end.
-
-handle_db_changes_req(Req, Db) ->
- ChangesArgs = parse_changes_query(Req, Db),
- ChangesFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db),
- handle_changes_req(Req, Db, ChangesArgs, ChangesFun).
-
-handle_changes_req(#httpd{method = 'POST'} = Req, Db, ChangesArgs, ChangesFun) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- handle_changes_req1(Req, Db, ChangesArgs, ChangesFun);
-handle_changes_req(#httpd{method = 'GET'} = Req, Db, ChangesArgs, ChangesFun) ->
- handle_changes_req1(Req, Db, ChangesArgs, ChangesFun);
-handle_changes_req(#httpd{} = Req, _Db, _ChangesArgs, _ChangesFun) ->
- couch_httpd:send_method_not_allowed(Req, "GET,HEAD,POST").
-
-handle_changes_req1(Req, Db, ChangesArgs, ChangesFun) ->
- DbName = couch_db:name(Db),
- AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")),
- case AuthDbName of
- DbName ->
- % in the authentication database, _changes is admin-only.
- ok = couch_db:check_is_admin(Db);
- _Else ->
- % on other databases, _changes is free for all.
- ok
- end,
-
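-    % The callback serializes each change for the requested feed type:
-    % "eventsource" emits Server-Sent Events frames, "continuous" emits one
-    % JSON object per line, and other feeds build a single "results" object.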
- MakeCallback = fun(Resp) ->
- fun
- ({change, {ChangeProp} = Change, _}, "eventsource") ->
- Seq = proplists:get_value(<<"seq">>, ChangeProp),
- couch_httpd:send_chunk(Resp, [
- "data: ",
- ?JSON_ENCODE(Change),
- "\n",
- "id: ",
- ?JSON_ENCODE(Seq),
- "\n\n"
- ]);
- ({change, Change, _}, "continuous") ->
- couch_httpd:send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
- ({change, Change, Prepend}, _) ->
- couch_httpd:send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
- (start, "eventsource") ->
- ok;
- (start, "continuous") ->
- ok;
- (start, _) ->
- couch_httpd:send_chunk(Resp, "{\"results\":[\n");
- ({stop, _EndSeq}, "eventsource") ->
- couch_httpd:end_json_response(Resp);
- ({stop, EndSeq}, "continuous") ->
- couch_httpd:send_chunk(
- Resp,
- [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
- ),
- couch_httpd:end_json_response(Resp);
- ({stop, EndSeq}, _) ->
- couch_httpd:send_chunk(
- Resp,
- io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
- ),
- couch_httpd:end_json_response(Resp);
- (timeout, "eventsource") ->
- couch_httpd:send_chunk(Resp, "event: heartbeat\ndata: \n\n");
- (timeout, _) ->
- couch_httpd:send_chunk(Resp, "\n")
- end
- end,
- WrapperFun =
- case ChangesArgs#changes_args.feed of
- "normal" ->
- {ok, Info} = couch_db:get_db_info(Db),
- CurrentEtag = couch_httpd:make_etag(Info),
- fun(FeedChangesFun) ->
- couch_httpd:etag_respond(
- Req,
- CurrentEtag,
- fun() ->
- {ok, Resp} = couch_httpd:start_json_response(
- Req, 200, [{"ETag", CurrentEtag}]
- ),
- FeedChangesFun(MakeCallback(Resp))
- end
- )
- end;
- "eventsource" ->
- Headers = [
- {"Content-Type", "text/event-stream"},
- {"Cache-Control", "no-cache"}
- ],
- {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers),
- fun(FeedChangesFun) ->
- FeedChangesFun(MakeCallback(Resp))
- end;
- _ ->
- % "longpoll" or "continuous"
- {ok, Resp} = couch_httpd:start_json_response(Req, 200),
- fun(FeedChangesFun) ->
- FeedChangesFun(MakeCallback(Resp))
- end
- end,
- couch_stats:increment_counter(
- [couchdb, httpd, clients_requesting_changes]
- ),
- try
- WrapperFun(ChangesFun)
- after
- couch_stats:decrement_counter(
- [couchdb, httpd, clients_requesting_changes]
- )
- end.
-
-handle_compact_req(#httpd{method = 'POST'} = Req, Db) ->
- case Req#httpd.path_parts of
- [_DbName, <<"_compact">>] ->
- ok = couch_db:check_is_admin(Db),
- couch_httpd:validate_ctype(Req, "application/json"),
- _ = couch_httpd:body(Req),
- {ok, _} = couch_db:start_compact(Db),
- send_json(Req, 202, {[{ok, true}]});
- [_DbName, <<"_compact">>, DesignName | _] ->
- DesignId = <<"_design/", DesignName/binary>>,
- DDoc = couch_httpd_db:couch_doc_open(
- Db, DesignId, nil, [ejson_body]
- ),
- couch_mrview_http:handle_compact_req(Req, Db, DDoc)
- end;
-handle_compact_req(Req, _Db) ->
- send_method_not_allowed(Req, "POST").
-
-handle_design_req(
- #httpd{
- path_parts = [_DbName, _Design, DesignName, <<"_", _/binary>> = Action | _Rest],
- design_url_handlers = DesignUrlHandlers
- } = Req,
- Db
-) ->
- case couch_db:is_system_db(Db) of
- true ->
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- ok;
- _ ->
- throw(
- {forbidden,
- <<"Only admins can access design document",
- " actions for system databases.">>}
- )
- end;
- false ->
- ok
- end,
-
- % maybe load ddoc through fabric
- DesignId = <<"_design/", DesignName/binary>>,
- case couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]) of
- not_found ->
- DbName = mem3:dbname(couch_db:name(Db)),
- {ok, DDoc} = fabric:open_doc(DbName, DesignId, [?ADMIN_CTX]);
- DDoc ->
- ok
- end,
- Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun(_, _, _) ->
- throw({not_found, <<"missing handler: ", Action/binary>>})
- end),
- Handler(Req, Db, DDoc);
-handle_design_req(Req, Db) ->
- db_req(Req, Db).
-
-handle_design_info_req(
- #httpd{
- method = 'GET',
- path_parts = [_DbName, _Design, DesignName, _]
- } = Req,
- Db,
- _DDoc
-) ->
- DesignId = <<"_design/", DesignName/binary>>,
- DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
- couch_mrview_http:handle_info_req(Req, Db, DDoc).
-
-create_db_req(#httpd{user_ctx = UserCtx} = Req, DbName) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- Engine =
- case couch_httpd:qs_value(Req, "engine") of
- EngineStr when is_list(EngineStr) ->
- [{engine, iolist_to_binary(EngineStr)}];
- _ ->
- []
- end,
- case couch_server:create(DbName, [{user_ctx, UserCtx}] ++ Engine) of
- {ok, Db} ->
- couch_db:close(Db),
- DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
- send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
- Error ->
- throw(Error)
- end.
-
-delete_db_req(#httpd{user_ctx = UserCtx} = Req, DbName) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- Options =
- case couch_httpd:qs_value(Req, "sync") of
- "true" -> [sync, {user_ctx, UserCtx}];
- _ -> [{user_ctx, UserCtx}]
- end,
- case couch_server:delete(DbName, Options) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- Error ->
- throw(Error)
- end.
-
-do_db_req(#httpd{user_ctx = UserCtx, path_parts = [DbName | _]} = Req, Fun) ->
- case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
- {ok, Db} ->
- try
- Fun(Req, Db)
- after
- catch couch_db:close(Db)
- end;
- Error ->
- throw(Error)
- end.
-
-db_req(#httpd{method = 'GET', path_parts = [_DbName]} = Req, Db) ->
- {ok, DbInfo} = couch_db:get_db_info(Db),
- send_json(Req, {DbInfo});
-db_req(#httpd{method = 'POST', path_parts = [_DbName]} = Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- Doc = couch_db:doc_from_json_obj_validate(Db, couch_httpd:json_body(Req)),
- validate_attachment_names(Doc),
- Doc2 =
- case Doc#doc.id of
- <<"">> ->
- Doc#doc{id = couch_uuids:new(), revs = {0, []}};
- _ ->
- Doc
- end,
- DocId = Doc2#doc.id,
- update_doc(Req, Db, DocId, Doc2);
-db_req(#httpd{path_parts = [_DbName]} = Req, _Db) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_ensure_full_commit">>]} = Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- _ = couch_httpd:body(Req),
- StartTime = couch_db:get_instance_start_time(Db),
- send_json(
- Req,
- 201,
- {[
- {ok, true},
- {instance_start_time, StartTime}
- ]}
- );
-db_req(#httpd{path_parts = [_, <<"_ensure_full_commit">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_bulk_docs">>]} = Req, Db) ->
- couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
- couch_httpd:validate_ctype(Req, "application/json"),
- {JsonProps} = couch_httpd:json_body_obj(Req),
- case couch_util:get_value(<<"docs">>, JsonProps) of
- undefined ->
- send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
- DocsArray ->
- couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
- case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit];
- "false" ->
- Options = [delay_commit];
- _ ->
- Options = []
- end,
- case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
- true ->
- Docs = lists:map(
- fun({ObjProps} = JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
- validate_attachment_names(Doc),
- Id =
- case Doc#doc.id of
- <<>> -> couch_uuids:new();
- Id0 -> Id0
- end,
- case couch_util:get_value(<<"_rev">>, ObjProps) of
- undefined ->
- Revs = {0, []};
- Rev ->
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- Revs = {Pos, [RevId]}
- end,
- Doc#doc{id = Id, revs = Revs}
- end,
- DocsArray
- ),
- Options2 =
- case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
- true -> [all_or_nothing | Options];
- _ -> Options
- end,
- case couch_db:update_docs(Db, Docs, Options2) of
- {ok, Results} ->
- % output the results
- DocResults = lists:zipwith(
- fun update_doc_result_to_json/2,
- Docs,
- Results
- ),
- send_json(Req, 201, DocResults);
- {aborted, Errors} ->
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 417, ErrorsJson)
- end;
- false ->
- Docs = lists:map(
- fun(JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
- validate_attachment_names(Doc),
- Doc
- end,
- DocsArray
- ),
- {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 201, ErrorsJson)
- end
- end;
-db_req(#httpd{path_parts = [_, <<"_bulk_docs">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_purge">>]} = Req, Db) ->
- couch_stats:increment_counter([couchdb, httpd, purge_requests]),
- couch_httpd:validate_ctype(Req, "application/json"),
- {IdRevs} = couch_httpd:json_body_obj(Req),
- PurgeReqs = lists:map(
- fun({Id, JsonRevs}) ->
- {couch_uuids:new(), Id, couch_doc:parse_revs(JsonRevs)}
- end,
- IdRevs
- ),
-
- {ok, Replies} = couch_db:purge_docs(Db, PurgeReqs),
-
- Results = lists:zipwith(
- fun({Id, _}, {ok, Reply}) ->
- {Id, couch_doc:revs_to_strs(Reply)}
- end,
- IdRevs,
- Replies
- ),
-
- {ok, Db2} = couch_db:reopen(Db),
- PurgeSeq = couch_db:get_purge_seq(Db2),
- send_json(Req, 200, {[{purge_seq, PurgeSeq}, {purged, {Results}}]});
-db_req(#httpd{path_parts = [_, <<"_purge">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_missing_revs">>]} = Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
- JsonDocIdRevs2 = [
- {Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]}
- || {Id, RevStrs} <- JsonDocIdRevs
- ],
- {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
-    % Skip docs with no missing revs; they have Revs = []
- Results2 = [{Id, Revs} || {Id, Revs, _} <- Results, Revs =/= []],
- Results3 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- Results2],
- send_json(
- Req,
- {[
- {missing_revs, {Results3}}
- ]}
- );
-db_req(#httpd{path_parts = [_, <<"_missing_revs">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'POST', path_parts = [_, <<"_revs_diff">>]} = Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
- JsonDocIdRevs2 =
- [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
- {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
-    % Skip docs with no missing revs; they have Revs = []
- Results2 = [Res || {_Id, Revs, _PAs} = Res <- Results, Revs =/= []],
- Results3 =
- lists:map(
- fun({Id, MissingRevs, PossibleAncestors}) ->
- {Id, {
- [{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
- if
- PossibleAncestors == [] ->
- [];
- true ->
- [{possible_ancestors, couch_doc:revs_to_strs(PossibleAncestors)}]
- end
- }}
- end,
- Results2
- ),
- send_json(Req, {Results3});
-db_req(#httpd{path_parts = [_, <<"_revs_diff">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-db_req(#httpd{method = 'PUT', path_parts = [_, <<"_security">>]} = Req, Db) ->
- SecObj = couch_httpd:json_body(Req),
- ok = couch_db:set_security(Db, SecObj),
- send_json(Req, {[{<<"ok">>, true}]});
-db_req(#httpd{method = 'GET', path_parts = [_, <<"_security">>]} = Req, Db) ->
- send_json(Req, couch_db:get_security(Db));
-db_req(#httpd{path_parts = [_, <<"_security">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "PUT,GET");
-db_req(
- #httpd{method = 'PUT', path_parts = [_, <<"_revs_limit">>]} = Req,
- Db
-) ->
- Limit = couch_httpd:json_body(Req),
- case is_integer(Limit) of
- true ->
- ok = couch_db:set_revs_limit(Db, Limit),
- send_json(Req, {[{<<"ok">>, true}]});
- false ->
- throw({bad_request, <<"Rev limit has to be an integer">>})
- end;
-db_req(#httpd{method = 'GET', path_parts = [_, <<"_revs_limit">>]} = Req, Db) ->
- send_json(Req, couch_db:get_revs_limit(Db));
-db_req(#httpd{path_parts = [_, <<"_revs_limit">>]} = Req, _Db) ->
- send_method_not_allowed(Req, "PUT,GET");
-% Special case to enable using an unencoded slash in the URL of design docs,
-% as slashes in document IDs must otherwise be URL encoded.
-db_req(
- #httpd{
- method = 'GET', mochi_req = MochiReq, path_parts = [DbName, <<"_design/", _/binary>> | _]
- } = Req,
- _Db
-) ->
- PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
- [_ | PathTail] = re:split(
- MochiReq:get(raw_path),
- "_design%2F",
- [{return, list}]
- ),
- couch_httpd:send_redirect(
- Req,
- PathFront ++ "_design/" ++
- mochiweb_util:join(PathTail, "_design%2F")
- );
-db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name]} = Req, Db) ->
- db_doc_req(Req, Db, <<"_design/", Name/binary>>);
-db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name | FileNameParts]} = Req, Db) ->
- db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts);
-% Special case to allow for accessing local documents without %2F
-% encoding the docid. Throws out requests that don't have the second
-% path part or that specify an attachment name.
-db_req(#httpd{path_parts = [_DbName, <<"_local">>]}, _Db) ->
- throw({bad_request, <<"Invalid _local document id.">>});
-db_req(#httpd{path_parts = [_DbName, <<"_local/">>]}, _Db) ->
- throw({bad_request, <<"Invalid _local document id.">>});
-db_req(#httpd{path_parts = [_DbName, <<"_local">>, Name]} = Req, Db) ->
- db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-db_req(#httpd{path_parts = [_DbName, <<"_local">> | _Rest]}, _Db) ->
- throw({bad_request, <<"_local documents do not accept attachments.">>});
-db_req(#httpd{path_parts = [_, DocId]} = Req, Db) ->
- db_doc_req(Req, Db, DocId);
-db_req(#httpd{path_parts = [_, DocId | FileNameParts]} = Req, Db) ->
- db_attachment_req(Req, Db, DocId, FileNameParts).
-
-db_doc_req(#httpd{method = 'DELETE'} = Req, Db, DocId) ->
- % check for the existence of the doc to handle the 404 case.
- couch_doc_open(Db, DocId, nil, []),
- case couch_httpd:qs_value(Req, "rev") of
- undefined ->
- JsonObj = {[{<<"_deleted">>, true}]},
- Doc = couch_doc_from_req(Req, Db, DocId, JsonObj),
- update_doc(Req, Db, DocId, Doc);
- Rev ->
- JsonObj = {[{<<"_rev">>, ?l2b(Rev)}, {<<"_deleted">>, true}]},
- Doc = couch_doc_from_req(Req, Db, DocId, JsonObj),
- update_doc(Req, Db, DocId, Doc)
- end;
-db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
- #doc_query_args{
- rev = Rev,
- open_revs = Revs,
- options = Options1,
- atts_since = AttsSince
- } = parse_doc_query(Req),
- Options =
- case AttsSince of
- nil ->
- Options1;
- RevList when is_list(RevList) ->
- [{atts_since, RevList}, attachments | Options1]
- end,
- case Revs of
- [] ->
- Doc = couch_doc_open(Db, DocId, Rev, Options),
- send_doc(Req, Doc, Options);
- _ ->
- {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
- case MochiReq:accepts_content_type("multipart/mixed") of
- false ->
- {ok, Resp} = start_json_response(Req, 200),
- send_chunk(Resp, "["),
- % We loop through the docs. The first time through the separator
- % is whitespace, then a comma on subsequent iterations.
- lists:foldl(
- fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- {{not_found, missing}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- end,
- % AccSeparator now has a comma
- ","
- end,
- "",
- Results
- ),
- send_chunk(Resp, "]"),
- end_json_response(Resp);
- true ->
- send_docs_multipart(Req, Results, Options)
- end
- end;
-db_doc_req(#httpd{method = 'POST'} = Req, Db, DocId) ->
- couch_httpd:validate_referer(Req),
- couch_db:validate_docid(Db, DocId),
- couch_httpd:validate_ctype(Req, "multipart/form-data"),
- Form = couch_httpd:parse_form(Req),
- case couch_util:get_value("_doc", Form) of
- undefined ->
- Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
- {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
- Json ->
- Doc = couch_doc_from_req(Req, Db, DocId, ?JSON_DECODE(Json))
- end,
- UpdatedAtts = [
- couch_att:new([
- {name, validate_attachment_name(Name)},
- {type, list_to_binary(ContentType)},
- {data, Content}
- ])
- || {Name, {ContentType, _}, Content} <-
- proplists:get_all_values("_attachments", Form)
- ],
- #doc{atts = OldAtts} = Doc,
- OldAtts2 = lists:flatmap(
- fun(Att) ->
- OldName = couch_att:fetch(name, Att),
- case [1 || A <- UpdatedAtts, couch_att:fetch(name, A) == OldName] of
- % the attachment wasn't in the UpdatedAtts, return it
- [] -> [Att];
- % the attachment was in the UpdatedAtts, drop it
- _ -> []
- end
- end,
- OldAtts
- ),
- NewDoc = Doc#doc{
- atts = UpdatedAtts ++ OldAtts2
- },
- update_doc(Req, Db, DocId, NewDoc);
-db_doc_req(#httpd{method = 'PUT'} = Req, Db, DocId) ->
- couch_db:validate_docid(Db, DocId),
-
- case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
- ("multipart/related;" ++ _) = ContentType ->
- couch_httpd:check_max_request_length(Req),
- {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
- ContentType, fun() -> receive_request_data(Req) end
- ),
- Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
- try
- Result = update_doc(Req, Db, DocId, Doc),
- WaitFun(),
- Result
- catch
- throw:Err ->
- % Document rejected by a validate_doc_update function.
- couch_httpd_multipart:abort_multipart_stream(Parser),
- throw(Err)
- end;
- _Else ->
- Body = couch_httpd:json_body(Req),
- Doc = couch_doc_from_req(Req, Db, DocId, Body),
- update_doc(Req, Db, DocId, Doc)
- end;
-db_doc_req(#httpd{method = 'COPY'} = Req, Db, SourceDocId) ->
- SourceRev =
- case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
- missing_rev -> nil;
- Rev -> Rev
- end,
- {TargetDocId0, TargetRevs} = parse_copy_destination_header(Req),
- TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
- % open old doc
- Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
- % save new doc
- update_doc(Req, Db, TargetDocId, Doc#doc{id = TargetDocId, revs = TargetRevs});
-db_doc_req(Req, _Db, _DocId) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
-send_doc(Req, Doc, Options) ->
- case Doc#doc.meta of
- [] ->
- DiskEtag = couch_httpd:doc_etag(Doc),
- % output etag only when we have no meta
- couch_httpd:etag_respond(Req, DiskEtag, fun() ->
- send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
- end);
- _ ->
- send_doc_efficiently(Req, Doc, [], Options)
- end.
-
-send_doc_efficiently(Req, #doc{atts = []} = Doc, Headers, Options) ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(
- #httpd{mochi_req = MochiReq} = Req,
- #doc{atts = Atts} = Doc,
- Headers,
- Options
-) ->
- case lists:member(attachments, Options) of
- true ->
- case MochiReq:accepts_content_type("multipart/related") of
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
- true ->
- Boundary = couch_uuids:random(),
- JsonBytes = ?JSON_ENCODE(
- couch_doc:to_json_obj(
- Doc,
- [attachments, follows, att_encoding_info | Options]
- )
- ),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
- Boundary, JsonBytes, Atts, true
- ),
- CType = {"Content-Type", ?b2l(ContentType)},
- {ok, Resp} = start_response_length(Req, 200, [CType | Headers], Len),
- couch_doc:doc_to_multi_part_stream(
- Boundary,
- JsonBytes,
- Atts,
- fun(Data) -> couch_httpd:send(Resp, Data) end,
- true
- )
- end;
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
- end.
-
-send_docs_multipart(Req, Results, Options1) ->
- OuterBoundary = couch_uuids:random(),
- InnerBoundary = couch_uuids:random(),
- Options = [attachments, follows, att_encoding_info | Options1],
- CType = {"Content-Type", "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
- {ok, Resp} = start_chunked_response(Req, 200, [CType]),
- couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
- lists:foreach(
- fun
- ({ok, #doc{atts = Atts} = Doc}) ->
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
- InnerBoundary, JsonBytes, Atts, true
- ),
- couch_httpd:send_chunk(
- Resp, <<"\r\nContent-Type: ", ContentType/binary, "\r\n\r\n">>
- ),
- couch_doc:doc_to_multi_part_stream(
- InnerBoundary,
- JsonBytes,
- Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data) end,
- true
- ),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- couch_httpd:send_chunk(
- Resp,
- [
- <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json,
- <<"\r\n--", OuterBoundary/binary>>
- ]
- )
- end,
- Results
- ),
- couch_httpd:send_chunk(Resp, <<"--">>),
- couch_httpd:last_chunk(Resp).
-
-send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
- Boundary = couch_uuids:random(),
- CType = {"Content-Type", "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
- {ok, Resp} = start_chunked_response(Req, 206, [CType]),
- couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
- lists:foreach(
- fun({From, To}) ->
- ContentRange = ?l2b(make_content_range(From, To, Len)),
- couch_httpd:send_chunk(
- Resp,
- <<"\r\nContent-Type: ", ContentType/binary, "\r\n", "Content-Range: ",
- ContentRange/binary, "\r\n", "\r\n">>
- ),
- couch_att:range_foldl(
- Att,
- From,
- To + 1,
- fun(Seg, _) -> send_chunk(Resp, Seg) end,
- {ok, Resp}
- ),
- couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
- end,
- Ranges
- ),
- couch_httpd:send_chunk(Resp, <<"--">>),
- couch_httpd:last_chunk(Resp),
- {ok, Resp}.
-
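-% Stream the request body in chunks of at most 4096 bytes; each call returns
-% the next chunk together with a continuation fun, the shape expected by
-% couch_doc:doc_from_multi_part_stream/2 above.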
-receive_request_data(Req) ->
- receive_request_data(Req, couch_httpd:body_length(Req)).
-
-receive_request_data(Req, LenLeft) when LenLeft > 0 ->
- Len = erlang:min(4096, LenLeft),
- Data = couch_httpd:recv(Req, Len),
- {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
-receive_request_data(_Req, _) ->
- throw(<<"expected more data">>).
-
-make_content_range(From, To, Len) ->
- io_lib:format("bytes ~B-~B/~B", [From, To, Len]).
-
-update_doc_result_to_json({{Id, Rev}, Error}) ->
- {_Code, Err, Msg} = couch_httpd:error_info(Error),
- {[
- {id, Id},
- {rev, couch_doc:rev_to_str(Rev)},
- {error, Err},
- {reason, Msg}
- ]}.
-
-update_doc_result_to_json(#doc{id = DocId}, Result) ->
- update_doc_result_to_json(DocId, Result);
-update_doc_result_to_json(DocId, {ok, NewRev}) ->
- {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
-update_doc_result_to_json(DocId, Error) ->
- {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
- {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
-update_doc(Req, Db, DocId, #doc{deleted = false} = Doc) ->
- DbName = couch_db:name(Db),
- Loc = absolute_uri(
- Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId)
- ),
- update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
-update_doc(Req, Db, DocId, Doc) ->
- update_doc(Req, Db, DocId, Doc, []).
-
-update_doc(Req, Db, DocId, Doc, Headers) ->
- #doc_query_args{
- update_type = UpdateType
- } = parse_doc_query(Req),
- update_doc(Req, Db, DocId, Doc, Headers, UpdateType).
-
-update_doc(Req, Db, DocId, #doc{deleted = Deleted} = Doc, Headers, UpdateType) ->
- case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit];
- "false" ->
- Options = [delay_commit];
- _ ->
- Options = []
- end,
- case couch_httpd:qs_value(Req, "batch") of
- "ok" ->
- % async batching
- spawn(fun() ->
- case catch (couch_db:update_doc(Db, Doc, Options, UpdateType)) of
- {ok, _} -> ok;
- Error -> couch_log:info("Batch doc error (~s): ~p", [DocId, Error])
- end
- end),
- send_json(
- Req,
- 202,
- Headers,
- {[
- {ok, true},
- {id, DocId}
- ]}
- );
- _Normal ->
- % normal
- {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
- send_json(
- Req,
- if
- Deleted orelse Req#httpd.method == 'DELETE' -> 200;
- true -> 201
- end,
- ResponseHeaders,
- {[
- {ok, true},
- {id, DocId},
- {rev, NewRevStr}
- ]}
- )
- end.
-
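-% Merge the revision information from the request body with any ?rev= query
-% parameter or If-Match header, rejecting requests where they disagree.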
-couch_doc_from_req(Req, _Db, DocId, #doc{revs = Revs} = Doc) ->
- validate_attachment_names(Doc),
- Rev =
- case couch_httpd:qs_value(Req, "rev") of
- undefined ->
- undefined;
- QSRev ->
- couch_doc:parse_rev(QSRev)
- end,
- Revs2 =
- case Revs of
- {Start, [RevId | _]} ->
- if
- Rev /= undefined andalso Rev /= {Start, RevId} ->
- throw(
- {bad_request,
- "Document rev from request body and query "
- "string have different values"}
- );
- true ->
- case extract_header_rev(Req, {Start, RevId}) of
- missing_rev -> {0, []};
- _ -> Revs
- end
- end;
- _ ->
- case extract_header_rev(Req, Rev) of
- missing_rev -> {0, []};
- {Pos, RevId2} -> {Pos, [RevId2]}
- end
- end,
- Doc#doc{id = DocId, revs = Revs2};
-couch_doc_from_req(Req, Db, DocId, Json) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, Json),
- couch_doc_from_req(Req, Db, DocId, Doc).
-
-% Useful for debugging
-% couch_doc_open(Db, DocId) ->
-% couch_doc_open(Db, DocId, nil, []).
-
-couch_doc_open(Db, DocId, Rev, Options) ->
- case Rev of
- % open most recent rev
- nil ->
- case couch_db:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- Doc;
- Error ->
- throw(Error)
- end;
- % open a specific rev (deletions come back as stubs)
- _ ->
- case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
- {ok, [{ok, Doc}]} ->
- Doc;
- {ok, [{{not_found, missing}, Rev}]} ->
- throw(not_found);
- {ok, [Else]} ->
- throw(Else)
- end
- end.
-
-% Attachment request handlers
-
-db_attachment_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId, FileNameParts) ->
- FileName = list_to_binary(
- mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts), "/")
- ),
- #doc_query_args{
- rev = Rev,
- options = Options
- } = parse_doc_query(Req),
- #doc{
- atts = Atts
- } = Doc = couch_doc_open(Db, DocId, Rev, Options),
- case [A || A <- Atts, couch_att:fetch(name, A) == FileName] of
- [] ->
- throw({not_found, "Document is missing attachment"});
- [Att] ->
- [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch(
- [type, encoding, disk_len, att_len, md5], Att
- ),
- Etag =
- case Md5 of
- <<>> -> couch_httpd:doc_etag(Doc);
- _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
- end,
- ReqAcceptsAttEnc = lists:member(
- atom_to_list(Enc),
- couch_httpd:accepted_encodings(Req)
- ),
- Len =
- case {Enc, ReqAcceptsAttEnc} of
- {identity, _} ->
- % stored and served in identity form
- DiskLen;
- {_, false} when DiskLen =/= AttLen ->
- % Stored encoded, but client doesn't accept the encoding we used,
- % so we need to decode on the fly. DiskLen is the identity length
- % of the attachment.
- DiskLen;
- {_, true} ->
- % Stored and served encoded. AttLen is the encoded length.
- AttLen;
- _ ->
- % We received an encoded attachment and stored it as such, so we
- % don't know the identity length. The client doesn't accept the
- % encoding, and since we cannot serve a correct Content-Length
- % header we'll fall back to a chunked response.
- undefined
- end,
- Headers =
- [
- {"ETag", Etag},
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", binary_to_list(Type)}
- ] ++
- case ReqAcceptsAttEnc of
- true when Enc =/= identity ->
-                            % RFC 2616 says that the 'identity' encoding should not be used in
- % the Content-Encoding header
- [{"Content-Encoding", atom_to_list(Enc)}];
- _ ->
- []
- end ++
- case Enc of
- identity ->
- [{"Accept-Ranges", "bytes"}];
- _ ->
- [{"Accept-Ranges", "none"}]
- end,
- AttFun =
- case ReqAcceptsAttEnc of
- false ->
- fun couch_att:foldl_decode/3;
- true ->
- fun couch_att:foldl/3
- end,
- couch_httpd:etag_respond(
- Req,
- Etag,
- fun() ->
- case Len of
- undefined ->
- {ok, Resp} = start_chunked_response(Req, 200, Headers),
- AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- last_chunk(Resp);
- _ ->
- Ranges = parse_ranges(MochiReq:get(range), Len),
- case {Enc, Ranges} of
- {identity, [{From, To}]} ->
- Headers1 =
- [{"Content-Range", make_content_range(From, To, Len)}] ++
- Headers,
- {ok, Resp} = start_response_length(
- Req, 206, Headers1, To - From + 1
- ),
- couch_att:range_foldl(
- Att,
- From,
- To + 1,
- fun(Seg, _) -> send(Resp, Seg) end,
- {ok, Resp}
- );
- {identity, Ranges} when
- is_list(Ranges) andalso length(Ranges) < 10
- ->
- send_ranges_multipart(Req, Type, Len, Att, Ranges);
- _ ->
- Headers1 =
- Headers ++
- if
- Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
- [{"Content-MD5", base64:encode(Md5)}];
- true ->
- []
- end,
- {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
- AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
- end
- end
- end
- )
- end;
-db_attachment_req(
- #httpd{method = Method, mochi_req = MochiReq} = Req, Db, DocId, FileNameParts
-) when
- (Method == 'PUT') or (Method == 'DELETE')
-->
- FileName = validate_attachment_name(
- mochiweb_util:join(
- lists:map(
- fun binary_to_list/1,
- FileNameParts
- ),
- "/"
- )
- ),
- NewAtt =
- case Method of
- 'DELETE' ->
- [];
- _ ->
- MimeType =
- case couch_httpd:header_value(Req, "Content-Type") of
- % We could throw an error here or guess by the FileName.
- % Currently, just giving it a default.
- undefined -> <<"application/octet-stream">>;
- CType -> list_to_binary(CType)
- end,
- Data =
- case couch_httpd:body_length(Req) of
- undefined ->
- <<"">>;
- {unknown_transfer_encoding, Unknown} ->
- exit({unknown_transfer_encoding, Unknown});
- chunked ->
- fun(MaxChunkSize, ChunkFun, InitState) ->
- couch_httpd:recv_chunked(
- Req, MaxChunkSize, ChunkFun, InitState
- )
- end;
- 0 ->
- <<"">>;
- Length when is_integer(Length) ->
- Expect =
- case couch_httpd:header_value(Req, "expect") of
- undefined ->
- undefined;
- Value when is_list(Value) ->
- string:to_lower(Value)
- end,
- case Expect of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _Else ->
- ok
- end,
- fun() -> couch_httpd:recv(Req, 0) end;
- Length ->
- exit({length_not_integer, Length})
- end,
- AttLen =
- case couch_httpd:header_value(Req, "Content-Length") of
- undefined -> undefined;
- Len -> list_to_integer(Len)
- end,
- ContentEnc = string:to_lower(
- string:strip(
- couch_httpd:header_value(Req, "Content-Encoding", "identity")
- )
- ),
- Encoding =
- case ContentEnc of
- "identity" ->
- identity;
- "gzip" ->
- gzip;
- _ ->
- throw({
- bad_ctype,
- "Only gzip and identity content-encodings are supported"
- })
- end,
- [
- couch_att:new([
- {name, FileName},
- {type, MimeType},
- {data, Data},
- {att_len, AttLen},
- {md5, get_md5_header(Req)},
- {encoding, Encoding}
- ])
- ]
- end,
-
- Doc =
- case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
- % make the new doc
- missing_rev ->
- if
- Method =/= 'DELETE' ->
- ok;
- true ->
- % check for the existence of the doc to handle the 404 case.
- couch_doc_open(Db, DocId, nil, [])
- end,
- couch_db:validate_docid(Db, DocId),
- #doc{id = DocId};
- Rev ->
- case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
- {ok, [{ok, Doc0}]} -> Doc0;
- {ok, [{{not_found, missing}, Rev}]} -> throw(conflict);
- {ok, [Error]} -> throw(Error)
- end
- end,
-
- #doc{atts = Atts} = Doc,
- DocEdited = Doc#doc{
- atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
- },
-
- Headers =
- case Method of
- 'DELETE' ->
- [];
- _ ->
- [
- {"Location",
- absolute_uri(
- Req,
- "/" ++
- couch_util:url_encode(couch_db:name(Db)) ++ "/" ++
- couch_util:url_encode(DocId) ++ "/" ++
- couch_util:url_encode(FileName)
- )}
- ]
- end,
- update_doc(Req, Db, DocId, DocEdited, Headers);
-db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
-
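-% Normalize the parsed Range header against the attachment length: open-ended
-% and suffix ranges are expanded, ranges past the end are clamped to Len - 1,
-% and a range whose end precedes its start is rejected.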
-parse_ranges(undefined, _Len) ->
- undefined;
-parse_ranges(fail, _Len) ->
- undefined;
-parse_ranges(Ranges, Len) ->
- parse_ranges(Ranges, Len, []).
-
-parse_ranges([], _Len, Acc) ->
- lists:reverse(Acc);
-parse_ranges([{0, none} | _], _Len, _Acc) ->
- undefined;
-parse_ranges([{From, To} | _], _Len, _Acc) when
- is_integer(From) andalso is_integer(To) andalso To < From
-->
- throw(requested_range_not_satisfiable);
-parse_ranges([{From, To} | Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
- parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To} | Rest], Len, Acc) ->
- parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none} | Rest], Len, Acc) ->
- parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, To} | Rest], Len, Acc) ->
- parse_ranges(Rest, Len, [{From, To}] ++ Acc).
-
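-% Pick up a client-supplied MD5 for the attachment body: either a Content-MD5
-% header or, for chunked uploads, a Content-MD5 announced via the Trailer
-% header (in which case the checksum arrives in the trailing footer).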
-get_md5_header(Req) ->
- ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
- Length = couch_httpd:body_length(Req),
- Trailer = couch_httpd:header_value(Req, "Trailer"),
- case {ContentMD5, Length, Trailer} of
- _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
- base64:decode(ContentMD5);
- {_, chunked, undefined} ->
- <<>>;
- {_, chunked, _} ->
- case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
- {match, _} ->
- md5_in_footer;
- _ ->
- <<>>
- end;
- _ ->
- <<>>
- end.
-
-parse_doc_query(Req) ->
- lists:foldl(
- fun({Key, Value}, Args) ->
- case {Key, Value} of
- {"attachments", "true"} ->
- Options = [attachments | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"meta", "true"} ->
- Options = [
- revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options
- ],
- Args#doc_query_args{options = Options};
- {"revs", "true"} ->
- Options = [revs | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"local_seq", "true"} ->
- Options = [local_seq | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"revs_info", "true"} ->
- Options = [revs_info | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"conflicts", "true"} ->
- Options = [conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"deleted_conflicts", "true"} ->
- Options = [deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"rev", Rev} ->
- Args#doc_query_args{rev = couch_doc:parse_rev(Rev)};
- {"open_revs", "all"} ->
- Args#doc_query_args{open_revs = all};
- {"open_revs", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{open_revs = couch_doc:parse_revs(JsonArray)};
- {"latest", "true"} ->
- Options = [latest | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- {"atts_since", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
- {"new_edits", "false"} ->
- Args#doc_query_args{update_type = replicated_changes};
- {"new_edits", "true"} ->
- Args#doc_query_args{update_type = interactive_edit};
- {"att_encoding_info", "true"} ->
- Options = [att_encoding_info | Args#doc_query_args.options],
- Args#doc_query_args{options = Options};
- % unknown key value pair, ignore.
- _Else ->
- Args
- end
- end,
- #doc_query_args{},
- couch_httpd:qs(Req)
- ).
-
-parse_changes_query(Req, Db) ->
- ChangesArgs = lists:foldl(
- fun({Key, Value}, Args) ->
- case {string:to_lower(Key), Value} of
- {"feed", "live"} ->
- %% sugar for continuous
- Args#changes_args{feed = "continuous"};
- {"feed", _} ->
- Args#changes_args{feed = Value};
- {"descending", "true"} ->
- Args#changes_args{dir = rev};
- {"since", "now"} ->
- UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) ->
- couch_db:get_update_seq(WDb)
- end),
- Args#changes_args{since = UpdateSeq};
- {"since", _} ->
- Args#changes_args{since = list_to_integer(Value)};
- {"last-event-id", _} ->
- Args#changes_args{since = list_to_integer(Value)};
- {"limit", _} ->
- Args#changes_args{limit = list_to_integer(Value)};
- {"style", _} ->
- Args#changes_args{style = list_to_existing_atom(Value)};
- {"heartbeat", "true"} ->
- Args#changes_args{heartbeat = true};
- {"heartbeat", _} ->
- Args#changes_args{heartbeat = list_to_integer(Value)};
- {"timeout", _} ->
- Args#changes_args{timeout = list_to_integer(Value)};
- {"include_docs", "true"} ->
- Args#changes_args{include_docs = true};
- {"attachments", "true"} ->
- Opts = Args#changes_args.doc_options,
- Args#changes_args{doc_options = [attachments | Opts]};
- {"att_encoding_info", "true"} ->
- Opts = Args#changes_args.doc_options,
- Args#changes_args{doc_options = [att_encoding_info | Opts]};
- {"conflicts", "true"} ->
- Args#changes_args{conflicts = true};
- {"filter", _} ->
- Args#changes_args{filter = Value};
- % unknown key value pair, ignore.
- _Else ->
- Args
- end
- end,
- #changes_args{},
- couch_httpd:qs(Req)
- ),
-    %% If it's an EventSource request with a Last-Event-ID header, that header
-    %% should override the `since` query string value, since it's probably the
-    %% browser reconnecting.
- case ChangesArgs#changes_args.feed of
- "eventsource" ->
- case couch_httpd:header_value(Req, "last-event-id") of
- undefined ->
- ChangesArgs;
- Value ->
- ChangesArgs#changes_args{since = list_to_integer(Value)}
- end;
- _ ->
- ChangesArgs
- end.
-
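-% Resolve the document revision from an explicit ?rev= value and/or an
-% If-Match header; the two must agree when both are present.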
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev) ->
- extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
-extract_header_rev(Req, ExplicitRev) ->
- Etag =
- case couch_httpd:header_value(Req, "If-Match") of
- undefined -> undefined;
- Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
- end,
- case {ExplicitRev, Etag} of
- {undefined, undefined} -> missing_rev;
- {_, undefined} -> ExplicitRev;
- {undefined, _} -> Etag;
- _ when ExplicitRev == Etag -> Etag;
- _ -> throw({bad_request, "Document rev and etag have different values"})
- end.
-
-parse_copy_destination_header(Req) ->
- case couch_httpd:header_value(Req, "Destination") of
- undefined ->
- throw({bad_request, "Destination header is mandatory for COPY."});
- Destination ->
- case re:run(Destination, "^https?://", [{capture, none}]) of
- match ->
- throw({bad_request, "Destination URL must be relative."});
- nomatch ->
- % see if ?rev=revid got appended to the Destination header
- case re:run(Destination, "\\?", [{capture, none}]) of
- nomatch ->
- {list_to_binary(Destination), {0, []}};
- match ->
- [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
- [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- {list_to_binary(DocId), {Pos, [RevId]}}
- end
- end
- end.
-
-validate_attachment_names(Doc) ->
- lists:foreach(
- fun(Att) ->
- Name = couch_att:fetch(name, Att),
- validate_attachment_name(Name)
- end,
- Doc#doc.atts
- ).
-
-validate_attachment_name(Name) when is_list(Name) ->
- validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_", _/binary>>) ->
- throw({bad_request, <<"Attachment name can't start with '_'">>});
-validate_attachment_name(Name) ->
- case couch_util:validate_utf8(Name) of
- true -> Name;
- false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
- end.
diff --git a/src/couch/src/couch_httpd_handlers.erl b/src/couch/src/couch_httpd_handlers.erl
deleted file mode 100644
index e64287566..000000000
--- a/src/couch/src/couch_httpd_handlers.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1]).
-
-url_handler(_) -> no_match.
-
-db_handler(_) -> no_match.
-
-design_handler(_) -> no_match.
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
deleted file mode 100644
index d9c591875..000000000
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ /dev/null
@@ -1,313 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_misc_handlers).
-
--export([
- handle_welcome_req/2,
- handle_favicon_req/2,
- handle_utils_dir_req/2,
- handle_all_dbs_req/1,
- handle_uuids_req/1,
- handle_config_req/1,
- handle_task_status_req/1,
- handle_file_req/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--import(
- couch_httpd,
- [
- send_json/2, send_json/3, send_json/4,
- send_method_not_allowed/2,
- start_json_response/2,
- send_chunk/2,
- last_chunk/1,
- end_json_response/1,
- start_chunked_response/3,
- send_error/4
- ]
-).
-
-% httpd global handlers
-
-handle_welcome_req(#httpd{method = 'GET'} = Req, WelcomeMessage) ->
- send_json(Req, {
- [
- {couchdb, WelcomeMessage},
- {uuid, couch_server:get_uuid()},
- {version, list_to_binary(couch_server:get_version())}
- ] ++
- case config:get("vendor") of
- [] ->
- [];
- Properties ->
- [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
- end
- });
-handle_welcome_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_favicon_req(#httpd{method = 'GET'} = Req, DocumentRoot) ->
- {{Year, Month, Day}, Time} = erlang:universaltime(),
- OneYearFromNow = {{Year + 1, Month, Day}, Time},
- CachingHeaders = [
-        % favicon should expire a year from now
- {"Cache-Control", "public, max-age=31536000"},
- {"Expires", couch_util:rfc1123_date(OneYearFromNow)}
- ],
- couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
-handle_favicon_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_file_req(#httpd{method = 'GET'} = Req, Document) ->
- couch_httpd:serve_file(Req, filename:basename(Document), filename:dirname(Document));
-handle_file_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_utils_dir_req(Req, _) ->
- send_error(
- Req,
- 410,
- <<"no_node_local_fauxton">>,
- ?l2b("The web interface is no longer available on the node-local port.")
- ).
-
-handle_all_dbs_req(#httpd{method = 'GET'} = Req) ->
- {ok, DbNames} = couch_server:all_databases(),
- send_json(Req, DbNames);
-handle_all_dbs_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_task_status_req(#httpd{method = 'GET'} = Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- % convert the list of prop lists to a list of json objects
- send_json(Req, [{Props} || Props <- couch_task_status:all()]);
-handle_task_status_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_uuids_req(#httpd{method = 'GET'} = Req) ->
- Max = config:get_integer("uuids", "max_count", 1000),
- Count =
- try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
- N when N > Max ->
- throw({bad_request, <<"count parameter too large">>});
- N when N < 0 ->
- throw({bad_request, <<"count must be a positive integer">>});
- N ->
- N
- catch
- error:badarg ->
- throw({bad_request, <<"count must be a positive integer">>})
- end,
- UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
- Etag = couch_httpd:make_etag(UUIDs),
- couch_httpd:etag_respond(Req, Etag, fun() ->
- CacheBustingHeaders = [
- {"Date", couch_util:rfc1123_date()},
- {"Cache-Control", "no-cache"},
- % Past date, ON PURPOSE!
- {"Expires", "Mon, 01 Jan 1990 00:00:00 GMT"},
- {"Pragma", "no-cache"},
- {"ETag", Etag}
- ],
- send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
- end);
-handle_uuids_req(Req) ->
- send_method_not_allowed(Req, "GET").
-
-% Config request handler
-
-% GET /_config/
-% GET /_config
-handle_config_req(#httpd{method = 'GET', path_parts = [_]} = Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- Grouped = lists:foldl(
- fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end,
- dict:new(),
- config:all()
- ),
- KVs = dict:fold(
- fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end,
- [],
- Grouped
- ),
- send_json(Req, 200, {KVs});
-% GET /_config/Section
-handle_config_req(#httpd{method = 'GET', path_parts = [_, Section]} = Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- KVs = [
- {list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- config:get(Section)
- ],
- send_json(Req, 200, {KVs});
-% GET /_config/Section/Key
-handle_config_req(#httpd{method = 'GET', path_parts = [_, Section, Key]} = Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- case config:get(Section, Key, undefined) of
- undefined ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
- end;
-% POST /_config/_reload - Flushes unpersisted config values from RAM
-handle_config_req(#httpd{method = 'POST', path_parts = [_, <<"_reload">>]} = Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- _ = couch_httpd:body(Req),
- ok = couch_httpd:verify_is_server_admin(Req),
- ok = config:reload(),
- send_json(Req, 200, {[{ok, true}]});
-% PUT or DELETE /_config/Section/Key
-handle_config_req(#httpd{method = Method, path_parts = [_, Section, Key]} = Req) when
- (Method == 'PUT') or (Method == 'DELETE')
-->
- ok = couch_httpd:verify_is_server_admin(Req),
- couch_util:check_config_blacklist(Section),
- Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
- case chttpd_util:get_chttpd_config("config_whitelist") of
- undefined ->
- % No whitelist; allow all changes.
- handle_approved_config_req(Req, Persist);
- WhitelistValue ->
- % Provide a failsafe to protect against inadvertently locking
-            % oneself out of the config by supplying a syntactically-incorrect
- % Erlang term. To intentionally lock down the whitelist, supply a
- % well-formed list which does not include the whitelist config
- % variable itself.
- FallbackWhitelist = [{<<"chttpd">>, <<"config_whitelist">>}],
-
- Whitelist =
- case couch_util:parse_term(WhitelistValue) of
- {ok, Value} when is_list(Value) ->
- Value;
- {ok, _NonListValue} ->
- FallbackWhitelist;
- {error, _} ->
- [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
- couch_log:error(
- "Only whitelisting ~s/~s due to error"
- " parsing: ~p",
- [
- WhitelistSection,
- WhitelistKey,
- WhitelistValue
- ]
- ),
- FallbackWhitelist
- end,
-
- IsRequestedKeyVal = fun(Element) ->
- case Element of
- {A, B} ->
- % For readability, tuples may be used instead of binaries
- % in the whitelist.
- case {couch_util:to_binary(A), couch_util:to_binary(B)} of
- {Section, Key} ->
- true;
- {Section, <<"*">>} ->
- true;
- _Else ->
- false
- end;
- _Else ->
- false
- end
- end,
-
- case lists:any(IsRequestedKeyVal, Whitelist) of
- true ->
- % Allow modifying this whitelisted variable.
- handle_approved_config_req(Req, Persist);
- _NotWhitelisted ->
- % Disallow modifying this non-whitelisted variable.
- send_error(
- Req,
- 400,
- <<"modification_not_allowed">>,
- ?l2b("This config variable is read-only")
- )
- end
- end;
-handle_config_req(Req) ->
- send_method_not_allowed(Req, "GET,PUT,POST,DELETE").
-
-% PUT /_config/Section/Key
-% "value"
-handle_approved_config_req(Req, Persist) ->
- Query = couch_httpd:qs(Req),
- UseRawValue =
- case lists:keyfind("raw", 1, Query) of
- % Not specified
- false -> false;
- % Specified with no value, i.e. "?raw" and "?raw="
- {"raw", ""} -> false;
- {"raw", "false"} -> false;
- {"raw", "true"} -> true;
- {"raw", InvalidValue} -> InvalidValue
- end,
- handle_approved_config_req(Req, Persist, UseRawValue).
-
-handle_approved_config_req(
- #httpd{method = 'PUT', path_parts = [_, Section, Key]} = Req,
- Persist,
- UseRawValue
-) when
- UseRawValue =:= false orelse UseRawValue =:= true
-->
- RawValue = couch_httpd:json_body(Req),
- Value =
- case UseRawValue of
- true ->
- % Client requests no change to the provided value.
- RawValue;
- false ->
- % Pre-process the value as necessary.
- case Section of
- <<"admins">> ->
- couch_passwords:hash_admin_password(RawValue);
- _ ->
- couch_util:trim(RawValue)
- end
- end,
- OldValue = config:get(Section, Key, ""),
- case config:set(Section, Key, ?b2l(Value), Persist) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- Error ->
- throw(Error)
- end;
-handle_approved_config_req(#httpd{method = 'PUT'} = Req, _Persist, UseRawValue) ->
- Err = io_lib:format("Bad value for 'raw' option: ~s", [UseRawValue]),
- send_json(Req, 400, {[{error, ?l2b(Err)}]});
-% DELETE /_config/Section/Key
-handle_approved_config_req(
- #httpd{method = 'DELETE', path_parts = [_, Section, Key]} = Req,
- Persist,
- _UseRawValue
-) ->
- case config:get(Section, Key, undefined) of
- undefined ->
- throw({not_found, unknown_config_value});
- OldValue ->
- config:delete(Section, Key, Persist),
- send_json(Req, 200, list_to_binary(OldValue))
- end.
diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl
deleted file mode 100644
index 11ee6790e..000000000
--- a/src/couch/src/couch_httpd_multipart.erl
+++ /dev/null
@@ -1,359 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_multipart).
-
--export([
- abort_multipart_stream/1,
- decode_multipart_stream/3,
- encode_multipart_stream/5,
- length_multipart_stream/3,
- num_mp_writers/0,
- num_mp_writers/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
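-% Spawn a parser process for a multipart/related document body. The caller is
-% sent the JSON document bytes first; attachment data is then streamed to
-% writer processes that register via {hello_from_writer, Ref, Pid} and pull
-% chunks with {get_bytes, Ref, Pid}.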
-decode_multipart_stream(ContentType, DataFun, Ref) ->
- Parent = self(),
- NumMpWriters = num_mp_writers(),
- {Parser, ParserRef} = spawn_monitor(fun() ->
- ParentRef = erlang:monitor(process, Parent),
- put(mp_parent_ref, ParentRef),
- num_mp_writers(NumMpWriters),
- {<<"--", _/binary>>, _, _} = couch_httpd:parse_multipart_request(
- ContentType,
- DataFun,
- fun(Next) -> mp_parse_doc(Next, []) end
- ),
- unlink(Parent)
- end),
- Parser ! {get_doc_bytes, Ref, self()},
- receive
- {started_open_doc_revs, NewRef} ->
- %% FIXME: How to remove the knowledge about this message?
- {{started_open_doc_revs, NewRef}, Parser, ParserRef};
- {doc_bytes, Ref, DocBytes} ->
- {{doc_bytes, Ref, DocBytes}, Parser, ParserRef};
- {'DOWN', ParserRef, _, _, normal} ->
- ok;
- {'DOWN', ParserRef, process, Parser, {{nocatch, {Error, Msg}}, _}} ->
- couch_log:error(
- "Multipart streamer ~p died with reason ~p",
- [ParserRef, Msg]
- ),
- throw({Error, Msg});
- {'DOWN', ParserRef, _, _, Reason} ->
- couch_log:error(
- "Multipart streamer ~p died with reason ~p",
- [ParserRef, Reason]
- ),
- throw({error, Reason})
- end.
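-
-% Rough sketch of the writer protocol implied by the code below (inferred,
-% not part of the original module): each attachment writer announces itself
-% with {hello_from_writer, Ref, WriterPid}, then repeatedly sends
-% {get_bytes, Ref, WriterPid} and waits for a {bytes, Ref, Chunk} reply.
-% The parser buffers chunks and only drops the head chunk once every known
-% writer has consumed it (see maybe_send_data/1), so slow writers never
-% miss data.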
-
-mp_parse_doc({headers, H}, []) ->
- case couch_util:get_value("content-type", H) of
- {"application/json", _} ->
- fun(Next) ->
- mp_parse_doc(Next, [])
- end;
- _ ->
- throw({bad_ctype, <<"Content-Type must be application/json">>})
- end;
-mp_parse_doc({body, Bytes}, AccBytes) ->
- fun(Next) ->
- mp_parse_doc(Next, [Bytes | AccBytes])
- end;
-mp_parse_doc(body_end, AccBytes) ->
- receive
- {get_doc_bytes, Ref, From} ->
- From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
- end,
- fun(Next) ->
- mp_parse_atts(Next, {Ref, [], 0, orddict:new(), []})
- end.
-
-mp_parse_atts({headers, _}, Acc) ->
- fun(Next) -> mp_parse_atts(Next, Acc) end;
-mp_parse_atts(body_end, Acc) ->
- fun(Next) -> mp_parse_atts(Next, Acc) end;
-mp_parse_atts({body, Bytes}, {Ref, Chunks, Offset, Counters, Waiting}) ->
- case maybe_send_data({Ref, Chunks ++ [Bytes], Offset, Counters, Waiting}) of
- abort_parsing ->
- fun(Next) -> mp_abort_parse_atts(Next, nil) end;
- NewAcc ->
- fun(Next) -> mp_parse_atts(Next, NewAcc) end
- end;
-mp_parse_atts(eof, {Ref, Chunks, Offset, Counters, Waiting}) ->
- N = num_mp_writers(),
- M = length(Counters),
- case (M == N) andalso Chunks == [] of
- true ->
- ok;
- false ->
- ParentRef = get(mp_parent_ref),
- receive
- abort_parsing ->
- ok;
- {hello_from_writer, Ref, WriterPid} ->
- NewCounters = handle_hello(WriterPid, Counters),
- mp_parse_atts(eof, {Ref, Chunks, Offset, NewCounters, Waiting});
- {get_bytes, Ref, From} ->
- C2 = update_writer(From, Counters),
- case maybe_send_data({Ref, Chunks, Offset, C2, [From | Waiting]}) of
- abort_parsing ->
- ok;
- NewAcc ->
- mp_parse_atts(eof, NewAcc)
- end;
- {'DOWN', ParentRef, _, _, _} ->
- exit(mp_reader_coordinator_died);
- {'DOWN', WriterRef, _, WriterPid, _} ->
- case remove_writer(WriterPid, WriterRef, Counters) of
- abort_parsing ->
- ok;
- C2 ->
- NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]},
- mp_parse_atts(eof, NewAcc)
- end
- after att_writer_timeout() ->
- ok
- end
- end.
-
-mp_abort_parse_atts(eof, _) ->
- ok;
-mp_abort_parse_atts(_, _) ->
- fun(Next) -> mp_abort_parse_atts(Next, nil) end.
-
-maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
- receive
- {hello_from_writer, Ref, WriterPid} ->
- NewCounters = handle_hello(WriterPid, Counters),
- maybe_send_data({Ref, Chunks, Offset, NewCounters, Waiting});
- {get_bytes, Ref, From} ->
- NewCounters = update_writer(From, Counters),
- maybe_send_data({Ref, Chunks, Offset, NewCounters, [From | Waiting]})
- after 0 ->
- % reply to as many writers as possible
- NewWaiting = lists:filter(
- fun(Writer) ->
- {_, WhichChunk} = orddict:fetch(Writer, Counters),
- ListIndex = WhichChunk - Offset,
- if
- ListIndex =< length(Chunks) ->
- Writer ! {bytes, Ref, lists:nth(ListIndex, Chunks)},
- false;
- true ->
- true
- end
- end,
- Waiting
- ),
-
- % check if we can drop a chunk from the head of the list
- SmallestIndex =
- case Counters of
- [] ->
- 0;
- _ ->
- lists:min([C || {_WPid, {_WRef, C}} <- Counters])
- end,
- Size = length(Counters),
- N = num_mp_writers(),
- if
- Size == N andalso SmallestIndex == (Offset + 1) ->
- NewChunks = tl(Chunks),
- NewOffset = Offset + 1;
- true ->
- NewChunks = Chunks,
- NewOffset = Offset
- end,
-
- % we should wait for a writer if no one has written the last chunk
- LargestIndex = lists:max([0] ++ [C || {_WPid, {_WRef, C}} <- Counters]),
- if
- LargestIndex >= (Offset + length(Chunks)) ->
- % someone has written all possible chunks, keep moving
- {Ref, NewChunks, NewOffset, Counters, NewWaiting};
- true ->
- ParentRef = get(mp_parent_ref),
- receive
- abort_parsing ->
- abort_parsing;
- {'DOWN', ParentRef, _, _, _} ->
- exit(mp_reader_coordinator_died);
- {'DOWN', WriterRef, _, WriterPid, _} ->
- case remove_writer(WriterPid, WriterRef, Counters) of
- abort_parsing ->
- abort_parsing;
- C2 ->
- RestWaiting = NewWaiting -- [WriterPid],
- NewAcc = {Ref, NewChunks, NewOffset, C2, RestWaiting},
- maybe_send_data(NewAcc)
- end;
- {hello_from_writer, Ref, WriterPid} ->
- C2 = handle_hello(WriterPid, Counters),
- maybe_send_data({Ref, NewChunks, NewOffset, C2, Waiting});
- {get_bytes, Ref, X} ->
- C2 = update_writer(X, Counters),
- maybe_send_data({Ref, NewChunks, NewOffset, C2, [X | NewWaiting]})
- after att_writer_timeout() ->
- abort_parsing
- end
- end
- end.
-
-handle_hello(WriterPid, Counters) ->
- WriterRef = erlang:monitor(process, WriterPid),
- orddict:store(WriterPid, {WriterRef, 0}, Counters).
-
-update_writer(WriterPid, Counters) ->
- case orddict:find(WriterPid, Counters) of
- {ok, {WriterRef, Count}} ->
- orddict:store(WriterPid, {WriterRef, Count + 1}, Counters);
- error ->
- WriterRef = erlang:monitor(process, WriterPid),
- orddict:store(WriterPid, {WriterRef, 1}, Counters)
- end.
-
-remove_writer(WriterPid, WriterRef, Counters) ->
- case orddict:find(WriterPid, Counters) of
- {ok, {WriterRef, _}} ->
- case num_mp_writers() of
- N when N > 1 ->
- num_mp_writers(N - 1),
- orddict:erase(WriterPid, Counters);
- _ ->
- abort_parsing
- end;
- {ok, _} ->
- % We got a different ref fired for a known worker
- abort_parsing;
- error ->
- % Unknown worker pid?
- abort_parsing
- end.
-
-num_mp_writers(N) ->
- erlang:put(mp_att_writers, N).
-
-num_mp_writers() ->
- case erlang:get(mp_att_writers) of
- undefined -> 1;
- Count -> Count
- end.
-
-att_writer_timeout() ->
- config:get_integer("couchdb", "attachment_writer_timeout", 300000).
-
-encode_multipart_stream(_Boundary, JsonBytes, [], WriteFun, _AttFun) ->
- WriteFun(JsonBytes);
-encode_multipart_stream(Boundary, JsonBytes, Atts, WriteFun, AttFun) ->
- WriteFun([
- <<"--", Boundary/binary, "\r\nContent-Type: application/json\r\n\r\n">>,
- JsonBytes,
- <<"\r\n--", Boundary/binary>>
- ]),
- atts_to_mp(Atts, Boundary, WriteFun, AttFun).
-
-atts_to_mp([], _Boundary, WriteFun, _AttFun) ->
- WriteFun(<<"--">>);
-atts_to_mp(
- [{Att, Name, Len, Type, Encoding} | RestAtts],
- Boundary,
- WriteFun,
- AttFun
-) ->
- LengthBin = list_to_binary(integer_to_list(Len)),
- % write headers
- WriteFun(<<"\r\nContent-Disposition: attachment; filename=\"", Name/binary, "\"">>),
- WriteFun(<<"\r\nContent-Type: ", Type/binary>>),
- WriteFun(<<"\r\nContent-Length: ", LengthBin/binary>>),
- case Encoding of
- identity ->
- ok;
- _ ->
- EncodingBin = atom_to_binary(Encoding, latin1),
- WriteFun(<<"\r\nContent-Encoding: ", EncodingBin/binary>>)
- end,
-
- % write data
- WriteFun(<<"\r\n\r\n">>),
- AttFun(Att, fun(Data, _) -> WriteFun(Data) end, ok),
- WriteFun(<<"\r\n--", Boundary/binary>>),
- atts_to_mp(RestAtts, Boundary, WriteFun, AttFun).
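-
-% Illustrative layout of the emitted stream, assuming one attachment named
-% "a.txt" of type text/plain and boundary "XYZ" (a sketch, not part of the
-% original module):
-%
-%   --XYZ\r\nContent-Type: application/json\r\n\r\n{...doc...}\r\n--XYZ
-%   \r\nContent-Disposition: attachment; filename="a.txt"
-%   \r\nContent-Type: text/plain\r\nContent-Length: 5\r\n\r\nhello\r\n--XYZ--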
-
-length_multipart_stream(Boundary, JsonBytes, Atts) ->
- AttsSize = lists:foldl(
- fun({_Att, Name, Len, Type, Encoding}, AccAttsSize) ->
- AccAttsSize +
- % "\r\n\r\n"
- 4 +
- length(integer_to_list(Len)) +
- Len +
- % "\r\n--"
- 4 +
- size(Boundary) +
- % attachment headers
- % (the length of the Content-Length has already been set)
- size(Name) +
- size(Type) +
- length("\r\nContent-Disposition: attachment; filename=\"\"") +
- length("\r\nContent-Type: ") +
- length("\r\nContent-Length: ") +
- case Encoding of
- identity ->
- 0;
- _ ->
- length(atom_to_list(Encoding)) +
- length("\r\nContent-Encoding: ")
- end
- end,
- 0,
- Atts
- ),
- if
- AttsSize == 0 ->
- {<<"application/json">>, iolist_size(JsonBytes)};
- true ->
- {
- <<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
- % "--"
- 2 +
- size(Boundary) +
- % "\r\ncontent-type: application/json\r\n\r\n"
- 36 +
- iolist_size(JsonBytes) +
- % "\r\n--"
- 4 +
- size(Boundary) +
- +AttsSize +
- % "--"
- 2
- }
- end.
-
-abort_multipart_stream(Parser) ->
- MonRef = erlang:monitor(process, Parser),
- Parser ! abort_parsing,
- receive
- {'DOWN', MonRef, _, _, _} -> ok
- after 60000 ->
-        % The one-minute timeout here is deliberate. We want to try and
-        % read data to keep the socket open when possible, but we also
-        % don't want an overly long timeout, because clients would have
-        % to wait this long to find out that they just had an error such
-        % as a validate_doc_update failure.
- throw(multi_part_abort_timeout)
- end.
diff --git a/src/couch/src/couch_httpd_rewrite.erl b/src/couch/src/couch_httpd_rewrite.erl
deleted file mode 100644
index 97f48a4c0..000000000
--- a/src/couch/src/couch_httpd_rewrite.erl
+++ /dev/null
@@ -1,555 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(couch_httpd_rewrite).
-
--compile(tuple_calls).
-
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-%% @doc The HTTP rewrite handler. All rewriting is done from
-%% /dbname/_design/ddocname/_rewrite by default.
-%%
-%% Each rule should be in the "rewrites" member of the design doc.
-%% Example of a complete rule:
-%%
-%% {
-%% ....
-%% "rewrites": [
-%% {
-%% "from": "",
-%% "to": "index.html",
-%% "method": "GET",
-%% "query": {}
-%% }
-%% ]
-%% }
-%%
-%% from: the path rule used to bind the current URI to the rule. It
-%% uses pattern matching for that.
-%%
-%% to: rule used to rewrite a URL. It can contain variables depending on
-%% binding variables discovered during pattern matching and on query args
-%% (URL args and the query member).
-%%
-%% method: method used to bind the request method to the rule. "*" by default.
-%% query: query args you want to define. They can contain dynamic variables
-%% by binding the key to the bindings.
-%%
-%%
-%% to and from are paths with patterns. A pattern can be a string starting
-%% with ":" or "*". Example:
-%%
-%%    /somepath/:var/*
-%%
-%% This path is converted into an Erlang list by splitting on "/". Each var
-%% is converted into a binding term and "*" into the match-all binding. The
-%% pattern matching is done by splitting the request URL on "/" into a list
-%% of tokens. A string pattern will match an equal token. The star pattern
-%% ('*') will match any number of tokens, but may only be present as the
-%% last pathterm in a pathspec. If all tokens are matched and all pathterms
-%% are used, then the pathspec matches. It works like Webmachine. Each
-%% identified token will be reused in the to rule and in the query.
-%%
-%% The pattern matching is done by first matching the request method to a
-%% rule. By default all methods match a rule (method is equal to "*" by
-%% default). Then it tries to match the path to one rule. If no rule
-%% matches, a 404 error is returned.
-%%
-%% Once a rule is found we rewrite the request URL using the "to" and
-%% "query" members. The identified tokens are matched to the rule and will
-%% replace vars. If '*' is found in the rule it will contain the remaining
-%% part, if it exists.
-%% Examples:
-%%
-%% Dispatch rule URL TO Tokens
-%%
-%% {"from": "/a/b", /a/b?k=v /some/b?k=v var =:= b
-%% "to": "/some/"} k = v
-%%
-%% {"from": "/a/b", /a/b /some/b?var=b var =:= b
-%% "to": "/some/:var"}
-%%
-%% {"from": "/a", /a /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/*", /a/b/c /some/b/c
-%% "to": "/some/*"}
-%%
-%% {"from": "/a", /a /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/:foo/*", /a/b/c /some/b/c?foo=b foo =:= b
-%% "to": "/some/:foo/*"}
-%%
-%% {"from": "/a/:foo", /a/b /some/?k=b&foo=b foo =:= b
-%% "to": "/some",
-%% "query": {
-%% "k": ":foo"
-%% }}
-%%
-%% {"from": "/a", /a?foo=b /some/b foo =:= b
-%% "to": "/some/:foo",
-%% }}
-
-handle_rewrite_req(
- #httpd{
- path_parts = [DbName, <<"_design">>, DesignName, _Rewrite | PathParts],
- method = Method,
- mochi_req = MochiReq
- } = Req,
- _Db,
- DDoc
-) ->
- % we are in a design handler
- DesignId = <<"_design/", DesignName/binary>>,
- Prefix = <<"/", (?l2b(couch_util:url_encode(DbName)))/binary, "/", DesignId/binary>>,
- QueryList = lists:map(fun decode_query_value/1, couch_httpd:qs(Req)),
-
- RewritesSoFar = erlang:get(?REWRITE_COUNT),
- MaxRewrites = chttpd_util:get_chttpd_config_integer("rewrite_limit", 100),
- case RewritesSoFar >= MaxRewrites of
- true ->
- throw({bad_request, <<"Exceeded rewrite recursion limit">>});
- false ->
- erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
- end,
-
- #doc{body = {Props}} = DDoc,
-
- % get rules from ddoc
- case couch_util:get_value(<<"rewrites">>, Props) of
- undefined ->
- couch_httpd:send_error(
- Req,
- 404,
- <<"rewrite_error">>,
- <<"Invalid path.">>
- );
- Bin when is_binary(Bin) ->
- couch_httpd:send_error(
- Req,
- 400,
- <<"rewrite_error">>,
- <<"Rewrite rules are a String. They must be a JSON Array.">>
- );
- Rules ->
- % create dispatch list from rules
- DispatchList = [make_rule(Rule) || {Rule} <- Rules],
- Method1 = couch_util:to_binary(Method),
-
- % get raw path by matching url to a rule. Throws not_found.
- {NewPathParts0, Bindings0} =
- try_bind_path(DispatchList, Method1, PathParts, QueryList),
- NewPathParts = [quote_plus(X) || X <- NewPathParts0],
- Bindings = maybe_encode_bindings(Bindings0),
-
- Path0 = string:join(NewPathParts, [?SEPARATOR]),
-
- % if path is relative detect it and rewrite path
- Path1 =
- case mochiweb_util:safe_relative_path(Path0) of
- undefined ->
- ?b2l(Prefix) ++ "/" ++ Path0;
- P1 ->
- ?b2l(Prefix) ++ "/" ++ P1
- end,
-
- Path2 = normalize_path(Path1),
-
- Path3 =
- case Bindings of
- [] ->
- Path2;
- _ ->
- [Path2, "?", mochiweb_util:urlencode(Bindings)]
- end,
-
- RawPath1 = ?b2l(iolist_to_binary(Path3)),
-
- % In order to do OAuth correctly, we have to save the
- % requested path. We use default so chained rewriting
-            % won't replace the original header.
- Headers = mochiweb_headers:default(
- "x-couchdb-requested-path",
- MochiReq:get(raw_path),
- MochiReq:get(headers)
- ),
-
- couch_log:debug("rewrite to ~p ~n", [RawPath1]),
-
- % build a new mochiweb request
- MochiReq1 = mochiweb_request:new(
- MochiReq:get(socket),
- MochiReq:get(method),
- RawPath1,
- MochiReq:get(version),
- Headers
- ),
-
-            % Cleanup: it forces mochiweb to reparse the raw URI.
- MochiReq1:cleanup(),
-
- #httpd{
- db_url_handlers = DbUrlHandlers,
- design_url_handlers = DesignUrlHandlers,
- default_fun = DefaultFun,
- url_handlers = UrlHandlers,
- user_ctx = UserCtx,
- auth = Auth
- } = Req,
-
- erlang:put(pre_rewrite_auth, Auth),
- erlang:put(pre_rewrite_user_ctx, UserCtx),
- couch_httpd:handle_request_int(
- MochiReq1,
- DefaultFun,
- UrlHandlers,
- DbUrlHandlers,
- DesignUrlHandlers
- )
- end.
-
-quote_plus({bind, X}) ->
- mochiweb_util:quote_plus(X);
-quote_plus(X) ->
- mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching the current URL. If none is found,
-%% a 404 not_found error is raised.
-try_bind_path([], _Method, _PathParts, _QueryList) ->
- throw(not_found);
-try_bind_path([Dispatch | Rest], Method, PathParts, QueryList) ->
- [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
- case bind_method(Method1, Method) of
- true ->
- case bind_path(PathParts1, PathParts, []) of
- {ok, Remaining, Bindings} ->
- Bindings1 = Bindings ++ QueryList,
-                    % parse query args from the rule and fill them in
-                    % with binding vars where needed
- QueryArgs1 = make_query_list(
- QueryArgs,
- Bindings1,
- Formats,
- []
- ),
-                    % keep only the bindings in Bindings1 that are not
-                    % already present in QueryArgs1
- Bindings2 = lists:foldl(
- fun({K, V}, Acc) ->
- K1 = to_binding(K),
- KV =
- case couch_util:get_value(K1, QueryArgs1) of
- undefined -> [{K1, V}];
- _V1 -> []
- end,
- Acc ++ KV
- end,
- [],
- Bindings1
- ),
-
- FinalBindings = Bindings2 ++ QueryArgs1,
- NewPathParts = make_new_path(
- RedirectPath,
- FinalBindings,
- Remaining,
- []
- ),
- {NewPathParts, FinalBindings};
- fail ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end;
- false ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end.
-
-%% Dynamically rewrite the query list given as the query member in
-%% rewrites. Each value is replaced by a binding or by an argument
-%% passed in the URL.
-make_query_list([], _Bindings, _Formats, Acc) ->
- Acc;
-make_query_list([{Key, {Value}} | Rest], Bindings, Formats, Acc) ->
- Value1 = {Value},
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
-make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_binary(Value) ->
- Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
-make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_list(Value) ->
- Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
-make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) ->
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value} | Acc]).
-
-replace_var(<<"*">> = Value, Bindings, Formats) ->
- get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
- get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
- Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
- lists:reverse(
- lists:foldl(
- fun
- (<<":", Var/binary>> = Value1, Acc) ->
- [get_var(Var, Bindings, Value1, Formats) | Acc];
- (Value1, Acc) ->
- [Value1 | Acc]
- end,
- [],
- Value
- )
- );
-replace_var(Value, _Bindings, _Formats) ->
- Value.
-
-maybe_json(Key, Value) ->
- case
- lists:member(Key, [
- <<"key">>,
- <<"startkey">>,
- <<"start_key">>,
- <<"endkey">>,
- <<"end_key">>,
- <<"keys">>
- ])
- of
- true ->
- ?JSON_ENCODE(Value);
- false ->
- Value
- end.
-
-get_var(VarName, Props, Default, Formats) ->
- VarName1 = to_binding(VarName),
- Val = couch_util:get_value(VarName1, Props, Default),
- maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
- case couch_util:get_value(VarName, Formats) of
- undefined ->
- Value;
- Format ->
- format(Format, Value)
- end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
- Value;
-format(<<"int">>, Value) when is_binary(Value) ->
- format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
- case (catch list_to_integer(Value)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Value
- end;
-format(<<"bool">>, Value) when is_binary(Value) ->
- format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
- case string:to_lower(Value) of
- "true" -> true;
- "false" -> false;
- _ -> Value
- end;
-format(_Format, Value) ->
- Value.
-
-%% @doc Build a new path from bindings. Bindings are query args
-%% (+ the dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
- lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([{bind, P} | Rest], Bindings, Remaining, Acc) ->
- P2 =
- case couch_util:get_value({bind, P}, Bindings) of
- undefined -> <<"undefined">>;
- P1 -> iolist_to_binary(P1)
- end,
- make_new_path(Rest, Bindings, Remaining, [P2 | Acc]);
-make_new_path([P | Rest], Bindings, Remaining, Acc) ->
- make_new_path(Rest, Bindings, Remaining, [P | Acc]).
-
-%% @doc Check whether the request method fits the rule method. If the
-%% method rule is '*', which is the default, all
-%% request methods will bind. It allows us to make rules
-%% that depend on the HTTP method.
-bind_method(?MATCH_ALL, _Method) ->
- true;
-bind_method({bind, Method}, Method) ->
- true;
-bind_method(_, _) ->
- false.
-
-%% @doc Bind a path. Using the "from" rule we try to bind the variables
-%% given in the current URL by pattern matching.
-bind_path([], [], Bindings) ->
- {ok, [], Bindings};
-bind_path([?MATCH_ALL], [Match | _RestMatch] = Rest, Bindings) ->
- {ok, Rest, [{?MATCH_ALL, Match} | Bindings]};
-bind_path(_, [], _) ->
- fail;
-bind_path([{bind, Token} | RestToken], [Match | RestMatch], Bindings) ->
- bind_path(RestToken, RestMatch, [{{bind, Token}, Match} | Bindings]);
-bind_path([Token | RestToken], [Token | RestMatch], Bindings) ->
- bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
- fail.
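-
-% Illustrative example (not part of the original module): the rule path
-% "/a/:foo/*" parses (via parse_path/1) into
-%   [<<"a">>, {bind, <<"foo">>}, {bind, <<"*">>}]
-% and binding it against the request path [<<"a">>, <<"b">>, <<"c">>] gives
-%   {ok, [<<"c">>], [{{bind, <<"*">>}, <<"c">>}, {{bind, <<"foo">>}, <<"b">>}]}
-% i.e. the remaining parts after '*' plus the collected variable bindings.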
-
-%% normalize path.
-normalize_path(Path) ->
- "/" ++
- string:join(
- normalize_path1(
- string:tokens(
- Path,
- "/"
- ),
- []
- ),
- [?SEPARATOR]
- ).
-
-normalize_path1([], Acc) ->
- lists:reverse(Acc);
-normalize_path1([".." | Rest], Acc) ->
- Acc1 =
- case Acc of
- [] -> [".." | Acc];
- [T | _] when T =:= ".." -> [".." | Acc];
- [_ | R] -> R
- end,
- normalize_path1(Rest, Acc1);
-normalize_path1(["." | Rest], Acc) ->
- normalize_path1(Rest, Acc);
-normalize_path1([Path | Rest], Acc) ->
- normalize_path1(Rest, [Path | Acc]).
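-
-% For illustration (not part of the original module):
-%   normalize_path("/a/./b/../c") -> "/a/c"
-%   normalize_path("a//b/c")      -> "/a/b/c"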
-
-%% @doc Transform a JSON rule into an Erlang structure for pattern matching.
-make_rule(Rule) ->
- Method =
- case couch_util:get_value(<<"method">>, Rule) of
- undefined -> ?MATCH_ALL;
- M -> to_binding(M)
- end,
- QueryArgs =
- case couch_util:get_value(<<"query">>, Rule) of
- undefined -> [];
- {Args} -> Args
- end,
- FromParts =
- case couch_util:get_value(<<"from">>, Rule) of
- undefined -> [?MATCH_ALL];
- From -> parse_path(From)
- end,
- ToParts =
- case couch_util:get_value(<<"to">>, Rule) of
- undefined ->
- throw({error, invalid_rewrite_target});
- To ->
- parse_path(To)
- end,
- Formats =
- case couch_util:get_value(<<"formats">>, Rule) of
- undefined -> [];
- {Fmts} -> Fmts
- end,
- [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
- {ok, SlashRE} = re:compile(<<"\\/">>),
- path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc Convert a path rule (from or to) to an Erlang list.
-%% "*" and path variables starting with ":" are converted
-%% into binding tuples.
-path_to_list([], Acc, _DotDotCount) ->
- lists:reverse(Acc);
-path_to_list([<<>> | R], Acc, DotDotCount) ->
- path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">> | R], Acc, DotDotCount) ->
- path_to_list(R, [?MATCH_ALL | Acc], DotDotCount);
-path_to_list([<<"..">> | R], Acc, DotDotCount) when DotDotCount == 2 ->
- case chttpd_util:get_chttpd_config_boolean("secure_rewrites", true) of
- false ->
- path_to_list(R, [<<"..">> | Acc], DotDotCount + 1);
- true ->
- couch_log:info(
- "insecure_rewrite_rule ~p blocked",
- [lists:reverse(Acc) ++ [<<"..">>] ++ R]
- ),
- throw({insecure_rewrite_rule, "too many ../.. segments"})
- end;
-path_to_list([<<"..">> | R], Acc, DotDotCount) ->
- path_to_list(R, [<<"..">> | Acc], DotDotCount + 1);
-path_to_list([P | R], Acc, DotDotCount) ->
- P1 =
- case P of
- <<":", Var/binary>> ->
- to_binding(Var);
- _ ->
- P
- end,
- path_to_list(R, [P1 | Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
- [];
-maybe_encode_bindings(Props) ->
- lists:foldl(
- fun
- ({{bind, <<"*">>}, _V}, Acc) ->
- Acc;
- ({{bind, K}, V}, Acc) ->
- V1 = iolist_to_binary(maybe_json(K, V)),
- [{K, V1} | Acc]
- end,
- [],
- Props
- ).
-
-decode_query_value({K, V}) ->
- case
- lists:member(K, [
- "key",
- "startkey",
- "start_key",
- "endkey",
- "end_key",
- "keys"
- ])
- of
- true ->
- {to_binding(K), ?JSON_DECODE(V)};
- false ->
- {to_binding(K), ?l2b(V)}
- end.
-
-to_binding({bind, V}) ->
- {bind, V};
-to_binding(V) when is_list(V) ->
- to_binding(?l2b(V));
-to_binding(V) ->
- {bind, V}.
diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl
deleted file mode 100644
index 0bff6a36d..000000000
--- a/src/couch/src/couch_httpd_vhost.erl
+++ /dev/null
@@ -1,457 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_vhost).
--behaviour(gen_server).
--vsn(1).
--behaviour(config_listener).
-
--compile(tuple_calls).
-
--export([start_link/0, reload/0, get_state/0, dispatch_host/1]).
--export([urlsplit_netloc/2, redirect_to_vhost/2]).
--export([host/1, split_host_port/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, '*'}).
--define(RELISTEN_DELAY, 5000).
-
--record(vhosts_state, {
- vhosts,
- vhost_globals,
- vhosts_fun
-}).
-
-%% @doc The vhost manager.
-%% This gen_server keeps the state of vhosts added to the ini and tries to
-%% match the Host header (or the forwarded host) against rules built from
-%% the vhost list.
-%%
-%% Declaration of vhosts takes place in the configuration file:
-%%
-%% [vhosts]
-%% example.com = /example
-%% *.example.com = /example
-%%
-%% The first line will rewrite the request to display the content of the
-%% example database. This rule works only if the Host header is
-%% 'example.com' and won't work for CNAMEs. The second rule, on the other
-%% hand, matches all CNAMEs to the example db, so www.example.com or
-%% db.example.com will work.
-%%
-%% The wildcard ('*') should always be the last in the CNAMEs:
-%%
-%%    "*.db.example.com = /" will match all CNAMEs on top of db.example.com
-%% to the root of the machine.
-%%
-%%
-%% Rewriting Hosts to path
-%% -----------------------
-%%
-%% Like in the _rewrite handler you can match some variables and use
-%% them to create the target path. Some examples:
-%%
-%% [vhosts]
-%% *.example.com = /*
-%% :dbname.example.com = /:dbname
-%% :ddocname.:dbname.example.com = /:dbname/_design/:ddocname/_rewrite
-%%
-%% The first rule passes the wildcard as dbname. The second does the same
-%% but uses a variable name, and the third one allows you to use any app
-%% with @ddocname in any db with @dbname.
-%%
-%% You can also change the default function used to handle the request by
-%% changing the setting `redirect_vhost_handler` in the `httpd` section of
-%% the ini:
-%%
-%% [httpd]
-%% redirect_vhost_handler = {Module, Fun}
-%%
-%% The function takes 2 args: the mochiweb request object and the target
-%% path.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%% @doc reload vhosts rules
-reload() ->
- gen_server:call(?MODULE, reload).
-
-get_state() ->
- gen_server:call(?MODULE, get_state).
-
-%% @doc Try to find a rule matching the current Host header. If a rule is
-%% found, the Mochiweb request is rewritten; otherwise the current request
-%% is returned.
-dispatch_host(MochiReq) ->
- case vhost_enabled() of
- true ->
- dispatch_host_int(MochiReq);
- false ->
- MochiReq
- end.
-
-dispatch_host_int(MochiReq) ->
- #vhosts_state{
- vhost_globals = VHostGlobals,
- vhosts = VHosts,
- vhosts_fun = Fun
- } = get_state(),
-
- {"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
- VPathParts = string:tokens(VPath, "/"),
-
- VHost = host(MochiReq),
- {VHostParts, VhostPort} = split_host_port(VHost),
- FinalMochiReq =
- case
- try_bind_vhost(
- VHosts,
- lists:reverse(VHostParts),
- VhostPort,
- VPathParts
- )
- of
- no_vhost_matched ->
- MochiReq;
- {VhostTarget, NewPath} ->
- case vhost_global(VHostGlobals, MochiReq) of
- true ->
- MochiReq;
- _Else ->
- NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query, Fragment}),
- MochiReq1 = mochiweb_request:new(
- MochiReq:get(socket),
- MochiReq:get(method),
- NewPath1,
- MochiReq:get(version),
- MochiReq:get(headers)
- ),
- Fun(MochiReq1, VhostTarget)
- end
- end,
- FinalMochiReq.
-
-append_path("/" = _Target, "/" = _Path) ->
- "/";
-append_path(Target, Path) ->
- Target ++ Path.
-
-% default redirect vhost handler
-redirect_to_vhost(MochiReq, VhostTarget) ->
- Path = MochiReq:get(raw_path),
- Target = append_path(VhostTarget, Path),
-
- couch_log:debug("Vhost Target: '~p'~n", [Target]),
-
- Headers = mochiweb_headers:enter(
- "x-couchdb-vhost-path",
- Path,
- MochiReq:get(headers)
- ),
-
- % build a new mochiweb request
- MochiReq1 = mochiweb_request:new(
- MochiReq:get(socket),
- MochiReq:get(method),
- Target,
- MochiReq:get(version),
- Headers
- ),
-    % Cleanup: it forces mochiweb to reparse the raw URI.
- MochiReq1:cleanup(),
- MochiReq1.
-
-%% Check whether the request targets one of the configured vhost globals;
-%% if so, it will not be rewritten, but will run as a normal couchdb request.
-%% Normally you'd use this for _uuids, _utils and a few of the others you
-%% want to keep available on vhosts. You can also use it to make databases
-%% 'global'.
-vhost_global(VhostGlobals, MochiReq) ->
- RawUri = MochiReq:get(raw_path),
- {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
- Front =
- case couch_httpd:partition(Path) of
- {"", "", ""} ->
- % Special case the root url handler
- "/";
- {FirstPart, _, _} ->
- FirstPart
- end,
- [true] == [true || V <- VhostGlobals, V == Front].
-
-%% Bind a host.
-%% First it tries to bind the port, then the hostname.
-try_bind_vhost([], _HostParts, _Port, _PathParts) ->
- no_vhost_matched;
-try_bind_vhost([VhostSpec | Rest], HostParts, Port, PathParts) ->
- {{VHostParts, VPort, VPath}, Path} = VhostSpec,
- case bind_port(VPort, Port) of
- ok ->
- case bind_vhost(lists:reverse(VHostParts), HostParts, []) of
- {ok, Bindings, Remainings} ->
- case bind_path(VPath, PathParts) of
- {ok, PathParts1} ->
- Path1 = make_target(Path, Bindings, Remainings, []),
- {make_path(Path1), make_path(PathParts1)};
- fail ->
- try_bind_vhost(
- Rest,
- HostParts,
- Port,
- PathParts
- )
- end;
- fail ->
- try_bind_vhost(Rest, HostParts, Port, PathParts)
- end;
- fail ->
- try_bind_vhost(Rest, HostParts, Port, PathParts)
- end.
-
-%% @doc Build a new path from bindings. Bindings are query args
-%% (+ the dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-%% TODO: merge this code with couch_httpd_rewrite. But we need to make sure
-%% we are dealing with strings here.
-make_target([], _Bindings, _Remaining, Acc) ->
- lists:reverse(Acc);
-make_target([?MATCH_ALL], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_target([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_target([{bind, P} | Rest], Bindings, Remaining, Acc) ->
- P2 =
- case couch_util:get_value({bind, P}, Bindings) of
- undefined -> "undefined";
- P1 -> P1
- end,
- make_target(Rest, Bindings, Remaining, [P2 | Acc]);
-make_target([P | Rest], Bindings, Remaining, Acc) ->
- make_target(Rest, Bindings, Remaining, [P | Acc]).
-
-%% bind port
-bind_port(Port, Port) -> ok;
-bind_port('*', _) -> ok;
-bind_port(_, _) -> fail.
-
-%% bind vhost
-bind_vhost([], [], Bindings) ->
- {ok, Bindings, []};
-bind_vhost([?MATCH_ALL], [], _Bindings) ->
- fail;
-bind_vhost([?MATCH_ALL], Rest, Bindings) ->
- {ok, Bindings, Rest};
-bind_vhost([], _HostParts, _Bindings) ->
- fail;
-bind_vhost([{bind, Token} | Rest], [Match | RestHost], Bindings) ->
- bind_vhost(Rest, RestHost, [{{bind, Token}, Match} | Bindings]);
-bind_vhost([Cname | Rest], [Cname | RestHost], Bindings) ->
- bind_vhost(Rest, RestHost, Bindings);
-bind_vhost(_, _, _) ->
- fail.
-
-%% bind path
-bind_path([], PathParts) ->
- {ok, PathParts};
-bind_path(_VPathParts, []) ->
- fail;
-bind_path([Path | VRest], [Path | Rest]) ->
- bind_path(VRest, Rest);
-bind_path(_, _) ->
- fail.
-
-% utilities
-
-%% create vhost list from ini
-
-host(MochiReq) ->
- XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"
- ),
- case MochiReq:get_header_value(XHost) of
- undefined ->
- case MochiReq:get_header_value("Host") of
- undefined -> [];
- Value1 -> Value1
- end;
- Value ->
- Value
- end.
-
-make_vhosts() ->
- Vhosts = lists:foldl(
- fun
- ({_, ""}, Acc) ->
- Acc;
- ({Vhost, Path}, Acc) ->
- [{parse_vhost(Vhost), split_path(Path)} | Acc]
- end,
- [],
- config:get("vhosts")
- ),
-
- lists:reverse(lists:usort(Vhosts)).
-
-parse_vhost(Vhost) ->
- case urlsplit_netloc(Vhost, []) of
- {[], Path} ->
- {make_spec("*", []), '*', Path};
- {HostPort, []} ->
- {H, P} = split_host_port(HostPort),
- H1 = make_spec(H, []),
- {H1, P, []};
- {HostPort, Path} ->
- {H, P} = split_host_port(HostPort),
- H1 = make_spec(H, []),
- {H1, P, string:tokens(Path, "/")}
- end.
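-
-% For illustration (not part of the original module), a rule such as
-% ":dbname.example.com = /:dbname" yields roughly
-%   parse_vhost(":dbname.example.com") ->
-%       {[{bind, "dbname"}, "example", "com"], '*', []}
-% i.e. the host spec, the port ('*' means any port) and an empty vhost path.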
-
-split_host_port(HostAsString) ->
- case string:rchr(HostAsString, $:) of
- 0 ->
- {split_host(HostAsString), '*'};
- N ->
- HostPart = string:substr(HostAsString, 1, N - 1),
- case
- (catch erlang:list_to_integer(
- string:substr(
- HostAsString,
- N + 1,
- length(HostAsString)
- )
- ))
- of
- {'EXIT', _} ->
- {split_host(HostAsString), '*'};
- Port ->
- {split_host(HostPart), Port}
- end
- end.
-
-split_host(HostAsString) ->
- string:tokens(HostAsString, "\.").
-
-split_path(Path) ->
- make_spec(string:tokens(Path, "/"), []).
-
-make_spec([], Acc) ->
- lists:reverse(Acc);
-make_spec(["" | R], Acc) ->
- make_spec(R, Acc);
-make_spec(["*" | R], Acc) ->
- make_spec(R, [?MATCH_ALL | Acc]);
-make_spec([P | R], Acc) ->
- P1 = parse_var(P),
- make_spec(R, [P1 | Acc]).
-
-parse_var(P) ->
- case P of
- ":" ++ Var ->
- {bind, Var};
- _ ->
- P
- end.
-
-% mochiweb doesn't export it.
-urlsplit_netloc("", Acc) ->
- {lists:reverse(Acc), ""};
-urlsplit_netloc(Rest = [C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
- {lists:reverse(Acc), Rest};
-urlsplit_netloc([C | Rest], Acc) ->
- urlsplit_netloc(Rest, [C | Acc]).
-
-make_path(Parts) ->
- "/" ++ string:join(Parts, [?SEPARATOR]).
-
-init(_) ->
- ok = config:listen_for_changes(?MODULE, nil),
-
- %% load configuration
- {VHostGlobals, VHosts, Fun} = load_conf(),
- State = #vhosts_state{
- vhost_globals = VHostGlobals,
- vhosts = VHosts,
- vhosts_fun = Fun
- },
- {ok, State}.
-
-handle_call(reload, _From, _State) ->
- {VHostGlobals, VHosts, Fun} = load_conf(),
- {reply, ok, #vhosts_state{
- vhost_globals = VHostGlobals,
- vhosts = VHosts,
- vhosts_fun = Fun
- }};
-handle_call(get_state, _From, State) ->
- {reply, State, State};
-handle_call(_Msg, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_config_change("vhosts", _, _, _, _) ->
- {ok, ?MODULE:reload()};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-load_conf() ->
- %% get vhost globals
- VHostGlobals = re:split(
- "_utils, _uuids, _session, _users",
- "\\s*,\\s*",
- [{return, list}]
- ),
-
- %% build vhosts matching rules
- VHosts = make_vhosts(),
-
- %% build vhosts handler fun
- DefaultVHostFun = "{couch_httpd_vhost, redirect_to_vhost}",
- Fun = couch_httpd:make_arity_2_fun(DefaultVHostFun),
-
- {VHostGlobals, VHosts, Fun}.
-
-%% cheaply determine if there are any virtual hosts
-%% configured at all.
-vhost_enabled() ->
- case config:get("vhosts") of
- [] ->
- false;
- _ ->
- true
- end.
diff --git a/src/couch/src/couch_io_logger.erl b/src/couch/src/couch_io_logger.erl
deleted file mode 100644
index f859874b6..000000000
--- a/src/couch/src/couch_io_logger.erl
+++ /dev/null
@@ -1,97 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_io_logger).
-
--export([
- start/1,
- log_output/1,
- log_input/1,
- stop_noerror/0,
- stop_error/1
-]).
-
-start(undefined) ->
- ok;
-start(Dir) ->
- case filelib:is_dir(Dir) of
- true ->
- Name = log_name(),
- Path = Dir ++ "/" ++ Name,
- OPath = Path ++ ".out.log_",
- IPath = Path ++ ".in.log_",
- {ok, OFd} = file:open(OPath, [read, write, raw]),
- {ok, IFd} = file:open(IPath, [read, write, raw]),
- ok = file:delete(OPath),
- ok = file:delete(IPath),
- put(logger_path, Path),
- put(logger_out_fd, OFd),
- put(logger_in_fd, IFd),
- ok;
- false ->
- ok
- end.
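-
-% Note (added for clarity, not in the original module): the log files are
-% deleted right after being opened, so on POSIX systems they only exist as
-% anonymous open file descriptors. Nothing is left on disk unless
-% stop_error/1 later copies their contents to the persistent
-% ".out.log"/".in.log"/".meta" paths.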
-
-stop_noerror() ->
- case get(logger_path) of
- undefined ->
- ok;
- _Path ->
- close_logs()
- end.
-
-stop_error(Err) ->
- case get(logger_path) of
- undefined ->
- ok;
- Path ->
- save_error_logs(Path, Err),
- close_logs()
- end.
-
-log_output(Data) ->
- log(get(logger_out_fd), Data).
-
-log_input(Data) ->
- log(get(logger_in_fd), Data).
-
-unix_time() ->
- {Mega, Sec, USec} = os:timestamp(),
- UnixTs = (Mega * 1000000 + Sec) * 1000000 + USec,
- integer_to_list(UnixTs).
-
-log_name() ->
- Ts = unix_time(),
- Pid0 = erlang:pid_to_list(self()),
- Pid1 = string:strip(Pid0, left, $<),
- Pid2 = string:strip(Pid1, right, $>),
- lists:flatten(io_lib:format("~s_~s", [Ts, Pid2])).
-
-close_logs() ->
- file:close(get(logger_out_fd)),
- file:close(get(logger_in_fd)).
-
-save_error_logs(Path, Err) ->
- Otp = erlang:system_info(otp_release),
- Msg = io_lib:format("Error: ~p~nNode: ~p~nOTP: ~p~n", [Err, node(), Otp]),
- file:write_file(Path ++ ".meta", Msg),
-    OFd = get(logger_out_fd),
-    IFd = get(logger_in_fd),
-    file:position(OFd, 0),
-    file:position(IFd, 0),
-    file:copy(OFd, Path ++ ".out.log"),
-    file:copy(IFd, Path ++ ".in.log").
-
-log(undefined, _Data) ->
- ok;
-log(Fd, Data) ->
- ok = file:write(Fd, [Data, io_lib:nl()]).
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
deleted file mode 100644
index 6c8637b42..000000000
--- a/src/couch/src/couch_key_tree.erl
+++ /dev/null
@@ -1,603 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc Data structure used to represent document edit histories.
-
-%% A key tree is used to represent the edit history of a document. Each node of
-%% the tree represents a particular version. Relations between nodes represent
-%% the order that these edits were applied. For instance, a set of three edits
-%% would produce a tree of versions A->B->C indicating that edit C was based on
-%% version B which was in turn based on A. In a world without replication (and
-%% no ability to disable MVCC checks), all histories would be forced to be
-%% linear lists of edits due to constraints imposed by MVCC (ie, new edits must
-%% be based on the current version). However, we have replication, so we must
-%% deal with not so easy cases, which lead to trees.
-%%
-%% Consider a document in state A. This doc is replicated to a second node. We
-%% then edit the document on each node leaving it in two different states, B
-%% and C. We now have two key trees, A->B and A->C. When we go to replicate a
-%% second time, the key tree must combine these two trees which gives us
-%% A->(B|C). This is how conflicts are introduced. In terms of the key tree, we
-%% say that we have two leaves (B and C) that are not deleted. The presence of
-%% multiple leaves indicates a conflict. To remove a conflict, one of the
-%% edits (B or C) can be deleted, which results in A->(B|C->D) where D is an
-%% edit that is specially marked with a deleted=true flag.
-%%
-%% What makes this a bit more complicated is that there is a limit to the
-%% number of revisions kept, specified in couch_db.hrl (default is 1000). When
-%% this limit is exceeded only the last 1000 are kept. This comes into play
-%% when branches are merged. The comparison has to begin at the same place in
-%% the branches. A revision id is of the form N-XXXXXXX where N is the current
-%% revision depth. So each path will have a start number, calculated in
-%% couch_doc:to_path using the formula N - length(RevIds) + 1. So, e.g., if a
-%% doc was edited 1003 times this start number would be 4, indicating that 3
-%% revisions were truncated.
-%%
-%% This comes into play in @see merge_at/3 which recursively walks down one
-%% tree or the other until they begin at the same revision.
-
--module(couch_key_tree).
-
--export([
- count_leafs/1,
- find_missing/2,
- fold/3,
- get/2,
- get_all_leafs/1,
- get_all_leafs_full/1,
- get_full_key_paths/2,
- get_key_leafs/2,
- map/2,
- map_leafs/2,
- mapfold/3,
- multi_merge/2,
- merge/2,
- remove_leafs/2,
- stem/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--type treenode() :: {Key :: term(), Value :: term(), [Node :: treenode()]}.
--type tree() :: {Depth :: pos_integer(), [treenode()]}.
--type revtree() :: [tree()].
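-
-% For illustration (a sketch, not part of the original module), a document
-% whose history is A->B and A->C (a conflict) could be represented by the
-% single-element revtree
-%   [{1, {"a", ValA, [{"b", ValB, []}, {"c", ValC, []}]}}]
-% where 1 is the start depth of the root key and the two empty child lists
-% mark "b" and "c" as leaves (count_leafs/1 would return 2).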
-
-%% @doc Merge multiple paths into the given tree.
--spec multi_merge(revtree(), tree()) -> revtree().
-multi_merge(RevTree, Trees) ->
- lists:foldl(
- fun(Tree, RevTreeAcc) ->
- {NewRevTree, _} = merge(RevTreeAcc, Tree),
- NewRevTree
- end,
- RevTree,
- lists:sort(Trees)
- ).
-
-%% @doc Merge a path into a tree.
--spec merge(revtree(), tree() | path()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
-merge(RevTree, Tree) ->
- {Merged, Result} = merge_tree(RevTree, Tree, []),
- {lists:sort(Merged), Result}.
-
-%% @private
-%% @doc Attempt to merge Tree into each branch of the RevTree.
-%% If it can't find a branch that the new tree merges into, add it as a
-%% new branch in the RevTree.
--spec merge_tree(revtree(), tree() | path(), revtree()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
-merge_tree([], Tree, []) ->
- {[Tree], new_leaf};
-merge_tree([], Tree, MergeAcc) ->
- {[Tree | MergeAcc], new_branch};
-merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes} = Tree, MergeAcc) ->
- % For the intrepid observer following along at home, notice what we're
- % doing here with (Depth - IDepth). This tells us which of the two
- % branches (Nodes or INodes) we need to seek into. If Depth > IDepth
-    % that means we need to go into INodes to find where we line up with
-    % Nodes. If Depth < IDepth, it's obviously the other way. If it turns
- % out that (Depth - IDepth) == 0, then we know that this is where
- % we begin our actual merge operation (ie, looking for key matches).
-    % It's helpful to note that this whole moving into sub-branches is due
-    % to how we store trees that have been stemmed. When a path is
-    % stemmed so that the root node is lost, we wrap it in a tuple with
-    % the number of keys that have been dropped. This number is the depth
- % value that's used throughout this module.
- case merge_at([Nodes], Depth - IDepth, [INodes]) of
- {[Merged], Result} ->
- NewDepth = erlang:min(Depth, IDepth),
- {Rest ++ [{NewDepth, Merged} | MergeAcc], Result};
- fail ->
- merge_tree(Rest, Tree, [{Depth, Nodes} | MergeAcc])
- end.
-
-%% @private
-%% @doc Locate the point at which merging can start.
-%% Because of stemming we may need to seek into one of the branches
-%% before we can start comparing node keys. If one of the branches
-%% ends up running out of nodes we know that these two branches can
-%% not be merged.
--spec merge_at([node()], integer(), [node()]) ->
- {revtree(), new_leaf | new_branch | internal_node} | fail.
-merge_at(_Nodes, _Pos, []) ->
- fail;
-merge_at([], _Pos, _INodes) ->
- fail;
-merge_at(Nodes, Pos, [{IK, IV, [NextINode]}]) when Pos > 0 ->
- % Depth was bigger than IDepth, so we need to discard from the
- % insert path to find where it might start matching.
- case merge_at(Nodes, Pos - 1, [NextINode]) of
- {Merged, Result} -> {[{IK, IV, Merged}], Result};
- fail -> fail
- end;
-merge_at(_Nodes, Pos, [{_IK, _IV, []}]) when Pos > 0 ->
- % We've run out of path on the insert side, there's no way we can
- % merge with this branch
- fail;
-merge_at([{K, V, SubTree} | Sibs], Pos, INodes) when Pos < 0 ->
- % When Pos is negative, Depth was less than IDepth, so we
- % need to discard from the revision tree path
- case merge_at(SubTree, Pos + 1, INodes) of
- {Merged, Result} ->
- {[{K, V, Merged} | Sibs], Result};
- fail ->
- % Merging along the subtree failed. We need to also try
- % merging the insert branch against the siblings of this
- % node.
- case merge_at(Sibs, Pos, INodes) of
- {Merged, Result} -> {[{K, V, SubTree} | Merged], Result};
- fail -> fail
- end
- end;
-merge_at([{K, V1, Nodes} | Sibs], 0, [{K, V2, INodes}]) ->
- % Keys are equal. At this point we have found a possible starting
- % position for our merge to take place.
- {Merged, Result} = merge_extend(Nodes, INodes),
- {[{K, value_pref(V1, V2), Merged} | Sibs], Result};
-merge_at([{K1, _, _} | _], 0, [{K2, _, _}]) when K1 > K2 ->
- % Siblings keys are ordered, no point in continuing
- fail;
-merge_at([Tree | Sibs], 0, INodes) ->
- % INodes key comes after this key, so move on to the next sibling.
- case merge_at(Sibs, 0, INodes) of
- {Merged, Result} -> {[Tree | Merged], Result};
- fail -> fail
- end.
-
--spec merge_extend(revtree(), revtree()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
-merge_extend([], B) when B =/= [] ->
- % Most likely the insert branch simply extends this one, so the new
-    % branch is exactly B. It's also possible that B is a branch because
- % its key sorts greater than all siblings of an internal node. This
- % condition is checked in the last clause of this function and the
- % new_leaf result is fixed to be new_branch.
- {B, new_leaf};
-merge_extend(A, []) ->
-    % The insert branch ends at an internal node in our original revtree(),
-    % so the end result is exactly our original revtree.
- {A, internal_node};
-merge_extend([{K, V1, SubA} | NextA], [{K, V2, SubB}]) ->
- % Here we're simply extending the path to the next deeper
- % level in the two branches.
- {Merged, Result} = merge_extend(SubA, SubB),
- {[{K, value_pref(V1, V2), Merged} | NextA], Result};
-merge_extend([{K1, _, _} = NodeA | Rest], [{K2, _, _} = NodeB]) when K1 > K2 ->
- % Keys are ordered so we know this is where the insert branch needs
- % to be inserted into the tree. We also know that this creates a new
- % branch so we have a new leaf to report.
- {[NodeB, NodeA | Rest], new_branch};
-merge_extend([Tree | RestA], NextB) ->
- % Here we're moving on to the next sibling to try and extend our
- % merge even deeper. The length check is due to the fact that the
- % key in NextB might be larger than the largest key in RestA which
- % means we've created a new branch.
- {Merged, Result0} = merge_extend(RestA, NextB),
- Result =
- case length(Merged) == length(RestA) of
- true -> Result0;
- false -> new_branch
- end,
- {[Tree | Merged], Result}.
-
-find_missing(_Tree, []) ->
- [];
-find_missing([], SeachKeys) ->
- SeachKeys;
-find_missing([{Start, {Key, Value, SubTree}} | RestTree], SeachKeys) ->
- PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos >= Start],
- ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos < Start],
- Missing = find_missing_simple(Start, [{Key, Value, SubTree}], PossibleKeys),
- find_missing(RestTree, ImpossibleKeys ++ Missing).
-
-find_missing_simple(_Pos, _Tree, []) ->
- [];
-find_missing_simple(_Pos, [], SeachKeys) ->
- SeachKeys;
-find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SeachKeys) ->
- PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos >= Pos],
- ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos < Pos],
-
- SrcKeys2 = PossibleKeys -- [{Pos, Key}],
- SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
- ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).
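-
-% For illustration (not part of the original module), with the revtree
-%   [{1, {"a", V, [{"b", V, []}]}}]
-% find_missing(RevTree, [{1, "a"}, {2, "b"}, {2, "x"}]) returns [{2, "x"}],
-% i.e. only the keys that do not appear in the tree.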
-
-filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) ->
- {FilteredAcc, RemovedKeysAcc};
-filter_leafs([{Pos, [{LeafKey, _} | _]} = Path | Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
- FilteredKeys = lists:delete({Pos, LeafKey}, Keys),
- if
- FilteredKeys == Keys ->
- % this leaf is not a key we are looking to remove
- filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
- true ->
- % this did match a key, remove both the node and the input key
- filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
- end.
-
-% Removes any branches from the tree whose leaf node(s) are in the Keys
-remove_leafs(Trees, Keys) ->
- % flatten each branch in a tree into a tree path
- Paths = get_all_leafs_full(Trees),
-
- % filter out any that are in the keys list.
- {FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []),
-
- SortedPaths = lists:sort(
- [{Pos + 1 - length(Path), Path} || {Pos, Path} <- FilteredPaths]
- ),
-
- % convert paths back to trees
- NewTree = lists:foldl(
- fun({StartPos, Path}, TreeAcc) ->
- [SingleTree] = lists:foldl(
- fun({K, V}, NewTreeAcc) -> [{K, V, NewTreeAcc}] end, [], Path
- ),
- {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
- NewTrees
- end,
- [],
- SortedPaths
- ),
- {NewTree, RemovedKeys}.
-
-% Get the leafs in the tree matching the keys. The matching key nodes can be
-% leafs or inner nodes. If an inner node matches, then the leafs for that
-% node are returned.
-get_key_leafs(Tree, Keys) ->
- get_key_leafs(Tree, Keys, []).
-
-get_key_leafs(_, [], Acc) ->
- {Acc, []};
-get_key_leafs([], Keys, Acc) ->
- {Acc, Keys};
-get_key_leafs([{Pos, Tree} | Rest], Keys, Acc) ->
- {Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
- get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).
-
-get_key_leafs_simple(_Pos, _Tree, [], _PathAcc) ->
- {[], []};
-get_key_leafs_simple(_Pos, [], Keys, _PathAcc) ->
- {[], Keys};
-get_key_leafs_simple(Pos, [{Key, _, SubTree} = Tree | RestTree], Keys, PathAcc) ->
- case lists:delete({Pos, Key}, Keys) of
- Keys ->
- % Same list, key not found
- NewPathAcc = [Key | PathAcc],
- {ChildLeafs, Keys2} = get_key_leafs_simple(Pos + 1, SubTree, Keys, NewPathAcc),
- {SiblingLeafs, Keys3} = get_key_leafs_simple(Pos, RestTree, Keys2, PathAcc),
- {ChildLeafs ++ SiblingLeafs, Keys3};
- Keys2 ->
- % This is a key we were looking for, get all descendant
- % leafs while removing any requested key we find. Notice
- % that this key will be returned by get_key_leafs_simple2
- % if it's a leaf so there's no need to return it here.
- {ChildLeafs, Keys3} = get_key_leafs_simple2(Pos, [Tree], Keys2, PathAcc),
- {SiblingLeafs, Keys4} = get_key_leafs_simple(Pos, RestTree, Keys3, PathAcc),
- {ChildLeafs ++ SiblingLeafs, Keys4}
- end.
-
-get_key_leafs_simple2(_Pos, [], Keys, _PathAcc) ->
- % No more tree to deal with so no more keys to return.
- {[], Keys};
-get_key_leafs_simple2(Pos, [{Key, Value, []} | RestTree], Keys, PathAcc) ->
- % This is a leaf as defined by having an empty list of
- % child nodes. The assertion is a bit subtle but the function
-    % clause match means it's a leaf.
- Keys2 = lists:delete({Pos, Key}, Keys),
- {SiblingLeafs, Keys3} = get_key_leafs_simple2(Pos, RestTree, Keys2, PathAcc),
- {[{Value, {Pos, [Key | PathAcc]}} | SiblingLeafs], Keys3};
-get_key_leafs_simple2(Pos, [{Key, _Value, SubTree} | RestTree], Keys, PathAcc) ->
- % This isn't a leaf. Recurse into the subtree and then
- % process any sibling branches.
- Keys2 = lists:delete({Pos, Key}, Keys),
- NewPathAcc = [Key | PathAcc],
- {ChildLeafs, Keys3} = get_key_leafs_simple2(Pos + 1, SubTree, Keys2, NewPathAcc),
- {SiblingLeafs, Keys4} = get_key_leafs_simple2(Pos, RestTree, Keys3, PathAcc),
- {ChildLeafs ++ SiblingLeafs, Keys4}.
-
-get(Tree, KeysToGet) ->
- {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
- FixedResults = [
- {Value, {Pos, [Key0 || {Key0, _} <- Path]}}
- || {Pos, [{_Key, Value} | _] = Path} <- KeyPaths
- ],
- {FixedResults, KeysNotFound}.
-
-get_full_key_paths(Tree, Keys) ->
- get_full_key_paths(Tree, Keys, []).
-
-get_full_key_paths(_, [], Acc) ->
- {Acc, []};
-get_full_key_paths([], Keys, Acc) ->
- {Acc, Keys};
-get_full_key_paths([{Pos, Tree} | Rest], Keys, Acc) ->
- {Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
- get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
-
-get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
- {[], []};
-get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
- {[], KeysToGet};
-get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
- KeysToGet2 = KeysToGet -- [{Pos, KeyId}],
- CurrentNodeResult =
- case length(KeysToGet2) =:= length(KeysToGet) of
- % not in the key list.
- true ->
- [];
-            % this node is in the key list; return it
- false ->
- [{Pos, [{KeyId, Value} | KeyPathAcc]}]
- end,
- {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [
- {KeyId, Value} | KeyPathAcc
- ]),
- {KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc),
- {CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.
-
-get_all_leafs_full(Tree) ->
- get_all_leafs_full(Tree, []).
-
-get_all_leafs_full([], Acc) ->
- Acc;
-get_all_leafs_full([{Pos, Tree} | Rest], Acc) ->
- get_all_leafs_full(Rest, get_all_leafs_full_simple(Pos, [Tree], []) ++ Acc).
-
-get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
- [];
-get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
- [{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)];
-get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) ->
- get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++
- get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
-
-get_all_leafs(Trees) ->
- get_all_leafs(Trees, []).
-
-get_all_leafs([], Acc) ->
- Acc;
-get_all_leafs([{Pos, Tree} | Rest], Acc) ->
- get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
-
-get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
- [];
-get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
- [{Value, {Pos, [KeyId | KeyPathAcc]}} | get_all_leafs_simple(Pos, RestTree, KeyPathAcc)];
-get_all_leafs_simple(Pos, [{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) ->
- get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++
- get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
-
-count_leafs([]) ->
- 0;
-count_leafs([{_Pos, Tree} | Rest]) ->
- count_leafs_simple([Tree]) + count_leafs(Rest).
-
-count_leafs_simple([]) ->
- 0;
-count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
- 1 + count_leafs_simple(RestTree);
-count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) ->
- count_leafs_simple(SubTree) + count_leafs_simple(RestTree).
-
-fold(_Fun, Acc, []) ->
- Acc;
-fold(Fun, Acc0, [{Pos, Tree} | Rest]) ->
- Acc1 = fold_simple(Fun, Acc0, Pos, [Tree]),
- fold(Fun, Acc1, Rest).
-
-fold_simple(_Fun, Acc, _Pos, []) ->
- Acc;
-fold_simple(Fun, Acc0, Pos, [{Key, Value, SubTree} | RestTree]) ->
- Type =
- if
- SubTree == [] -> leaf;
- true -> branch
- end,
- Acc1 = Fun({Pos, Key}, Value, Type, Acc0),
- Acc2 = fold_simple(Fun, Acc1, Pos + 1, SubTree),
- fold_simple(Fun, Acc2, Pos, RestTree).
-
-map(_Fun, []) ->
- [];
-map(Fun, [{Pos, Tree} | Rest]) ->
- case erlang:fun_info(Fun, arity) of
- {arity, 2} ->
- [NewTree] = map_simple(fun(A, B, _C) -> Fun(A, B) end, Pos, [Tree]),
- [{Pos, NewTree} | map(Fun, Rest)];
- {arity, 3} ->
- [NewTree] = map_simple(Fun, Pos, [Tree]),
- [{Pos, NewTree} | map(Fun, Rest)]
- end.
-
-map_simple(_Fun, _Pos, []) ->
- [];
-map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
- Value2 = Fun(
- {Pos, Key},
- Value,
- if
- SubTree == [] -> leaf;
- true -> branch
- end
- ),
- [{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)].
-
-mapfold(_Fun, Acc, []) ->
- {[], Acc};
-mapfold(Fun, Acc, [{Pos, Tree} | Rest]) ->
- {[NewTree], Acc2} = mapfold_simple(Fun, Acc, Pos, [Tree]),
- {Rest2, Acc3} = mapfold(Fun, Acc2, Rest),
- {[{Pos, NewTree} | Rest2], Acc3}.
-
-mapfold_simple(_Fun, Acc, _Pos, []) ->
- {[], Acc};
-mapfold_simple(Fun, Acc, Pos, [{Key, Value, SubTree} | RestTree]) ->
- {Value2, Acc2} = Fun(
- {Pos, Key},
- Value,
- if
- SubTree == [] -> leaf;
- true -> branch
- end,
- Acc
- ),
- {SubTree2, Acc3} = mapfold_simple(Fun, Acc2, Pos + 1, SubTree),
- {RestTree2, Acc4} = mapfold_simple(Fun, Acc3, Pos, RestTree),
- {[{Key, Value2, SubTree2} | RestTree2], Acc4}.
-
-map_leafs(_Fun, []) ->
- [];
-map_leafs(Fun, [{Pos, Tree} | Rest]) ->
- [NewTree] = map_leafs_simple(Fun, Pos, [Tree]),
- [{Pos, NewTree} | map_leafs(Fun, Rest)].
-
-map_leafs_simple(_Fun, _Pos, []) ->
- [];
-map_leafs_simple(Fun, Pos, [{Key, Value, []} | RestTree]) ->
- Value2 = Fun({Pos, Key}, Value),
- [{Key, Value2, []} | map_leafs_simple(Fun, Pos, RestTree)];
-map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
- [{Key, Value, map_leafs_simple(Fun, Pos + 1, SubTree)} | map_leafs_simple(Fun, Pos, RestTree)].
-
-stem(Trees, Limit) ->
- try
- {_, Branches} = lists:foldl(
- fun(Tree, {Seen, TreeAcc}) ->
- {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen),
- {NewSeen, NewBranches ++ TreeAcc}
- end,
- {#{}, []},
- Trees
- ),
- lists:sort(Branches)
- catch
- throw:dupe_keys ->
- repair_tree(Trees, Limit)
- end.
-
-stem_tree({Depth, Child}, Limit, Seen) ->
- case stem_tree(Depth, Child, Limit, Seen) of
- {NewSeen, _, NewChild, NewBranches} ->
- {NewSeen, [{Depth, NewChild} | NewBranches]};
- {NewSeen, _, NewBranches} ->
- {NewSeen, NewBranches}
- end.
-
-stem_tree(_Depth, {Key, _Val, []} = Leaf, Limit, Seen) ->
- {check_key(Key, Seen), Limit - 1, Leaf, []};
-stem_tree(Depth, {Key, Val, Children}, Limit, Seen0) ->
- Seen1 = check_key(Key, Seen0),
- FinalAcc = lists:foldl(
- fun(Child, Acc) ->
- {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc,
- case stem_tree(Depth + 1, Child, Limit, SeenAcc) of
- {NewSeenAcc, LimitPos, NewChild, NewBranches} ->
- NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
- NewChildAcc = [NewChild | ChildAcc],
- NewBranchAcc = NewBranches ++ BranchAcc,
- {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc};
- {NewSeenAcc, LimitPos, NewBranches} ->
- NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
- NewBranchAcc = NewBranches ++ BranchAcc,
- {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc}
- end
- end,
- {Seen1, -1, [], []},
- Children
- ),
- {FinalSeen, FinalLimitPos, FinalChildren, FinalBranches} = FinalAcc,
- case FinalLimitPos of
- N when N > 0, length(FinalChildren) > 0 ->
- FinalNode = {Key, Val, lists:reverse(FinalChildren)},
- {FinalSeen, FinalLimitPos - 1, FinalNode, FinalBranches};
- 0 when length(FinalChildren) > 0 ->
- NewBranches = lists:map(
- fun(Child) ->
- {Depth + 1, Child}
- end,
- lists:reverse(FinalChildren)
- ),
- {FinalSeen, -1, NewBranches ++ FinalBranches};
- N when N < 0, length(FinalChildren) == 0 ->
- {FinalSeen, FinalLimitPos - 1, FinalBranches}
- end.
-
-check_key(Key, Seen) ->
- case Seen of
- #{Key := true} ->
- throw(dupe_keys);
- _ ->
- Seen#{Key => true}
- end.
-
-repair_tree(Trees, Limit) ->
- % flatten each branch in a tree into a tree path, sort by starting rev #
- Paths = lists:sort(
- lists:map(
- fun({Pos, Path}) ->
- StemmedPath = lists:sublist(Path, Limit),
- {Pos + 1 - length(StemmedPath), StemmedPath}
- end,
- get_all_leafs_full(Trees)
- )
- ),
-
- % convert paths back to trees
- lists:foldl(
- fun({StartPos, Path}, TreeAcc) ->
- [SingleTree] = lists:foldl(
- fun({K, V}, NewTreeAcc) -> [{K, V, NewTreeAcc}] end, [], Path
- ),
- {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
- NewTrees
- end,
- [],
- Paths
- ).
-
-value_pref(Tuple, _) when
- is_tuple(Tuple),
- (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4)
-->
- Tuple;
-value_pref(_, Tuple) when
- is_tuple(Tuple),
- (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4)
-->
- Tuple;
-value_pref(?REV_MISSING, Other) ->
- Other;
-value_pref(Other, ?REV_MISSING) ->
- Other;
-value_pref(Last, _) ->
- Last.
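stem/2 above limits each revision path to its newest Limit revisions, re-rooting whatever survives at a deeper start position. A minimal sketch of the effect, assuming stem/2 is exported (the export list appears earlier in this file's diff) and using placeholder keys and values:

    Tree = [{1, {<<"1-a">>, x, [{<<"2-b">>, y, []}]}}],
    %% With a limit of 2 the full path is kept:
    [{1, {<<"1-a">>, x, [{<<"2-b">>, y, []}]}}] = couch_key_tree:stem(Tree, 2),
    %% With a limit of 1 only the leaf survives, re-rooted at position 2:
    [{2, {<<"2-b">>, y, []}}] = couch_key_tree:stem(Tree, 1).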
diff --git a/src/couch/src/couch_lru.erl b/src/couch/src/couch_lru.erl
deleted file mode 100644
index 1fad20280..000000000
--- a/src/couch/src/couch_lru.erl
+++ /dev/null
@@ -1,68 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_lru).
--export([new/0, insert/2, update/2, close/1]).
-
--include("couch_server_int.hrl").
-
-new() ->
- {gb_trees:empty(), dict:new()}.
-
-insert(DbName, {Tree0, Dict0}) ->
- Lru = couch_util:unique_monotonic_integer(),
- {gb_trees:insert(Lru, DbName, Tree0), dict:store(DbName, Lru, Dict0)}.
-
-update(DbName, {Tree0, Dict0}) ->
- case dict:find(DbName, Dict0) of
- {ok, Old} ->
- New = couch_util:unique_monotonic_integer(),
- Tree = gb_trees:insert(New, DbName, gb_trees:delete(Old, Tree0)),
- Dict = dict:store(DbName, New, Dict0),
- {Tree, Dict};
- error ->
- % We closed this database before processing the update. Ignore
- {Tree0, Dict0}
- end.
-
-%% Attempt to close the oldest idle database.
-close({Tree, _} = Cache) ->
- close_int(gb_trees:next(gb_trees:iterator(Tree)), Cache).
-
-%% internals
-
-close_int(none, _) ->
- false;
-close_int({Lru, DbName, Iter}, {Tree, Dict} = Cache) ->
- CouchDbs = couch_server:couch_dbs(DbName),
- CouchDbsPidToName = couch_server:couch_dbs_pid_to_name(DbName),
-
- case ets:update_element(CouchDbs, DbName, {#entry.lock, locked}) of
- true ->
- [#entry{db = Db, pid = Pid}] = ets:lookup(CouchDbs, DbName),
- case couch_db:is_idle(Db) of
- true ->
- true = ets:delete(CouchDbs, DbName),
- true = ets:delete(CouchDbsPidToName, Pid),
- exit(Pid, kill),
- {true, {gb_trees:delete(Lru, Tree), dict:erase(DbName, Dict)}};
- false ->
- ElemSpec = {#entry.lock, unlocked},
- true = ets:update_element(CouchDbs, DbName, ElemSpec),
- couch_stats:increment_counter([couchdb, couch_server, lru_skip]),
- close_int(gb_trees:next(Iter), update(DbName, Cache))
- end;
- false ->
- NewTree = gb_trees:delete(Lru, Tree),
- NewIter = gb_trees:iterator(NewTree),
- close_int(gb_trees:next(NewIter), {NewTree, dict:erase(DbName, Dict)})
- end.
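A minimal sketch of driving the LRU structure above (close/1 additionally needs the couch_server ETS tables, so only the bookkeeping calls are shown; the database names are placeholders):

    Lru0 = couch_lru:new(),
    Lru1 = couch_lru:insert(<<"db1">>, Lru0),
    Lru2 = couch_lru:insert(<<"db2">>, Lru1),
    %% Touching db1 makes db2 the oldest entry, so close/1 would try db2 first:
    Lru3 = couch_lru:update(<<"db1">>, Lru2).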
diff --git a/src/couch/src/couch_multidb_changes.erl b/src/couch/src/couch_multidb_changes.erl
deleted file mode 100644
index adb1b740f..000000000
--- a/src/couch/src/couch_multidb_changes.erl
+++ /dev/null
@@ -1,859 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_multidb_changes).
-
--behaviour(gen_server).
-
--export([
- start_link/4
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
-]).
-
--export([
- changes_reader/3,
- changes_reader_cb/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(CTX, {user_ctx, #user_ctx{roles = [<<"_admin">>, <<"_replicator">>]}}).
-
--define(AVG_DELAY_MSEC, 10).
--define(MAX_DELAY_MSEC, 120000).
-
--record(state, {
- tid :: ets:tid(),
- mod :: atom(),
- ctx :: term(),
- suffix :: binary(),
- event_server :: reference(),
- scanner :: nil | pid(),
- pids :: [{binary(), pid()}],
- skip_ddocs :: boolean()
-}).
-
-% Behavior API
-
-% For each db shard with a matching suffix, report created,
-% deleted, found (discovered) and change events.
-
--callback db_created(DbName :: binary(), Context :: term()) ->
- Context :: term().
-
--callback db_deleted(DbName :: binary(), Context :: term()) ->
- Context :: term().
-
--callback db_found(DbName :: binary(), Context :: term()) ->
- Context :: term().
-
--callback db_change(DbName :: binary(), Change :: term(), Context :: term()) ->
- Context :: term().
-
-% External API
-
-% Opts list can contain:
-% - `skip_ddocs` : Skip design docs
-
--spec start_link(binary(), module(), term(), list()) ->
- {ok, pid()} | ignore | {error, term()}.
-start_link(DbSuffix, Module, Context, Opts) when
- is_binary(DbSuffix), is_atom(Module), is_list(Opts)
-->
- gen_server:start_link(?MODULE, [DbSuffix, Module, Context, Opts], []).
-
-% gen_server callbacks
-
-init([DbSuffix, Module, Context, Opts]) ->
- process_flag(trap_exit, true),
- Server = self(),
- {ok, #state{
- tid = ets:new(?MODULE, [set, protected]),
- mod = Module,
- ctx = Context,
- suffix = DbSuffix,
- event_server = register_with_event_server(Server),
- scanner = spawn_link(fun() -> scan_all_dbs(Server, DbSuffix) end),
- pids = [],
- skip_ddocs = proplists:is_defined(skip_ddocs, Opts)
- }}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(
- {change, DbName, Change},
- _From,
- #state{skip_ddocs = SkipDDocs, mod = Mod, ctx = Ctx} = State
-) ->
- case {SkipDDocs, is_design_doc(Change)} of
- {true, true} ->
- {reply, ok, State};
- {_, _} ->
- {reply, ok, State#state{ctx = Mod:db_change(DbName, Change, Ctx)}}
- end;
-handle_call({checkpoint, DbName, EndSeq}, _From, #state{tid = Ets} = State) ->
- case ets:lookup(Ets, DbName) of
- [] ->
- true = ets:insert(Ets, {DbName, EndSeq, false});
- [{DbName, _OldSeq, Rescan}] ->
- true = ets:insert(Ets, {DbName, EndSeq, Rescan})
- end,
- {reply, ok, State}.
-
-handle_cast({resume_scan, DbName}, State) ->
- {noreply, resume_scan(DbName, State)}.
-
-handle_info({'$couch_event', DbName, Event}, #state{suffix = Suf} = State) ->
- case Suf =:= couch_db:dbname_suffix(DbName) of
- true ->
- {noreply, db_callback(Event, DbName, State)};
- _ ->
- {noreply, State}
- end;
-handle_info({'DOWN', Ref, _, _, Info}, #state{event_server = Ref} = State) ->
- {stop, {couch_event_server_died, Info}, State};
-handle_info({'EXIT', From, normal}, #state{scanner = From} = State) ->
- {noreply, State#state{scanner = nil}};
-handle_info({'EXIT', From, Reason}, #state{scanner = From} = State) ->
- {stop, {scanner_died, Reason}, State};
-handle_info({'EXIT', From, Reason}, #state{pids = Pids} = State) ->
- couch_log:debug("~p change feed exited ~p", [State#state.suffix, From]),
- case lists:keytake(From, 2, Pids) of
- {value, {DbName, From}, NewPids} ->
- if
- Reason == normal ->
- ok;
- true ->
- Fmt = "~s : Known change feed ~w died :: ~w",
- couch_log:error(Fmt, [?MODULE, From, Reason])
- end,
- NewState = State#state{pids = NewPids},
- case ets:lookup(State#state.tid, DbName) of
- [{DbName, _EndSeq, true}] ->
- {noreply, resume_scan(DbName, NewState)};
- _ ->
- {noreply, NewState}
- end;
- false when Reason == normal ->
- {noreply, State};
- false ->
- Fmt = "~s(~p) : Unknown pid ~w died :: ~w",
- couch_log:error(Fmt, [?MODULE, State#state.suffix, From, Reason]),
- {stop, {unexpected_exit, From, Reason}, State}
- end;
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-% Private functions
-
--spec register_with_event_server(pid()) -> reference().
-register_with_event_server(Server) ->
- Ref = erlang:monitor(process, couch_event_server),
- couch_event:register_all(Server),
- Ref.
-
--spec db_callback(created | deleted | updated, binary(), #state{}) -> #state{}.
-db_callback(created, DbName, #state{mod = Mod, ctx = Ctx} = State) ->
- NewState = State#state{ctx = Mod:db_created(DbName, Ctx)},
- resume_scan(DbName, NewState);
-db_callback(deleted, DbName, #state{mod = Mod, ctx = Ctx} = State) ->
- State#state{ctx = Mod:db_deleted(DbName, Ctx)};
-db_callback(updated, DbName, State) ->
- resume_scan(DbName, State);
-db_callback(_Other, _DbName, State) ->
- State.
-
--spec resume_scan(binary(), #state{}) -> #state{}.
-resume_scan(DbName, #state{pids = Pids, tid = Ets} = State) ->
- case {lists:keyfind(DbName, 1, Pids), ets:lookup(Ets, DbName)} of
- {{DbName, _}, []} ->
-            % Found existing change feed, but no entry in ETS
-            % Flag a need to rescan from the beginning
- true = ets:insert(Ets, {DbName, 0, true}),
- State;
- {{DbName, _}, [{DbName, EndSeq, _}]} ->
- % Found existing change feed and entry in ETS
- % Flag a need to rescan from last ETS checkpoint
- true = ets:insert(Ets, {DbName, EndSeq, true}),
- State;
- {false, []} ->
- % No existing change feed running. No entry in ETS.
-            % This is the first time seeing this db shard.
-            % Notify the user with a found callback, insert a checkpoint
-            % entry in ETS starting from 0, and start a change feed.
- true = ets:insert(Ets, {DbName, 0, false}),
- Mod = State#state.mod,
- Ctx = Mod:db_found(DbName, State#state.ctx),
- Pid = start_changes_reader(DbName, 0),
- State#state{ctx = Ctx, pids = [{DbName, Pid} | Pids]};
- {false, [{DbName, EndSeq, _}]} ->
- % No existing change feed running. Found existing checkpoint.
- % Start a new change reader from last checkpoint.
- true = ets:insert(Ets, {DbName, EndSeq, false}),
- Pid = start_changes_reader(DbName, EndSeq),
- State#state{pids = [{DbName, Pid} | Pids]}
- end.
-
-start_changes_reader(DbName, Since) ->
- spawn_link(?MODULE, changes_reader, [self(), DbName, Since]).
-
-changes_reader(Server, DbName, Since) ->
- {ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]),
- ChangesArgs = #changes_args{
- include_docs = true,
- since = Since,
- feed = "normal",
- timeout = infinity
- },
- ChFun = couch_changes:handle_db_changes(ChangesArgs, {json_req, null}, Db),
- ChFun({fun ?MODULE:changes_reader_cb/3, {Server, DbName}}).
-
-changes_reader_cb({change, Change, _}, _, {Server, DbName}) ->
- ok = gen_server:call(Server, {change, DbName, Change}, infinity),
- {Server, DbName};
-changes_reader_cb({stop, EndSeq}, _, {Server, DbName}) ->
- ok = gen_server:call(Server, {checkpoint, DbName, EndSeq}, infinity),
- {Server, DbName};
-changes_reader_cb(_, _, Acc) ->
- Acc.
-
-scan_all_dbs(Server, DbSuffix) when is_pid(Server) ->
- ok = scan_local_db(Server, DbSuffix),
- {ok, Db} = mem3_util:ensure_exists(
- config:get("mem3", "shards_db", "_dbs")
- ),
- ChangesFun = couch_changes:handle_db_changes(#changes_args{}, nil, Db),
- ChangesFun({fun scan_changes_cb/3, {Server, DbSuffix, 1}}),
- couch_db:close(Db).
-
-scan_changes_cb({change, {Change}, _}, _, {_Server, DbSuffix, _Count} = Acc) ->
- DbName = couch_util:get_value(<<"id">>, Change),
- case DbName of
- <<"_design/", _/binary>> ->
- Acc;
- _Else ->
- NameMatch = DbSuffix =:= couch_db:dbname_suffix(DbName),
- case {NameMatch, couch_replicator_utils:is_deleted(Change)} of
- {false, _} ->
- Acc;
- {true, true} ->
- Acc;
- {true, false} ->
- Shards = local_shards(DbName),
- lists:foldl(fun notify_fold/2, Acc, Shards)
- end
- end;
-scan_changes_cb(_, _, Acc) ->
- Acc.
-
-local_shards(DbName) ->
- try
- [ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)]
- catch
- error:database_does_not_exist ->
- []
- end.
-
-notify_fold(DbName, {Server, DbSuffix, Count}) ->
- Jitter = jitter(Count),
- spawn_link(fun() ->
- timer:sleep(Jitter),
- gen_server:cast(Server, {resume_scan, DbName})
- end),
- {Server, DbSuffix, Count + 1}.
-
-% Jitter is proportional to the number of shards found so far. This is done to
-% avoid a stampede and to avoid notifying the callback function with a potentially
-% large number of shards back to back during startup.
-jitter(N) ->
- Range = min(2 * N * ?AVG_DELAY_MSEC, ?MAX_DELAY_MSEC),
- couch_rand:uniform(Range).
-
-scan_local_db(Server, DbSuffix) when is_pid(Server) ->
- case couch_db:open_int(DbSuffix, [?CTX, sys_db, nologifmissing]) of
- {ok, Db} ->
- gen_server:cast(Server, {resume_scan, DbSuffix}),
- ok = couch_db:close(Db);
- _Error ->
- ok
- end.
-
-is_design_doc({Change}) ->
- case lists:keyfind(<<"id">>, 1, Change) of
- false ->
- false;
- {_, Id} ->
- is_design_doc_id(Id)
- end.
-
-is_design_doc_id(<<?DESIGN_DOC_PREFIX, _/binary>>) ->
- true;
-is_design_doc_id(_) ->
- false.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
--define(MOD, multidb_test_module).
--define(SUFFIX, <<"suff">>).
--define(DBNAME, <<"shards/40000000-5fffffff/acct/suff.0123456789">>).
-
-couch_multidb_changes_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_handle_call_change(),
- t_handle_call_change_filter_design_docs(),
- t_handle_call_checkpoint_new(),
- t_handle_call_checkpoint_existing(),
- t_handle_info_created(),
- t_handle_info_deleted(),
- t_handle_info_updated(),
- t_handle_info_other_event(),
- t_handle_info_created_other_db(),
- t_handle_info_scanner_exit_normal(),
- t_handle_info_scanner_crashed(),
- t_handle_info_event_server_exited(),
- t_handle_info_unknown_pid_exited(),
- t_handle_info_change_feed_exited(),
- t_handle_info_change_feed_exited_and_need_rescan(),
- t_spawn_changes_reader(),
- t_changes_reader_cb_change(),
- t_changes_reader_cb_stop(),
- t_changes_reader_cb_other(),
- t_handle_call_resume_scan_no_chfeed_no_ets_entry(),
- t_handle_call_resume_scan_chfeed_no_ets_entry(),
- t_handle_call_resume_scan_chfeed_ets_entry(),
- t_handle_call_resume_scan_no_chfeed_ets_entry(),
- t_start_link(),
- t_start_link_no_ddocs(),
- t_misc_gen_server_callbacks()
- ]
- }
- }.
-
-setup_all() ->
- mock_logs(),
- mock_callback_mod(),
- meck:expect(couch_event, register_all, 1, ok),
- meck:expect(config, get, ["mem3", "shards_db", '_'], "_dbs"),
- meck:expect(mem3_util, ensure_exists, 1, {ok, dbs}),
- ChangesFun = meck:val(fun(_) -> ok end),
- meck:expect(couch_changes, handle_db_changes, 3, ChangesFun),
- meck:expect(
- couch_db,
- open_int,
- fun
- (?DBNAME, [?CTX, sys_db]) -> {ok, db};
- (_, _) -> {not_found, no_db_file}
- end
- ),
- meck:expect(couch_db, close, 1, ok),
- mock_changes_reader(),
-    % create a process to stand in for couch_event_server;
-    % mocking erlang:monitor doesn't work, so give it a real process to monitor
- EvtPid = spawn_link(fun() ->
- receive
- looper -> ok
- end
- end),
- true = register(couch_event_server, EvtPid),
- EvtPid.
-
-teardown_all(EvtPid) ->
- unlink(EvtPid),
- exit(EvtPid, kill),
- meck:unload().
-
-setup() ->
- meck:reset([
- ?MOD,
- couch_changes,
- couch_db,
- couch_event,
- couch_log
- ]).
-
-teardown(_) ->
- ok.
-
-t_handle_call_change() ->
- ?_test(begin
- State = mock_state(),
- Change = change_row(<<"blah">>),
- handle_call_ok({change, ?DBNAME, Change}, State),
- ?assert(meck:validate(?MOD)),
- ?assert(meck:called(?MOD, db_change, [?DBNAME, Change, zig]))
- end).
-
-t_handle_call_change_filter_design_docs() ->
- ?_test(begin
- State0 = mock_state(),
- State = State0#state{skip_ddocs = true},
- Change = change_row(<<"_design/blah">>),
- handle_call_ok({change, ?DBNAME, Change}, State),
- ?assert(meck:validate(?MOD)),
- ?assertNot(meck:called(?MOD, db_change, [?DBNAME, Change, zig]))
- end).
-
-t_handle_call_checkpoint_new() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- handle_call_ok({checkpoint, ?DBNAME, 1}, State),
- ?assertEqual([{?DBNAME, 1, false}], ets:tab2list(Tid)),
- ets:delete(Tid)
- end).
-
-t_handle_call_checkpoint_existing() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- true = ets:insert(Tid, {?DBNAME, 1, true}),
- handle_call_ok({checkpoint, ?DBNAME, 2}, State),
- ?assertEqual([{?DBNAME, 2, true}], ets:tab2list(Tid)),
- ets:delete(Tid)
- end).
-
-t_handle_info_created() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- handle_info_check({'$couch_event', ?DBNAME, created}, State),
- ?assert(meck:validate(?MOD)),
- ?assert(meck:called(?MOD, db_created, [?DBNAME, zig]))
- end).
-
-t_handle_info_deleted() ->
- ?_test(begin
- State = mock_state(),
- handle_info_check({'$couch_event', ?DBNAME, deleted}, State),
- ?assert(meck:validate(?MOD)),
- ?assert(meck:called(?MOD, db_deleted, [?DBNAME, zig]))
- end).
-
-t_handle_info_updated() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- handle_info_check({'$couch_event', ?DBNAME, updated}, State),
- ?assert(meck:validate(?MOD)),
- ?assert(meck:called(?MOD, db_found, [?DBNAME, zig]))
- end).
-
-t_handle_info_other_event() ->
- ?_test(begin
- State = mock_state(),
- handle_info_check({'$couch_event', ?DBNAME, somethingelse}, State),
- ?assertNot(meck:called(?MOD, db_created, [?DBNAME, somethingelse])),
- ?assertNot(meck:called(?MOD, db_deleted, [?DBNAME, somethingelse])),
- ?assertNot(meck:called(?MOD, db_found, [?DBNAME, somethingelse]))
- end).
-
-t_handle_info_created_other_db() ->
- ?_test(begin
- State = mock_state(),
- handle_info_check({'$couch_event', <<"otherdb">>, created}, State),
- ?assertNot(meck:called(?MOD, db_created, [?DBNAME, zig]))
- end).
-
-t_handle_info_scanner_exit_normal() ->
- ?_test(begin
- Res = handle_info({'EXIT', spid, normal}, mock_state()),
- ?assertMatch({noreply, _}, Res),
- {noreply, RState} = Res,
- ?assertEqual(nil, RState#state.scanner)
- end).
-
-t_handle_info_scanner_crashed() ->
- ?_test(begin
- Res = handle_info({'EXIT', spid, oops}, mock_state()),
- ?assertMatch({stop, {scanner_died, oops}, _State}, Res)
- end).
-
-t_handle_info_event_server_exited() ->
- ?_test(begin
- Res = handle_info({'DOWN', esref, type, espid, reason}, mock_state()),
- ?assertMatch({stop, {couch_event_server_died, reason}, _}, Res)
- end).
-
-t_handle_info_unknown_pid_exited() ->
- ?_test(begin
- State0 = mock_state(),
- Res0 = handle_info({'EXIT', somepid, normal}, State0),
- ?assertMatch({noreply, State0}, Res0),
- State1 = mock_state(),
- Res1 = handle_info({'EXIT', somepid, oops}, State1),
- ?assertMatch({stop, {unexpected_exit, somepid, oops}, State1}, Res1)
- end).
-
-t_handle_info_change_feed_exited() ->
- ?_test(begin
- Tid0 = mock_ets(),
- State0 = mock_state(Tid0, cpid),
- Res0 = handle_info({'EXIT', cpid, normal}, State0),
- ?assertMatch({noreply, _}, Res0),
- {noreply, RState0} = Res0,
- ?assertEqual([], RState0#state.pids),
- ets:delete(Tid0),
- Tid1 = mock_ets(),
- State1 = mock_state(Tid1, cpid),
- Res1 = handle_info({'EXIT', cpid, oops}, State1),
- ?assertMatch({noreply, _}, Res1),
- {noreply, RState1} = Res1,
- ?assertEqual([], RState1#state.pids),
- ets:delete(Tid1)
- end).
-
-t_handle_info_change_feed_exited_and_need_rescan() ->
- ?_test(begin
- Tid = mock_ets(),
- true = ets:insert(Tid, {?DBNAME, 1, true}),
- State = mock_state(Tid, cpid),
- Res = handle_info({'EXIT', cpid, normal}, State),
- ?assertMatch({noreply, _}, Res),
- {noreply, RState} = Res,
- % rescan flag should have been reset to false
- ?assertEqual([{?DBNAME, 1, false}], ets:tab2list(Tid)),
- % a mock change feed process should be running
- [{?DBNAME, Pid}] = RState#state.pids,
- ?assert(is_pid(Pid)),
- ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
- ?assertEqual({self(), ?DBNAME}, ChArgs),
- ets:delete(Tid)
- end).
-
-t_spawn_changes_reader() ->
- ?_test(begin
- Pid = start_changes_reader(?DBNAME, 3),
- ?assert(erlang:is_process_alive(Pid)),
- ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
- ?assertEqual({self(), ?DBNAME}, ChArgs),
- ?assert(meck:validate(couch_db)),
- ?assert(meck:validate(couch_changes)),
- ?assert(meck:called(couch_db, open_int, [?DBNAME, [?CTX, sys_db]])),
- ?assert(
- meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 3,
- feed = "normal",
- timeout = infinity
- },
- {json_req, null},
- db
- ])
- )
- end).
-
-t_changes_reader_cb_change() ->
- ?_test(begin
- {ok, Pid} = start_link(?SUFFIX, ?MOD, zig, []),
- Change = change_row(<<"blah">>),
- ChArg = {change, Change, ignore},
- {Pid, ?DBNAME} = changes_reader_cb(ChArg, chtype, {Pid, ?DBNAME}),
- ?assert(meck:called(?MOD, db_change, [?DBNAME, Change, zig])),
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-t_changes_reader_cb_stop() ->
- ?_test(begin
- {ok, Pid} = start_link(?SUFFIX, ?MOD, zig, []),
- ChArg = {stop, 11},
- {Pid, ?DBNAME} = changes_reader_cb(ChArg, chtype, {Pid, ?DBNAME}),
-        % We checkpoint on stop; check that we checkpointed at the correct sequence
- #state{tid = Tid} = sys:get_state(Pid),
- ?assertEqual([{?DBNAME, 11, false}], ets:tab2list(Tid)),
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-t_changes_reader_cb_other() ->
- ?_assertEqual(acc, changes_reader_cb(other, chtype, acc)).
-
-t_handle_call_resume_scan_no_chfeed_no_ets_entry() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- RState = resume_scan(?DBNAME, State),
- % Check if inserted checkpoint entry in ets starting at 0
- ?assertEqual([{?DBNAME, 0, false}], ets:tab2list(Tid)),
- % Check if called db_found callback
- ?assert(meck:called(?MOD, db_found, [?DBNAME, zig])),
- % Check if started a change reader
- [{?DBNAME, Pid}] = RState#state.pids,
- ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
- ?assertEqual({self(), ?DBNAME}, ChArgs),
- ?assert(
- meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 0,
- feed = "normal",
- timeout = infinity
- },
- {json_req, null},
- db
- ])
- ),
- ets:delete(Tid)
- end).
-
-t_handle_call_resume_scan_chfeed_no_ets_entry() ->
- ?_test(begin
- Tid = mock_ets(),
- Pid = start_changes_reader(?DBNAME, 0),
- State = mock_state(Tid, Pid),
- resume_scan(?DBNAME, State),
- % Check ets checkpoint is set to 0 and rescan = true
- ?assertEqual([{?DBNAME, 0, true}], ets:tab2list(Tid)),
- ets:delete(Tid),
- kill_mock_changes_reader_and_get_its_args(Pid)
- end).
-
-t_handle_call_resume_scan_chfeed_ets_entry() ->
- ?_test(begin
- Tid = mock_ets(),
- true = ets:insert(Tid, [{?DBNAME, 2, false}]),
- Pid = start_changes_reader(?DBNAME, 1),
- State = mock_state(Tid, Pid),
- resume_scan(?DBNAME, State),
- % Check ets checkpoint is set to same endseq but rescan = true
- ?assertEqual([{?DBNAME, 2, true}], ets:tab2list(Tid)),
- ets:delete(Tid),
- kill_mock_changes_reader_and_get_its_args(Pid)
- end).
-
-t_handle_call_resume_scan_no_chfeed_ets_entry() ->
- ?_test(begin
- Tid = mock_ets(),
- true = ets:insert(Tid, [{?DBNAME, 1, true}]),
- State = mock_state(Tid),
- RState = resume_scan(?DBNAME, State),
- % Check if reset rescan to false but kept same endseq
- ?assertEqual([{?DBNAME, 1, false}], ets:tab2list(Tid)),
- % Check if started a change reader
- [{?DBNAME, Pid}] = RState#state.pids,
- ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
- ?assertEqual({self(), ?DBNAME}, ChArgs),
- ?assert(
- meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 1,
- feed = "normal",
- timeout = infinity
- },
- {json_req, null},
- db
- ])
- ),
- ets:delete(Tid)
- end).
-
-t_start_link() ->
- ?_test(begin
- {ok, Pid} = start_link(?SUFFIX, ?MOD, nil, []),
- ?assert(is_pid(Pid)),
- ?assertMatch(
- #state{
- mod = ?MOD,
- suffix = ?SUFFIX,
- ctx = nil,
- pids = [],
- skip_ddocs = false
- },
- sys:get_state(Pid)
- ),
- unlink(Pid),
- exit(Pid, kill),
- ?assert(meck:called(couch_event, register_all, [Pid]))
- end).
-
-t_start_link_no_ddocs() ->
- ?_test(begin
- {ok, Pid} = start_link(?SUFFIX, ?MOD, nil, [skip_ddocs]),
- ?assert(is_pid(Pid)),
- ?assertMatch(
- #state{
- mod = ?MOD,
- suffix = ?SUFFIX,
- ctx = nil,
- pids = [],
- skip_ddocs = true
- },
- sys:get_state(Pid)
- ),
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-t_misc_gen_server_callbacks() ->
- ?_test(begin
- ?assertEqual(ok, terminate(reason, state)),
- ?assertEqual({ok, state}, code_change(old, state, extra))
- end).
-
-scan_dbs_test_() ->
- {
- setup,
- fun() ->
- Ctx = test_util:start_couch([mem3, fabric]),
- GlobalDb = ?tempdb(),
- ok = fabric:create_db(GlobalDb, [?CTX]),
- #shard{name = LocalDb} = hd(mem3:local_shards(GlobalDb)),
- {Ctx, GlobalDb, LocalDb}
- end,
- fun({Ctx, GlobalDb, _LocalDb}) ->
- fabric:delete_db(GlobalDb, [?CTX]),
- test_util:stop_couch(Ctx)
- end,
- {with, [
- fun t_find_shard/1,
- fun t_shard_not_found/1,
- fun t_pass_local/1,
- fun t_fail_local/1
- ]}
- }.
-
-t_find_shard({_, DbName, _}) ->
- ?_test(begin
- ?assertEqual(2, length(local_shards(DbName)))
- end).
-
-t_shard_not_found(_) ->
- ?_test(begin
- ?assertEqual([], local_shards(?tempdb()))
- end).
-
-t_pass_local({_, _, LocalDb}) ->
- ?_test(begin
- scan_local_db(self(), LocalDb),
- receive
- {'$gen_cast', Msg} ->
- ?assertEqual(Msg, {resume_scan, LocalDb})
- after 0 ->
- ?assert(false)
- end
- end).
-
-t_fail_local({_, _, LocalDb}) ->
- ?_test(begin
- scan_local_db(self(), <<"some_other_db">>),
- receive
- {'$gen_cast', Msg} ->
- ?assertNotEqual(Msg, {resume_scan, LocalDb})
- after 0 ->
- ?assert(true)
- end
- end).
-
-% Test helper functions
-
-mock_logs() ->
- meck:expect(couch_log, error, 2, ok),
- meck:expect(couch_log, notice, 2, ok),
- meck:expect(couch_log, info, 2, ok),
- meck:expect(couch_log, debug, 2, ok).
-
-mock_callback_mod() ->
- meck:new(?MOD, [non_strict]),
- meck:expect(?MOD, db_created, fun(_DbName, Ctx) -> Ctx end),
- meck:expect(?MOD, db_deleted, fun(_DbName, Ctx) -> Ctx end),
- meck:expect(?MOD, db_found, fun(_DbName, Ctx) -> Ctx end),
- meck:expect(?MOD, db_change, fun(_DbName, _Change, Ctx) -> Ctx end).
-
-mock_changes_reader_loop({_CbFun, {Server, DbName}}) ->
- receive
- die ->
- exit({Server, DbName})
- end.
-
-kill_mock_changes_reader_and_get_its_args(Pid) ->
- Ref = monitor(process, Pid),
- unlink(Pid),
- Pid ! die,
- receive
- {'DOWN', Ref, _, Pid, {Server, DbName}} ->
- {Server, DbName}
- after 1000 ->
- erlang:error(spawn_change_reader_timeout)
- end.
-
-mock_changes_reader() ->
- meck:expect(
- couch_changes,
- handle_db_changes,
- fun
- (_ChArgs, _Req, db) -> fun mock_changes_reader_loop/1;
- (_ChArgs, _Req, dbs) -> fun(_) -> ok end
- end
- ).
-
-mock_ets() ->
- ets:new(multidb_test_ets, [set, public]).
-
-mock_state() ->
- #state{
- mod = ?MOD,
- ctx = zig,
- suffix = ?SUFFIX,
- event_server = esref,
- scanner = spid,
- pids = []
- }.
-
-mock_state(Ets) ->
- State = mock_state(),
- State#state{tid = Ets}.
-
-mock_state(Ets, Pid) ->
- State = mock_state(Ets),
- State#state{pids = [{?DBNAME, Pid}]}.
-
-change_row(Id) when is_binary(Id) ->
- {[
- {<<"seq">>, 1},
- {<<"id">>, Id},
- {<<"changes">>, [{[{<<"rev">>, <<"1-f00">>}]}]},
- {doc, {[{<<"_id">>, Id}, {<<"_rev">>, <<"1-f00">>}]}}
- ]}.
-
-handle_call_ok(Msg, State) ->
- ?assertMatch({reply, ok, _}, handle_call(Msg, from, State)).
-
-handle_info_check(Msg, State) ->
- ?assertMatch({noreply, _}, handle_info(Msg, State)).
-
--endif.
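A minimal callback module for the behaviour above could look like the following sketch; the module name and log messages are illustrative, not part of CouchDB:

    -module(my_changes_listener).
    -behaviour(couch_multidb_changes).

    -export([db_created/2, db_deleted/2, db_found/2, db_change/3]).

    db_created(DbName, Ctx) ->
        couch_log:notice("shard created: ~s", [DbName]),
        Ctx.

    db_deleted(DbName, Ctx) ->
        couch_log:notice("shard deleted: ~s", [DbName]),
        Ctx.

    db_found(DbName, Ctx) ->
        couch_log:notice("shard found: ~s", [DbName]),
        Ctx.

    db_change(DbName, _Change, Ctx) ->
        couch_log:notice("change in shard: ~s", [DbName]),
        Ctx.

It would then be wired up with something like couch_multidb_changes:start_link(<<"_replicator">>, my_changes_listener, nil, [skip_ddocs]).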
diff --git a/src/couch/src/couch_native_process.erl b/src/couch/src/couch_native_process.erl
deleted file mode 100644
index feea00c3a..000000000
--- a/src/couch/src/couch_native_process.erl
+++ /dev/null
@@ -1,488 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License");
-% you may not use this file except in compliance with the License.
-%
-% You may obtain a copy of the License at
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing,
-% software distributed under the License is distributed on an
-% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
-% either express or implied.
-%
-% See the License for the specific language governing permissions
-% and limitations under the License.
-%
-% This file drew much inspiration from erlview, which was written by and
-% copyright Michael McDaniel [http://autosys.us], and is also under APL 2.0
-%
-%
-% This module provides the smallest possible native view-server.
-% With this module in place, you can add the following to your couch INI files:
-% [native_query_servers]
-% erlang={couch_native_process, start_link, []}
-%
-% Which will then allow the following example map function to be used:
-%
-% fun({Doc}) ->
-% % Below, we emit a single record - the _id as key, null as value
-% DocId = couch_util:get_value(<<"_id">>, Doc, null),
-% Emit(DocId, null)
-% end.
-%
-% which should be roughly the same as the javascript:
-% emit(doc._id, null);
-%
-% This module exposes enough functions such that a native erlang server can
-% act as a fully-fledged view server, but no 'helper' functions specifically
-% for simplifying your erlang view code. It is expected that other third-party
-% extensions will evolve which offer useful layers on top of this view server
-% to help simplify your view code.
--module(couch_native_process).
--behaviour(gen_server).
--vsn(1).
-
--export([
- start_link/0,
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- code_change/3,
- handle_info/2
-]).
--export([set_timeout/2, prompt/2]).
-
--define(STATE, native_proc_state).
--record(evstate, {
- ddocs,
- funs = [],
- query_config = [],
- list_pid = nil,
- timeout = 5000,
- idle = 5000
-}).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link() ->
- gen_server:start_link(?MODULE, [], []).
-
-% this is a bit messy, see also couch_query_servers handle_info
-% stop(_Pid) ->
-% ok.
-
-set_timeout(Pid, TimeOut) ->
- gen_server:call(Pid, {set_timeout, TimeOut}).
-
-prompt(Pid, Data) when is_list(Data) ->
- gen_server:call(Pid, {prompt, Data}).
-
-% gen_server callbacks
-init([]) ->
- V = config:get("query_server_config", "os_process_idle_limit", "300"),
- Idle = list_to_integer(V) * 1000,
- {ok, #evstate{ddocs = dict:new(), idle = Idle}, Idle}.
-
-handle_call({set_timeout, TimeOut}, _From, State) ->
- {reply, ok, State#evstate{timeout = TimeOut}, State#evstate.idle};
-handle_call({prompt, Data}, _From, State) ->
- couch_log:debug("Prompt native qs: ~s", [?JSON_ENCODE(Data)]),
- {NewState, Resp} =
- try run(State, to_binary(Data)) of
- {S, R} -> {S, R}
- catch
- throw:{error, Why} ->
- {State, [<<"error">>, Why, Why]}
- end,
-
- Idle = State#evstate.idle,
- case Resp of
- {error, Reason} ->
- Msg = io_lib:format("couch native server error: ~p", [Reason]),
- Error = [<<"error">>, <<"native_query_server">>, list_to_binary(Msg)],
- {reply, Error, NewState, Idle};
- [<<"error">> | Rest] ->
- % Msg = io_lib:format("couch native server error: ~p", [Rest]),
- % TODO: markh? (jan)
- {reply, [<<"error">> | Rest], NewState, Idle};
- [<<"fatal">> | Rest] ->
- % Msg = io_lib:format("couch native server error: ~p", [Rest]),
- % TODO: markh? (jan)
- {stop, fatal, [<<"error">> | Rest], NewState};
- Resp ->
- {reply, Resp, NewState, Idle}
- end.
-
-handle_cast(garbage_collect, State) ->
- erlang:garbage_collect(),
- {noreply, State, State#evstate.idle};
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(_Msg, State) ->
- {noreply, State, State#evstate.idle}.
-
-handle_info(timeout, State) ->
- gen_server:cast(couch_proc_manager, {os_proc_idle, self()}),
- erlang:garbage_collect(),
- {noreply, State, State#evstate.idle};
-handle_info({'EXIT', _, normal}, State) ->
- {noreply, State, State#evstate.idle};
-handle_info({'EXIT', _, Reason}, State) ->
- {stop, Reason, State}.
-terminate(_Reason, _State) -> ok.
-code_change(_OldVersion, State, _Extra) -> {ok, State}.
-
-run(#evstate{list_pid = Pid} = State, [<<"list_row">>, Row]) when is_pid(Pid) ->
- Pid ! {self(), list_row, Row},
- receive
- {Pid, chunks, Data} ->
- {State, [<<"chunks">>, Data]};
- {Pid, list_end, Data} ->
- receive
- {'EXIT', Pid, normal} -> ok
- after State#evstate.timeout ->
- throw({timeout, list_cleanup})
- end,
- process_flag(trap_exit, erlang:get(do_trap)),
- {State#evstate{list_pid = nil}, [<<"end">>, Data]}
- after State#evstate.timeout ->
- throw({timeout, list_row})
- end;
-run(#evstate{list_pid = Pid} = State, [<<"list_end">>]) when is_pid(Pid) ->
- Pid ! {self(), list_end},
- Resp =
- receive
- {Pid, list_end, Data} ->
- receive
- {'EXIT', Pid, normal} -> ok
- after State#evstate.timeout ->
- throw({timeout, list_cleanup})
- end,
- [<<"end">>, Data]
- after State#evstate.timeout ->
- throw({timeout, list_end})
- end,
- process_flag(trap_exit, erlang:get(do_trap)),
- {State#evstate{list_pid = nil}, Resp};
-run(#evstate{list_pid = Pid} = State, _Command) when is_pid(Pid) ->
- {State, [<<"error">>, list_error, list_error]};
-run(#evstate{ddocs = DDocs}, [<<"reset">>]) ->
- {#evstate{ddocs = DDocs}, true};
-run(#evstate{ddocs = DDocs, idle = Idle}, [<<"reset">>, QueryConfig]) ->
- NewState = #evstate{
- ddocs = DDocs,
- query_config = QueryConfig,
- idle = Idle
- },
- {NewState, true};
-run(#evstate{funs = Funs} = State, [<<"add_fun">>, BinFunc]) ->
- FunInfo = makefun(State, BinFunc),
- {State#evstate{funs = Funs ++ [FunInfo]}, true};
-run(State, [<<"map_doc">>, Doc]) ->
- Resp = lists:map(
- fun({Sig, Fun}) ->
- erlang:put(Sig, []),
- Fun(Doc),
- lists:reverse(erlang:get(Sig))
- end,
- State#evstate.funs
- ),
- {State, Resp};
-run(State, [<<"reduce">>, Funs, KVs]) ->
- {Keys, Vals} =
- lists:foldl(
- fun([K, V], {KAcc, VAcc}) ->
- {[K | KAcc], [V | VAcc]}
- end,
- {[], []},
- KVs
- ),
- Keys2 = lists:reverse(Keys),
- Vals2 = lists:reverse(Vals),
- {State, catch reduce(State, Funs, Keys2, Vals2, false)};
-run(State, [<<"rereduce">>, Funs, Vals]) ->
- {State, catch reduce(State, Funs, null, Vals, true)};
-run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
- DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
- {State#evstate{ddocs = DDocs2}, true};
-run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, DDocId | Rest]) ->
- DDoc = load_ddoc(DDocs, DDocId),
- ddoc(State, DDoc, Rest);
-run(_, Unknown) ->
- couch_log:error("Native Process: Unknown command: ~p~n", [Unknown]),
- throw({error, unknown_command}).
-
-ddoc(State, {DDoc}, [FunPath, Args]) ->
- % load fun from the FunPath
- BFun = lists:foldl(
- fun
- (Key, {Props}) when is_list(Props) ->
- couch_util:get_value(Key, Props, nil);
- (_Key, Fun) when is_binary(Fun) ->
- Fun;
- (_Key, nil) ->
- throw({error, not_found});
- (_Key, _Fun) ->
- throw({error, malformed_ddoc})
- end,
- {DDoc},
- FunPath
- ),
- ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
-
-ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
- {State, (catch apply(Fun, Args))};
-ddoc(State, {_, Fun}, [<<"rewrites">>], Args) ->
- {State, (catch apply(Fun, Args))};
-ddoc(State, {_, Fun}, [<<"filters">> | _], [Docs, Req]) ->
- FilterFunWrapper = fun(Doc) ->
- case catch Fun(Doc, Req) of
- true -> true;
- false -> false;
- {'EXIT', Error} -> couch_log:error("~p", [Error])
- end
- end,
- Resp = lists:map(FilterFunWrapper, Docs),
- {State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"views">> | _], [Docs]) ->
- MapFunWrapper = fun(Doc) ->
- case catch Fun(Doc) of
- undefined -> true;
- ok -> false;
- false -> false;
- [_ | _] -> true;
- {'EXIT', Error} -> couch_log:error("~p", [Error])
- end
- end,
- Resp = lists:map(MapFunWrapper, Docs),
- {State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"shows">> | _], Args) ->
- Resp =
- case (catch apply(Fun, Args)) of
- FunResp when is_list(FunResp) ->
- FunResp;
- {FunResp} ->
- [<<"resp">>, {FunResp}];
- FunResp ->
- FunResp
- end,
- {State, Resp};
-ddoc(State, {_, Fun}, [<<"updates">> | _], Args) ->
- Resp =
- case (catch apply(Fun, Args)) of
- [JsonDoc, JsonResp] ->
- [<<"up">>, JsonDoc, JsonResp]
- end,
- {State, Resp};
-ddoc(State, {Sig, Fun}, [<<"lists">> | _], Args) ->
- Self = self(),
- SpawnFun = fun() ->
- LastChunk = (catch apply(Fun, Args)),
- case start_list_resp(Self, Sig) of
- started ->
- receive
- {Self, list_row, _Row} -> ignore;
- {Self, list_end} -> ignore
- after State#evstate.timeout ->
- throw({timeout, list_cleanup_pid})
- end;
- _ ->
- ok
- end,
- LastChunks =
- case erlang:get(Sig) of
- undefined -> [LastChunk];
- OtherChunks -> [LastChunk | OtherChunks]
- end,
- Self ! {self(), list_end, lists:reverse(LastChunks)}
- end,
- erlang:put(do_trap, process_flag(trap_exit, true)),
- Pid = spawn_link(SpawnFun),
- Resp =
- receive
- {Pid, start, Chunks, JsonResp} ->
- [<<"start">>, Chunks, JsonResp]
- after State#evstate.timeout ->
- throw({timeout, list_start})
- end,
- {State#evstate{list_pid = Pid}, Resp}.
-
-store_ddoc(DDocs, DDocId, DDoc) ->
- dict:store(DDocId, DDoc, DDocs).
-load_ddoc(DDocs, DDocId) ->
- try dict:fetch(DDocId, DDocs) of
- {DDoc} -> {DDoc}
- catch
- _:_Else ->
- throw(
- {error,
- ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s", [DDocId]))}
- )
- end.
-
-bindings(State, Sig) ->
- bindings(State, Sig, nil).
-bindings(State, Sig, DDoc) ->
- Self = self(),
-
- Log = fun(Msg) ->
- couch_log:info(Msg, [])
- end,
-
- Emit = fun(Id, Value) ->
- Curr = erlang:get(Sig),
- erlang:put(Sig, [[Id, Value] | Curr])
- end,
-
- Start = fun(Headers) ->
- erlang:put(list_headers, Headers)
- end,
-
- Send = fun(Chunk) ->
- Curr =
- case erlang:get(Sig) of
- undefined -> [];
- Else -> Else
- end,
- erlang:put(Sig, [Chunk | Curr])
- end,
-
- GetRow = fun() ->
- case start_list_resp(Self, Sig) of
- started ->
- ok;
- _ ->
- Chunks =
- case erlang:get(Sig) of
- undefined -> [];
- CurrChunks -> CurrChunks
- end,
- Self ! {self(), chunks, lists:reverse(Chunks)}
- end,
- erlang:put(Sig, []),
- receive
- {Self, list_row, Row} -> Row;
- {Self, list_end} -> nil
- after State#evstate.timeout ->
- throw({timeout, list_pid_getrow})
- end
- end,
-
- FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,
-
- Bindings = [
- {'Log', Log},
- {'Emit', Emit},
- {'Start', Start},
- {'Send', Send},
- {'GetRow', GetRow},
- {'FoldRows', FoldRows}
- ],
- case DDoc of
- {_Props} ->
- Bindings ++ [{'DDoc', DDoc}];
- _Else ->
- Bindings
- end.
-
-% thanks to erlview, via:
-% http://erlang.org/pipermail/erlang-questions/2003-November/010544.html
-makefun(State, Source) ->
- Sig = couch_hash:md5_hash(Source),
- BindFuns = bindings(State, Sig),
- {Sig, makefun(State, Source, BindFuns)}.
-makefun(State, Source, {DDoc}) ->
- Sig = couch_hash:md5_hash(lists:flatten([Source, term_to_binary(DDoc)])),
- BindFuns = bindings(State, Sig, {DDoc}),
- {Sig, makefun(State, Source, BindFuns)};
-makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
- FunStr = binary_to_list(Source),
- {ok, Tokens, _} = erl_scan:string(FunStr),
- Form =
- case (catch erl_parse:parse_exprs(Tokens)) of
- {ok, [ParsedForm]} ->
- ParsedForm;
- {error, {LineNum, _Mod, [Mesg, Params]}} = Error ->
- couch_log:error(
- "Syntax error on line: ~p~n~s~p~n",
- [LineNum, Mesg, Params]
- ),
- throw(Error)
- end,
- Bindings = lists:foldl(
- fun({Name, Fun}, Acc) ->
- erl_eval:add_binding(Name, Fun, Acc)
- end,
- erl_eval:new_bindings(),
- BindFuns
- ),
- {value, Fun, _} = erl_eval:expr(Form, Bindings),
- Fun.
-
-reduce(State, BinFuns, Keys, Vals, ReReduce) ->
- Funs =
- case is_list(BinFuns) of
- true ->
- lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
- _ ->
- [makefun(State, BinFuns)]
- end,
- Reds = lists:map(
- fun({_Sig, Fun}) ->
- Fun(Keys, Vals, ReReduce)
- end,
- Funs
- ),
- [true, Reds].
-
-foldrows(GetRow, ProcRow, Acc) ->
- case GetRow() of
- nil ->
- {ok, Acc};
- Row ->
- case (catch ProcRow(Row, Acc)) of
- {ok, Acc2} ->
- foldrows(GetRow, ProcRow, Acc2);
- {stop, Acc2} ->
- {ok, Acc2}
- end
- end.
-
-start_list_resp(Self, Sig) ->
- case erlang:get(list_started) of
- undefined ->
- Headers =
- case erlang:get(list_headers) of
- undefined -> {[{<<"headers">>, {[]}}]};
- CurrHdrs -> CurrHdrs
- end,
- Chunks =
- case erlang:get(Sig) of
- undefined -> [];
- CurrChunks -> CurrChunks
- end,
- Self ! {self(), start, lists:reverse(Chunks), Headers},
- erlang:put(list_started, true),
- erlang:put(Sig, []),
- started;
- _ ->
- ok
- end.
-
-to_binary({Data}) ->
- Pred = fun({Key, Value}) ->
- {to_binary(Key), to_binary(Value)}
- end,
- {lists:map(Pred, Data)};
-to_binary(Data) when is_list(Data) ->
- [to_binary(D) || D <- Data];
-to_binary(null) ->
- null;
-to_binary(true) ->
- true;
-to_binary(false) ->
- false;
-to_binary(Data) when is_atom(Data) ->
- list_to_binary(atom_to_list(Data));
-to_binary(Data) ->
- Data.
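Along the same lines as the map function in the module header above, a native reduce function has to match the Fun(Keys, Values, ReReduce) calling convention used by reduce/5; a hypothetical count-style reducer could be sketched as:

    fun(_Keys, Values, false) ->
        %% reduce: one count per group of input rows
        length(Values);
       (_Keys, Counts, true) ->
        %% rereduce: sum the partial counts
        lists:sum(Counts)
    end.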
diff --git a/src/couch/src/couch_os_process.erl b/src/couch/src/couch_os_process.erl
deleted file mode 100644
index da5df5134..000000000
--- a/src/couch/src/couch_os_process.erl
+++ /dev/null
@@ -1,274 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_os_process).
--behaviour(gen_server).
--vsn(1).
-
--export([start_link/1, start_link/2, start_link/3, stop/1]).
--export([set_timeout/2, prompt/2, killer/1]).
--export([send/2, writeline/2, readline/1, writejson/2, readjson/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
-
--record(os_proc, {
- command,
- port,
- writer,
- reader,
- timeout = 5000,
- idle
-}).
-
-start_link(Command) ->
- start_link(Command, []).
-start_link(Command, Options) ->
- start_link(Command, Options, ?PORT_OPTIONS).
-start_link(Command, Options, PortOptions) ->
- gen_server:start_link(couch_os_process, [Command, Options, PortOptions], []).
-
-stop(Pid) ->
- gen_server:cast(Pid, stop).
-
-% Read/Write API
-set_timeout(Pid, TimeOut) when is_integer(TimeOut) ->
- ok = gen_server:call(Pid, {set_timeout, TimeOut}, infinity).
-
-% Used by couch_event_os_process.erl
-send(Pid, Data) ->
- gen_server:cast(Pid, {send, Data}).
-
-prompt(Pid, Data) ->
- case ioq:call(Pid, {prompt, Data}, erlang:get(io_priority)) of
- {ok, Result} ->
- Result;
- Error ->
- couch_log:error("OS Process Error ~p :: ~p", [Pid, Error]),
- throw(Error)
- end.
-
-% Utility functions for reading and writing
-% in custom functions
-writeline(OsProc, Data) when is_record(OsProc, os_proc) ->
- Res = port_command(OsProc#os_proc.port, [Data, $\n]),
- couch_io_logger:log_output(Data),
- Res.
-
-readline(#os_proc{} = OsProc) ->
- Res = readline(OsProc, []),
- couch_io_logger:log_input(Res),
- Res.
-readline(#os_proc{port = Port} = OsProc, Acc) ->
- receive
- {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
- readline(OsProc, <<Acc/binary, Data/binary>>);
- {Port, {data, {noeol, Data}}} when is_binary(Data) ->
- readline(OsProc, Data);
- {Port, {data, {noeol, Data}}} ->
- readline(OsProc, [Data | Acc]);
- {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
- [<<Acc/binary, Data/binary>>];
- {Port, {data, {eol, Data}}} when is_binary(Data) ->
- [Data];
- {Port, {data, {eol, Data}}} ->
- lists:reverse(Acc, Data);
- {Port, Err} ->
- catch port_close(Port),
- throw({os_process_error, Err})
- after OsProc#os_proc.timeout ->
- catch port_close(Port),
- throw({os_process_error, "OS process timed out."})
- end.
-
-% Standard JSON functions
-writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
- JsonData = ?JSON_ENCODE(Data),
- couch_log:debug(
- "OS Process ~p Input :: ~s",
- [OsProc#os_proc.port, JsonData]
- ),
- true = writeline(OsProc, JsonData).
-
-readjson(OsProc) when is_record(OsProc, os_proc) ->
- Line = iolist_to_binary(readline(OsProc)),
- couch_log:debug("OS Process ~p Output :: ~s", [OsProc#os_proc.port, Line]),
- try
- % Don't actually parse the whole JSON. Just try to see if it's
- % a command or a doc map/reduce/filter/show/list/update output.
- % If it's a command then parse the whole JSON and execute the
- % command, otherwise return the raw JSON line to the caller.
- pick_command(Line)
- catch
- throw:abort ->
- {json, Line};
- throw:{cmd, _Cmd} ->
- case ?JSON_DECODE(Line) of
- [<<"log">>, Msg] when is_binary(Msg) ->
- % we got a message to log. Log it and continue
- couch_log:info(
- "OS Process ~p Log :: ~s",
- [OsProc#os_proc.port, Msg]
- ),
- readjson(OsProc);
- [<<"error">>, Id, Reason] ->
- throw({error, {couch_util:to_existing_atom(Id), Reason}});
- [<<"fatal">>, Id, Reason] ->
- couch_log:info(
- "OS Process ~p Fatal Error :: ~s ~p",
- [OsProc#os_proc.port, Id, Reason]
- ),
- throw({couch_util:to_existing_atom(Id), Reason});
- _Result ->
- {json, Line}
- end
- end.
-
-pick_command(Line) ->
- json_stream_parse:events(Line, fun pick_command0/1).
-
-pick_command0(array_start) ->
- fun pick_command1/1;
-pick_command0(_) ->
- throw(abort).
-
-pick_command1(<<"log">> = Cmd) ->
- throw({cmd, Cmd});
-pick_command1(<<"error">> = Cmd) ->
- throw({cmd, Cmd});
-pick_command1(<<"fatal">> = Cmd) ->
- throw({cmd, Cmd});
-pick_command1(_) ->
- throw(abort).
-
-% gen_server API
-init([Command, Options, PortOptions]) ->
- couch_io_logger:start(os:getenv("COUCHDB_IO_LOG_DIR")),
- PrivDir = couch_util:priv_dir(),
- Spawnkiller = "\"" ++ filename:join(PrivDir, "couchspawnkillable") ++ "\"",
- V = config:get("query_server_config", "os_process_idle_limit", "300"),
- IdleLimit = list_to_integer(V) * 1000,
- BaseProc = #os_proc{
- command = Command,
- port = open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
- writer = fun ?MODULE:writejson/2,
- reader = fun ?MODULE:readjson/1,
- idle = IdleLimit
- },
- KillCmd = iolist_to_binary(readline(BaseProc)),
- Pid = self(),
- couch_log:debug("OS Process Start :: ~p", [BaseProc#os_proc.port]),
- spawn(fun() ->
-        % this ensures the real os process is killed when this process dies.
- erlang:monitor(process, Pid),
- killer(?b2l(KillCmd))
- end),
- OsProc =
- lists:foldl(
- fun(Opt, Proc) ->
- case Opt of
- {writer, Writer} when is_function(Writer) ->
- Proc#os_proc{writer = Writer};
- {reader, Reader} when is_function(Reader) ->
- Proc#os_proc{reader = Reader};
- {timeout, TimeOut} when is_integer(TimeOut) ->
- Proc#os_proc{timeout = TimeOut}
- end
- end,
- BaseProc,
- Options
- ),
- {ok, OsProc, IdleLimit}.
-
-terminate(Reason, #os_proc{port = Port}) ->
- catch port_close(Port),
- case Reason of
- normal ->
- couch_io_logger:stop_noerror();
- Error ->
- couch_io_logger:stop_error(Error)
- end,
- ok.
-
-handle_call({set_timeout, TimeOut}, _From, #os_proc{idle = Idle} = OsProc) ->
- {reply, ok, OsProc#os_proc{timeout = TimeOut}, Idle};
-handle_call({prompt, Data}, _From, #os_proc{idle = Idle} = OsProc) ->
- #os_proc{writer = Writer, reader = Reader} = OsProc,
- try
- Writer(OsProc, Data),
- {reply, {ok, Reader(OsProc)}, OsProc, Idle}
- catch
- throw:{error, OsError} ->
- {reply, OsError, OsProc, Idle};
- throw:{fatal, OsError} ->
- {stop, normal, OsError, OsProc};
- throw:OtherError ->
- {stop, normal, OtherError, OsProc}
- after
- garbage_collect()
- end.
-
-handle_cast({send, Data}, #os_proc{writer = Writer, idle = Idle} = OsProc) ->
- try
- Writer(OsProc, Data),
- {noreply, OsProc, Idle}
- catch
- throw:OsError ->
- couch_log:error("Failed sending data: ~p -> ~p", [Data, OsError]),
- {stop, normal, OsProc}
- end;
-handle_cast(garbage_collect, #os_proc{idle = Idle} = OsProc) ->
- erlang:garbage_collect(),
- {noreply, OsProc, Idle};
-handle_cast(stop, OsProc) ->
- {stop, normal, OsProc};
-handle_cast(Msg, #os_proc{idle = Idle} = OsProc) ->
- couch_log:debug("OS Proc: Unknown cast: ~p", [Msg]),
- {noreply, OsProc, Idle}.
-
-handle_info(timeout, #os_proc{idle = Idle} = OsProc) ->
- gen_server:cast(couch_proc_manager, {os_proc_idle, self()}),
- erlang:garbage_collect(),
- {noreply, OsProc, Idle};
-handle_info({Port, {exit_status, 0}}, #os_proc{port = Port} = OsProc) ->
- couch_log:info("OS Process terminated normally", []),
- {stop, normal, OsProc};
-handle_info({Port, {exit_status, Status}}, #os_proc{port = Port} = OsProc) ->
- couch_log:error("OS Process died with status: ~p", [Status]),
- {stop, {exit_status, Status}, OsProc};
-handle_info(Msg, #os_proc{idle = Idle} = OsProc) ->
- couch_log:debug("OS Proc: Unknown info: ~p", [Msg]),
- {noreply, OsProc, Idle}.
-
-code_change(_, {os_proc, Cmd, Port, W, R, Timeout}, _) ->
- V = config:get("query_server_config", "os_process_idle_limit", "300"),
- State = #os_proc{
- command = Cmd,
- port = Port,
- writer = W,
- reader = R,
- timeout = Timeout,
- idle = list_to_integer(V) * 1000
- },
- {ok, State};
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-killer(KillCmd) ->
- receive
- _ ->
- os:cmd(KillCmd)
- after 1000 ->
- ?MODULE:killer(KillCmd)
- end.
diff --git a/src/couch/src/couch_partition.erl b/src/couch/src/couch_partition.erl
deleted file mode 100644
index 101b5b324..000000000
--- a/src/couch/src/couch_partition.erl
+++ /dev/null
@@ -1,155 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_partition).
-
--export([
- extract/1,
- from_docid/1,
- is_member/2,
-
- start_key/1,
- end_key/1,
- shard_key/1,
-
- validate_dbname/2,
- validate_docid/1,
- validate_partition/1,
-
- hash/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
-extract(Value) when is_binary(Value) ->
- case binary:split(Value, <<":">>) of
- [Partition, Rest] ->
- {Partition, Rest};
- _ ->
- undefined
- end;
-extract(_) ->
- undefined.
-
-from_docid(DocId) ->
- case extract(DocId) of
- undefined ->
- throw({illegal_docid, <<"Doc id must be of form partition:id">>});
- {Partition, _} ->
- Partition
- end.
-
-is_member(DocId, Partition) ->
- case extract(DocId) of
- {Partition, _} ->
- true;
- _ ->
- false
- end.
-
-start_key(Partition) ->
- <<Partition/binary, ":">>.
-
-end_key(Partition) ->
- <<Partition/binary, ";">>.
-
-shard_key(Partition) ->
- <<Partition/binary, ":foo">>.
-
-validate_dbname(DbName, Options) when is_list(DbName) ->
- validate_dbname(?l2b(DbName), Options);
-validate_dbname(DbName, Options) when is_binary(DbName) ->
- Props = couch_util:get_value(props, Options, []),
- IsPartitioned = couch_util:get_value(partitioned, Props, false),
-
- if
- not IsPartitioned ->
- ok;
- true ->
- DbsDbName = config:get("mem3", "shards_db", "_dbs"),
- NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
- UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
- Suffix = couch_db:dbname_suffix(DbName),
-
- SysDbNames = [
- iolist_to_binary(DbsDbName),
- iolist_to_binary(NodesDbName)
- | ?SYSTEM_DATABASES
- ],
-
- Suffices = [
- <<"_replicator">>,
- <<"_users">>,
- iolist_to_binary(UsersDbSuffix)
- ],
-
- IsSysDb =
- lists:member(DbName, SysDbNames) orelse
- lists:member(Suffix, Suffices),
-
- if
- not IsSysDb -> ok;
- true -> throw({bad_request, <<"Cannot partition a system database">>})
- end
- end.
-
-validate_docid(<<"_design/", _/binary>>) ->
- ok;
-validate_docid(<<"_local/", _/binary>>) ->
- ok;
-validate_docid(DocId) when is_binary(DocId) ->
- % When this function is called we already know that
-    % DocId is valid, thus we only need to
- % ensure that the partition exists and is not empty.
- case extract(DocId) of
- undefined ->
- throw({illegal_docid, <<"Doc id must be of form partition:id">>});
- {Partition, PartitionedDocId} ->
- validate_partition(Partition),
- couch_doc:validate_docid(PartitionedDocId)
- end.
-
-validate_partition(<<>>) ->
- throw({illegal_partition, <<"Partition must not be empty">>});
-validate_partition(Partition) when is_binary(Partition) ->
- case Partition of
- <<"_", _/binary>> ->
- Msg1 = <<"Partition must not start with an underscore">>,
- throw({illegal_partition, Msg1});
- _ ->
- ok
- end,
- case couch_util:validate_utf8(Partition) of
- true ->
- ok;
- false ->
- Msg2 = <<"Partition must be valid UTF-8">>,
- throw({illegal_partition, Msg2})
- end,
- case extract(Partition) of
- {_, _} ->
- Msg3 = <<"Partition must not contain a colon">>,
- throw({illegal_partition, Msg3});
- undefined ->
- ok
- end;
-validate_partition(_) ->
- throw({illegal_partition, <<"Partition must be a string">>}).
-
-% Document ids that start with an underscore
-% (i.e., _local and _design) do not contain a
-% partition and thus do not use the partition
-% hashing.
-hash(<<"_", _/binary>> = DocId) ->
- erlang:crc32(DocId);
-hash(DocId) when is_binary(DocId) ->
- erlang:crc32(from_docid(DocId)).
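
For orientation: in a partitioned database every document id has the form
partition:id, and the helpers above derive everything else from that prefix.
The shard placement hash is the CRC-32 of the partition alone, and
start_key/1 and end_key/1 bracket a partition's key range because ";" is the
byte that follows ":". A minimal shell sketch, using a made-up partition and
doc id:

    %% Illustrative values only.
    1> couch_partition:extract(<<"sensor-1:reading-42">>).
    {<<"sensor-1">>, <<"reading-42">>}
    2> {couch_partition:start_key(<<"sensor-1">>), couch_partition:end_key(<<"sensor-1">>)}.
    {<<"sensor-1:">>, <<"sensor-1;">>}
    3> couch_partition:hash(<<"sensor-1:reading-42">>) =:= erlang:crc32(<<"sensor-1">>).
    true
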
diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl
deleted file mode 100644
index 828d2f68b..000000000
--- a/src/couch/src/couch_passwords.erl
+++ /dev/null
@@ -1,200 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_passwords).
-
--export([simple/2, pbkdf2/3, pbkdf2/4, verify/2]).
--export([hash_admin_password/1, get_unhashed_admins/0]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(MAX_DERIVED_KEY_LENGTH, (1 bsl 32 - 1)).
--define(SHA1_OUTPUT_LENGTH, 20).
-
-%% legacy scheme, not used for new passwords.
--spec simple(binary(), binary()) -> binary().
-simple(Password, Salt) when is_binary(Password), is_binary(Salt) ->
- ?l2b(couch_util:to_hex(crypto:hash(sha, <<Password/binary, Salt/binary>>)));
-simple(Password, Salt) when is_binary(Salt) ->
- Msg = io_lib:format("Password value of '~p' is invalid.", [Password]),
- throw({forbidden, Msg});
-simple(Password, Salt) when is_binary(Password) ->
- Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]),
- throw({forbidden, Msg}).
-
-%% CouchDB utility functions
--spec hash_admin_password(binary() | list()) -> binary().
-hash_admin_password(ClearPassword) when is_list(ClearPassword) ->
- hash_admin_password(?l2b(ClearPassword));
-hash_admin_password(ClearPassword) when is_binary(ClearPassword) ->
- %% Support both schemes to smooth migration from legacy scheme
- Scheme = chttpd_util:get_chttpd_auth_config("password_scheme", "pbkdf2"),
- hash_admin_password(Scheme, ClearPassword).
-
-% deprecated
-hash_admin_password("simple", ClearPassword) ->
- Salt = couch_uuids:random(),
- Hash = crypto:hash(sha, <<ClearPassword/binary, Salt/binary>>),
- ?l2b("-hashed-" ++ couch_util:to_hex(Hash) ++ "," ++ ?b2l(Salt));
-hash_admin_password("pbkdf2", ClearPassword) ->
- Iterations = chttpd_util:get_chttpd_auth_config("iterations", "10"),
- Salt = couch_uuids:random(),
- DerivedKey = couch_passwords:pbkdf2(
- couch_util:to_binary(ClearPassword),
- Salt,
- list_to_integer(Iterations)
- ),
- ?l2b(
- "-pbkdf2-" ++ ?b2l(DerivedKey) ++ "," ++
- ?b2l(Salt) ++ "," ++
- Iterations
- ).
-
--spec get_unhashed_admins() -> list().
-get_unhashed_admins() ->
- lists:filter(
- fun
- ({_User, "-hashed-" ++ _}) ->
- % already hashed
- false;
- ({_User, "-pbkdf2-" ++ _}) ->
- % already hashed
- false;
- ({_User, _ClearPassword}) ->
- true
- end,
- config:get("admins")
- ).
-
-%% Current scheme, much stronger.
--spec pbkdf2(binary(), binary(), integer()) -> binary().
-pbkdf2(Password, Salt, Iterations) when
- is_binary(Password),
- is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0
-->
- {ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH),
- Result;
-pbkdf2(Password, Salt, Iterations) when
- is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0
-->
- Msg = io_lib:format("Password value of '~p' is invalid.", [Password]),
- throw({forbidden, Msg});
-pbkdf2(Password, Salt, Iterations) when
- is_binary(Password),
- is_integer(Iterations),
- Iterations > 0
-->
- Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]),
- throw({forbidden, Msg}).
-
--spec pbkdf2(binary(), binary(), integer(), integer()) ->
- {ok, binary()} | {error, derived_key_too_long}.
-pbkdf2(_Password, _Salt, _Iterations, DerivedLength) when
- DerivedLength > ?MAX_DERIVED_KEY_LENGTH
-->
- {error, derived_key_too_long};
-pbkdf2(Password, Salt, Iterations, DerivedLength) when
- is_binary(Password),
- is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0,
- is_integer(DerivedLength)
-->
- L = ceiling(DerivedLength / ?SHA1_OUTPUT_LENGTH),
- <<Bin:DerivedLength/binary, _/binary>> =
- iolist_to_binary(pbkdf2(Password, Salt, Iterations, L, 1, [])),
- {ok, ?l2b(couch_util:to_hex(Bin))}.
-
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist()) ->
- iolist().
-pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc) when
- BlockIndex > BlockCount
-->
- lists:reverse(Acc);
-pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex, Acc) ->
- Block = pbkdf2(Password, Salt, Iterations, BlockIndex, 1, <<>>, <<>>),
- pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block | Acc]).
-
--spec pbkdf2(
- binary(),
- binary(),
- integer(),
- integer(),
- integer(),
- binary(),
- binary()
-) -> binary().
-pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc) when
- Iteration > Iterations
-->
- Acc;
-pbkdf2(Password, Salt, Iterations, BlockIndex, 1, _Prev, _Acc) ->
- InitialBlock = couch_util:hmac(
- sha,
- Password,
- <<Salt/binary, BlockIndex:32/integer>>
- ),
- pbkdf2(
- Password,
- Salt,
- Iterations,
- BlockIndex,
- 2,
- InitialBlock,
- InitialBlock
- );
-pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
- Next = couch_util:hmac(sha, Password, Prev),
- pbkdf2(
- Password,
- Salt,
- Iterations,
- BlockIndex,
- Iteration + 1,
- Next,
- crypto:exor(Next, Acc)
- ).
-
-%% Verify two lists for equality without short-circuiting, to avoid timing attacks.
--spec verify(string(), string(), integer()) -> boolean().
-verify([X | RestX], [Y | RestY], Result) ->
- verify(RestX, RestY, (X bxor Y) bor Result);
-verify([], [], Result) ->
- Result == 0.
-
--spec verify
- (binary(), binary()) -> boolean();
- (list(), list()) -> boolean().
-verify(<<X/binary>>, <<Y/binary>>) ->
- verify(?b2l(X), ?b2l(Y));
-verify(X, Y) when is_list(X) and is_list(Y) ->
- case length(X) == length(Y) of
- true ->
- verify(X, Y, 0);
- false ->
- false
- end;
-verify(_X, _Y) ->
- false.
-
--spec ceiling(number()) -> integer().
-ceiling(X) ->
- T = erlang:trunc(X),
- case (X - T) of
- Neg when Neg < 0 -> T;
- Pos when Pos > 0 -> T + 1;
- _ -> T
- end.
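
For context on the code above: a "pbkdf2" admin hash is stored as the string
-pbkdf2-<derived key hex>,<salt>,<iterations>, where the derived key is the
40-character hex encoding of the 20-byte (SHA-1-sized) PBKDF2 output, and
verify/2 compares two strings of equal length in constant time. A minimal
sketch with made-up inputs (the salt and derived key differ on every run):

    %% Illustrative values only.
    Salt = couch_uuids:random(),
    DerivedKey = couch_passwords:pbkdf2(<<"secret">>, Salt, 10),
    40 = byte_size(DerivedKey),
    true = couch_passwords:verify(DerivedKey, DerivedKey),
    false = couch_passwords:verify(DerivedKey, <<"not the same value">>).
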
diff --git a/src/couch/src/couch_primary_sup.erl b/src/couch/src/couch_primary_sup.erl
deleted file mode 100644
index 4f2917f98..000000000
--- a/src/couch/src/couch_primary_sup.erl
+++ /dev/null
@@ -1,34 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_primary_sup).
--behaviour(supervisor).
--export([init/1, start_link/0]).
-
-start_link() ->
- supervisor:start_link({local, couch_primary_services}, ?MODULE, []).
-
-init([]) ->
- Children =
- [
- {couch_task_status, {couch_task_status, start_link, []}, permanent, brutal_kill, worker,
- [couch_task_status]}
- ] ++ couch_servers(),
- {ok, {{one_for_one, 10, 3600}, Children}}.
-
-couch_servers() ->
- N = couch_server:num_servers(),
- [couch_server(I) || I <- lists:seq(1, N)].
-
-couch_server(N) ->
- Name = couch_server:couch_server(N),
- {Name, {couch_server, sup_start_link, [N]}, permanent, brutal_kill, worker, [couch_server]}.
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
deleted file mode 100644
index 6d86c16a7..000000000
--- a/src/couch/src/couch_proc_manager.erl
+++ /dev/null
@@ -1,576 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_proc_manager).
--behaviour(gen_server).
--behaviour(config_listener).
--vsn(3).
-
--export([
- start_link/0,
- get_proc_count/0,
- get_stale_proc_count/0,
- new_proc/1,
- reload/0,
- terminate_stale_procs/0,
- get_servers_from_env/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--export([
- handle_config_change/5,
- handle_config_terminate/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(PROCS, couch_proc_manager_procs).
--define(WAITERS, couch_proc_manager_waiters).
--define(OPENING, couch_proc_manager_opening).
--define(SERVERS, couch_proc_manager_servers).
--define(RELISTEN_DELAY, 5000).
-
--record(state, {
- config,
- counts,
- threshold_ts,
- hard_limit,
- soft_limit
-}).
-
--type docid() :: iodata().
--type revision() :: {integer(), binary()}.
-
--record(client, {
- timestamp :: os:timestamp() | '_',
- from :: undefined | {pid(), reference()} | '_',
- lang :: binary() | '_',
- ddoc :: #doc{} | '_',
- ddoc_key :: undefined | {DDocId :: docid(), Rev :: revision()} | '_'
-}).
-
--record(proc_int, {
- pid,
- lang,
- client,
- ddoc_keys = [],
- prompt_fun,
- set_timeout_fun,
- stop_fun,
- t0 = os:timestamp()
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_proc_count() ->
- gen_server:call(?MODULE, get_proc_count).
-
-get_stale_proc_count() ->
- gen_server:call(?MODULE, get_stale_proc_count).
-
-reload() ->
- gen_server:call(?MODULE, set_threshold_ts).
-
-terminate_stale_procs() ->
- gen_server:call(?MODULE, terminate_stale_procs).
-
-init([]) ->
- process_flag(trap_exit, true),
- ok = config:listen_for_changes(?MODULE, undefined),
-
- TableOpts = [public, named_table, ordered_set],
- ets:new(?PROCS, TableOpts ++ [{keypos, #proc_int.pid}]),
- ets:new(?WAITERS, TableOpts ++ [{keypos, #client.timestamp}]),
- ets:new(?OPENING, [public, named_table, set]),
- ets:new(?SERVERS, [public, named_table, set]),
- ets:insert(?SERVERS, get_servers_from_env("COUCHDB_QUERY_SERVER_")),
- ets:insert(?SERVERS, get_servers_from_env("COUCHDB_NATIVE_QUERY_SERVER_")),
- ets:insert(?SERVERS, [{"QUERY", {mango_native_proc, start_link, []}}]),
- maybe_configure_erlang_native_servers(),
-
- {ok, #state{
- config = get_proc_config(),
- counts = dict:new(),
- threshold_ts = os:timestamp(),
- hard_limit = get_hard_limit(),
- soft_limit = get_soft_limit()
- }}.
-
-terminate(_Reason, _State) ->
- ets:foldl(
- fun(#proc_int{pid = P}, _) ->
- couch_util:shutdown_sync(P)
- end,
- 0,
- ?PROCS
- ),
- ok.
-
-handle_call(get_proc_count, _From, State) ->
- NumProcs = ets:info(?PROCS, size),
- NumOpening = ets:info(?OPENING, size),
- {reply, NumProcs + NumOpening, State};
-handle_call(get_stale_proc_count, _From, State) ->
- #state{threshold_ts = T0} = State,
- MatchSpec = [{#proc_int{t0 = '$1', _ = '_'}, [{'<', '$1', {T0}}], [true]}],
- {reply, ets:select_count(?PROCS, MatchSpec), State};
-handle_call({get_proc, #doc{body = {Props}} = DDoc, DDocKey}, From, State) ->
- LangStr = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
- Lang = couch_util:to_binary(LangStr),
- Client = #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey},
- add_waiting_client(Client),
- {noreply, flush_waiters(State, Lang)};
-handle_call({get_proc, LangStr}, From, State) ->
- Lang = couch_util:to_binary(LangStr),
- Client = #client{from = From, lang = Lang},
- add_waiting_client(Client),
- {noreply, flush_waiters(State, Lang)};
-handle_call({ret_proc, #proc{client = Ref} = Proc}, _From, State) ->
- erlang:demonitor(Ref, [flush]),
- NewState =
- case ets:lookup(?PROCS, Proc#proc.pid) of
- [#proc_int{} = ProcInt] ->
- return_proc(State, ProcInt);
- [] ->
- % Proc must've died and we already
- % cleared it out of the table in
- % the handle_info clause.
- State
- end,
- {reply, true, NewState};
-handle_call(set_threshold_ts, _From, State) ->
- FoldFun = fun
- (#proc_int{client = undefined} = Proc, StateAcc) ->
- remove_proc(StateAcc, Proc);
- (_, StateAcc) ->
- StateAcc
- end,
- NewState = ets:foldl(FoldFun, State, ?PROCS),
- {reply, ok, NewState#state{threshold_ts = os:timestamp()}};
-handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) ->
- FoldFun = fun
- (#proc_int{client = undefined, t0 = Ts2} = Proc, StateAcc) ->
- case Ts1 > Ts2 of
- true ->
- remove_proc(StateAcc, Proc);
- false ->
- StateAcc
- end;
- (_, StateAcc) ->
- StateAcc
- end,
- NewState = ets:foldl(FoldFun, State, ?PROCS),
- {reply, ok, NewState};
-handle_call(_Call, _From, State) ->
- {reply, ignored, State}.
-
-handle_cast({os_proc_idle, Pid}, #state{counts = Counts} = State) ->
- NewState =
- case ets:lookup(?PROCS, Pid) of
- [#proc_int{client = undefined, lang = Lang} = Proc] ->
- case dict:find(Lang, Counts) of
- {ok, Count} when Count >= State#state.soft_limit ->
- couch_log:info("Closing idle OS Process: ~p", [Pid]),
- remove_proc(State, Proc);
- {ok, _} ->
- State
- end;
- _ ->
- State
- end,
- {noreply, NewState};
-handle_cast(reload_config, State) ->
- NewState = State#state{
- config = get_proc_config(),
- hard_limit = get_hard_limit(),
- soft_limit = get_soft_limit()
- },
- maybe_configure_erlang_native_servers(),
- {noreply, flush_waiters(NewState)};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(shutdown, State) ->
- {stop, shutdown, State};
-handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid, _} = From}}, State) ->
- ets:delete(?OPENING, Pid),
- link(Proc0#proc_int.pid),
- Proc = assign_proc(ClientPid, Proc0),
- gen_server:reply(From, {ok, Proc, State#state.config}),
- {noreply, State};
-handle_info({'EXIT', Pid, spawn_error}, State) ->
- [{Pid, #client{lang = Lang}}] = ets:lookup(?OPENING, Pid),
- ets:delete(?OPENING, Pid),
- NewState = State#state{
- counts = dict:update_counter(Lang, -1, State#state.counts)
- },
- {noreply, flush_waiters(NewState, Lang)};
-handle_info({'EXIT', Pid, Reason}, State) ->
- couch_log:info("~p ~p died ~p", [?MODULE, Pid, Reason]),
- case ets:lookup(?PROCS, Pid) of
- [#proc_int{} = Proc] ->
- NewState = remove_proc(State, Proc),
- {noreply, flush_waiters(NewState, Proc#proc_int.lang)};
- [] ->
- {noreply, State}
- end;
-handle_info({'DOWN', Ref, _, _, _Reason}, State0) ->
- case ets:match_object(?PROCS, #proc_int{client = Ref, _ = '_'}) of
- [#proc_int{} = Proc] ->
- {noreply, return_proc(State0, Proc)};
- [] ->
- {noreply, State0}
- end;
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-code_change(_OldVsn, #state{} = State, _Extra) ->
- {ok, State}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- gen_server:cast(?MODULE, reload_config),
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-handle_config_change("native_query_servers", _, _, _, _) ->
- gen_server:cast(?MODULE, reload_config),
- {ok, undefined};
-handle_config_change("query_server_config", _, _, _, _) ->
- gen_server:cast(?MODULE, reload_config),
- {ok, undefined};
-handle_config_change(_, _, _, _, _) ->
- {ok, undefined}.
-
-find_proc(#client{lang = Lang, ddoc_key = undefined}) ->
- Pred = fun(_) ->
- true
- end,
- find_proc(Lang, Pred);
-find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
- Pred = fun(#proc_int{ddoc_keys = DDocKeys}) ->
- lists:member(DDocKey, DDocKeys)
- end,
- case find_proc(Lang, Pred) of
- not_found ->
- case find_proc(Client#client{ddoc_key = undefined}) of
- {ok, Proc} ->
- teach_ddoc(DDoc, DDocKey, Proc);
- Else ->
- Else
- end;
- Else ->
- Else
- end.
-
-find_proc(Lang, Fun) ->
- try iter_procs(Lang, Fun)
- catch ?STACKTRACE(error, Reason, StackTrace)
- couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
- {error, Reason}
- end.
-
-iter_procs(Lang, Fun) when is_binary(Lang) ->
- Pattern = #proc_int{lang = Lang, client = undefined, _ = '_'},
- MSpec = [{Pattern, [], ['$_']}],
- case ets:select_reverse(?PROCS, MSpec, 25) of
- '$end_of_table' ->
- not_found;
- Continuation ->
- iter_procs_int(Continuation, Fun)
- end.
-
-iter_procs_int({[], Continuation0}, Fun) ->
- case ets:select_reverse(Continuation0) of
- '$end_of_table' ->
- not_found;
- Continuation1 ->
- iter_procs_int(Continuation1, Fun)
- end;
-iter_procs_int({[Proc | Rest], Continuation}, Fun) ->
- case Fun(Proc) of
- true ->
- {ok, Proc};
- false ->
- iter_procs_int({Rest, Continuation}, Fun)
- end.
-
-spawn_proc(State, Client) ->
- Pid = spawn_link(?MODULE, new_proc, [Client]),
- ets:insert(?OPENING, {Pid, Client}),
- Counts = State#state.counts,
- Lang = Client#client.lang,
- State#state{
- counts = dict:update_counter(Lang, 1, Counts)
- }.
-
-new_proc(#client{ddoc = undefined, ddoc_key = undefined} = Client) ->
- #client{from = From, lang = Lang} = Client,
- Resp =
- try
- case new_proc_int(From, Lang) of
- {ok, Proc} ->
- {spawn_ok, Proc, From};
- Error ->
- gen_server:reply(From, {error, Error}),
- spawn_error
- end
- catch
- _:_ ->
- spawn_error
- end,
- exit(Resp);
-new_proc(Client) ->
- #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client,
- Resp =
- try
- case new_proc_int(From, Lang) of
- {ok, NewProc} ->
- {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc),
- {spawn_ok, Proc, From};
- Error ->
- gen_server:reply(From, {error, Error}),
- spawn_error
- end
- catch
- _:_ ->
- spawn_error
- end,
- exit(Resp).
-
-split_string_if_longer(String, Pos) ->
- case length(String) > Pos of
- true -> lists:split(Pos, String);
- false -> false
- end.
-
-split_by_char(String, Char) ->
- %% Erlang/OTP 17.5 doesn't have string:split.
- %% This function doesn't handle errors;
- %% it is designed to be used only in a specific context.
- Pos = string:chr(String, Char),
- {Key, [_Eq | Value]} = lists:split(Pos - 1, String),
- {Key, Value}.
-
-get_servers_from_env(Spec) ->
- SpecLen = length(Spec),
- % loop over os:getenv(), match SPEC_
- lists:filtermap(
- fun(EnvStr) ->
- case split_string_if_longer(EnvStr, SpecLen) of
- {Spec, Rest} ->
- {true, split_by_char(Rest, $=)};
- _ ->
- false
- end
- end,
- os:getenv()
- ).
-
-get_query_server(LangStr) ->
- case ets:lookup(?SERVERS, string:to_upper(LangStr)) of
- [{_, Command}] -> Command;
- _ -> undefined
- end.
-
-native_query_server_enabled() ->
- % 1. [native_query_servers] enable_erlang_query_server = true | false
- % 2. if [native_query_servers] erlang == {couch_native_process, start_link, []} -> pretend true as well
- NativeEnabled = config:get_boolean("native_query_servers", "enable_erlang_query_server", false),
- NativeLegacyConfig = config:get("native_query_servers", "erlang", ""),
- NativeLegacyEnabled = NativeLegacyConfig =:= "{couch_native_process, start_link, []}",
- NativeEnabled orelse NativeLegacyEnabled.
-
-maybe_configure_erlang_native_servers() ->
- case native_query_server_enabled() of
- true ->
- ets:insert(?SERVERS, [
- {"ERLANG", {couch_native_process, start_link, []}}
- ]);
- _Else ->
- ok
- end.
-
-new_proc_int(From, Lang) when is_binary(Lang) ->
- LangStr = binary_to_list(Lang),
- case get_query_server(LangStr) of
- undefined ->
- gen_server:reply(From, {unknown_query_language, Lang});
- {M, F, A} ->
- {ok, Pid} = apply(M, F, A),
- make_proc(Pid, Lang, M);
- Command ->
- {ok, Pid} = couch_os_process:start_link(Command),
- make_proc(Pid, Lang, couch_os_process)
- end.
-
-teach_ddoc(DDoc, {DDocId, _Rev} = DDocKey, #proc_int{ddoc_keys = Keys} = Proc) ->
- % send ddoc over the wire
- % we only share the rev with the client so it knows when to update the code,
- % but it only keeps the latest copy of each ddoc around.
- true = couch_query_servers:proc_prompt(
- export_proc(Proc),
- [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]
- ),
- % we should remove any other ddoc keys for this docid
- % because the query server overwrites without the rev
- Keys2 = [{D, R} || {D, R} <- Keys, D /= DDocId],
- % add ddoc to the proc
- {ok, Proc#proc_int{ddoc_keys = [DDocKey | Keys2]}}.
-
-make_proc(Pid, Lang, Mod) when is_binary(Lang) ->
- Proc = #proc_int{
- lang = Lang,
- pid = Pid,
- prompt_fun = {Mod, prompt},
- set_timeout_fun = {Mod, set_timeout},
- stop_fun = {Mod, stop}
- },
- unlink(Pid),
- {ok, Proc}.
-
-assign_proc(Pid, #proc_int{client = undefined} = Proc0) when is_pid(Pid) ->
- Proc = Proc0#proc_int{client = erlang:monitor(process, Pid)},
- ets:insert(?PROCS, Proc),
- export_proc(Proc);
-assign_proc(#client{} = Client, #proc_int{client = undefined} = Proc) ->
- {Pid, _} = Client#client.from,
- assign_proc(Pid, Proc).
-
-return_proc(#state{} = State, #proc_int{} = ProcInt) ->
- #proc_int{pid = Pid, lang = Lang} = ProcInt,
- NewState =
- case is_process_alive(Pid) of
- true ->
- case ProcInt#proc_int.t0 < State#state.threshold_ts of
- true ->
- remove_proc(State, ProcInt);
- false ->
- gen_server:cast(Pid, garbage_collect),
- true = ets:update_element(?PROCS, Pid, [
- {#proc_int.client, undefined}
- ]),
- State
- end;
- false ->
- remove_proc(State, ProcInt)
- end,
- flush_waiters(NewState, Lang).
-
-remove_proc(State, #proc_int{} = Proc) ->
- ets:delete(?PROCS, Proc#proc_int.pid),
- case is_process_alive(Proc#proc_int.pid) of
- true ->
- unlink(Proc#proc_int.pid),
- gen_server:cast(Proc#proc_int.pid, stop);
- false ->
- ok
- end,
- Counts = State#state.counts,
- Lang = Proc#proc_int.lang,
- State#state{
- counts = dict:update_counter(Lang, -1, Counts)
- }.
-
--spec export_proc(#proc_int{}) -> #proc{}.
-export_proc(#proc_int{} = ProcInt) ->
- ProcIntList = tuple_to_list(ProcInt),
- ProcLen = record_info(size, proc),
- [_ | Data] = lists:sublist(ProcIntList, ProcLen),
- list_to_tuple([proc | Data]).
-
-flush_waiters(State) ->
- dict:fold(
- fun(Lang, Count, StateAcc) ->
- case Count < State#state.hard_limit of
- true ->
- flush_waiters(StateAcc, Lang);
- false ->
- StateAcc
- end
- end,
- State,
- State#state.counts
- ).
-
-flush_waiters(State, Lang) ->
- CanSpawn = can_spawn(State, Lang),
- case get_waiting_client(Lang) of
- #client{from = From} = Client ->
- case find_proc(Client) of
- {ok, ProcInt} ->
- Proc = assign_proc(Client, ProcInt),
- gen_server:reply(From, {ok, Proc, State#state.config}),
- remove_waiting_client(Client),
- flush_waiters(State, Lang);
- {error, Error} ->
- gen_server:reply(From, {error, Error}),
- remove_waiting_client(Client),
- flush_waiters(State, Lang);
- not_found when CanSpawn ->
- NewState = spawn_proc(State, Client),
- remove_waiting_client(Client),
- flush_waiters(NewState, Lang);
- not_found ->
- State
- end;
- undefined ->
- State
- end.
-
-add_waiting_client(Client) ->
- ets:insert(?WAITERS, Client#client{timestamp = os:timestamp()}).
-
--spec get_waiting_client(Lang :: binary()) -> undefined | #client{}.
-get_waiting_client(Lang) ->
- case ets:match_object(?WAITERS, #client{lang = Lang, _ = '_'}, 1) of
- '$end_of_table' ->
- undefined;
- {[#client{} = Client], _} ->
- Client
- end.
-
-remove_waiting_client(#client{timestamp = Timestamp}) ->
- ets:delete(?WAITERS, Timestamp).
-
-can_spawn(#state{hard_limit = HardLimit, counts = Counts}, Lang) ->
- case dict:find(Lang, Counts) of
- {ok, Count} -> Count < HardLimit;
- error -> true
- end.
-
-get_proc_config() ->
- Limit = config:get_boolean("query_server_config", "reduce_limit", true),
- Timeout = config:get_integer("couchdb", "os_process_timeout", 5000),
- {[
- {<<"reduce_limit">>, Limit},
- {<<"timeout">>, Timeout}
- ]}.
-
-get_hard_limit() ->
- LimStr = config:get("query_server_config", "os_process_limit", "100"),
- list_to_integer(LimStr).
-
-get_soft_limit() ->
- config:get_integer("query_server_config", "os_process_soft_limit", 100).
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
deleted file mode 100644
index 5dd7c4a4b..000000000
--- a/src/couch/src/couch_query_servers.erl
+++ /dev/null
@@ -1,934 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_query_servers).
-
--export([try_compile/4]).
--export([start_doc_map/3, map_doc_raw/2, stop_doc_map/1, raw_to_ejson/1]).
--export([reduce/3, rereduce/3, validate_doc_update/5]).
--export([filter_docs/5]).
--export([filter_view/3]).
--export([finalize/2]).
--export([rewrite/3]).
-
--export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
-
-% For 210-os-proc-pool.t
--export([get_os_process/1, get_ddoc_process/2, ret_os_process/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SUMERROR, <<
- "The _sum function requires that map values be numbers, "
- "arrays of numbers, or objects. Objects cannot be mixed with other "
- "data structures. Objects can be arbitrarily nested, provided that the values "
- "for all fields are themselves numbers, arrays of numbers, or objects."
->>).
-
--define(STATERROR, <<
- "The _stats function requires that map values be numbers "
- "or arrays of numbers, not '~p'"
->>).
-
-try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
- try
- proc_prompt(Proc, [<<"add_fun">>, FunctionSource]),
- ok
- catch
- {compilation_error, E} ->
- Fmt = "Compilation of the ~s function in the '~s' view failed: ~s",
- Msg = io_lib:format(Fmt, [FunctionType, FunctionName, E]),
- throw({compilation_error, Msg});
- {os_process_error, {exit_status, ExitStatus}} ->
- Fmt = "Compilation of the ~s function in the '~s' view failed with exit status: ~p",
- Msg = io_lib:format(Fmt, [FunctionType, FunctionName, ExitStatus]),
- throw({compilation_error, Msg})
- end.
-
-start_doc_map(Lang, Functions, Lib) ->
- Proc = get_os_process(Lang),
- case Lib of
- {[]} -> ok;
- Lib -> true = proc_prompt(Proc, [<<"add_lib">>, Lib])
- end,
- lists:foreach(
- fun(FunctionSource) ->
- true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
- end,
- Functions
- ),
- {ok, Proc}.
-
-map_doc_raw(Proc, Doc) ->
- Json = couch_doc:to_json_obj(Doc, []),
- {ok, proc_prompt_raw(Proc, [<<"map_doc">>, Json])}.
-
-stop_doc_map(nil) ->
- ok;
-stop_doc_map(Proc) ->
- ok = ret_os_process(Proc).
-
-group_reductions_results([]) ->
- [];
-group_reductions_results(List) ->
- {Heads, Tails} = lists:foldl(
- fun([H | T], {HAcc, TAcc}) ->
- {[H | HAcc], [T | TAcc]}
- end,
- {[], []},
- List
- ),
- case Tails of
- % no tails left
- [[] | _] ->
- [Heads];
- _ ->
- [Heads | group_reductions_results(Tails)]
- end.
-
-finalize(<<"_approx_count_distinct", _/binary>>, Reduction) ->
- true = hyper:is_hyper(Reduction),
- {ok, round(hyper:card(Reduction))};
-finalize(<<"_stats", _/binary>>, Unpacked) ->
- {ok, pack_stats(Unpacked)};
-finalize(_RedSrc, Reduction) ->
- {ok, Reduction}.
-
-rereduce(_Lang, [], _ReducedValues) ->
- {ok, []};
-rereduce(Lang, RedSrcs, ReducedValues) ->
- Grouped = group_reductions_results(ReducedValues),
- Results = lists:zipwith(
- fun
- (<<"_", _/binary>> = FunSrc, Values) ->
- {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
- Result;
- (FunSrc, Values) ->
- os_rereduce(Lang, [FunSrc], Values)
- end,
- RedSrcs,
- Grouped
- ),
- {ok, Results}.
-
-reduce(_Lang, [], _KVs) ->
- {ok, []};
-reduce(Lang, RedSrcs, KVs) ->
- {OsRedSrcs, BuiltinReds} = lists:partition(
- fun
- (<<"_", _/binary>>) -> false;
- (_OsFun) -> true
- end,
- RedSrcs
- ),
- {ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
- {ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
-
-recombine_reduce_results([], [], [], Acc) ->
- {ok, lists:reverse(Acc)};
-recombine_reduce_results([<<"_", _/binary>> | RedSrcs], OsResults, [BRes | BuiltinResults], Acc) ->
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes | Acc]);
-recombine_reduce_results([_OsFun | RedSrcs], [OsR | OsResults], BuiltinResults, Acc) ->
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR | Acc]).
-
-os_reduce(_Lang, [], _KVs) ->
- {ok, []};
-os_reduce(Lang, OsRedSrcs, KVs) ->
- Proc = get_os_process(Lang),
- OsResults =
- try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
- [true, Reductions] -> Reductions
- catch
- throw:{reduce_overflow_error, Msg} ->
- [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
- after
- ok = ret_os_process(Proc)
- end,
- {ok, OsResults}.
-
-os_rereduce(Lang, OsRedSrcs, KVs) ->
- case get_overflow_error(KVs) of
- undefined ->
- Proc = get_os_process(Lang),
- try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
- [true, [Reduction]] -> Reduction
- catch
- throw:{reduce_overflow_error, Msg} ->
- {[{reduce_overflow_error, Msg}]}
- after
- ok = ret_os_process(Proc)
- end;
- Error ->
- Error
- end.
-
-get_overflow_error([]) ->
- undefined;
-get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
- Error;
-get_overflow_error([_ | Rest]) ->
- get_overflow_error(Rest).
-
-builtin_reduce(_Re, [], _KVs, Acc) ->
- {ok, lists:reverse(Acc)};
-builtin_reduce(Re, [<<"_sum", _/binary>> | BuiltinReds], KVs, Acc) ->
- Sum = builtin_sum_rows(KVs, 0),
- Red = check_sum_overflow(?term_size(KVs), ?term_size(Sum), Sum),
- builtin_reduce(Re, BuiltinReds, KVs, [Red | Acc]);
-builtin_reduce(reduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) ->
- Count = length(KVs),
- builtin_reduce(reduce, BuiltinReds, KVs, [Count | Acc]);
-builtin_reduce(rereduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) ->
- Count = builtin_sum_rows(KVs, 0),
- builtin_reduce(rereduce, BuiltinReds, KVs, [Count | Acc]);
-builtin_reduce(Re, [<<"_stats", _/binary>> | BuiltinReds], KVs, Acc) ->
- Stats = builtin_stats(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Stats | Acc]);
-builtin_reduce(Re, [<<"_approx_count_distinct", _/binary>> | BuiltinReds], KVs, Acc) ->
- Distinct = approx_count_distinct(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Distinct | Acc]).
-
-builtin_sum_rows([], Acc) ->
- Acc;
-builtin_sum_rows([[_Key, Value] | RestKVs], Acc) ->
- try sum_values(Value, Acc) of
- NewAcc ->
- builtin_sum_rows(RestKVs, NewAcc)
- catch
- throw:{builtin_reduce_error, Obj} ->
- Obj;
- throw:{invalid_value, Reason, Cause} ->
- {[
- {<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, Reason},
- {<<"caused_by">>, Cause}
- ]}
- end.
-
-sum_values(Value, Acc) when is_number(Value), is_number(Acc) ->
- Acc + Value;
-sum_values(Value, Acc) when is_list(Value), is_list(Acc) ->
- sum_arrays(Acc, Value);
-sum_values(Value, Acc) when is_number(Value), is_list(Acc) ->
- sum_arrays(Acc, [Value]);
-sum_values(Value, Acc) when is_list(Value), is_number(Acc) ->
- sum_arrays([Acc], Value);
-sum_values({Props}, Acc) ->
- case lists:keyfind(<<"error">>, 1, Props) of
- {<<"error">>, <<"builtin_reduce_error">>} ->
- throw({builtin_reduce_error, {Props}});
- false ->
- ok
- end,
- case Acc of
- 0 ->
- {Props};
- {AccProps} ->
- {sum_objects(lists:sort(Props), lists:sort(AccProps))}
- end;
-sum_values(Else, _Acc) ->
- throw_sum_error(Else).
-
-sum_objects([{K1, V1} | Rest1], [{K1, V2} | Rest2]) ->
- [{K1, sum_values(V1, V2)} | sum_objects(Rest1, Rest2)];
-sum_objects([{K1, V1} | Rest1], [{K2, V2} | Rest2]) when K1 < K2 ->
- [{K1, V1} | sum_objects(Rest1, [{K2, V2} | Rest2])];
-sum_objects([{K1, V1} | Rest1], [{K2, V2} | Rest2]) when K1 > K2 ->
- [{K2, V2} | sum_objects([{K1, V1} | Rest1], Rest2)];
-sum_objects([], Rest) ->
- Rest;
-sum_objects(Rest, []) ->
- Rest.
-
-sum_arrays([], []) ->
- [];
-sum_arrays([_ | _] = Xs, []) ->
- Xs;
-sum_arrays([], [_ | _] = Ys) ->
- Ys;
-sum_arrays([X | Xs], [Y | Ys]) when is_number(X), is_number(Y) ->
- [X + Y | sum_arrays(Xs, Ys)];
-sum_arrays(Else, _) ->
- throw_sum_error(Else).
-
-check_sum_overflow(InSize, OutSize, Sum) ->
- Overflowed = OutSize > 4906 andalso OutSize * 2 > InSize,
- case config:get("query_server_config", "reduce_limit", "true") of
- "true" when Overflowed ->
- Msg = log_sum_overflow(InSize, OutSize),
- {[
- {<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, Msg}
- ]};
- "log" when Overflowed ->
- log_sum_overflow(InSize, OutSize),
- Sum;
- _ ->
- Sum
- end.
-
-log_sum_overflow(InSize, OutSize) ->
- Fmt =
- "Reduce output must shrink more rapidly: "
- "input size: ~b "
- "output size: ~b",
- Msg = iolist_to_binary(io_lib:format(Fmt, [InSize, OutSize])),
- couch_log:error(Msg, []),
- Msg.
-
-builtin_stats(_, []) ->
- {0, 0, 0, 0, 0};
-builtin_stats(_, [[_, First] | Rest]) ->
- lists:foldl(
- fun([_Key, Value], Acc) ->
- stat_values(Value, Acc)
- end,
- build_initial_accumulator(First),
- Rest
- ).
-
-stat_values(Value, Acc) when is_list(Value), is_list(Acc) ->
- lists:zipwith(fun stat_values/2, Value, Acc);
-stat_values({PreRed}, Acc) when is_list(PreRed) ->
- stat_values(unpack_stats({PreRed}), Acc);
-stat_values(Value, Acc) when is_number(Value) ->
- stat_values({Value, 1, Value, Value, Value * Value}, Acc);
-stat_values(Value, Acc) when is_number(Acc) ->
- stat_values(Value, {Acc, 1, Acc, Acc, Acc * Acc});
-stat_values(Value, Acc) when is_tuple(Value), is_tuple(Acc) ->
- {Sum0, Cnt0, Min0, Max0, Sqr0} = Value,
- {Sum1, Cnt1, Min1, Max1, Sqr1} = Acc,
- {
- Sum0 + Sum1,
- Cnt0 + Cnt1,
- erlang:min(Min0, Min1),
- erlang:max(Max0, Max1),
- Sqr0 + Sqr1
- };
-stat_values(Else, _Acc) ->
- throw_stat_error(Else).
-
-build_initial_accumulator(L) when is_list(L) ->
- [build_initial_accumulator(X) || X <- L];
-build_initial_accumulator(X) when is_number(X) ->
- {X, 1, X, X, X * X};
-build_initial_accumulator({_, _, _, _, _} = AlreadyUnpacked) ->
- AlreadyUnpacked;
-build_initial_accumulator({Props}) ->
- unpack_stats({Props});
-build_initial_accumulator(Else) ->
- Msg = io_lib:format("non-numeric _stats input: ~w", [Else]),
- throw({invalid_value, iolist_to_binary(Msg)}).
-
-unpack_stats({PreRed}) when is_list(PreRed) ->
- {
- get_number(<<"sum">>, PreRed),
- get_number(<<"count">>, PreRed),
- get_number(<<"min">>, PreRed),
- get_number(<<"max">>, PreRed),
- get_number(<<"sumsqr">>, PreRed)
- }.
-
-pack_stats({Sum, Cnt, Min, Max, Sqr}) ->
- {[
- {<<"sum">>, Sum},
- {<<"count">>, Cnt},
- {<<"min">>, Min},
- {<<"max">>, Max},
- {<<"sumsqr">>, Sqr}
- ]};
-pack_stats({Packed}) ->
- % Legacy code path before we had the finalize operation
- {Packed};
-pack_stats(Stats) when is_list(Stats) ->
- lists:map(fun pack_stats/1, Stats).
-
-get_number(Key, Props) ->
- case couch_util:get_value(Key, Props) of
- X when is_number(X) ->
- X;
- undefined when is_binary(Key) ->
- get_number(binary_to_atom(Key, latin1), Props);
- undefined ->
- Msg = io_lib:format(
- "user _stats input missing required field ~s (~p)",
- [Key, Props]
- ),
- throw({invalid_value, iolist_to_binary(Msg)});
- Else ->
- Msg = io_lib:format(
- "non-numeric _stats input received for ~s: ~w",
- [Key, Else]
- ),
- throw({invalid_value, iolist_to_binary(Msg)})
- end.
-
-% TODO allow customization of precision in the ddoc.
-approx_count_distinct(reduce, KVs) ->
- lists:foldl(
- fun([[Key, _Id], _Value], Filter) ->
- hyper:insert(term_to_binary(Key), Filter)
- end,
- hyper:new(11),
- KVs
- );
-approx_count_distinct(rereduce, Reds) ->
- hyper:union([Filter || [_, Filter] <- Reds]).
-
-% use the function stored in ddoc.validate_doc_update to test an update.
--spec validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> ok when
- DDoc :: ddoc(),
- EditDoc :: doc(),
- DiskDoc :: doc() | nil,
- Ctx :: user_ctx(),
- SecObj :: sec_obj().
-
-validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
- JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
- JsonDiskDoc = json_doc(DiskDoc),
- Resp = ddoc_prompt(
- DDoc,
- [<<"validate_doc_update">>],
- [JsonEditDoc, JsonDiskDoc, Ctx, SecObj]
- ),
- if
- Resp == 1 -> ok;
- true -> couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
- end,
- case Resp of
- RespCode when RespCode =:= 1; RespCode =:= ok; RespCode =:= true ->
- ok;
- {[{<<"forbidden">>, Message}]} ->
- throw({forbidden, Message});
- {[{<<"unauthorized">>, Message}]} ->
- throw({unauthorized, Message});
- {[{_, Message}]} ->
- throw({unknown_error, Message});
- Message when is_binary(Message) ->
- throw({unknown_error, Message})
- end.
-
-rewrite(Req, Db, DDoc) ->
- Fields = [
- F
- || F <- chttpd_external:json_req_obj_fields(),
- F =/= <<"info">>,
- F =/= <<"form">>,
- F =/= <<"uuid">>,
- F =/= <<"id">>
- ],
- JsonReq = chttpd_external:json_req_obj(Req, Db, null, Fields),
- case couch_query_servers:ddoc_prompt(DDoc, [<<"rewrites">>], [JsonReq]) of
- {[{<<"forbidden">>, Message}]} ->
- throw({forbidden, Message});
- {[{<<"unauthorized">>, Message}]} ->
- throw({unauthorized, Message});
- [<<"no_dispatch_rule">>] ->
- undefined;
- [<<"ok">>, {V} = Rewrite] when is_list(V) ->
- ok = validate_rewrite_response(Rewrite),
- Rewrite;
- [<<"ok">>, _] ->
- throw_rewrite_error(<<"bad rewrite">>);
- V ->
- couch_log:error("bad rewrite return ~p", [V]),
- throw({unknown_error, V})
- end.
-
-validate_rewrite_response({Fields}) when is_list(Fields) ->
- validate_rewrite_response_fields(Fields).
-
-validate_rewrite_response_fields([{Key, Value} | Rest]) ->
- validate_rewrite_response_field(Key, Value),
- validate_rewrite_response_fields(Rest);
-validate_rewrite_response_fields([]) ->
- ok.
-
-validate_rewrite_response_field(<<"method">>, Method) when is_binary(Method) ->
- ok;
-validate_rewrite_response_field(<<"method">>, _) ->
- throw_rewrite_error(<<"bad method">>);
-validate_rewrite_response_field(<<"path">>, Path) when is_binary(Path) ->
- ok;
-validate_rewrite_response_field(<<"path">>, _) ->
- throw_rewrite_error(<<"bad path">>);
-validate_rewrite_response_field(<<"body">>, Body) when is_binary(Body) ->
- ok;
-validate_rewrite_response_field(<<"body">>, _) ->
- throw_rewrite_error(<<"bad body">>);
-validate_rewrite_response_field(<<"headers">>, {Props} = Headers) when is_list(Props) ->
- validate_object_fields(Headers);
-validate_rewrite_response_field(<<"headers">>, _) ->
- throw_rewrite_error(<<"bad headers">>);
-validate_rewrite_response_field(<<"query">>, {Props} = Query) when is_list(Props) ->
- validate_object_fields(Query);
-validate_rewrite_response_field(<<"query">>, _) ->
- throw_rewrite_error(<<"bad query">>);
-validate_rewrite_response_field(<<"code">>, Code) when
- is_integer(Code) andalso Code >= 200 andalso Code < 600
-->
- ok;
-validate_rewrite_response_field(<<"code">>, _) ->
- throw_rewrite_error(<<"bad code">>);
-validate_rewrite_response_field(K, V) ->
- couch_log:debug("unknown rewrite field ~p=~p", [K, V]),
- ok.
-
-validate_object_fields({Props}) when is_list(Props) ->
- lists:foreach(
- fun
- ({Key, Value}) when is_binary(Key) andalso is_binary(Value) ->
- ok;
- ({Key, Value}) ->
- Reason = io_lib:format(
- "object key/value must be strings ~p=~p", [Key, Value]
- ),
- throw_rewrite_error(Reason);
- (Value) ->
- throw_rewrite_error(io_lib:format("bad value ~p", [Value]))
- end,
- Props
- ).
-
-throw_rewrite_error(Reason) when is_list(Reason) ->
- throw_rewrite_error(iolist_to_binary(Reason));
-throw_rewrite_error(Reason) when is_binary(Reason) ->
- throw({rewrite_error, Reason}).
-
-json_doc_options() ->
- json_doc_options([]).
-
-json_doc_options(Options) ->
- Limit = config:get_integer("query_server_config", "revs_limit", 20),
- [{revs, Limit} | Options].
-
-json_doc(Doc) ->
- json_doc(Doc, json_doc_options()).
-
-json_doc(nil, _) ->
- null;
-json_doc(Doc, Options) ->
- couch_doc:to_json_obj(Doc, Options).
-
-filter_view(DDoc, VName, Docs) ->
- Options = json_doc_options(),
- JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
- [true, Passes] = ddoc_prompt(DDoc, [<<"views">>, VName, <<"map">>], [JsonDocs]),
- {ok, Passes}.
-
-filter_docs(Req, Db, DDoc, FName, Docs) ->
- JsonReq =
- case Req of
- {json_req, JsonObj} ->
- JsonObj;
- #httpd{} = HttpReq ->
- chttpd_external:json_req_obj(HttpReq, Db)
- end,
- Options = json_doc_options(),
- JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
- try
- {ok, filter_docs_int(DDoc, FName, JsonReq, JsonDocs)}
- catch
- throw:{os_process_error, {exit_status, 1}} ->
- %% batch used too much memory, retry sequentially.
- Fun = fun(JsonDoc) ->
- filter_docs_int(DDoc, FName, JsonReq, [JsonDoc])
- end,
- {ok, lists:flatmap(Fun, JsonDocs)}
- end.
-
-filter_docs_int(DDoc, FName, JsonReq, JsonDocs) ->
- [true, Passes] = ddoc_prompt(
- DDoc,
- [<<"filters">>, FName],
- [JsonDocs, JsonReq]
- ),
- Passes.
-
-ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
- proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
-
-ddoc_prompt(DDoc, FunPath, Args) ->
- with_ddoc_proc(DDoc, fun({Proc, DDocId}) ->
- proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
- end).
-
-with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
- Rev = couch_doc:rev_to_str({Start, DiskRev}),
- DDocKey = {DDocId, Rev},
- Proc = get_ddoc_process(DDoc, DDocKey),
- try Fun({Proc, DDocId}) of
- Resp ->
- ok = ret_os_process(Proc),
- Resp
- catch ?STACKTRACE(Tag, Err, Stack)
- catch proc_stop(Proc),
- erlang:raise(Tag, Err, Stack)
- end.
-
-proc_prompt(Proc, Args) ->
- case proc_prompt_raw(Proc, Args) of
- {json, Json} ->
- raw_to_ejson({json, Json});
- EJson ->
- EJson
- end.
-
-proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
- apply(Mod, Func, [Proc#proc.pid, Args]).
-
-raw_to_ejson({json, Json}) ->
- try
- ?JSON_DECODE(Json)
- catch
- throw:{invalid_json, {_, invalid_string}} ->
- Forced =
- try
- force_utf8(Json)
- catch
- _:_ ->
- Json
- end,
- ?JSON_DECODE(Forced)
- end;
-raw_to_ejson(EJson) ->
- EJson.
-
-force_utf8(Bin) ->
- case binary:match(Bin, <<"\\u">>) of
- {Start, 2} ->
- <<Prefix:Start/binary, Rest1/binary>> = Bin,
- {Insert, Rest3} =
- case check_uescape(Rest1) of
- {ok, Skip} ->
- <<Skipped:Skip/binary, Rest2/binary>> = Rest1,
- {Skipped, Rest2};
- {error, Skip} ->
- <<_:Skip/binary, Rest2/binary>> = Rest1,
- {<<16#EF, 16#BF, 16#BD>>, Rest2}
- end,
- RestForced = force_utf8(Rest3),
- <<Prefix/binary, Insert/binary, RestForced/binary>>;
- nomatch ->
- Bin
- end.
-
-check_uescape(Data) ->
- case extract_uescape(Data) of
- {Hi, Rest} when Hi >= 16#D800, Hi < 16#DC00 ->
- case extract_uescape(Rest) of
- {Lo, _} when Lo >= 16#DC00, Lo =< 16#DFFF ->
- % A high surrogate followed by a low surrogate (a valid pair)
- UTF16 = <<
- Hi:16/big-unsigned-integer,
- Lo:16/big-unsigned-integer
- >>,
- try
- [_] = xmerl_ucs:from_utf16be(UTF16),
- {ok, 12}
- catch
- _:_ ->
- {error, 6}
- end;
- {_, _} ->
- % Found a uescape that's not a low half
- {error, 6};
- false ->
- % No hex escape found
- {error, 6}
- end;
- {Hi, _} when Hi >= 16#DC00, Hi =< 16#DFFF ->
- % Found a low surrogate half without a high half
- {error, 6};
- {_, _} ->
- % Found a uescape we don't care about
- {ok, 6};
- false ->
- % Incomplete uescape which we don't care about
- {ok, 2}
- end.
-
-extract_uescape(<<"\\u", Code:4/binary, Rest/binary>>) ->
- {binary_to_integer(Code, 16), Rest};
-extract_uescape(_) ->
- false.
-
-proc_stop(Proc) ->
- {Mod, Func} = Proc#proc.stop_fun,
- apply(Mod, Func, [Proc#proc.pid]).
-
-proc_set_timeout(Proc, Timeout) ->
- {Mod, Func} = Proc#proc.set_timeout_fun,
- apply(Mod, Func, [Proc#proc.pid, Timeout]).
-
-get_os_process_timeout() ->
- config:get_integer("couchdb", "os_process_timeout", 5000).
-
-get_ddoc_process(#doc{} = DDoc, DDocKey) ->
- % TODO: remove this case statement
- case gen_server:call(couch_proc_manager, {get_proc, DDoc, DDocKey}, get_os_process_timeout()) of
- {ok, Proc, {QueryConfig}} ->
- % process knows the ddoc
- case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
- true ->
- proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
- Proc;
- _ ->
- catch proc_stop(Proc),
- get_ddoc_process(DDoc, DDocKey)
- end;
- Error ->
- throw(Error)
- end.
-
-get_os_process(Lang) ->
- case gen_server:call(couch_proc_manager, {get_proc, Lang}, get_os_process_timeout()) of
- {ok, Proc, {QueryConfig}} ->
- case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
- true ->
- proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
- Proc;
- _ ->
- catch proc_stop(Proc),
- get_os_process(Lang)
- end;
- Error ->
- throw(Error)
- end.
-
-ret_os_process(Proc) ->
- true = gen_server:call(couch_proc_manager, {ret_proc, Proc}, infinity),
- catch unlink(Proc#proc.pid),
- ok.
-
-throw_sum_error(Else) ->
- throw({invalid_value, ?SUMERROR, Else}).
-
-throw_stat_error(Else) ->
- throw({invalid_value, iolist_to_binary(io_lib:format(?STATERROR, [Else]))}).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-builtin_sum_rows_negative_test() ->
- A = [{[{<<"a">>, 1}]}, {[{<<"a">>, 2}]}, {[{<<"a">>, 3}]}],
- E = {[{<<"error">>, <<"builtin_reduce_error">>}]},
- ?assertEqual(E, builtin_sum_rows([["K", E]], [])),
- % The case below is where the value is invalid, but no error is
- % raised because it's only one document.
- ?assertEqual(A, builtin_sum_rows([["K", A]], [])),
- {Result} = builtin_sum_rows([["K", A]], [1, 2, 3]),
- ?assertEqual(
- {<<"error">>, <<"builtin_reduce_error">>},
- lists:keyfind(<<"error">>, 1, Result)
- ).
-
-sum_values_test() ->
- ?assertEqual(3, sum_values(1, 2)),
- ?assertEqual([2, 4, 6], sum_values(1, [1, 4, 6])),
- ?assertEqual([3, 5, 7], sum_values([3, 2, 4], [0, 3, 3])),
- X =
- {[
- {<<"a">>, 1},
- {<<"b">>, [1, 2]},
- {<<"c">>, {[{<<"d">>, 3}]}},
- {<<"g">>, 1}
- ]},
- Y =
- {[
- {<<"a">>, 2},
- {<<"b">>, 3},
- {<<"c">>, {[{<<"e">>, 5}]}},
- {<<"f">>, 1},
- {<<"g">>, 1}
- ]},
- Z =
- {[
- {<<"a">>, 3},
- {<<"b">>, [4, 2]},
- {<<"c">>, {[{<<"d">>, 3}, {<<"e">>, 5}]}},
- {<<"f">>, 1},
- {<<"g">>, 2}
- ]},
- ?assertEqual(Z, sum_values(X, Y)),
- ?assertEqual(Z, sum_values(Y, X)).
-
-sum_values_negative_test() ->
- % invalid value
- A = [{[{<<"a">>, 1}]}, {[{<<"a">>, 2}]}, {[{<<"a">>, 3}]}],
- B = ["error 1", "error 2"],
- C = [<<"error 3">>, <<"error 4">>],
- KV =
- {[
- {<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, ?SUMERROR},
- {<<"caused_by">>, <<"some cause">>}
- ]},
- ?assertThrow({invalid_value, _, _}, sum_values(A, [1, 2, 3])),
- ?assertThrow({invalid_value, _, _}, sum_values(A, 0)),
- ?assertThrow({invalid_value, _, _}, sum_values(B, [1, 2])),
- ?assertThrow({invalid_value, _, _}, sum_values(C, [0])),
- ?assertThrow({builtin_reduce_error, KV}, sum_values(KV, [0])).
-
-stat_values_test() ->
- ?assertEqual({1, 2, 0, 1, 1}, stat_values(1, 0)),
- ?assertEqual({11, 2, 1, 10, 101}, stat_values(1, 10)),
- ?assertEqual(
- [
- {9, 2, 2, 7, 53},
- {14, 2, 3, 11, 130},
- {18, 2, 5, 13, 194}
- ],
- stat_values([2, 3, 5], [7, 11, 13])
- ).
-
-reduce_stats_test() ->
- ?assertEqual(
- [
- {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
- ],
- test_reduce(<<"_stats">>, [[[null, key], 2]])
- ),
-
- ?assertEqual(
- [
- [
- {[
- {<<"sum">>, 1},
- {<<"count">>, 1},
- {<<"min">>, 1},
- {<<"max">>, 1},
- {<<"sumsqr">>, 1}
- ]},
- {[
- {<<"sum">>, 2},
- {<<"count">>, 1},
- {<<"min">>, 2},
- {<<"max">>, 2},
- {<<"sumsqr">>, 4}
- ]}
- ]
- ],
- test_reduce(<<"_stats">>, [[[null, key], [1, 2]]])
- ),
-
- ?assertEqual(
- {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]},
- element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))
- ),
-
- ?assertEqual(
- [
- {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
- {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
- ],
- element(
- 2,
- finalize(<<"_stats">>, [
- {1, 1, 1, 1, 1},
- {2, 1, 2, 2, 4}
- ])
- )
- ),
-
- ?assertEqual(
- [
- {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
- {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
- ],
- element(
- 2,
- finalize(<<"_stats">>, [
- {1, 1, 1, 1, 1},
- {[
- {<<"sum">>, 2},
- {<<"count">>, 1},
- {<<"min">>, 2},
- {<<"max">>, 2},
- {<<"sumsqr">>, 4}
- ]}
- ])
- )
- ),
-
- ?assertEqual(
- [
- {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
- {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
- ],
- element(
- 2,
- finalize(<<"_stats">>, [
- {[
- {<<"sum">>, 1},
- {<<"count">>, 1},
- {<<"min">>, 1},
- {<<"max">>, 1},
- {<<"sumsqr">>, 1}
- ]},
- {2, 1, 2, 2, 4}
- ])
- )
- ),
- ok.
-
-test_reduce(Reducer, KVs) ->
- ?assertMatch({ok, _}, reduce(<<"javascript">>, [Reducer], KVs)),
- {ok, Reduced} = reduce(<<"javascript">>, [Reducer], KVs),
- {ok, Finalized} = finalize(Reducer, Reduced),
- Finalized.
-
-force_utf8_test() ->
- % "\uDCA5\uD83D"
- Ok = [
- <<"foo">>,
- <<"\\u00A0">>,
- <<"\\u0032">>,
- <<"\\uD83D\\uDCA5">>,
- <<"foo\\uD83D\\uDCA5bar">>,
- % Truncated escape, but it doesn't break replacements
- <<"\\u0FA">>
- ],
- lists:foreach(
- fun(Case) ->
- ?assertEqual(Case, force_utf8(Case))
- end,
- Ok
- ),
-
- NotOk = [
- <<"\\uDCA5">>,
- <<"\\uD83D">>,
- <<"fo\\uDCA5bar">>,
- <<"foo\\uD83Dbar">>,
- <<"\\uDCA5\\uD83D">>,
- <<"\\uD83Df\\uDCA5">>,
- <<"\\uDCA5\\u00A0">>,
- <<"\\uD83D\\u00A0">>
- ],
- ToJSON = fun(Bin) -> <<34, Bin/binary, 34>> end,
- lists:foreach(
- fun(Case) ->
- try
- ?assertNotEqual(Case, force_utf8(Case)),
- ?assertThrow(_, ?JSON_DECODE(ToJSON(Case))),
- ?assertMatch(<<_/binary>>, ?JSON_DECODE(ToJSON(force_utf8(Case))))
- catch
- T:R ->
- io:format(standard_error, "~p~n~p~n", [T, R])
- end
- end,
- NotOk
- ).
-
--endif.
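
For context: reduce functions whose source starts with "_" (_sum, _count,
_stats, _approx_count_distinct) are evaluated in Erlang by builtin_reduce/4
and never round-trip to the external query server. A minimal sketch of the
_sum building blocks, written in the style of the eunit tests above with
made-up keys and values:

    %% Illustrative values only.
    6 = sum_values(2, 4),
    [4, 6] = sum_values([1, 2], [3, 4]),
    %% builtin_sum_rows/2 folds sum_values/2 over [Key, Value] rows.
    6 = builtin_sum_rows([[<<"a">>, 1], [<<"b">>, 2], [<<"c">>, 3]], 0).
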
diff --git a/src/couch/src/couch_rand.erl b/src/couch/src/couch_rand.erl
deleted file mode 100644
index bc30956a4..000000000
--- a/src/couch/src/couch_rand.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_rand).
-
--export([
- uniform/0,
- uniform/1
-]).
-
-uniform() ->
- rand:uniform().
-
-uniform(N) ->
- rand:uniform(N).
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
deleted file mode 100644
index cfe38bbd4..000000000
--- a/src/couch/src/couch_secondary_sup.erl
+++ /dev/null
@@ -1,79 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_secondary_sup).
--behaviour(supervisor).
--export([init/1, start_link/0]).
-
-start_link() ->
- supervisor:start_link({local, couch_secondary_services}, ?MODULE, []).
-
-init([]) ->
- SecondarySupervisors = [
- {couch_plugin_event, {gen_event, start_link, [{local, couch_plugin}]}, permanent,
- brutal_kill, worker, dynamic}
- ],
- Daemons =
- [
- {query_servers, {couch_proc_manager, start_link, []}},
- {vhosts, {couch_httpd_vhost, start_link, []}},
- {uuids, {couch_uuids, start, []}}
- ] ++ couch_index_servers(),
-
- MaybeHttp =
- case http_enabled() of
- true ->
- [{httpd, {couch_httpd, start_link, []}}];
- false ->
- couch_httpd:set_auth_handlers(),
- []
- end,
-
- MaybeHttps =
- case https_enabled() of
- true -> [{httpsd, {chttpd, start_link, [https]}}];
- false -> []
- end,
-
- Children =
- SecondarySupervisors ++
- [
- begin
- {Module, Fun, Args} = Spec,
-
- {Name, {Module, Fun, Args}, permanent, brutal_kill, worker, [Module]}
- end
- || {Name, Spec} <-
- Daemons ++ MaybeHttp ++ MaybeHttps,
- Spec /= ""
- ],
- {ok, {{one_for_one, 50, 3600}, couch_epi:register_service(couch_db_epi, Children)}}.
-
-http_enabled() ->
- config:get_boolean("httpd", "enable", false).
-
-https_enabled() ->
- % 1. [ssl] enable = true | false
- % 2. if [daemons] httpsd == {chttpd, start_link, [https]} -> pretend true as well
- SSLEnabled = config:get_boolean("ssl", "enable", false),
- LegacySSL = config:get("daemons", "httpsd"),
- LegacySSLEnabled = LegacySSL =:= "{chttpd, start_link, [https]}",
-
- SSLEnabled orelse LegacySSLEnabled.
-
-couch_index_servers() ->
- N = couch_index_server:num_servers(),
- [couch_index_server(I) || I <- lists:seq(1, N)].
-
-couch_index_server(N) ->
- Name = couch_index_server:server_name(N),
- {Name, {couch_index_server, start_link, [N]}}.
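
For context: each entry in the Daemons list above is a plain {Name, {M, F, A}}
pair that init/1 expands into a full supervisor child spec. As a minimal
illustration using one of the entries above, {uuids, {couch_uuids, start, []}}
expands to:

    %% Shape produced by the list comprehension in init/1.
    {uuids, {couch_uuids, start, []}, permanent, brutal_kill, worker, [couch_uuids]}
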
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
deleted file mode 100644
index 74217894c..000000000
--- a/src/couch/src/couch_server.erl
+++ /dev/null
@@ -1,1097 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_server).
--behaviour(gen_server).
--behaviour(config_listener).
--vsn(3).
-
--export([open/2, create/2, delete/2, get_version/0, get_version/1, get_git_sha/0, get_uuid/0]).
--export([all_databases/0, all_databases/2]).
--export([init/1, handle_call/3, sup_start_link/1]).
--export([handle_cast/2, code_change/3, handle_info/2, terminate/2]).
--export([dev_start/0, is_admin/2, has_admins/0, get_stats/0]).
--export([close_db_if_idle/1]).
--export([delete_compaction_files/1]).
--export([is_compacting/1]).
--export([exists/1]).
--export([get_engine_extensions/0]).
--export([get_engine_path/2]).
--export([lock/2, unlock/1]).
--export([db_updated/1]).
--export([num_servers/0, couch_server/1, couch_dbs_pid_to_name/1, couch_dbs/1]).
--export([aggregate_queue_len/0, get_spidermonkey_version/0]).
--export([names/0]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_server_int.hrl").
-
--define(MAX_DBS_OPEN, 500).
--define(RELISTEN_DELAY, 5000).
--define(DEFAULT_ENGINE, "couch").
-
--record(server, {
- root_dir = [],
- engines = [],
- max_dbs_open = ?MAX_DBS_OPEN,
- dbs_open = 0,
- start_time = "",
- update_lru_on_read = true,
- lru = couch_lru:new(),
- couch_dbs,
- couch_dbs_pid_to_name,
- couch_dbs_locks
-}).
-
-dev_start() ->
- couch:stop(),
- up_to_date = make:all([load, debug_info]),
- couch:start().
-
-get_version() ->
- %% Defined in rebar.config.script
- ?COUCHDB_VERSION.
-get_version(short) ->
- %% strip git hash from version string
- [Version | _Rest] = string:tokens(get_version(), "+"),
- Version.
-
-get_git_sha() -> ?COUCHDB_GIT_SHA.
-
-get_uuid() ->
- case config:get("couchdb", "uuid", undefined) of
- undefined ->
- UUID = couch_uuids:random(),
- config:set("couchdb", "uuid", ?b2l(UUID)),
- UUID;
- UUID ->
- ?l2b(UUID)
- end.
-
-get_stats() ->
- Fun = fun(N, {TimeAcc, OpenAcc}) ->
- {ok, #server{start_time = Time, dbs_open = Open}} =
- gen_server:call(couch_server(N), get_server),
- {max(Time, TimeAcc), Open + OpenAcc}
- end,
- {Time, Open} =
- lists:foldl(Fun, {0, 0}, lists:seq(1, num_servers())),
- [{start_time, ?l2b(Time)}, {dbs_open, Open}].
-
-get_spidermonkey_version() -> list_to_binary(?COUCHDB_SPIDERMONKEY_VERSION).
-
-sup_start_link(N) ->
- gen_server:start_link({local, couch_server(N)}, couch_server, [N], []).
-
-open(DbName, Options) ->
- try
- validate_open_or_create(DbName, Options),
- open_int(DbName, Options)
- catch
- throw:{?MODULE, Error} ->
- Error
- end.
-
-open_int(DbName, Options0) ->
- Ctx = couch_util:get_value(user_ctx, Options0, #user_ctx{}),
- case ets:lookup(couch_dbs(DbName), DbName) of
- [#entry{db = Db0, lock = Lock} = Entry] when Lock =/= locked ->
- update_lru(DbName, Entry#entry.db_options),
- {ok, Db1} = couch_db:incref(Db0),
- couch_db:set_user_ctx(Db1, Ctx);
- _ ->
- Options = maybe_add_sys_db_callbacks(DbName, Options0),
- Timeout = couch_util:get_value(timeout, Options, infinity),
- Create = couch_util:get_value(create_if_missing, Options, false),
- case gen_server:call(couch_server(DbName), {open, DbName, Options}, Timeout) of
- {ok, Db0} ->
- {ok, Db1} = couch_db:incref(Db0),
- couch_db:set_user_ctx(Db1, Ctx);
- {not_found, no_db_file} when Create ->
- couch_log:warning("creating missing database: ~s", [DbName]),
- couch_server:create(DbName, Options);
- Error ->
- Error
- end
- end.
-
-update_lru(DbName, Options) ->
- case config:get_boolean("couchdb", "update_lru_on_read", false) of
- true ->
- case lists:member(sys_db, Options) of
- false -> gen_server:cast(couch_server(DbName), {update_lru, DbName});
- true -> ok
- end;
- false ->
- ok
- end.
-
-create(DbName, Options) ->
- try
- validate_open_or_create(DbName, Options),
- create_int(DbName, Options)
- catch
- throw:{?MODULE, Error} ->
- Error
- end.
-
-create_int(DbName, Options0) ->
- Options = maybe_add_sys_db_callbacks(DbName, Options0),
- couch_partition:validate_dbname(DbName, Options),
- case gen_server:call(couch_server(DbName), {create, DbName, Options}, infinity) of
- {ok, Db0} ->
- Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
- {ok, Db1} = couch_db:incref(Db0),
- couch_db:set_user_ctx(Db1, Ctx);
- Error ->
- Error
- end.
-
-delete(DbName, Options) ->
- gen_server:call(couch_server(DbName), {delete, DbName, Options}, infinity).
-
-exists(DbName) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- Engines = get_configured_engines(),
- Possible = get_possible_engines(DbName, RootDir, Engines),
- Possible /= [].
-
-delete_compaction_files(DbName) ->
- delete_compaction_files(DbName, []).
-
-delete_compaction_files(DbName, DelOpts) when is_list(DbName) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- lists:foreach(
- fun({Ext, Engine}) ->
- FPath = make_filepath(RootDir, DbName, Ext),
- couch_db_engine:delete_compaction_files(Engine, RootDir, FPath, DelOpts)
- end,
- get_configured_engines()
- ),
- ok;
-delete_compaction_files(DbName, DelOpts) when is_binary(DbName) ->
- delete_compaction_files(?b2l(DbName), DelOpts).
-
-is_compacting(DbName) ->
- lists:any(
- fun({_, Engine}) ->
- couch_db_engine:is_compacting(Engine, DbName)
- end,
- get_configured_engines()
- ).
-
-maybe_add_sys_db_callbacks(DbName, Options) when is_binary(DbName) ->
- maybe_add_sys_db_callbacks(?b2l(DbName), Options);
-maybe_add_sys_db_callbacks(DbName, Options) ->
- DbsDbName = config:get("mem3", "shards_db", "_dbs"),
- NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
-
- IsReplicatorDb = path_ends_with(DbName, "_replicator"),
- UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
- IsUsersDb =
- path_ends_with(DbName, "_users") orelse
- path_ends_with(DbName, UsersDbSuffix),
- if
- DbName == DbsDbName ->
- [
- {before_doc_update, fun mem3_bdu:before_doc_update/3},
- sys_db
- | Options
- ];
- DbName == NodesDbName ->
- [sys_db | Options];
- IsReplicatorDb ->
- [
- {before_doc_update, fun couch_replicator_docs:before_doc_update/3},
- {after_doc_read, fun couch_replicator_docs:after_doc_read/2},
- sys_db
- | Options
- ];
- IsUsersDb ->
- [
- {before_doc_update, fun couch_users_db:before_doc_update/3},
- {after_doc_read, fun couch_users_db:after_doc_read/2},
- sys_db
- | Options
- ];
- true ->
- Options
- end.
-
-path_ends_with(Path, Suffix) when is_binary(Suffix) ->
- Suffix =:= couch_db:dbname_suffix(Path);
-path_ends_with(Path, Suffix) when is_list(Suffix) ->
- path_ends_with(Path, ?l2b(Suffix)).
-
-check_dbname(DbName) ->
- couch_db:validate_dbname(DbName).
-
-is_admin(User, ClearPwd) ->
- case config:get("admins", User) of
- "-hashed-" ++ HashedPwdAndSalt ->
- [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
- couch_util:to_hex(crypto:hash(sha, ClearPwd ++ Salt)) == HashedPwd;
- _Else ->
- false
- end.
-
-has_admins() ->
- config:get("admins") /= [].
-
-hash_admin_passwords() ->
- hash_admin_passwords(true).
-
-hash_admin_passwords(Persist) ->
- lists:foreach(
- fun({User, ClearPassword}) ->
- HashedPassword = couch_passwords:hash_admin_password(ClearPassword),
- config:set("admins", User, ?b2l(HashedPassword), Persist)
- end,
- couch_passwords:get_unhashed_admins()
- ).
-
-close_db_if_idle(DbName) ->
- case ets:lookup(couch_dbs(DbName), DbName) of
- [#entry{}] ->
- gen_server:cast(couch_server(DbName), {close_db_if_idle, DbName});
- [] ->
- ok
- end.
-
-init([N]) ->
- couch_util:set_mqd_off_heap(?MODULE),
- couch_util:set_process_priority(?MODULE, high),
-
- % Mark pluggable storage engines as a supported feature
- config:enable_feature('pluggable-storage-engines'),
-
- % Mark partitioned databases as a supported feature
- config:enable_feature(partitioned),
-
- % Mark being able to receive documents with an _access property as a supported feature
- config:enable_feature('access-ready'),
-
- % Mark if fips is enabled
- case
- erlang:function_exported(crypto, info_fips, 0) andalso
- crypto:info_fips() == enabled
- of
- true ->
- config:enable_feature('fips');
- false ->
- ok
- end,
-
- % read config and register for configuration changes
-
- % just stop if one of the config settings changes. couch_server_sup
- % will restart us and then we will pick up the new settings.
-
- RootDir = config:get("couchdb", "database_dir", "."),
- Engines = get_configured_engines(),
- MaxDbsOpen = config:get_integer("couchdb", "max_dbs_open", ?MAX_DBS_OPEN),
- UpdateLruOnRead = config:get_boolean(
- "couchdb", "update_lru_on_read", false
- ),
- ok = config:listen_for_changes(?MODULE, N),
- ok = couch_file:init_delete_dir(RootDir),
- hash_admin_passwords(),
- ets:new(couch_dbs(N), [
- set,
- protected,
- named_table,
- {keypos, #entry.name},
- {read_concurrency, true}
- ]),
- ets:new(couch_dbs_pid_to_name(N), [set, protected, named_table]),
- ets:new(couch_dbs_locks(N), [
- set,
- public,
- named_table,
- {read_concurrency, true}
- ]),
- process_flag(trap_exit, true),
- {ok, #server{
- root_dir = RootDir,
- engines = Engines,
- max_dbs_open = per_couch_server(MaxDbsOpen),
- update_lru_on_read = UpdateLruOnRead,
- start_time = couch_util:rfc1123_date(),
- couch_dbs = couch_dbs(N),
- couch_dbs_pid_to_name = couch_dbs_pid_to_name(N),
- couch_dbs_locks = couch_dbs_locks(N)
- }}.
-
-terminate(Reason, Srv) ->
- couch_log:error(
- "couch_server terminating with ~p, state ~2048p",
- [
- Reason,
- Srv#server{lru = redacted}
- ]
- ),
- ets:foldl(
- fun(#entry{db = Db}, _) ->
- % Filter out any entry records for open_async
- % processes that haven't finished.
- if
- Db == undefined -> ok;
- true -> couch_util:shutdown_sync(couch_db:get_pid(Db))
- end
- end,
- nil,
- couch_dbs(Srv)
- ),
- ok.
-
-handle_config_change("couchdb", "database_dir", _, _, _) ->
- exit(whereis(couch_server), config_change),
- remove_handler;
-handle_config_change("couchdb", "update_lru_on_read", "true", _, N) ->
- gen_server:call(couch_server(N), {set_update_lru_on_read, true}),
- {ok, N};
-handle_config_change("couchdb", "update_lru_on_read", _, _, N) ->
- gen_server:call(couch_server(N), {set_update_lru_on_read, false}),
- {ok, N};
-handle_config_change("couchdb", "max_dbs_open", Max0, _, N) when is_list(Max0) ->
- Max1 = per_couch_server(list_to_integer(Max0)),
- gen_server:call(couch_server(N), {set_max_dbs_open, Max1}),
- {ok, N};
-handle_config_change("couchdb", "max_dbs_open", _, _, N) ->
- Max = per_couch_server(?MAX_DBS_OPEN),
- gen_server:call(couch_server(N), {set_max_dbs_open, Max}),
- {ok, N};
-handle_config_change("couchdb_engines", _, _, _, N) ->
- gen_server:call(couch_server(N), reload_engines),
- {ok, N};
-handle_config_change("admins", _, _, Persist, N) ->
- % spawn here so couch event manager doesn't deadlock
- spawn(fun() -> hash_admin_passwords(Persist) end),
- {ok, N};
-handle_config_change("httpd", "authentication_handlers", _, _, N) ->
- couch_httpd:stop(),
- {ok, N};
-handle_config_change("httpd", "bind_address", _, _, N) ->
- couch_httpd:stop(),
- {ok, N};
-handle_config_change("httpd", "port", _, _, N) ->
- couch_httpd:stop(),
- {ok, N};
-handle_config_change("httpd", "max_connections", _, _, N) ->
- couch_httpd:stop(),
- {ok, N};
-handle_config_change(_, _, _, _, N) ->
- {ok, N}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, N) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), {restart_config_listener, N}).
-
-per_couch_server(X) ->
- erlang:max(1, X div num_servers()).
-
-all_databases() ->
- {ok, DbList} = all_databases(
- fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []
- ),
- {ok, lists:usort(DbList)}.
-
-all_databases(Fun, Acc0) ->
- {ok, #server{root_dir = Root}} = gen_server:call(couch_server_1, get_server),
- NormRoot = couch_util:normpath(Root),
- Extensions = get_engine_extensions(),
- ExtRegExp = "(" ++ string:join(Extensions, "|") ++ ")",
- RegExp =
- % stock CouchDB name regex
- "^[a-z0-9\\_\\$()\\+\\-]*"
- % optional shard timestamp
- "(\\.[0-9]{10,})?"
- % filename extension
- "\\." ++ ExtRegExp ++ "$",
- FinalAcc =
- try
- couch_util:fold_files(
- Root,
- RegExp,
- true,
- fun(Filename, AccIn) ->
- NormFilename = couch_util:normpath(Filename),
- case NormFilename -- NormRoot of
- [$/ | RelativeFilename] -> ok;
- RelativeFilename -> ok
- end,
- Ext = filename:extension(RelativeFilename),
- case Fun(?l2b(filename:rootname(RelativeFilename, Ext)), AccIn) of
- {ok, NewAcc} -> NewAcc;
- {stop, NewAcc} -> throw({stop, Fun, NewAcc})
- end
- end,
- Acc0
- )
- catch
- throw:{stop, Fun, Acc1} ->
- Acc1
- end,
- {ok, FinalAcc}.
-
-make_room(Server, Options) ->
- case lists:member(sys_db, Options) of
- false -> maybe_close_lru_db(Server);
- true -> {ok, Server}
- end.
-
-maybe_close_lru_db(#server{dbs_open = NumOpen, max_dbs_open = MaxOpen} = Server) when
- NumOpen < MaxOpen
-->
- {ok, Server};
-maybe_close_lru_db(#server{lru = Lru} = Server) ->
- case couch_lru:close(Lru) of
- {true, NewLru} ->
- {ok, db_closed(Server#server{lru = NewLru}, [])};
- false ->
- {error, all_dbs_active}
- end.
-
-open_async(Server, From, DbName, Options) ->
- NoLRUServer = Server#server{
- lru = redacted
- },
- Parent = self(),
- T0 = os:timestamp(),
- Opener = spawn_link(fun() ->
- Res = open_async_int(NoLRUServer, DbName, Options),
- IsSuccess =
- case Res of
- {ok, _} -> true;
- _ -> false
- end,
- case IsSuccess andalso lists:member(create, Options) of
- true ->
- couch_event:notify(DbName, created);
- false ->
- ok
- end,
- gen_server:call(Parent, {open_result, DbName, Res}, infinity),
- unlink(Parent),
- case IsSuccess of
- true ->
- % Track latency times for successful opens
- Diff = timer:now_diff(os:timestamp(), T0) / 1000,
- couch_stats:update_histogram([couchdb, db_open_time], Diff);
- false ->
- % Log unsuccessful open results
- couch_log:info("open_result error ~p for ~s", [Res, DbName])
- end
- end),
- ReqType =
- case lists:member(create, Options) of
- true -> create;
- false -> open
- end,
- true = ets:insert(couch_dbs(Server), #entry{
- name = DbName,
- pid = Opener,
- lock = locked,
- waiters = [From],
- req_type = ReqType,
- db_options = Options
- }),
- true = ets:insert(couch_dbs_pid_to_name(Server), {Opener, DbName}),
- db_opened(Server, Options).
-
-open_async_int(Server, DbName, Options) ->
- DbNameList = binary_to_list(DbName),
- case check_dbname(DbNameList) of
- ok ->
- case get_engine(Server, DbNameList, Options) of
- {ok, {Module, FilePath}} ->
- couch_db:start_link(Module, DbName, FilePath, Options);
- Error2 ->
- Error2
- end;
- Error1 ->
- Error1
- end.
-
-handle_call(close_lru, _From, #server{lru = Lru} = Server) ->
- case couch_lru:close(Lru) of
- {true, NewLru} ->
- {reply, ok, db_closed(Server#server{lru = NewLru}, [])};
- false ->
- {reply, {error, all_dbs_active}, Server}
- end;
-handle_call(open_dbs_count, _From, Server) ->
- {reply, Server#server.dbs_open, Server};
-handle_call({set_update_lru_on_read, UpdateOnRead}, _From, Server) ->
- {reply, ok, Server#server{update_lru_on_read = UpdateOnRead}};
-handle_call({set_max_dbs_open, Max}, _From, Server) ->
- {reply, ok, Server#server{max_dbs_open = Max}};
-handle_call(reload_engines, _From, Server) ->
- {reply, ok, Server#server{engines = get_configured_engines()}};
-handle_call(get_server, _From, Server) ->
- {reply, {ok, Server}, Server};
-handle_call({open_result, DbName, {ok, Db}}, {Opener, _}, Server) ->
- true = ets:delete(couch_dbs_pid_to_name(Server), Opener),
- DbPid = couch_db:get_pid(Db),
- case ets:lookup(couch_dbs(Server), DbName) of
- [] ->
- % db was deleted during async open
- exit(DbPid, kill),
- {reply, ok, Server};
- [#entry{pid = Opener, req_type = ReqType, waiters = Waiters} = Entry] ->
- link(DbPid),
- [gen_server:reply(Waiter, {ok, Db}) || Waiter <- Waiters],
- % Cancel the creation request if it exists.
- case ReqType of
- {create, DbName, _Options, CrFrom} ->
- gen_server:reply(CrFrom, file_exists);
- _ ->
- ok
- end,
- true = ets:insert(couch_dbs(Server), #entry{
- name = DbName,
- db = Db,
- pid = DbPid,
- lock = unlocked,
- db_options = Entry#entry.db_options,
- start_time = couch_db:get_instance_start_time(Db)
- }),
- true = ets:insert(couch_dbs_pid_to_name(Server), {DbPid, DbName}),
- Lru =
- case couch_db:is_system_db(Db) of
- false ->
- couch_lru:insert(DbName, Server#server.lru);
- true ->
- Server#server.lru
- end,
- {reply, ok, Server#server{lru = Lru}};
- [#entry{}] ->
- % A mismatched opener pid means that this open_result message
- % was in our mailbox but is now stale. Mostly ignore
- % it except to ensure that the db pid is super dead.
- exit(couch_db:get_pid(Db), kill),
- {reply, ok, Server}
- end;
-handle_call({open_result, DbName, {error, eexist}}, From, Server) ->
- handle_call({open_result, DbName, file_exists}, From, Server);
-handle_call({open_result, DbName, Error}, {Opener, _}, Server) ->
- case ets:lookup(couch_dbs(Server), DbName) of
- [] ->
- % db was deleted during async open
- {reply, ok, Server};
- [#entry{pid = Opener, req_type = ReqType, waiters = Waiters} = Entry] ->
- [gen_server:reply(Waiter, Error) || Waiter <- Waiters],
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), Opener),
- NewServer =
- case ReqType of
- {create, DbName, Options, CrFrom} ->
- open_async(Server, CrFrom, DbName, Options);
- _ ->
- Server
- end,
- {reply, ok, db_closed(NewServer, Entry#entry.db_options)};
- [#entry{}] ->
- % A mismatched pid means that this open_result message
- % was in our mailbox and is now stale. Ignore it.
- {reply, ok, Server}
- end;
-handle_call({open, DbName, Options}, From, Server) ->
- case ets:lookup(couch_dbs(Server), DbName) of
- [] ->
- case make_room(Server, Options) of
- {ok, Server2} ->
- {noreply, open_async(Server2, From, DbName, Options)};
- CloseError ->
- {reply, CloseError, Server}
- end;
- [#entry{waiters = Waiters} = Entry] when is_list(Waiters) ->
- true = ets:insert(couch_dbs(Server), Entry#entry{waiters = [From | Waiters]}),
- NumWaiters = length(Waiters),
- if
- NumWaiters =< 10 orelse NumWaiters rem 10 /= 0 ->
- ok;
- true ->
- Fmt = "~b clients waiting to open db ~s",
- couch_log:info(Fmt, [length(Waiters), DbName])
- end,
- {noreply, Server};
- [#entry{db = Db}] ->
- {reply, {ok, Db}, Server}
- end;
-handle_call({create, DbName, Options}, From, Server) ->
- case ets:lookup(couch_dbs(Server), DbName) of
- [] ->
- case make_room(Server, Options) of
- {ok, Server2} ->
- CrOptions = [create | Options],
- {noreply, open_async(Server2, From, DbName, CrOptions)};
- CloseError ->
- {reply, CloseError, Server}
- end;
- [#entry{req_type = open} = Entry] ->
- % We're trying to create a database while someone is in
- % the middle of trying to open it. We allow one creator
- % to wait while we figure out if it'll succeed.
- CrOptions = [create | Options],
- Req = {create, DbName, CrOptions, From},
- true = ets:insert(couch_dbs(Server), Entry#entry{req_type = Req}),
- {noreply, Server};
- [_AlreadyRunningDb] ->
- {reply, file_exists, Server}
- end;
-handle_call({delete, DbName, Options}, _From, Server) ->
- DbNameList = binary_to_list(DbName),
- case check_dbname(DbNameList) of
- ok ->
- Server2 =
- case ets:lookup(couch_dbs(Server), DbName) of
- [] ->
- Server;
- [#entry{pid = Pid, waiters = Waiters} = Entry] when is_list(Waiters) ->
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
- exit(Pid, kill),
- [gen_server:reply(Waiter, not_found) || Waiter <- Waiters],
- db_closed(Server, Entry#entry.db_options);
- [#entry{pid = Pid} = Entry] ->
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
- exit(Pid, kill),
- db_closed(Server, Entry#entry.db_options)
- end,
-
- couch_db_plugin:on_delete(DbName, Options),
-
- DelOpt = [{context, delete} | Options],
-
- % Make sure to remove all compaction data
- delete_compaction_files(DbNameList, Options),
-
- {ok, {Engine, FilePath}} = get_engine(Server, DbNameList),
- RootDir = Server#server.root_dir,
- case couch_db_engine:delete(Engine, RootDir, FilePath, DelOpt) of
- ok ->
- couch_event:notify(DbName, deleted),
- {reply, ok, Server2};
- {error, enoent} ->
- {reply, not_found, Server2};
- Else ->
- {reply, Else, Server2}
- end;
- Error ->
- {reply, Error, Server}
- end;
-handle_call({db_updated, Db}, _From, Server0) ->
- DbName = couch_db:name(Db),
- StartTime = couch_db:get_instance_start_time(Db),
- Server =
- try ets:lookup_element(couch_dbs(Server0), DbName, #entry.start_time) of
- StartTime ->
- true = ets:update_element(couch_dbs(Server0), DbName, {#entry.db, Db}),
- Lru =
- case couch_db:is_system_db(Db) of
- false -> couch_lru:update(DbName, Server0#server.lru);
- true -> Server0#server.lru
- end,
- Server0#server{lru = Lru};
- _ ->
- Server0
- catch
- _:_ ->
- Server0
- end,
- {reply, ok, Server}.
-
-handle_cast({update_lru, DbName}, #server{lru = Lru, update_lru_on_read = true} = Server) ->
- {noreply, Server#server{lru = couch_lru:update(DbName, Lru)}};
-handle_cast({update_lru, _DbName}, Server) ->
- {noreply, Server};
-handle_cast({close_db_if_idle, DbName}, Server) ->
- case ets:update_element(couch_dbs(Server), DbName, {#entry.lock, locked}) of
- true ->
- [#entry{db = Db, db_options = DbOpts}] = ets:lookup(couch_dbs(Server), DbName),
- case couch_db:is_idle(Db) of
- true ->
- DbPid = couch_db:get_pid(Db),
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), DbPid),
- exit(DbPid, kill),
- {noreply, db_closed(Server, DbOpts)};
- false ->
- true = ets:update_element(
- couch_dbs(Server), DbName, {#entry.lock, unlocked}
- ),
- {noreply, Server}
- end;
- false ->
- {noreply, Server}
- end;
-handle_cast(Msg, Server) ->
- {stop, {unknown_cast_message, Msg}, Server}.
-
-code_change(_OldVsn, #server{} = State, _Extra) ->
- {ok, State}.
-
-handle_info({'EXIT', _Pid, config_change}, Server) ->
- {stop, config_change, Server};
-handle_info({'EXIT', Pid, Reason}, Server) ->
- case ets:lookup(couch_dbs_pid_to_name(Server), Pid) of
- [{Pid, DbName}] ->
- [#entry{waiters = Waiters} = Entry] = ets:lookup(couch_dbs(Server), DbName),
- if
- Reason /= snappy_nif_not_loaded ->
- ok;
- true ->
- Msg = io_lib:format(
- "To open the database `~s`, Apache CouchDB "
- "must be built with Erlang OTP R13B04 or higher.",
- [DbName]
- ),
- couch_log:error(Msg, [])
- end,
- % We kill databases on purpose so there's no reason
- % to log that fact. So we restrict logging to "interesting"
- % reasons.
- if
- Reason == normal orelse Reason == killed -> ok;
- true -> couch_log:info("db ~s died with reason ~p", [DbName, Reason])
- end,
- if
- not is_list(Waiters) -> ok;
- true -> [gen_server:reply(Waiter, Reason) || Waiter <- Waiters]
- end,
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
- {noreply, db_closed(Server, Entry#entry.db_options)};
- [] ->
- {noreply, Server}
- end;
-handle_info({restart_config_listener, N}, State) ->
- ok = config:listen_for_changes(?MODULE, N),
- {noreply, State};
-handle_info(Info, Server) ->
- {stop, {unknown_message, Info}, Server}.
-
-db_opened(Server, Options) ->
- case lists:member(sys_db, Options) of
- false -> Server#server{dbs_open = Server#server.dbs_open + 1};
- true -> Server
- end.
-
-db_closed(Server, Options) ->
- case lists:member(sys_db, Options) of
- false -> Server#server{dbs_open = Server#server.dbs_open - 1};
- true -> Server
- end.
-
-validate_open_or_create(DbName, Options) ->
- case check_dbname(DbName) of
- ok ->
- ok;
- DbNameError ->
- throw({?MODULE, DbNameError})
- end,
-
- case check_engine(Options) of
- ok ->
- ok;
- EngineError ->
- throw({?MODULE, EngineError})
- end,
-
- case ets:lookup(couch_dbs_locks(DbName), DbName) of
- [] ->
- ok;
- [{DbName, Reason}] ->
- throw({?MODULE, {error, {locked, Reason}}})
- end.
-
-get_configured_engines() ->
- ConfigEntries = config:get("couchdb_engines"),
- Engines = lists:flatmap(
- fun({Extension, ModuleStr}) ->
- try
- [{Extension, list_to_atom(ModuleStr)}]
- catch
- _T:_R ->
- []
- end
- end,
- ConfigEntries
- ),
- case Engines of
- [] ->
- [{"couch", couch_bt_engine}];
- Else ->
- Else
- end.
-
-get_engine(Server, DbName, Options) ->
- #server{
- root_dir = RootDir,
- engines = Engines
- } = Server,
- case couch_util:get_value(engine, Options) of
- Ext when is_binary(Ext) ->
- ExtStr = binary_to_list(Ext),
- case lists:keyfind(ExtStr, 1, Engines) of
- {ExtStr, Engine} ->
- Path = make_filepath(RootDir, DbName, ExtStr),
- {ok, {Engine, Path}};
- false ->
- {error, {invalid_engine_extension, Ext}}
- end;
- _ ->
- get_engine(Server, DbName)
- end.
-
-get_engine(Server, DbName) ->
- #server{
- root_dir = RootDir,
- engines = Engines
- } = Server,
- Possible = get_possible_engines(DbName, RootDir, Engines),
- case Possible of
- [] ->
- get_default_engine(Server, DbName);
- [Engine] ->
- {ok, Engine};
- _ ->
- erlang:error(engine_conflict)
- end.
-
-get_possible_engines(DbName, RootDir, Engines) ->
- lists:foldl(
- fun({Extension, Engine}, Acc) ->
- Path = make_filepath(RootDir, DbName, Extension),
- case couch_db_engine:exists(Engine, Path) of
- true ->
- [{Engine, Path} | Acc];
- false ->
- Acc
- end
- end,
- [],
- Engines
- ).
-
-get_default_engine(Server, DbName) ->
- #server{
- root_dir = RootDir,
- engines = Engines
- } = Server,
- Default = {couch_bt_engine, make_filepath(RootDir, DbName, "couch")},
- Extension = config:get("couchdb", "default_engine", ?DEFAULT_ENGINE),
- case lists:keyfind(Extension, 1, Engines) of
- {Extension, Module} ->
- {ok, {Module, make_filepath(RootDir, DbName, Extension)}};
- false ->
- Fmt =
- "Invalid storage engine extension ~s,"
- " configured engine extensions are: ~s",
- Exts = [E || {E, _} <- Engines],
- Args = [Extension, string:join(Exts, ", ")],
- couch_log:error(Fmt, Args),
- {ok, Default}
- end.
-
-make_filepath(RootDir, DbName, Extension) when is_binary(RootDir) ->
- make_filepath(binary_to_list(RootDir), DbName, Extension);
-make_filepath(RootDir, DbName, Extension) when is_binary(DbName) ->
- make_filepath(RootDir, binary_to_list(DbName), Extension);
-make_filepath(RootDir, DbName, Extension) when is_binary(Extension) ->
- make_filepath(RootDir, DbName, binary_to_list(Extension));
-make_filepath(RootDir, DbName, Extension) ->
- filename:join([RootDir, "./" ++ DbName ++ "." ++ Extension]).
-
-get_engine_extensions() ->
- case config:get("couchdb_engines") of
- [] ->
- ["couch"];
- Entries ->
- [Ext || {Ext, _Mod} <- Entries]
- end.
-
-check_engine(Options) ->
- case couch_util:get_value(engine, Options) of
- Ext when is_binary(Ext) ->
- ExtStr = binary_to_list(Ext),
- Extensions = get_engine_extensions(),
- case lists:member(ExtStr, Extensions) of
- true ->
- ok;
- false ->
- {error, {invalid_engine_extension, Ext}}
- end;
- _ ->
- ok
- end.
-
-get_engine_path(DbName, Engine) when is_binary(DbName), is_atom(Engine) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- case lists:keyfind(Engine, 2, get_configured_engines()) of
- {Ext, Engine} ->
- {ok, make_filepath(RootDir, DbName, Ext)};
- false ->
- {error, {invalid_engine, Engine}}
- end.
-
-lock(DbName, Reason) when is_binary(DbName), is_binary(Reason) ->
- case ets:lookup(couch_dbs(DbName), DbName) of
- [] ->
- true = ets:insert(couch_dbs_locks(DbName), {DbName, Reason}),
- ok;
- [#entry{}] ->
- {error, already_opened}
- end.
-
-unlock(DbName) when is_binary(DbName) ->
- true = ets:delete(couch_dbs_locks(DbName), DbName),
- ok.
-
-db_updated(Db) ->
- DbName = couch_db:name(Db),
- gen_server:call(couch_server(DbName), {db_updated, Db}, infinity).
-
-couch_server(Arg) ->
- name("couch_server", Arg).
-
-couch_dbs(Arg) ->
- name("couch_dbs", Arg).
-
-couch_dbs_pid_to_name(Arg) ->
- name("couch_dbs_pid_to_name", Arg).
-
-couch_dbs_locks(Arg) ->
- name("couch_dbs_locks", Arg).
-
-name("couch_dbs", #server{} = Server) ->
- Server#server.couch_dbs;
-name("couch_dbs_pid_to_name", #server{} = Server) ->
- Server#server.couch_dbs_pid_to_name;
-name("couch_dbs_locks", #server{} = Server) ->
- Server#server.couch_dbs_locks;
-name(BaseName, DbName) when is_list(DbName) ->
- name(BaseName, ?l2b(DbName));
-name(BaseName, DbName) when is_binary(DbName) ->
- N = 1 + erlang:phash2(DbName, num_servers()),
- name(BaseName, N);
-name(BaseName, N) when is_integer(N), N > 0 ->
- list_to_atom(BaseName ++ "_" ++ integer_to_list(N)).
-
-num_servers() ->
- erlang:system_info(schedulers).
-
-aggregate_queue_len() ->
- N = num_servers(),
- Names = [couch_server(I) || I <- lists:seq(1, N)],
- MQs = [
- process_info(whereis(Name), message_queue_len)
- || Name <- Names
- ],
- lists:sum([X || {_, X} <- MQs]).
-
-names() ->
- N = couch_server:num_servers(),
- [couch_server:couch_server(I) || I <- lists:seq(1, N)].
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- ok = meck:new(config, [passthrough]),
- ok = meck:expect(config, get, fun config_get/3),
- ok.
-
-teardown_all(_) ->
- meck:unload().
-
-config_get("couchdb", "users_db_suffix", _) -> "users_db";
-config_get(_, _, _) -> undefined.
-
-maybe_add_sys_db_callbacks_pass_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- fun should_add_sys_db_callbacks/0,
- fun should_not_add_sys_db_callbacks/0
- ]
- }.
-
-should_add_sys_db_callbacks() ->
- Cases = [
- "shards/00000000-3fffffff/foo/users_db.1415960794.couch",
- "shards/00000000-3fffffff/foo/users_db.1415960794",
- "shards/00000000-3fffffff/foo/users_db",
- "shards/00000000-3fffffff/users_db.1415960794.couch",
- "shards/00000000-3fffffff/users_db.1415960794",
- "shards/00000000-3fffffff/users_db",
-
- "shards/00000000-3fffffff/_users.1415960794.couch",
- "shards/00000000-3fffffff/_users.1415960794",
- "shards/00000000-3fffffff/_users",
-
- "foo/users_db.couch",
- "foo/users_db",
- "users_db.couch",
- "users_db",
- "foo/_users.couch",
- "foo/_users",
- "_users.couch",
- "_users",
-
- "shards/00000000-3fffffff/foo/_replicator.1415960794.couch",
- "shards/00000000-3fffffff/foo/_replicator.1415960794",
- "shards/00000000-3fffffff/_replicator",
- "foo/_replicator.couch",
- "foo/_replicator",
- "_replicator.couch",
- "_replicator"
- ],
- lists:foreach(
- fun(DbName) ->
- check_case(DbName, true),
- check_case(?l2b(DbName), true)
- end,
- Cases
- ).
-
-should_not_add_sys_db_callbacks() ->
- Cases = [
- "shards/00000000-3fffffff/foo/mydb.1415960794.couch",
- "shards/00000000-3fffffff/foo/mydb.1415960794",
- "shards/00000000-3fffffff/mydb",
- "foo/mydb.couch",
- "foo/mydb",
- "mydb.couch",
- "mydb"
- ],
- lists:foreach(
- fun(DbName) ->
- check_case(DbName, false),
- check_case(?l2b(DbName), false)
- end,
- Cases
- ).
-
-check_case(DbName, IsAdded) ->
- Options = maybe_add_sys_db_callbacks(DbName, [other_options]),
- ?assertEqual(IsAdded, lists:member(sys_db, Options)).
-
--endif.
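
A minimal sketch of the sharded naming scheme removed above: each database name is hashed onto one of N registered couch_server processes via erlang:phash2/2 (N defaults to the scheduler count, per num_servers/0). The module and function names below are illustrative only, not part of the deleted file.

    -module(couch_server_name_sketch).
    -export([server_for/2]).

    %% Mirror name/2 and num_servers/0 from the deleted module: the index is
    %% 1 + phash2(DbName, N), so it always falls in the range 1..N.
    server_for(DbName, N) when is_binary(DbName), is_integer(N), N > 0 ->
        Index = 1 + erlang:phash2(DbName, N),
        list_to_atom("couch_server_" ++ integer_to_list(Index)).

    %% Shell example:
    %%   couch_server_name_sketch:server_for(<<"mydb">>, 8).
    %%   %% returns e.g. couch_server_3

Because the hash depends only on the name, every caller resolves the same database to the same server process without any coordination.
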
diff --git a/src/couch/src/couch_server_int.hrl b/src/couch/src/couch_server_int.hrl
deleted file mode 100644
index 537a6abb9..000000000
--- a/src/couch/src/couch_server_int.hrl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--record(entry, {
- name,
- db,
- pid,
- lock,
- waiters,
- req_type,
- db_options,
- start_time
-}).
diff --git a/src/couch/src/couch_stream.erl b/src/couch/src/couch_stream.erl
deleted file mode 100644
index 12b290820..000000000
--- a/src/couch/src/couch_stream.erl
+++ /dev/null
@@ -1,302 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stream).
--behaviour(gen_server).
--vsn(1).
-
--export([
- open/1,
- open/2,
- close/1,
-
- copy/2,
- write/2,
- to_disk_term/1,
-
- foldl/3,
- foldl/4,
- foldl_decode/5,
- range_foldl/5
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(DEFAULT_BUFFER_SIZE, 4096).
-
--record(stream, {
- engine,
- opener_monitor,
- written_pointers = [],
- buffer_list = [],
- buffer_len = 0,
- max_buffer,
- written_len = 0,
- md5,
- % md5 of the content without any transformation applied (e.g. compression)
- % needed for the attachment upload integrity check (ticket 558)
- identity_md5,
- identity_len = 0,
- encoding_fun,
- end_encoding_fun
-}).
-
-open({_StreamEngine, _StreamEngineState} = Engine) ->
- open(Engine, []).
-
-open({_StreamEngine, _StreamEngineState} = Engine, Options) ->
- gen_server:start_link(?MODULE, {Engine, self(), erlang:get(io_priority), Options}, []).
-
-close(Pid) ->
- gen_server:call(Pid, close, infinity).
-
-copy(Src, Dst) ->
- foldl(
- Src,
- fun(Bin, _) ->
- ok = write(Dst, Bin)
- end,
- ok
- ).
-
-write(_Pid, <<>>) ->
- ok;
-write(Pid, Bin) ->
- gen_server:call(Pid, {write, Bin}, infinity).
-
-to_disk_term({Engine, EngineState}) ->
- Engine:to_disk_term(EngineState).
-
-foldl({Engine, EngineState}, Fun, Acc) ->
- Engine:foldl(EngineState, Fun, Acc).
-
-foldl(Engine, <<>>, Fun, Acc) ->
- foldl(Engine, Fun, Acc);
-foldl(Engine, Md5, UserFun, UserAcc) ->
- InitAcc = {couch_hash:md5_hash_init(), UserFun, UserAcc},
- {Md5Acc, _, OutAcc} = foldl(Engine, fun foldl_md5/2, InitAcc),
- Md5 = couch_hash:md5_hash_final(Md5Acc),
- OutAcc.
-
-foldl_decode(Engine, Md5, Enc, UserFun, UserAcc1) ->
- {DecDataFun, DecEndFun} =
- case Enc of
- gzip -> ungzip_init();
- identity -> identity_enc_dec_funs()
- end,
- InitAcc = {DecDataFun, UserFun, UserAcc1},
- {_, _, UserAcc2} = foldl(Engine, Md5, fun foldl_decode/2, InitAcc),
- DecEndFun(),
- UserAcc2.
-
-range_foldl(Engine, From, To, UserFun, UserAcc) when To >= From ->
- NewEngine = do_seek(Engine, From),
- InitAcc = {To - From, UserFun, UserAcc},
- try
- {_, _, UserAcc2} = foldl(NewEngine, fun foldl_length/2, InitAcc),
- UserAcc2
- catch
- throw:{finished, UserAcc3} ->
- UserAcc3
- end.
-
-foldl_md5(Bin, {Md5Acc, UserFun, UserAcc}) ->
- NewMd5Acc = couch_hash:md5_hash_update(Md5Acc, Bin),
- {NewMd5Acc, UserFun, UserFun(Bin, UserAcc)}.
-
-foldl_decode(EncBin, {DecFun, UserFun, UserAcc}) ->
- case DecFun(EncBin) of
- <<>> -> {DecFun, UserFun, UserAcc};
- Dec -> {DecFun, UserFun, UserFun(Dec, UserAcc)}
- end.
-
-foldl_length(Bin, {Length, UserFun, UserAcc}) ->
- BinSize = size(Bin),
- case BinSize =< Length of
- true ->
- {Length - BinSize, UserFun, UserFun(Bin, UserAcc)};
- false ->
- <<Trunc:Length/binary, _/binary>> = Bin,
- throw({finished, UserFun(Trunc, UserAcc)})
- end.
-
-gzip_init(Options) ->
- case couch_util:get_value(compression_level, Options, 0) of
- Lvl when Lvl >= 1 andalso Lvl =< 9 ->
- Z = zlib:open(),
- % 15 = ?MAX_WBITS (defined in the zlib module)
- % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
- ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default),
- {
- fun(Data) ->
- zlib:deflate(Z, Data)
- end,
- fun() ->
- Last = zlib:deflate(Z, [], finish),
- ok = zlib:deflateEnd(Z),
- ok = zlib:close(Z),
- Last
- end
- };
- _ ->
- identity_enc_dec_funs()
- end.
-
-ungzip_init() ->
- Z = zlib:open(),
- zlib:inflateInit(Z, 16 + 15),
- {
- fun(Data) ->
- zlib:inflate(Z, Data)
- end,
- fun() ->
- ok = zlib:inflateEnd(Z),
- ok = zlib:close(Z)
- end
- }.
-
-identity_enc_dec_funs() ->
- {
- fun(Data) -> Data end,
- fun() -> [] end
- }.
-
-init({Engine, OpenerPid, OpenerPriority, Options}) ->
- erlang:put(io_priority, OpenerPriority),
- {EncodingFun, EndEncodingFun} =
- case couch_util:get_value(encoding, Options, identity) of
- identity -> identity_enc_dec_funs();
- gzip -> gzip_init(Options)
- end,
- {ok, #stream{
- engine = Engine,
- opener_monitor = erlang:monitor(process, OpenerPid),
- md5 = couch_hash:md5_hash_init(),
- identity_md5 = couch_hash:md5_hash_init(),
- encoding_fun = EncodingFun,
- end_encoding_fun = EndEncodingFun,
- max_buffer = couch_util:get_value(
- buffer_size, Options, ?DEFAULT_BUFFER_SIZE
- )
- }}.
-
-terminate(_Reason, _Stream) ->
- ok.
-
-handle_call({write, Bin}, _From, Stream) ->
- BinSize = iolist_size(Bin),
- #stream{
- engine = Engine,
- written_len = WrittenLen,
- buffer_len = BufferLen,
- buffer_list = Buffer,
- max_buffer = Max,
- md5 = Md5,
- identity_md5 = IdenMd5,
- identity_len = IdenLen,
- encoding_fun = EncodingFun
- } = Stream,
- if
- BinSize + BufferLen > Max ->
- WriteBin = lists:reverse(Buffer, [Bin]),
- IdenMd5_2 = couch_hash:md5_hash_update(IdenMd5, WriteBin),
- case EncodingFun(WriteBin) of
- [] ->
- % case where the encoder did some internal buffering
- % (zlib does it for example)
- NewEngine = Engine,
- WrittenLen2 = WrittenLen,
- Md5_2 = Md5;
- WriteBin2 ->
- NewEngine = do_write(Engine, WriteBin2),
- WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
- Md5_2 = couch_hash:md5_hash_update(Md5, WriteBin2)
- end,
-
- {reply, ok,
- Stream#stream{
- engine = NewEngine,
- written_len = WrittenLen2,
- buffer_list = [],
- buffer_len = 0,
- md5 = Md5_2,
- identity_md5 = IdenMd5_2,
- identity_len = IdenLen + BinSize
- },
- hibernate};
- true ->
- {reply, ok, Stream#stream{
- buffer_list = [Bin | Buffer],
- buffer_len = BufferLen + BinSize,
- identity_len = IdenLen + BinSize
- }}
- end;
-handle_call(close, _From, Stream) ->
- #stream{
- engine = Engine,
- opener_monitor = MonRef,
- written_len = WrittenLen,
- buffer_list = Buffer,
- md5 = Md5,
- identity_md5 = IdenMd5,
- identity_len = IdenLen,
- encoding_fun = EncodingFun,
- end_encoding_fun = EndEncodingFun
- } = Stream,
-
- WriteBin = lists:reverse(Buffer),
- IdenMd5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(IdenMd5, WriteBin)),
- WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(),
- Md5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(Md5, WriteBin2)),
- Result =
- case WriteBin2 of
- [] ->
- {do_finalize(Engine), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
- _ ->
- NewEngine = do_write(Engine, WriteBin2),
- StreamLen = WrittenLen + iolist_size(WriteBin2),
- {do_finalize(NewEngine), StreamLen, IdenLen, Md5Final, IdenMd5Final}
- end,
- erlang:demonitor(MonRef),
- {stop, normal, Result, Stream}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_info({'DOWN', Ref, _, _, _}, #stream{opener_monitor = Ref} = State) ->
- {stop, normal, State};
-handle_info(_Info, State) ->
- {noreply, State}.
-
-do_seek({Engine, EngineState}, Offset) ->
- {ok, NewState} = Engine:seek(EngineState, Offset),
- {Engine, NewState}.
-
-do_write({Engine, EngineState}, Data) ->
- {ok, NewState} = Engine:write(EngineState, Data),
- {Engine, NewState}.
-
-do_finalize({Engine, EngineState}) ->
- {ok, NewState} = Engine:finalize(EngineState),
- {Engine, NewState}.
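
A minimal round-trip sketch of the gzip framing used by gzip_init/1 and ungzip_init/0 above, where window bits of 16 + 15 select the gzip wrapper on top of deflate. The module name and the compression level 6 are illustrative only.

    -module(couch_stream_gzip_sketch).
    -export([roundtrip/1]).

    %% Compress and then decompress a binary with the same zlib settings
    %% as couch_stream (16 + MAX_WBITS, i.e. gzip framing).
    roundtrip(Data) when is_binary(Data) ->
        Z = zlib:open(),
        ok = zlib:deflateInit(Z, 6, deflated, 16 + 15, 8, default),
        Gz = iolist_to_binary([zlib:deflate(Z, Data), zlib:deflate(Z, [], finish)]),
        ok = zlib:deflateEnd(Z),
        ok = zlib:close(Z),
        U = zlib:open(),
        ok = zlib:inflateInit(U, 16 + 15),
        Plain = iolist_to_binary(zlib:inflate(U, Gz)),
        ok = zlib:inflateEnd(U),
        ok = zlib:close(U),
        Plain =:= Data.
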
diff --git a/src/couch/src/couch_sup.erl b/src/couch/src/couch_sup.erl
deleted file mode 100644
index f13bc9917..000000000
--- a/src/couch/src/couch_sup.erl
+++ /dev/null
@@ -1,170 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_sup).
--behaviour(supervisor).
--vsn(1).
--behaviour(config_listener).
-
--export([
- start_link/0,
- init/1,
- handle_config_change/5,
- handle_config_terminate/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link() ->
- assert_no_monsters(),
- assert_admins(),
- maybe_launch_admin_annoyance_reporter(),
- write_pidfile(),
- notify_starting(),
-
- case supervisor:start_link({local, ?MODULE}, ?MODULE, []) of
- {ok, _} = Resp ->
- notify_started(),
- Resp;
- Else ->
- notify_error(Else),
- Else
- end.
-
-init(_Args) ->
- couch_log:info("Starting ~s", [?MODULE]),
- {ok,
- {{one_for_one, 10, 60}, [
- {
- config_listener_mon,
- {config_listener_mon, start_link, [?MODULE, nil]},
- permanent,
- 5000,
- worker,
- [config_listener_mon]
- },
- {
- couch_primary_services,
- {couch_primary_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [couch_primary_sup]
- },
- {
- couch_secondary_services,
- {couch_secondary_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [couch_secondary_sup]
- }
- ]}}.
-
-handle_config_change("daemons", _, _, _, _) ->
- exit(whereis(?MODULE), shutdown),
- remove_handler;
-handle_config_change("couchdb", "util_driver_dir", _, _, _) ->
- [Pid] = [
- P
- || {collation_driver, P, _, _} <-
- supervisor:which_children(couch_primary_services)
- ],
- Pid ! reload_driver,
- {ok, nil};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, _Reason, _State) ->
- ok.
-
-assert_no_monsters() ->
- couch_log:info("Preflight check: Checking For Monsters~n", []),
- case erlang:get_cookie() of
- monster ->
- couch_log:info(
- "~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n" ++
- " Monster detected ohno!, aborting startup. ~n" ++
- " Please change the Erlang cookie in vm.args to the same ~n" ++
- " securely generated random value on all nodes of this cluster. ~n" ++
- "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n",
- []
- ),
- % Wait half a second so the log message can make it to the log
- timer:sleep(500),
- erlang:halt(1);
- _ ->
- ok
- end.
-
-assert_admins() ->
- couch_log:info("Preflight check: Asserting Admin Account~n", []),
- case {config:get("admins"), os:getenv("COUCHDB_TEST_ADMIN_PARTY_OVERRIDE")} of
- {[], false} ->
- couch_log:info(
- "~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n" ++
- " No Admin Account Found, aborting startup. ~n" ++
- " Please configure an admin account in your local.ini file. ~n" ++
- "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n",
- []
- ),
- % Wait half a second so the log message can make it to the log
- timer:sleep(500),
- erlang:halt(1);
- _ ->
- ok
- end.
-
-send_no_admin_account_error_message() ->
- couch_log:error(
- "No Admin Account configured." ++
- " Please configure an Admin Account in your local.ini file and restart CouchDB.~n",
- []
- ),
- FiveMinutes = 5 * 1000 * 60,
- timer:sleep(FiveMinutes),
- send_no_admin_account_error_message().
-
-maybe_launch_admin_annoyance_reporter() ->
- case os:getenv("COUCHDB_TEST_ADMIN_PARTY_OVERRIDE") of
- false -> ok;
- _ -> spawn_link(fun send_no_admin_account_error_message/0)
- end.
-
-notify_starting() ->
- couch_log:info("Apache CouchDB ~s is starting.~n", [
- couch_server:get_version()
- ]).
-
-notify_started() ->
- couch_log:info("Apache CouchDB has started. Time to relax.~n", []).
-
-notify_error(Error) ->
- couch_log:error("Error starting Apache CouchDB:~n~n ~p~n~n", [Error]).
-
-write_pidfile() ->
- case init:get_argument(pidfile) of
- {ok, [PidFile]} ->
- write_file(PidFile, os:getpid());
- _ ->
- ok
- end.
-
-write_file(FileName, Contents) ->
- case file:write_file(FileName, Contents) of
- ok ->
- ok;
- {error, Reason} ->
- Args = [FileName, file:format_error(Reason)],
- couch_log:error("Failed ot write ~s :: ~s", Args),
- throw({error, Reason})
- end.
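
The two preflight checks above abort startup when the Erlang cookie is still the placeholder value or when no admin account exists. The settings they check live in vm.args and local.ini; the values below are placeholders, not shipped defaults.

    # vm.args: use one securely generated cookie on every node of the cluster
    -setcookie some_generated_secret_value

    ; local.ini: at least one admin account must be configured
    [admins]
    admin = some_admin_password
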
diff --git a/src/couch/src/couch_task_status.erl b/src/couch/src/couch_task_status.erl
deleted file mode 100644
index 42d7c4f62..000000000
--- a/src/couch/src/couch_task_status.erl
+++ /dev/null
@@ -1,154 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_task_status).
--behaviour(gen_server).
--vsn(1).
-
-% This module is used to track the status of long running tasks.
-% Long running tasks register themselves, via a call to add_task/1, and then
-% update their status properties via update/1. The status of a task is a
-% list of properties. Each property is a tuple, with the first element being
-% either an atom or a binary and the second element an EJSON value. When
-% a task updates its status, it can override some or all of its properties.
-% The properties {started_on, UnixTimestamp}, {updated_on, UnixTimestamp} and
-% {pid, ErlangPid} are automatically added by this module.
-% When a tracked task dies, its status will be automatically removed from
-% memory. To get the tasks list, call the all/0 function.
-
--export([start_link/0, stop/0]).
--export([all/0, add_task/1, update/1, get/1, set_update_frequency/1]).
--export([is_task_added/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(set(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-stop() ->
- gen_server:cast(?MODULE, stop).
-
-all() ->
- gen_server:call(?MODULE, all).
-
-add_task(Props) ->
- put(task_status_update, {{0, 0, 0}, 0}),
- Ts = timestamp(),
- TaskProps = lists:ukeysort(
- 1, [{started_on, Ts}, {updated_on, Ts} | Props]
- ),
- put(task_status_props, TaskProps),
- gen_server:call(?MODULE, {add_task, TaskProps}).
-
-is_task_added() ->
- is_list(erlang:get(task_status_props)).
-
-set_update_frequency(Msecs) ->
- put(task_status_update, {{0, 0, 0}, Msecs * 1000}).
-
-update(Props) ->
- MergeProps = lists:ukeysort(1, Props),
- CurrProps = erlang:get(task_status_props),
- TaskProps = lists:ukeymerge(1, MergeProps, CurrProps),
- case TaskProps == CurrProps of
- true ->
- maybe_persist(TaskProps);
- false ->
- persist(TaskProps)
- end.
-
-get(Props) when is_list(Props) ->
- TaskProps = erlang:get(task_status_props),
- [couch_util:get_value(P, TaskProps) || P <- Props];
-get(Prop) ->
- TaskProps = erlang:get(task_status_props),
- couch_util:get_value(Prop, TaskProps).
-
-maybe_persist(TaskProps) ->
- {LastUpdateTime, Frequency} = erlang:get(task_status_update),
- case timer:now_diff(Now = os:timestamp(), LastUpdateTime) >= Frequency of
- true ->
- put(task_status_update, {Now, Frequency}),
- persist(TaskProps);
- false ->
- ok
- end.
-
-persist(TaskProps0) ->
- TaskProps = ?set(TaskProps0, updated_on, timestamp(os:timestamp())),
- put(task_status_props, TaskProps),
- gen_server:cast(?MODULE, {update_status, self(), TaskProps}).
-
-init([]) ->
- % read configuration settings and register for configuration changes
- ets:new(?MODULE, [ordered_set, protected, named_table]),
- {ok, nil}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call({add_task, TaskProps}, {From, _}, Server) ->
- case ets:lookup(?MODULE, From) of
- [] ->
- true = ets:insert(?MODULE, {From, TaskProps}),
- erlang:monitor(process, From),
- {reply, ok, Server};
- [_] ->
- {reply, {add_task_error, already_registered}, Server}
- end;
-handle_call(all, _, Server) ->
- All = [
- [{pid, ?l2b(pid_to_list(Pid))}, process_status(Pid) | TaskProps]
- || {Pid, TaskProps} <- ets:tab2list(?MODULE)
- ],
- {reply, All, Server}.
-
-handle_cast({update_status, Pid, NewProps}, Server) ->
- case ets:lookup(?MODULE, Pid) of
- [{Pid, _CurProps}] ->
- couch_log:debug("New task status for ~p: ~p", [Pid, NewProps]),
- true = ets:insert(?MODULE, {Pid, NewProps});
- _ ->
- % Task finished/died in the meantime and we must have received
- % a monitor message before this call - ignore.
- ok
- end,
- {noreply, Server};
-handle_cast(stop, State) ->
- {stop, normal, State}.
-
-handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) ->
- %% should we also erlang:demonitor(_MonitorRef), ?
- ets:delete(?MODULE, Pid),
- {noreply, Server}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-timestamp() ->
- timestamp(os:timestamp()).
-
-timestamp({Mega, Secs, _}) ->
- Mega * 1000000 + Secs.
-
-process_status(Pid) ->
- case process_info(Pid, status) of
- undefined ->
- {process_status, exiting};
- {status, Status} ->
- {process_status, Status}
- end.
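
A minimal usage sketch of the task-status API documented in the module comment above (add_task/1, set_update_frequency/1, update/1); the task type and property names are illustrative.

    -module(couch_task_status_sketch).
    -export([run/0]).

    %% Register a task, then report progress while working through 100 steps.
    %% The status row is removed automatically when this process exits,
    %% because couch_task_status monitors the registering process.
    run() ->
        ok = couch_task_status:add_task([{type, example_job}, {progress, 0}]),
        %% throttle status writes to at most one every 500 ms
        couch_task_status:set_update_frequency(500),
        lists:foreach(
            fun(I) ->
                timer:sleep(10),
                couch_task_status:update([{progress, I}])
            end,
            lists:seq(1, 100)
        ).
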
diff --git a/src/couch/src/couch_totp.erl b/src/couch/src/couch_totp.erl
deleted file mode 100644
index 3eff9a583..000000000
--- a/src/couch/src/couch_totp.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_totp).
-
--export([generate/5]).
-
-generate(Alg, Key, CounterSecs, StepSecs, OutputLen) when
- is_atom(Alg),
- is_binary(Key),
- is_integer(CounterSecs),
- is_integer(StepSecs),
- is_integer(OutputLen)
-->
- couch_hotp:generate(Alg, Key, CounterSecs div StepSecs, OutputLen).
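
couch_totp:generate/5 simply turns a unix timestamp into an HOTP counter (CounterSecs div StepSecs). A typical call, assuming the conventional 30-second step and 6-digit output (the secret is illustrative):

    %% Current 6-digit TOTP code for a shared secret, SHA-1, 30 s steps.
    Secret = <<"12345678901234567890">>,
    Now = erlang:system_time(second),
    Code = couch_totp:generate(sha, Secret, Now, 30, 6).
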
diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl
deleted file mode 100644
index 7ef3aee78..000000000
--- a/src/couch/src/couch_users_db.erl
+++ /dev/null
@@ -1,228 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_users_db).
-
--export([before_doc_update/3, after_doc_read/2, strip_non_public_fields/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(NAME, <<"name">>).
--define(PASSWORD, <<"password">>).
--define(DERIVED_KEY, <<"derived_key">>).
--define(PASSWORD_SCHEME, <<"password_scheme">>).
--define(SIMPLE, <<"simple">>).
--define(PASSWORD_SHA, <<"password_sha">>).
--define(PBKDF2, <<"pbkdf2">>).
--define(ITERATIONS, <<"iterations">>).
--define(SALT, <<"salt">>).
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
--define(REQUIREMENT_ERROR, "Password does not conform to requirements.").
--define(PASSWORD_SERVER_ERROR, "Server cannot hash passwords at this time.").
-
-% If the request's userCtx identifies an admin
-% -> save_doc (see below)
-%
-% If the request's userCtx.name is null:
-% -> save_doc
-% // this is an anonymous user registering a new document
-% // in case a user doc with the same id already exists, the anonymous
-% // user will get a regular doc update conflict.
-% If the request's userCtx.name doesn't match the doc's name
-% -> 404 // Not Found
-% Else
-% -> save_doc
-before_doc_update(Doc, Db, _UpdateType) ->
- #user_ctx{name = Name} = couch_db:get_user_ctx(Db),
- DocName = get_doc_name(Doc),
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- save_doc(Doc);
- _ when Name =:= DocName orelse Name =:= null ->
- save_doc(Doc);
- _ ->
- throw(not_found)
- end.
-
-% If newDoc.password == null || newDoc.password == undefined:
-% ->
-% noop
-% Else -> // calculate password hash server side
-% newDoc.password_sha = hash_pw(newDoc.password + salt)
-% newDoc.salt = salt
-% newDoc.password = null
-save_doc(#doc{body = {Body}} = Doc) ->
- %% Support both schemes to smooth migration from legacy scheme
- Scheme = chttpd_util:get_chttpd_auth_config("password_scheme", "pbkdf2"),
- case {couch_util:get_value(?PASSWORD, Body), Scheme} of
- % server admins don't have a user-db password entry
- {null, _} ->
- Doc;
- {undefined, _} ->
- Doc;
- % deprecated
- {ClearPassword, "simple"} ->
- ok = validate_password(ClearPassword),
- Salt = couch_uuids:random(),
- PasswordSha = couch_passwords:simple(ClearPassword, Salt),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
- Body1 = ?replace(Body0, ?SALT, Salt),
- Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha),
- Body3 = proplists:delete(?PASSWORD, Body2),
- Doc#doc{body = {Body3}};
- {ClearPassword, "pbkdf2"} ->
- ok = validate_password(ClearPassword),
- Iterations = chttpd_util:get_chttpd_auth_config_integer(
- "iterations", 10
- ),
- Salt = couch_uuids:random(),
- DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2),
- Body1 = ?replace(Body0, ?ITERATIONS, Iterations),
- Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey),
- Body3 = ?replace(Body2, ?SALT, Salt),
- Body4 = proplists:delete(?PASSWORD, Body3),
- Doc#doc{body = {Body4}};
- {_ClearPassword, Scheme} ->
- couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
- throw({forbidden, ?PASSWORD_SERVER_ERROR})
- end.
-
-% Validate that a new password matches every RegExp in the password_regexp setting.
-% Throws if not.
-% This function parses the [couch_httpd_auth] password_regexp config value.
-validate_password(ClearPassword) ->
- case chttpd_util:get_chttpd_auth_config("password_regexp", "") of
- "" ->
- ok;
- "[]" ->
- ok;
- ValidateConfig ->
- RequirementList =
- case couch_util:parse_term(ValidateConfig) of
- {ok, RegExpList} when is_list(RegExpList) ->
- RegExpList;
- {ok, NonListValue} ->
- couch_log:error(
- "[couch_httpd_auth] password_regexp value of '~p'"
- " is not a list.",
- [NonListValue]
- ),
- throw({forbidden, ?PASSWORD_SERVER_ERROR});
- {error, ErrorInfo} ->
- couch_log:error(
- "[couch_httpd_auth] password_regexp value of '~p'"
- " could not get parsed. ~p",
- [ValidateConfig, ErrorInfo]
- ),
- throw({forbidden, ?PASSWORD_SERVER_ERROR})
- end,
- % Check the password on every RegExp.
- lists:foreach(
- fun(RegExpTuple) ->
- case get_password_regexp_and_error_msg(RegExpTuple) of
- {ok, RegExp, PasswordErrorMsg} ->
- check_password(ClearPassword, RegExp, PasswordErrorMsg);
- {error} ->
- couch_log:error(
- "[couch_httpd_auth] password_regexp part of '~p' "
- "is not a RegExp string or "
- "a RegExp and Reason tuple.",
- [RegExpTuple]
- ),
- throw({forbidden, ?PASSWORD_SERVER_ERROR})
- end
- end,
- RequirementList
- ),
- ok
- end.
-
-% Get the RegExp out of the tuple and combine it with the error message.
-% First clause: with a Reason string.
-get_password_regexp_and_error_msg({RegExp, Reason}) when
- is_list(RegExp) andalso is_list(Reason) andalso
- length(Reason) > 0
-->
- {ok, RegExp, lists:concat([?REQUIREMENT_ERROR, " ", Reason])};
-% With an invalid Reason string.
-get_password_regexp_and_error_msg({RegExp, _Reason}) when is_list(RegExp) ->
- {ok, RegExp, ?REQUIREMENT_ERROR};
-% Without a Reason string.
-get_password_regexp_and_error_msg({RegExp}) when is_list(RegExp) ->
- {ok, RegExp, ?REQUIREMENT_ERROR};
-% If the RegExp is only a list/string.
-get_password_regexp_and_error_msg(RegExp) when is_list(RegExp) ->
- {ok, RegExp, ?REQUIREMENT_ERROR};
-% Invalid RegExp value.
-get_password_regexp_and_error_msg(_) ->
- {error}.
-
-% Check whether the password matches a RegExp.
-check_password(Password, RegExp, ErrorMsg) ->
- case re:run(Password, RegExp, [{capture, none}]) of
- match ->
- ok;
- _ ->
- throw({bad_request, ErrorMsg})
- end.
-
-% If the doc is a design doc
-% If the request's userCtx identifies an admin
-% -> return doc
-% Else
-% -> 403 // Forbidden
-% If the request's userCtx identifies an admin
-% -> return doc
-% If the request's userCtx.name doesn't match the doc's name
-% -> 404 // Not Found
-% Else
-% -> return doc
-after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- Doc;
- _ ->
- throw(
- {forbidden, <<"Only administrators can view design docs in the users database.">>}
- )
- end;
-after_doc_read(Doc, Db) ->
- #user_ctx{name = Name} = couch_db:get_user_ctx(Db),
- DocName = get_doc_name(Doc),
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- Doc;
- _ when Name =:= DocName ->
- Doc;
- _ ->
- Doc1 = strip_non_public_fields(Doc),
- case Doc1 of
- #doc{body = {[]}} ->
- throw(not_found);
- _ ->
- Doc1
- end
- end.
-
-get_doc_name(#doc{id = <<"org.couchdb.user:", Name/binary>>}) ->
- Name;
-get_doc_name(_) ->
- undefined.
-
-strip_non_public_fields(#doc{body = {Props}} = Doc) ->
- Public = re:split(
- chttpd_util:get_chttpd_auth_config("public_fields", ""),
- "\\s*,\\s*",
- [{return, binary}]
- ),
- Doc#doc{body = {[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.
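
validate_password/1 above parses the password_regexp setting with couch_util:parse_term/1, so the value must be an Erlang list whose elements are either a regex string or a {Regex, Reason} tuple. An illustrative local.ini entry (the patterns and wording are examples, not defaults):

    [couch_httpd_auth]
    ; require at least 10 characters and at least one digit
    password_regexp = [{".{10,}", "Minimum length is 10 characters."}, "\\d"]
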
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
deleted file mode 100644
index 912c6dd8a..000000000
--- a/src/couch/src/couch_util.erl
+++ /dev/null
@@ -1,817 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_util).
-
--export([priv_dir/0, normpath/1, fold_files/5]).
--export([should_flush/0, should_flush/1, to_existing_atom/1]).
--export([rand32/0, implode/2]).
--export([abs_pathname/1, abs_pathname/2, trim/1, drop_dot_couch_ext/1]).
--export([encodeBase64Url/1, decodeBase64Url/1]).
--export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
--export([get_nested_json_value/2, json_user_ctx/1]).
--export([proplist_apply_field/2, json_apply_field/2]).
--export([to_binary/1, to_integer/1, to_list/1, url_encode/1]).
--export([json_encode/1, json_decode/1, json_decode/2]).
--export([verify/2, simple_call/2, shutdown_sync/1]).
--export([get_value/2, get_value/3]).
--export([reorder_results/2, reorder_results/3]).
--export([url_strip_password/1]).
--export([encode_doc_id/1]).
--export([normalize_ddoc_id/1]).
--export([with_db/2]).
--export([rfc1123_date/0, rfc1123_date/1]).
--export([integer_to_boolean/1, boolean_to_integer/1]).
--export([validate_positive_int/1]).
--export([find_in_binary/2]).
--export([callback_exists/3, validate_callback_exists/3]).
--export([with_proc/4]).
--export([process_dict_get/2, process_dict_get/3]).
--export([unique_monotonic_integer/0]).
--export([check_config_blacklist/1]).
--export([check_md5/2]).
--export([set_mqd_off_heap/1]).
--export([set_process_priority/2]).
--export([hmac/3]).
--export([version_to_binary/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% arbitrarily chosen amount of memory to use before flushing to disk
--define(FLUSH_MAX_MEM, 10000000).
-
--define(BLACKLIST_CONFIG_SECTIONS, [
- <<"daemons">>,
- <<"external">>,
- <<"httpd_design_handlers">>,
- <<"httpd_db_handlers">>,
- <<"httpd_global_handlers">>,
- <<"native_query_servers">>,
- <<"os_daemons">>,
- <<"query_servers">>,
- <<"feature_flags">>
-]).
-
-priv_dir() ->
- case code:priv_dir(couch) of
- {error, bad_name} ->
- % small hack, in dev mode "app" is couchdb. Fixing requires
- % renaming src/couch to src/couchdb. Not really worth the hassle.
- % -Damien
- code:priv_dir(couchdb);
- Dir ->
- Dir
- end.
-
-% Normalize a pathname by removing .. and . components.
-normpath(Path) ->
- normparts(filename:split(Path), []).
-
-normparts([], Acc) ->
- filename:join(lists:reverse(Acc));
-normparts([".." | RestParts], [_Drop | RestAcc]) ->
- normparts(RestParts, RestAcc);
-normparts(["." | RestParts], Acc) ->
- normparts(RestParts, Acc);
-normparts([Part | RestParts], Acc) ->
- normparts(RestParts, [Part | Acc]).
-
-% This implementation is similar to the builtin filelib:fold_files/5
-% except that this version will run the user supplied function
-% on directories that match the regular expression as well.
-%
-% This is motivated by the case when couch_server is searching
-% for pluggable storage engines. This change allows a
-% database to be either a file or a directory.
-fold_files(Dir, RegExp, Recursive, Fun, Acc) ->
- {ok, Re} = re:compile(RegExp, [unicode]),
- fold_files1(Dir, Re, Recursive, Fun, Acc).
-
-fold_files1(Dir, RegExp, Recursive, Fun, Acc) ->
- case file:list_dir(Dir) of
- {ok, Files} ->
- fold_files2(Files, Dir, RegExp, Recursive, Fun, Acc);
- {error, _} ->
- Acc
- end.
-
-fold_files2([], _Dir, _RegExp, _Recursive, _Fun, Acc) ->
- Acc;
-fold_files2([File | Rest], Dir, RegExp, Recursive, Fun, Acc0) ->
- FullName = filename:join(Dir, File),
- case (catch re:run(File, RegExp, [{capture, none}])) of
- match ->
- Acc1 = Fun(FullName, Acc0),
- fold_files2(Rest, Dir, RegExp, Recursive, Fun, Acc1);
- _ ->
- case Recursive andalso filelib:is_dir(FullName) of
- true ->
- Acc1 = fold_files1(FullName, RegExp, Recursive, Fun, Acc0),
- fold_files2(Rest, Dir, RegExp, Recursive, Fun, Acc1);
- false ->
- fold_files2(Rest, Dir, RegExp, Recursive, Fun, Acc0)
- end
- end.
-
-% Works like list_to_existing_atom/1, except the input can be a list or binary
-% and it returns the original value instead of an error if no existing atom.
-to_existing_atom(V) when is_list(V) ->
- try
- list_to_existing_atom(V)
- catch
- _:_ -> V
- end;
-to_existing_atom(V) when is_binary(V) ->
- try
- list_to_existing_atom(?b2l(V))
- catch
- _:_ -> V
- end;
-to_existing_atom(V) when is_atom(V) ->
- V.
-
-shutdown_sync(Pid) when not is_pid(Pid) ->
- ok;
-shutdown_sync(Pid) ->
- MRef = erlang:monitor(process, Pid),
- try
- catch unlink(Pid),
- catch exit(Pid, shutdown),
- receive
- {'DOWN', MRef, _, _, _} ->
- ok
- end
- after
- erlang:demonitor(MRef, [flush])
- end.
-
-simple_call(Pid, Message) ->
- MRef = erlang:monitor(process, Pid),
- try
- Pid ! {self(), Message},
- receive
- {Pid, Result} ->
- Result;
- {'DOWN', MRef, _, _, Reason} ->
- exit(Reason)
- end
- after
- erlang:demonitor(MRef, [flush])
- end.
-
-validate_utf8(Data) when is_list(Data) ->
- validate_utf8(?l2b(Data));
-validate_utf8(Bin) when is_binary(Bin) ->
- validate_utf8_fast(Bin, 0).
-
-validate_utf8_fast(B, O) ->
- case B of
- <<_:O/binary>> ->
- true;
- <<_:O/binary, C1, _/binary>> when
- C1 < 128
- ->
- validate_utf8_fast(B, 1 + O);
- <<_:O/binary, C1, C2, _/binary>> when
- C1 >= 194,
- C1 =< 223,
- C2 >= 128,
- C2 =< 191
- ->
- validate_utf8_fast(B, 2 + O);
- <<_:O/binary, C1, C2, C3, _/binary>> when
- C1 >= 224,
- C1 =< 239,
- C2 >= 128,
- C2 =< 191,
- C3 >= 128,
- C3 =< 191
- ->
- validate_utf8_fast(B, 3 + O);
- <<_:O/binary, C1, C2, C3, C4, _/binary>> when
- C1 >= 240,
- C1 =< 244,
- C2 >= 128,
- C2 =< 191,
- C3 >= 128,
- C3 =< 191,
- C4 >= 128,
- C4 =< 191
- ->
- validate_utf8_fast(B, 4 + O);
- _ ->
- false
- end.
-
-to_hex(<<Hi:4, Lo:4, Rest/binary>>) ->
- [nibble_to_hex(Hi), nibble_to_hex(Lo) | to_hex(Rest)];
-to_hex(<<>>) ->
- [];
-to_hex(List) when is_list(List) ->
- to_hex(list_to_binary(List)).
-
-nibble_to_hex(0) -> $0;
-nibble_to_hex(1) -> $1;
-nibble_to_hex(2) -> $2;
-nibble_to_hex(3) -> $3;
-nibble_to_hex(4) -> $4;
-nibble_to_hex(5) -> $5;
-nibble_to_hex(6) -> $6;
-nibble_to_hex(7) -> $7;
-nibble_to_hex(8) -> $8;
-nibble_to_hex(9) -> $9;
-nibble_to_hex(10) -> $a;
-nibble_to_hex(11) -> $b;
-nibble_to_hex(12) -> $c;
-nibble_to_hex(13) -> $d;
-nibble_to_hex(14) -> $e;
-nibble_to_hex(15) -> $f.
-
-parse_term(Bin) when is_binary(Bin) ->
- parse_term(binary_to_list(Bin));
-parse_term(List) ->
- {ok, Tokens, _} = erl_scan:string(List ++ "."),
- erl_parse:parse_term(Tokens).
-
-get_value(Key, List) ->
- get_value(Key, List, undefined).
-
-get_value(Key, List, Default) ->
- case lists:keysearch(Key, 1, List) of
- {value, {Key, Value}} ->
- Value;
- false ->
- Default
- end.
-
-get_nested_json_value({Props}, [Key | Keys]) ->
- case couch_util:get_value(Key, Props, nil) of
- nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
- Value -> get_nested_json_value(Value, Keys)
- end;
-get_nested_json_value(Value, []) ->
- Value;
-get_nested_json_value(_NotJSONObj, _) ->
- throw({not_found, json_mismatch}).
-
-proplist_apply_field(H, L) ->
- {R} = json_apply_field(H, {L}),
- R.
-
-json_apply_field(H, {L}) ->
- json_apply_field(H, L, []).
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
- json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
- json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
- {[{Key, NewValue} | Acc]}.
-
-json_user_ctx(Db) ->
- ShardName = couch_db:name(Db),
- Ctx = couch_db:get_user_ctx(Db),
- {[
- {<<"db">>, mem3:dbname(ShardName)},
- {<<"name">>, Ctx#user_ctx.name},
- {<<"roles">>, Ctx#user_ctx.roles}
- ]}.
-
-% returns a random 32-bit integer
-rand32() ->
- <<I:32>> = crypto:strong_rand_bytes(4),
- I.
-
-% given a pathname "../foo/bar/" it gives back the fully qualified
-% absolute pathname.
-abs_pathname(" " ++ Filename) ->
- % strip leading whitespace
- abs_pathname(Filename);
-abs_pathname([$/ | _] = Filename) ->
- Filename;
-abs_pathname(Filename) ->
- {ok, Cwd} = file:get_cwd(),
- {Filename2, Args} = separate_cmd_args(Filename, ""),
- abs_pathname(Filename2, Cwd) ++ Args.
-
-abs_pathname(Filename, Dir) ->
- Name = filename:absname(Filename, Dir ++ "/"),
- OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
- % If the filename is a dir (last char is a slash), put back the trailing slash
- case string:right(Filename, 1) of
- "/" ->
- OutFilename ++ "/";
- "\\" ->
- OutFilename ++ "/";
- _Else ->
- OutFilename
- end.
-
-% if this is an executable with arguments, separate out the arguments
-% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
-separate_cmd_args("", CmdAcc) ->
- {lists:reverse(CmdAcc), ""};
-% handle escaped space
-separate_cmd_args("\\ " ++ Rest, CmdAcc) ->
- separate_cmd_args(Rest, " \\" ++ CmdAcc);
-separate_cmd_args(" " ++ Rest, CmdAcc) ->
- {lists:reverse(CmdAcc), " " ++ Rest};
-separate_cmd_args([Char | Rest], CmdAcc) ->
- separate_cmd_args(Rest, [Char | CmdAcc]).
-
-% Is a character whitespace (from https://en.wikipedia.org/wiki/Whitespace_character#Unicode)?
-is_whitespace(9) -> true;
-is_whitespace(10) -> true;
-is_whitespace(11) -> true;
-is_whitespace(12) -> true;
-is_whitespace(13) -> true;
-is_whitespace(32) -> true;
-is_whitespace(133) -> true;
-is_whitespace(160) -> true;
-is_whitespace(5760) -> true;
-is_whitespace(8192) -> true;
-is_whitespace(8193) -> true;
-is_whitespace(8194) -> true;
-is_whitespace(8195) -> true;
-is_whitespace(8196) -> true;
-is_whitespace(8197) -> true;
-is_whitespace(8198) -> true;
-is_whitespace(8199) -> true;
-is_whitespace(8200) -> true;
-is_whitespace(8201) -> true;
-is_whitespace(8202) -> true;
-is_whitespace(8232) -> true;
-is_whitespace(8233) -> true;
-is_whitespace(8239) -> true;
-is_whitespace(8287) -> true;
-is_whitespace(12288) -> true;
-is_whitespace(6158) -> true;
-is_whitespace(8203) -> true;
-is_whitespace(8204) -> true;
-is_whitespace(8205) -> true;
-is_whitespace(8288) -> true;
-is_whitespace(65279) -> true;
-is_whitespace(_Else) -> false.
-
-% removes leading and trailing whitespace from a string
-trim(String) when is_binary(String) ->
- % mirror string:trim() behaviour of returning a binary when a binary is passed in
- ?l2b(trim(?b2l(String)));
-trim(String) ->
- String2 = lists:dropwhile(fun is_whitespace/1, String),
- lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
-
-drop_dot_couch_ext(DbName) when is_binary(DbName) ->
- PrefixLen = size(DbName) - 6,
- case DbName of
- <<Prefix:PrefixLen/binary, ".couch">> ->
- Prefix;
- Else ->
- Else
- end;
-drop_dot_couch_ext(DbName) when is_list(DbName) ->
- binary_to_list(drop_dot_couch_ext(iolist_to_binary(DbName))).
-
-% takes a hierarchical list of dirs and removes the dots ".", double dots
-% ".." and the corresponding parent dirs.
-fix_path_list([], Acc) ->
- lists:reverse(Acc);
-fix_path_list([".." | Rest], [_PrevAcc | RestAcc]) ->
- fix_path_list(Rest, RestAcc);
-fix_path_list(["." | Rest], Acc) ->
- fix_path_list(Rest, Acc);
-fix_path_list([Dir | Rest], Acc) ->
- fix_path_list(Rest, [Dir | Acc]).
-
-implode(List, Sep) ->
- implode(List, Sep, []).
-
-implode([], _Sep, Acc) ->
- lists:flatten(lists:reverse(Acc));
-implode([H], Sep, Acc) ->
- implode([], Sep, [H | Acc]);
-implode([H | T], Sep, Acc) ->
- implode(T, Sep, [Sep, H | Acc]).
-
-should_flush() ->
- should_flush(?FLUSH_MAX_MEM).
-
-should_flush(MemThreshHold) ->
- {memory, ProcMem} = process_info(self(), memory),
- BinMem = lists:foldl(
- fun({_Id, Size, _NRefs}, Acc) -> Size + Acc end,
- 0,
- element(2, process_info(self(), binary))
- ),
- if
- ProcMem + BinMem > 2 * MemThreshHold ->
- garbage_collect(),
- {memory, ProcMem2} = process_info(self(), memory),
- BinMem2 = lists:foldl(
- fun({_Id, Size, _NRefs}, Acc) -> Size + Acc end,
- 0,
- element(2, process_info(self(), binary))
- ),
- ProcMem2 + BinMem2 > MemThreshHold;
- true ->
- false
- end.
-
-encodeBase64Url(Url) ->
- b64url:encode(Url).
-
-decodeBase64Url(Url64) ->
- b64url:decode(Url64).
-
-dict_find(Key, Dict, DefaultValue) ->
- case dict:find(Key, Dict) of
- {ok, Value} ->
- Value;
- error ->
- DefaultValue
- end.
-
-to_binary(V) when is_binary(V) ->
- V;
-to_binary(V) when is_list(V) ->
- try
- list_to_binary(V)
- catch
- _:_ ->
- list_to_binary(io_lib:format("~p", [V]))
- end;
-to_binary(V) when is_atom(V) ->
- list_to_binary(atom_to_list(V));
-to_binary(V) ->
- list_to_binary(io_lib:format("~p", [V])).
-
-to_integer(V) when is_integer(V) ->
- V;
-to_integer(V) when is_list(V) ->
- erlang:list_to_integer(V);
-to_integer(V) when is_binary(V) ->
- erlang:list_to_integer(binary_to_list(V)).
-
-to_list(V) when is_list(V) ->
- V;
-to_list(V) when is_binary(V) ->
- binary_to_list(V);
-to_list(V) when is_atom(V) ->
- atom_to_list(V);
-to_list(V) ->
- lists:flatten(io_lib:format("~p", [V])).
-
-url_encode(Bin) when is_binary(Bin) ->
- url_encode(binary_to_list(Bin));
-url_encode([H | T]) ->
- if
- H >= $a, $z >= H ->
- [H | url_encode(T)];
- H >= $A, $Z >= H ->
- [H | url_encode(T)];
- H >= $0, $9 >= H ->
- [H | url_encode(T)];
- H == $_; H == $.; H == $-; H == $: ->
- [H | url_encode(T)];
- true ->
- case lists:flatten(io_lib:format("~.16.0B", [H])) of
- [X, Y] ->
- [$%, X, Y | url_encode(T)];
- [X] ->
- [$%, $0, X | url_encode(T)]
- end
- end;
-url_encode([]) ->
- [].
-
-json_encode(V) ->
- jiffy:encode(V, [force_utf8]).
-
-json_decode(V) ->
- json_decode(V, []).
-
-json_decode(V, Opts) ->
- try
- jiffy:decode(V, [dedupe_keys | Opts])
- catch
- error:Error ->
- throw({invalid_json, Error})
- end.
-
-verify([X | RestX], [Y | RestY], Result) ->
- verify(RestX, RestY, (X bxor Y) bor Result);
-verify([], [], Result) ->
- Result == 0.
-
-verify(<<X/binary>>, <<Y/binary>>) ->
- verify(?b2l(X), ?b2l(Y));
-verify(X, Y) when is_list(X) and is_list(Y) ->
- case length(X) == length(Y) of
- true ->
- verify(X, Y, 0);
- false ->
- false
- end;
-verify(_X, _Y) ->
- false.
-
-reorder_results(Keys, SortedResults) ->
- Map = maps:from_list(SortedResults),
- [maps:get(Key, Map) || Key <- Keys].
-
-reorder_results(Keys, SortedResults, Default) ->
- Map = maps:from_list(SortedResults),
- [maps:get(Key, Map, Default) || Key <- Keys].
-
-url_strip_password(Url) ->
- re:replace(
- Url,
- "(http|https|socks5)://([^:]+):[^@]+@(.*)$",
- "\\1://\\2:*****@\\3",
- [{return, list}]
- ).
-
-encode_doc_id(#doc{id = Id}) ->
- encode_doc_id(Id);
-encode_doc_id(Id) when is_list(Id) ->
- encode_doc_id(?l2b(Id));
-encode_doc_id(<<"_design/", Rest/binary>>) ->
- "_design/" ++ url_encode(Rest);
-encode_doc_id(<<"_local/", Rest/binary>>) ->
- "_local/" ++ url_encode(Rest);
-encode_doc_id(Id) ->
- url_encode(Id).
-
-normalize_ddoc_id(<<"_design/", _/binary>> = DDocId) ->
- DDocId;
-normalize_ddoc_id(DDocId) when is_binary(DDocId) ->
- <<"_design/", DDocId/binary>>.
-
-with_db(DbName, Fun) when is_binary(DbName) ->
- case couch_db:open_int(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- try
- Fun(Db)
- after
- catch couch_db:close(Db)
- end;
- Else ->
- throw(Else)
- end;
-with_db(Db, Fun) ->
- case couch_db:is_db(Db) of
- true ->
- Fun(Db);
- false ->
- erlang:error({invalid_db, Db})
- end.
-
-rfc1123_date() ->
- {{YYYY, MM, DD}, {Hour, Min, Sec}} = calendar:universal_time(),
- DayNumber = calendar:day_of_the_week({YYYY, MM, DD}),
- lists:flatten(
- io_lib:format(
- "~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
- [day(DayNumber), DD, month(MM), YYYY, Hour, Min, Sec]
- )
- ).
-
-rfc1123_date(undefined) ->
- undefined;
-rfc1123_date(UniversalTime) ->
- {{YYYY, MM, DD}, {Hour, Min, Sec}} = UniversalTime,
- DayNumber = calendar:day_of_the_week({YYYY, MM, DD}),
- lists:flatten(
- io_lib:format(
- "~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
- [day(DayNumber), DD, month(MM), YYYY, Hour, Min, Sec]
- )
- ).
-
-%% day
-
-day(1) -> "Mon";
-day(2) -> "Tue";
-day(3) -> "Wed";
-day(4) -> "Thu";
-day(5) -> "Fri";
-day(6) -> "Sat";
-day(7) -> "Sun".
-
-%% month
-
-month(1) -> "Jan";
-month(2) -> "Feb";
-month(3) -> "Mar";
-month(4) -> "Apr";
-month(5) -> "May";
-month(6) -> "Jun";
-month(7) -> "Jul";
-month(8) -> "Aug";
-month(9) -> "Sep";
-month(10) -> "Oct";
-month(11) -> "Nov";
-month(12) -> "Dec".
-
-integer_to_boolean(1) ->
- true;
-integer_to_boolean(0) ->
- false.
-
-boolean_to_integer(true) ->
- 1;
-boolean_to_integer(false) ->
- 0.
-
-validate_positive_int(N) when is_list(N) ->
- try
- I = list_to_integer(N),
- validate_positive_int(I)
- catch
- error:badarg ->
- false
- end;
-validate_positive_int(N) when is_integer(N), N > 0 -> true;
-validate_positive_int(_) ->
- false.
-
-find_in_binary(_B, <<>>) ->
- not_found;
-find_in_binary(B, Data) ->
- case binary:match(Data, [B], []) of
- nomatch ->
- MatchLength = erlang:min(byte_size(B), byte_size(Data)),
- match_prefix_at_end(
- binary:part(B, {0, MatchLength}),
- binary:part(Data, {byte_size(Data), -MatchLength}),
- MatchLength,
- byte_size(Data) - MatchLength
- );
- {Pos, _Len} ->
- {exact, Pos}
- end.
-
-match_prefix_at_end(Prefix, Data, PrefixLength, N) ->
- FirstCharMatches = binary:matches(Data, [binary:part(Prefix, {0, 1})], []),
- match_rest_of_prefix(FirstCharMatches, Prefix, Data, PrefixLength, N).
-
-match_rest_of_prefix([], _Prefix, _Data, _PrefixLength, _N) ->
- not_found;
-match_rest_of_prefix([{Pos, _Len} | Rest], Prefix, Data, PrefixLength, N) ->
- case
- binary:match(
- binary:part(Data, {PrefixLength, Pos - PrefixLength}),
- [binary:part(Prefix, {0, PrefixLength - Pos})],
- []
- )
- of
- nomatch ->
- match_rest_of_prefix(Rest, Prefix, Data, PrefixLength, N);
- {_Pos, _Len1} ->
- {partial, N + Pos}
- end.
-
-callback_exists(Module, Function, Arity) ->
- case ensure_loaded(Module) of
- true ->
- InfoList = Module:module_info(exports),
- lists:member({Function, Arity}, InfoList);
- false ->
- false
- end.
-
-validate_callback_exists(Module, Function, Arity) ->
- case callback_exists(Module, Function, Arity) of
- true ->
- ok;
- false ->
- CallbackStr = lists:flatten(
- io_lib:format("~w:~w/~w", [Module, Function, Arity])
- ),
- throw({error, {undefined_callback, CallbackStr, {Module, Function, Arity}}})
- end.
-
-check_md5(_NewSig, <<>>) -> ok;
-check_md5(Sig, Sig) -> ok;
-check_md5(_, _) -> throw(md5_mismatch).
-
-set_mqd_off_heap(Module) ->
- case config:get_boolean("off_heap_mqd", atom_to_list(Module), true) of
- true ->
- try
- erlang:process_flag(message_queue_data, off_heap),
- ok
- catch
- error:badarg ->
- ok
- end;
- false ->
- ok
- end.
-
-set_process_priority(Module, Level) ->
- case config:get_boolean("process_priority", atom_to_list(Module), false) of
- true ->
- process_flag(priority, Level),
- ok;
- false ->
- ok
- end.
-
-ensure_loaded(Module) when is_atom(Module) ->
- case code:ensure_loaded(Module) of
- {module, Module} ->
- true;
- {error, embedded} ->
- true;
- {error, _} ->
- false
- end;
-ensure_loaded(_Module) ->
- false.
-
-%% This is especially useful in gen_servers when you need to call
-%% a function that does a receive, as doing so inline would hijack incoming messages.
-with_proc(M, F, A, Timeout) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- exit({reply, erlang:apply(M, F, A)})
- end),
- receive
- {'DOWN', Ref, process, Pid, {reply, Resp}} ->
- {ok, Resp};
- {'DOWN', Ref, process, Pid, Error} ->
- {error, Error}
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- {error, timeout}
- end.
-
-process_dict_get(Pid, Key) ->
- process_dict_get(Pid, Key, undefined).
-
-process_dict_get(Pid, Key, DefaultValue) ->
- case process_info(Pid, dictionary) of
- {dictionary, Dict} ->
- case lists:keyfind(Key, 1, Dict) of
- false ->
- DefaultValue;
- {Key, Value} ->
- Value
- end;
- undefined ->
- DefaultValue
- end.
-
-unique_monotonic_integer() ->
- erlang:unique_integer([monotonic, positive]).
-
-check_config_blacklist(Section) ->
- case lists:member(Section, ?BLACKLIST_CONFIG_SECTIONS) of
- true ->
- Msg = <<"Config section blacklisted for modification over HTTP API.">>,
- throw({forbidden, Msg});
- _ ->
- ok
- end.
-
--ifdef(OTP_RELEASE).
-
--if(?OTP_RELEASE >= 22).
-
-% OTP >= 22
-hmac(Alg, Key, Message) ->
- crypto:mac(hmac, Alg, Key, Message).
-
--else.
-
-% OTP >= 21, < 22
-hmac(Alg, Key, Message) ->
- crypto:hmac(Alg, Key, Message).
-
-% -if(?OTP_RELEASE >= 22)
--endif.
-
--else.
-
-% OTP < 21
-hmac(Alg, Key, Message) ->
- crypto:hmac(Alg, Key, Message).
-
-% -ifdef(OTP_RELEASE)
--endif.
-
-version_to_binary(Ver) when is_tuple(Ver) ->
- version_to_binary(tuple_to_list(Ver));
-version_to_binary(Ver) when is_list(Ver) ->
- IsZero = fun(N) -> N == 0 end,
- Ver1 = lists:reverse(lists:dropwhile(IsZero, lists:reverse(Ver))),
- Ver2 = [erlang:integer_to_list(N) || N <- Ver1],
- ?l2b(lists:join(".", Ver2)).
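The helpers above are mostly pure functions, so their behaviour can be illustrated with a few direct calls. The snippet below is an illustrative sketch (it assumes an Erlang shell with the couch application's code path loaded); the expected results follow from the clauses shown above.

    %% Illustrative calls against couch_util; each pattern match asserts the
    %% result implied by the clause definitions in the diff above.
    "ff00" = couch_util:to_hex(<<255, 0>>),
    <<"hi">> = couch_util:trim(<<"  hi  ">>),
    <<"movies">> = couch_util:drop_dot_couch_ext(<<"movies.couch">>),
    "a/c/d" = couch_util:normpath("a/b/../c/./d"),
    "a, b, c" = couch_util:implode(["a", "b", "c"], ", "),
    <<"3.2">> = couch_util:version_to_binary({3, 2, 0}).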
diff --git a/src/couch/src/couch_uuids.erl b/src/couch/src/couch_uuids.erl
deleted file mode 100644
index be6089dff..000000000
--- a/src/couch/src/couch_uuids.erl
+++ /dev/null
@@ -1,188 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_uuids).
--include_lib("couch/include/couch_db.hrl").
-
--behaviour(gen_server).
--vsn(3).
--behaviour(config_listener).
-
--export([start/0, stop/0]).
--export([new/0, random/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
--define(RELISTEN_DELAY, 5000).
-
-start() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-stop() ->
- gen_server:cast(?MODULE, stop).
-
-new() ->
- gen_server:call(?MODULE, create).
-
-random() ->
- list_to_binary(couch_util:to_hex(crypto:strong_rand_bytes(16))).
-
-init([]) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {ok, state()}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(create, _From, random) ->
- {reply, random(), random};
-handle_call(create, _From, {utc_random, ClockSeq}) ->
- {UtcRandom, NewClockSeq} = utc_random(ClockSeq),
- {reply, UtcRandom, {utc_random, NewClockSeq}};
-handle_call(create, _From, {utc_id, UtcIdSuffix, ClockSeq}) ->
- Now = os:timestamp(),
- {UtcId, NewClockSeq} = utc_suffix(UtcIdSuffix, ClockSeq, Now),
- {reply, UtcId, {utc_id, UtcIdSuffix, NewClockSeq}};
-handle_call(create, _From, {sequential, Pref, Seq}) ->
- Result = ?l2b(Pref ++ io_lib:format("~6.16.0b", [Seq])),
- case Seq >= 16#fff000 of
- true ->
- {reply, Result, {sequential, new_prefix(), inc()}};
- _ ->
- {reply, Result, {sequential, Pref, Seq + inc()}}
- end.
-
-handle_cast(change, _State) ->
- {noreply, state()};
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(_Info, State) ->
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_config_change("uuids", _, _, _, _) ->
- {ok, gen_server:cast(?MODULE, change)};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- gen_server:cast(?MODULE, change),
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-new_prefix() ->
- couch_util:to_hex((crypto:strong_rand_bytes(13))).
-
-inc() ->
- couch_rand:uniform(16#ffd).
-
-state() ->
- AlgoStr = config:get("uuids", "algorithm", "sequential"),
- case couch_util:to_existing_atom(AlgoStr) of
- random ->
- random;
- utc_random ->
- ClockSeq = micros_since_epoch(os:timestamp()),
- {utc_random, ClockSeq};
- utc_id ->
- ClockSeq = micros_since_epoch(os:timestamp()),
- UtcIdSuffix = config:get("uuids", "utc_id_suffix", ""),
- {utc_id, UtcIdSuffix, ClockSeq};
- sequential ->
- {sequential, new_prefix(), inc()};
- Unknown ->
- throw({unknown_uuid_algorithm, Unknown})
- end.
-
-micros_since_epoch({_, _, Micro} = Now) ->
- Nowish = calendar:now_to_universal_time(Now),
- Nowsecs = calendar:datetime_to_gregorian_seconds(Nowish),
- Then = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
- (Nowsecs - Then) * 1000000 + Micro.
-
-utc_random(ClockSeq) ->
- Suffix = couch_util:to_hex(crypto:strong_rand_bytes(9)),
- utc_suffix(Suffix, ClockSeq, os:timestamp()).
-
-utc_suffix(Suffix, ClockSeq, Now) ->
- OsMicros = micros_since_epoch(Now),
- NewClockSeq =
- if
- OsMicros =< ClockSeq ->
- % Timestamp is lagging, use ClockSeq as Timestamp
- ClockSeq + 1;
- OsMicros > ClockSeq ->
- % Timestamp advanced, use it, and reset ClockSeq with it
- OsMicros
- end,
- Prefix = io_lib:format("~14.16.0b", [NewClockSeq]),
- {list_to_binary(Prefix ++ Suffix), NewClockSeq}.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-utc_id_time_does_not_advance_test() ->
- % Timestamp didn't advance, but the local clock sequence should, and new
- % UUIDs should be generated
- Now = {0, 1, 2},
- ClockSeq0 = micros_since_epoch({3, 4, 5}),
- {UtcId0, ClockSeq1} = utc_suffix("", ClockSeq0, Now),
- ?assert(is_binary(UtcId0)),
- ?assertEqual(ClockSeq0 + 1, ClockSeq1),
- {UtcId1, ClockSeq2} = utc_suffix("", ClockSeq1, Now),
- ?assertNotEqual(UtcId0, UtcId1),
- ?assertEqual(ClockSeq1 + 1, ClockSeq2).
-
-utc_id_time_advanced_test() ->
- % Timestamp advanced: a new UUID is generated and the last clock sequence
- % is updated to that timestamp.
- Now0 = {0, 1, 2},
- ClockSeq0 = micros_since_epoch({3, 4, 5}),
- {UtcId0, ClockSeq1} = utc_suffix("", ClockSeq0, Now0),
- ?assert(is_binary(UtcId0)),
- ?assertEqual(ClockSeq0 + 1, ClockSeq1),
- Now1 = {9, 9, 9},
- {UtcId1, ClockSeq2} = utc_suffix("", ClockSeq1, Now1),
- ?assert(is_binary(UtcId1)),
- ?assertNotEqual(UtcId0, UtcId1),
- ?assertEqual(micros_since_epoch(Now1), ClockSeq2).
-
-utc_random_test_time_does_not_advance_test() ->
- {MSec, Sec, USec} = os:timestamp(),
- Future = {MSec + 10, Sec, USec},
- ClockSeqFuture = micros_since_epoch(Future),
- {UtcRandom, NextClockSeq} = utc_random(ClockSeqFuture),
- ?assert(is_binary(UtcRandom)),
- ?assertEqual(32, byte_size(UtcRandom)),
- ?assertEqual(ClockSeqFuture + 1, NextClockSeq).
-
-utc_random_test_time_advance_test() ->
- ClockSeqPast = micros_since_epoch({1, 1, 1}),
- {UtcRandom, NextClockSeq} = utc_random(ClockSeqPast),
- ?assert(is_binary(UtcRandom)),
- ?assertEqual(32, byte_size(UtcRandom)),
- ?assert(NextClockSeq > micros_since_epoch({1000, 0, 0})).
-
--endif.
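The UUID algorithm is chosen via the [uuids] config section and re-read whenever that section changes. A minimal usage sketch, assuming a node where the config and couch applications are already running:

    %% Sketch only: assumes config and the couch_uuids server are started.
    ok = config:set("uuids", "algorithm", "utc_random", false),
    Uuid = couch_uuids:new(),
    %% random, utc_random and sequential all yield 32 lowercase hex characters
    true = is_binary(Uuid) andalso byte_size(Uuid) =:= 32.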
diff --git a/src/couch/src/couch_work_queue.erl b/src/couch/src/couch_work_queue.erl
deleted file mode 100644
index d767a33be..000000000
--- a/src/couch/src/couch_work_queue.erl
+++ /dev/null
@@ -1,174 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_work_queue).
--behaviour(gen_server).
--vsn(1).
-
--include_lib("couch/include/couch_db.hrl").
-
-% public API
--export([new/1, queue/2, dequeue/1, dequeue/2, close/1, item_count/1, size/1]).
-
-% gen_server callbacks
--export([init/1, terminate/2]).
--export([handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
-
--record(q, {
- queue = queue:new(),
- blocked = [],
- max_size,
- max_items,
- items = 0,
- size = 0,
- work_waiters = [],
- close_on_dequeue = false,
- multi_workers = false
-}).
-
-new(Options) ->
- gen_server:start_link(couch_work_queue, Options, []).
-
-queue(Wq, Item) when is_binary(Item) ->
- gen_server:call(Wq, {queue, Item, byte_size(Item)}, infinity);
-queue(Wq, Item) ->
- gen_server:call(Wq, {queue, Item, ?term_size(Item)}, infinity).
-
-dequeue(Wq) ->
- dequeue(Wq, all).
-
-dequeue(Wq, MaxItems) ->
- try
- gen_server:call(Wq, {dequeue, MaxItems}, infinity)
- catch
- _:_ -> closed
- end.
-
-item_count(Wq) ->
- try
- gen_server:call(Wq, item_count, infinity)
- catch
- _:_ -> closed
- end.
-
-size(Wq) ->
- try
- gen_server:call(Wq, size, infinity)
- catch
- _:_ -> closed
- end.
-
-close(Wq) ->
- gen_server:cast(Wq, close).
-
-init(Options) ->
- Q = #q{
- max_size = couch_util:get_value(max_size, Options, nil),
- max_items = couch_util:get_value(max_items, Options, nil),
- multi_workers = couch_util:get_value(multi_workers, Options, false)
- },
- {ok, Q, hibernate}.
-
-terminate(_Reason, #q{work_waiters = Workers}) ->
- lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers).
-
-handle_call({queue, Item, Size}, From, #q{work_waiters = []} = Q0) ->
- Q = Q0#q{
- size = Q0#q.size + Size,
- items = Q0#q.items + 1,
- queue = queue:in({Item, Size}, Q0#q.queue)
- },
- case
- (Q#q.size >= Q#q.max_size) orelse
- (Q#q.items >= Q#q.max_items)
- of
- true ->
- {noreply, Q#q{blocked = [From | Q#q.blocked]}, hibernate};
- false ->
- {reply, ok, Q, hibernate}
- end;
-handle_call({queue, Item, _}, _From, #q{work_waiters = [{W, _Max} | Rest]} = Q) ->
- gen_server:reply(W, {ok, [Item]}),
- {reply, ok, Q#q{work_waiters = Rest}, hibernate};
-handle_call({dequeue, Max}, From, Q) ->
- #q{work_waiters = Workers, multi_workers = Multi, items = Count} = Q,
- case {Workers, Multi} of
- {[_ | _], false} ->
- exit("Only one caller allowed to wait for this work at a time");
- {[_ | _], true} ->
- {noreply, Q#q{work_waiters = Workers ++ [{From, Max}]}};
- _ ->
- case Count of
- 0 ->
- {noreply, Q#q{work_waiters = Workers ++ [{From, Max}]}};
- C when C > 0 ->
- deliver_queue_items(Max, Q)
- end
- end;
-handle_call(item_count, _From, Q) ->
- {reply, Q#q.items, Q};
-handle_call(size, _From, Q) ->
- {reply, Q#q.size, Q}.
-
-deliver_queue_items(Max, Q) ->
- #q{
- queue = Queue,
- items = Count,
- size = Size,
- close_on_dequeue = Close,
- blocked = Blocked
- } = Q,
- case (Max =:= all) orelse (Max >= Count) of
- false ->
- {Items, Size2, Queue2, Blocked2} = dequeue_items(
- Max, Size, Queue, Blocked, []
- ),
- Q2 = Q#q{
- items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2
- },
- {reply, {ok, Items}, Q2};
- true ->
- lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
- Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
- Items = [Item || {Item, _} <- queue:to_list(Queue)],
- case Close of
- false ->
- {reply, {ok, Items}, Q2};
- true ->
- {stop, normal, {ok, Items}, Q2}
- end
- end.
-
-dequeue_items(0, Size, Queue, Blocked, DequeuedAcc) ->
- {lists:reverse(DequeuedAcc), Size, Queue, Blocked};
-dequeue_items(NumItems, Size, Queue, Blocked, DequeuedAcc) ->
- {{value, {Item, ItemSize}}, Queue2} = queue:out(Queue),
- case Blocked of
- [] ->
- Blocked2 = Blocked;
- [From | Blocked2] ->
- gen_server:reply(From, ok)
- end,
- dequeue_items(
- NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]
- ).
-
-handle_cast(close, #q{items = 0} = Q) ->
- {stop, normal, Q};
-handle_cast(close, Q) ->
- {noreply, Q#q{close_on_dequeue = true}}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_info(X, Q) ->
- {stop, X, Q}.
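couch_work_queue implements a bounded producer/consumer buffer: producers block once max_items or max_size is reached, and dequeue/2 hands out batches. A rough usage sketch with arbitrary limits:

    %% Producer/consumer sketch; the limits here are arbitrary examples.
    {ok, Q} = couch_work_queue:new([{max_items, 100}, {max_size, 16#100000}]),
    ok = couch_work_queue:queue(Q, {doc, <<"a">>}),
    ok = couch_work_queue:queue(Q, {doc, <<"b">>}),
    {ok, Items} = couch_work_queue:dequeue(Q, all),
    2 = length(Items),
    couch_work_queue:close(Q).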
diff --git a/src/couch/src/test_request.erl b/src/couch/src/test_request.erl
deleted file mode 100644
index d7364012f..000000000
--- a/src/couch/src/test_request.erl
+++ /dev/null
@@ -1,110 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(test_request).
-
--export([copy/1, copy/2, copy/3]).
--export([get/1, get/2, get/3]).
--export([post/2, post/3, post/4]).
--export([put/2, put/3, put/4]).
--export([delete/1, delete/2, delete/3]).
--export([options/1, options/2, options/3]).
--export([request/3, request/4, request/5]).
-
-copy(Url) ->
- copy(Url, []).
-
-copy(Url, Headers) ->
- copy(Url, Headers, []).
-
-copy(Url, Headers, Opts) ->
- request(copy, Url, Headers, [], Opts).
-
-get(Url) ->
- get(Url, []).
-
-get(Url, Headers) ->
- get(Url, Headers, []).
-
-get(Url, Headers, Opts) ->
- request(get, Url, Headers, [], Opts).
-
-post(Url, Body) ->
- post(Url, [], Body).
-
-post(Url, Headers, Body) ->
- post(Url, Headers, Body, []).
-
-post(Url, Headers, Body, Opts) ->
- request(post, Url, Headers, Body, Opts).
-
-put(Url, Body) ->
- put(Url, [], Body).
-
-put(Url, Headers, Body) ->
- put(Url, Headers, Body, []).
-
-put(Url, Headers, Body, Opts) ->
- request(put, Url, Headers, Body, Opts).
-
-delete(Url) ->
- delete(Url, []).
-
-delete(Url, Opts) ->
- delete(Url, [], Opts).
-
-delete(Url, Headers, Opts) ->
- request(delete, Url, Headers, [], Opts).
-
-options(Url) ->
- options(Url, []).
-
-options(Url, Headers) ->
- options(Url, Headers, []).
-
-options(Url, Headers, Opts) ->
- request(options, Url, Headers, [], Opts).
-
-request(Method, Url, Headers) ->
- request(Method, Url, Headers, []).
-
-request(Method, Url, Headers, Body) ->
- request(Method, Url, Headers, Body, [], 3).
-
-request(Method, Url, Headers, Body, Opts) ->
- request(Method, Url, Headers, Body, Opts, 3).
-
-request(_Method, _Url, _Headers, _Body, _Opts, 0) ->
- {error, request_failed};
-request(Method, Url, Headers, Body, Opts, N) ->
- case code:is_loaded(ibrowse) of
- false ->
- {ok, _} = ibrowse:start();
- _ ->
- ok
- end,
- case ibrowse:send_req(Url, Headers, Method, Body, Opts) of
- {ok, Code0, RespHeaders, RespBody0} ->
- Code = list_to_integer(Code0),
- RespBody = iolist_to_binary(RespBody0),
- {ok, Code, RespHeaders, RespBody};
- {error, {'EXIT', {normal, _}}} ->
- % Connection closed right after a successful request that
- % used the same connection.
- request(Method, Url, Headers, Body, Opts, N - 1);
- {error, retry_later} ->
- % CouchDB is busy, let’s wait a bit
- timer:sleep(3000 div N),
- request(Method, Url, Headers, Body, Opts, N - 1);
- Error ->
- Error
- end.
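test_request is a thin retry wrapper around ibrowse used by the test suites. A typical call shape looks like the following; the URL and port are illustrative placeholders for a local dev node, not values taken from this diff:

    %% Illustrative only: host and port depend on the local cluster setup.
    Url = "http://127.0.0.1:15984/_up",
    {ok, Code, _Headers, Body} = test_request:get(Url, [{"Accept", "application/json"}]),
    true = is_integer(Code) andalso is_binary(Body).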
diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
deleted file mode 100644
index 9f6d758c5..000000000
--- a/src/couch/src/test_util.erl
+++ /dev/null
@@ -1,429 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(test_util).
-
--include_lib("couch/include/couch_eunit.hrl").
--include("couch_db.hrl").
--include("couch_db_int.hrl").
--include("couch_bt_engine.hrl").
-
--export([init_code_path/0]).
--export([source_file/1, build_file/1]).
-%% -export([run/2]).
-
--export([start_couch/0, start_couch/1, start_couch/2, stop_couch/0, stop_couch/1]).
--export([start_config/1, stop_config/1]).
--export([start_applications/1, stop_applications/1]).
-
--export([stop_sync/1, stop_sync/2, stop_sync/3]).
-
--export([stop_sync_throw/2, stop_sync_throw/3, stop_sync_throw/4]).
-
--export([with_process_restart/1, with_process_restart/2, with_process_restart/3]).
--export([wait_process/1, wait_process/2]).
--export([wait/1, wait/2, wait/3]).
--export([wait_value/2, wait_other_value/2]).
--export([with_processes_restart/2, with_processes_restart/4]).
--export([with_couch_server_restart/1]).
-
--export([start/1, start/2, start/3, stop/1]).
-
--export([fake_db/1]).
-
--record(test_context, {mocked = [], started = [], module}).
-
--define(DEFAULT_APPS, [inets, ibrowse, ssl, config, couch_epi, couch_event, couch]).
-
-srcdir() ->
- code:priv_dir(couch) ++ "/../../".
-
-builddir() ->
- code:priv_dir(couch) ++ "/../../../".
-
-init_code_path() ->
- Paths = [
- "couchdb",
- "jiffy",
- "ibrowse",
- "mochiweb",
- "snappy"
- ],
- lists:foreach(
- fun(Name) ->
- code:add_patha(filename:join([builddir(), "src", Name]))
- end,
- Paths
- ).
-
-source_file(Name) ->
- filename:join([srcdir(), Name]).
-
-build_file(Name) ->
- filename:join([builddir(), Name]).
-
-start_couch() ->
- start_couch(?CONFIG_CHAIN, []).
-
-start_couch(ExtraApps) ->
- start_couch(?CONFIG_CHAIN, ExtraApps).
-
-start_couch(IniFiles, ExtraApps) ->
- load_applications_with_stats(),
- ok = application:set_env(config, ini_files, IniFiles),
- Apps = start_applications(?DEFAULT_APPS ++ ExtraApps),
- ok = config:delete("compactions", "_default", false),
- #test_context{started = Apps}.
-
-stop_couch() ->
- ok = stop_applications(?DEFAULT_APPS).
-
-stop_couch(#test_context{started = Apps}) ->
- stop_applications(Apps);
-stop_couch(_) ->
- stop_couch().
-
-with_couch_server_restart(Fun) ->
- Servers = couch_server:names(),
- test_util:with_processes_restart(Servers, Fun).
-
-start_applications(Apps) ->
- StartOrder = calculate_start_order(Apps),
- start_applications(StartOrder, []).
-
-start_applications([], Acc) ->
- lists:reverse(Acc);
-start_applications([App | Apps], Acc) when App == kernel; App == stdlib ->
- start_applications(Apps, Acc);
-start_applications([App | Apps], Acc) ->
- case application:start(App) of
- {error, {already_started, crypto}} ->
- start_applications(Apps, [crypto | Acc]);
- {error, {already_started, App}} ->
- io:format(standard_error, "Application ~s was left running!~n", [App]),
- application:stop(App),
- start_applications([App | Apps], Acc);
- {error, Reason} ->
- io:format(standard_error, "Cannot start application '~s', reason ~p~n", [App, Reason]),
- throw({error, {cannot_start, App, Reason}});
- ok ->
- start_applications(Apps, [App | Acc])
- end.
-
-stop_applications(Apps) ->
- [application:stop(App) || App <- lists:reverse(Apps)],
- ok.
-
-start_config(Chain) ->
- case config:start_link(Chain) of
- {ok, Pid} ->
- {ok, Pid};
- {error, {already_started, OldPid}} ->
- ok = stop_config(OldPid),
- start_config(Chain)
- end.
-
-stop_config(Pid) ->
- Timeout = 1000,
- case stop_sync(Pid, fun() -> config:stop() end, Timeout) of
- timeout ->
- throw({timeout_error, config_stop});
- _Else ->
- ok
- end.
-
-stop_sync(Name) ->
- stop_sync(Name, shutdown).
-stop_sync(Name, Reason) ->
- stop_sync(Name, Reason, 5000).
-
-stop_sync(Name, Reason, Timeout) when is_atom(Name) ->
- stop_sync(whereis(Name), Reason, Timeout);
-stop_sync(Pid, Reason, Timeout) when is_atom(Reason) and is_pid(Pid) ->
- stop_sync(Pid, fun() -> exit(Pid, Reason) end, Timeout);
-stop_sync(Pid, Fun, Timeout) when is_function(Fun) and is_pid(Pid) ->
- MRef = erlang:monitor(process, Pid),
- try
- begin
- catch unlink(Pid),
- Res = (catch Fun()),
- receive
- {'DOWN', MRef, _, _, _} ->
- Res
- after Timeout ->
- timeout
- end
- end
- after
- erlang:demonitor(MRef, [flush])
- end;
-stop_sync(_, _, _) ->
- error(badarg).
-
-stop_sync_throw(Name, Error) ->
- stop_sync_throw(Name, shutdown, Error).
-stop_sync_throw(Name, Reason, Error) ->
- stop_sync_throw(Name, Reason, Error, 5000).
-
-stop_sync_throw(Pid, Fun, Error, Timeout) ->
- case stop_sync(Pid, Fun, Timeout) of
- timeout ->
- throw(Error);
- Else ->
- Else
- end.
-
-with_process_restart(Name) ->
- {Pid, true} = with_process_restart(
- Name, fun() -> exit(whereis(Name), shutdown) end
- ),
- Pid.
-
-with_process_restart(Name, Fun) ->
- with_process_restart(Name, Fun, 5000).
-
-with_process_restart(Name, Fun, Timeout) ->
- Res = stop_sync(Name, Fun),
- case wait_process(Name, Timeout) of
- timeout ->
- timeout;
- Pid ->
- {Pid, Res}
- end.
-
-wait_process(Name) ->
- wait_process(Name, 5000).
-wait_process(Name, Timeout) ->
- wait(
- fun() ->
- case whereis(Name) of
- undefined ->
- wait;
- Pid ->
- Pid
- end
- end,
- Timeout
- ).
-
-wait(Fun) ->
- wait(Fun, 5000, 50).
-
-wait(Fun, Timeout) ->
- wait(Fun, Timeout, 50).
-
-wait(Fun, Timeout, Delay) ->
- Now = now_us(),
- wait(Fun, Timeout * 1000, Delay, Now, Now).
-
-wait(_Fun, Timeout, _Delay, Started, Prev) when Prev - Started > Timeout ->
- timeout;
-wait(Fun, Timeout, Delay, Started, _Prev) ->
- case Fun() of
- wait ->
- ok = timer:sleep(Delay),
- wait(Fun, Timeout, Delay, Started, now_us());
- Else ->
- Else
- end.
-
-wait_value(Fun, Value) ->
- wait(fun() ->
- case Fun() of
- Value -> Value;
- _ -> wait
- end
- end).
-
-wait_other_value(Fun, Value) ->
- wait(fun() ->
- case Fun() of
- Value -> wait;
- Other -> Other
- end
- end).
-
-with_processes_restart(Processes, Fun) ->
- with_processes_restart(Processes, Fun, 5000, 50).
-
-with_processes_restart(Names, Fun, Timeout, Delay) ->
- Processes = lists:foldl(
- fun(Name, Acc) ->
- [{Name, whereis(Name)} | Acc]
- end,
- [],
- Names
- ),
- [catch unlink(Pid) || {_, Pid} <- Processes],
- Res = (catch Fun()),
- {wait_start(Processes, Timeout, Delay), Res}.
-
-wait_start(Processes, TimeoutMSec, Delay) ->
- Now = now_us(),
- wait_start(Processes, TimeoutMSec * 1000, Delay, Now, Now, #{}).
-
-wait_start(_, Timeout, _Delay, Started, Prev, _) when Prev - Started > Timeout ->
- timeout;
-wait_start([], _Timeout, _Delay, _Started, _Prev, Res) ->
- Res;
-wait_start([{Name, Pid} | Rest] = Processes, Timeout, Delay, Started, _Prev, Res) ->
- case whereis(Name) of
- NewPid when is_pid(NewPid) andalso NewPid =/= Pid ->
- wait_start(Rest, Timeout, Delay, Started, now_us(), maps:put(Name, NewPid, Res));
- _ ->
- ok = timer:sleep(Delay),
- wait_start(Processes, Timeout, Delay, Started, now_us(), Res)
- end.
-
-start(Module) ->
- start(Module, [], []).
-
-start(Module, ExtraApps) ->
- start(Module, ExtraApps, []).
-
-start(Module, ExtraApps, Options) ->
- Apps = start_applications([config, couch_log, ioq, couch_epi | ExtraApps]),
- ToMock = [config, couch_stats] -- proplists:get_value(dont_mock, Options, []),
- mock(ToMock),
- #test_context{module = Module, mocked = ToMock, started = Apps}.
-
-stop(#test_context{mocked = Mocked, started = Apps}) ->
- meck:unload(Mocked),
- stop_applications(Apps).
-
-fake_db(Fields0) ->
- {ok, Db, Fields} = maybe_set_engine(Fields0),
- Indexes = lists:zip(
- record_info(fields, db),
- lists:seq(2, record_info(size, db))
- ),
- lists:foldl(
- fun({FieldName, Value}, Acc) ->
- Idx = couch_util:get_value(FieldName, Indexes),
- setelement(Idx, Acc, Value)
- end,
- Db,
- Fields
- ).
-
-maybe_set_engine(Fields0) ->
- case lists:member(engine, Fields0) of
- true ->
- {ok, #db{}, Fields0};
- false ->
- {ok, Header, Fields} = get_engine_header(Fields0),
- Db = #db{engine = {couch_bt_engine, #st{header = Header}}},
- {ok, Db, Fields}
- end.
-
-get_engine_header(Fields) ->
- Keys = [
- disk_version,
- update_seq,
- unused,
- id_tree_state,
- seq_tree_state,
- local_tree_state,
- purge_seq,
- purged_docs,
- security_ptr,
- revs_limit,
- uuid,
- epochs,
- compacted_seq
- ],
- {HeadFields, RestFields} = lists:partition(
- fun({K, _}) -> lists:member(K, Keys) end, Fields
- ),
- Header0 = couch_bt_engine_header:new(),
- Header = couch_bt_engine_header:set(Header0, HeadFields),
- {ok, Header, RestFields}.
-
-now_us() ->
- {MegaSecs, Secs, MicroSecs} = os:timestamp(),
- (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.
-
-mock(Modules) when is_list(Modules) ->
- [mock(Module) || Module <- Modules];
-mock(config) ->
- meck:new(config, [passthrough]),
- meck:expect(config, get, fun(_, _) -> undefined end),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- ok;
-mock(couch_stats) ->
- meck:new(couch_stats, [passthrough]),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
- meck:expect(couch_stats, increment_counter, fun(_, _) -> ok end),
- meck:expect(couch_stats, decrement_counter, fun(_) -> ok end),
- meck:expect(couch_stats, decrement_counter, fun(_, _) -> ok end),
- meck:expect(couch_stats, update_histogram, fun(_, _) -> ok end),
- meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end),
- ok.
-
-load_applications_with_stats() ->
- Wildcard = filename:join([?BUILDDIR(), "src/*/priv/stats_descriptions.cfg"]),
- [application:load(stats_file_to_app(File)) || File <- filelib:wildcard(Wildcard)],
- ok.
-
-stats_file_to_app(File) ->
- [_Desc, _Priv, App | _] = lists:reverse(filename:split(File)),
- erlang:list_to_atom(App).
-
-calculate_start_order(Apps) ->
- AllApps = calculate_start_order(sort_apps(Apps), []),
- % AllApps may not be the same list as Apps if we
- % loaded any dependencies. We recurse here when
- % that changes so that our sort_apps function has
- % a global view of all applications to start.
- case lists:usort(AllApps) == lists:usort(Apps) of
- true -> AllApps;
- false -> calculate_start_order(AllApps)
- end.
-
-calculate_start_order([], StartOrder) ->
- lists:reverse(StartOrder);
-calculate_start_order([App | RestApps], StartOrder) ->
- NewStartOrder = load_app_deps(App, StartOrder),
- calculate_start_order(RestApps, NewStartOrder).
-
-load_app_deps(App, StartOrder) ->
- case lists:member(App, StartOrder) of
- true ->
- StartOrder;
- false ->
- case application:load(App) of
- ok -> ok;
- {error, {already_loaded, App}} -> ok
- end,
- {ok, Apps} = application:get_key(App, applications),
- Deps =
- case App of
- kernel -> Apps;
- stdlib -> Apps;
- _ -> lists:usort([kernel, stdlib | Apps])
- end,
- NewStartOrder = lists:foldl(
- fun(Dep, Acc) ->
- load_app_deps(Dep, Acc)
- end,
- StartOrder,
- Deps
- ),
- [App | NewStartOrder]
- end.
-
-sort_apps(Apps) ->
- Weighted = [weight_app(App) || App <- Apps],
- element(2, lists:unzip(lists:sort(Weighted))).
-
-weight_app(couch_log) -> {0.0, couch_log};
-weight_app(Else) -> {1.0, Else}.
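Test suites built on these helpers typically follow the same eunit shape: start a minimal node in setup, run the assertions, and stop it in cleanup. A skeletal, hypothetical example (the module name and assertions are placeholders, and it assumes eunit.hrl is included):

    %% Hypothetical eunit skeleton using test_util; not taken from this diff.
    my_feature_test_() ->
        {
            setup,
            fun() -> test_util:start_couch() end,
            fun test_util:stop_couch/1,
            [
                ?_assertEqual(ok, test_util:wait_value(fun() -> ok end, ok)),
                ?_assertEqual(timeout, test_util:wait(fun() -> wait end, 100))
            ]
        }.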
diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
deleted file mode 100644
index 63f67c243..000000000
--- a/src/couch/test/eunit/chttpd_endpoints_tests.erl
+++ /dev/null
@@ -1,108 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_endpoints_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-endpoints_test_() ->
- {
- "Checking dynamic endpoints",
- {
- setup,
- fun() ->
- test_util:start_couch([chttpd])
- end,
- fun test_util:stop/1,
- [
- fun url_handlers/0,
- fun db_handlers/0,
- fun design_handlers/0
- ]
- }
- }.
-
-url_handlers() ->
- Handlers = [
- {<<"">>, chttpd_misc, handle_welcome_req},
- {<<"favicon.ico">>, chttpd_misc, handle_favicon_req},
- {<<"_utils">>, chttpd_misc, handle_utils_dir_req},
- {<<"_all_dbs">>, chttpd_misc, handle_all_dbs_req},
- {<<"_dbs_info">>, chttpd_misc, handle_dbs_info_req},
- {<<"_active_tasks">>, chttpd_misc, handle_task_status_req},
- {<<"_node">>, chttpd_node, handle_node_req},
- {<<"_reload_query_servers">>, chttpd_misc, handle_reload_query_servers_req},
- {<<"_replicate">>, chttpd_misc, handle_replicate_req},
- {<<"_uuids">>, chttpd_misc, handle_uuids_req},
- {<<"_session">>, chttpd_auth, handle_session_req},
- {<<"_up">>, chttpd_misc, handle_up_req},
- {<<"_membership">>, mem3_httpd, handle_membership_req},
- {<<"_db_updates">>, global_changes_httpd, handle_global_changes_req},
- {<<"_cluster_setup">>, setup_httpd, handle_setup_req}
- ],
-
- lists:foreach(
- fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:url_handler(Path, undefined),
- Expect = fun Mod:Fun/1,
- ?assertEqual(Expect, Handler)
- end,
- Handlers
- ),
-
- ?assertEqual(undefined, chttpd_handlers:url_handler("foo", undefined)).
-
-db_handlers() ->
- Handlers = [
- {<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req},
- {<<"_compact">>, chttpd_db, handle_compact_req},
- {<<"_design">>, chttpd_db, handle_design_req},
- {<<"_temp_view">>, chttpd_view, handle_temp_view_req},
- {<<"_changes">>, chttpd_db, handle_changes_req},
- {<<"_shards">>, mem3_httpd, handle_shards_req},
- {<<"_index">>, mango_httpd, handle_req},
- {<<"_explain">>, mango_httpd, handle_req},
- {<<"_find">>, mango_httpd, handle_req}
- ],
-
- lists:foreach(
- fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:db_handler(Path, undefined),
- Expect = fun Mod:Fun/2,
- ?assertEqual(Expect, Handler)
- end,
- Handlers
- ),
-
- ?assertEqual(undefined, chttpd_handlers:db_handler("bam", undefined)).
-
-design_handlers() ->
- Handlers = [
- {<<"_view">>, chttpd_view, handle_view_req},
- {<<"_show">>, chttpd_show, handle_doc_show_req},
- {<<"_list">>, chttpd_show, handle_view_list_req},
- {<<"_update">>, chttpd_show, handle_doc_update_req},
- {<<"_info">>, chttpd_db, handle_design_info_req},
- {<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req}
- ],
-
- lists:foreach(
- fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:design_handler(Path, undefined),
- Expect = fun Mod:Fun/3,
- ?assertEqual(Expect, Handler)
- end,
- Handlers
- ),
-
- ?assertEqual(undefined, chttpd_handlers:design_handler("baz", undefined)).
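The lookups exercised above can be reproduced directly: a registered path resolves to its handler fun, and an unknown path falls back to the supplied default. A sketch, assuming a node started with chttpd as in the test setup:

    %% Mirrors the assertions above on a running node with chttpd started.
    Handler = chttpd_handlers:url_handler(<<"_uuids">>, undefined),
    true = (Handler =:= fun chttpd_misc:handle_uuids_req/1),
    undefined = chttpd_handlers:url_handler("foo", undefined).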
diff --git a/src/couch/test/eunit/couch_auth_cache_tests.erl b/src/couch/test/eunit/couch_auth_cache_tests.erl
deleted file mode 100644
index a4c31083a..000000000
--- a/src/couch/test/eunit/couch_auth_cache_tests.erl
+++ /dev/null
@@ -1,382 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_auth_cache_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(SALT, <<"SALT">>).
--define(DB_TIMEOUT, 15000).
-
-start() ->
- test_util:start_couch([ioq]).
-
-setup() ->
- DbName = ?tempdb(),
- config:set(
- "couch_httpd_auth",
- "authentication_db",
- ?b2l(DbName),
- false
- ),
- DbName.
-
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-couch_auth_cache_test_() ->
- {
- "CouchDB auth cache tests",
- {
- setup,
- fun start/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_get_nil_on_missed_cache/1,
- fun should_get_right_password_hash/1,
- fun should_ensure_doc_hash_equals_cached_one/1,
- fun should_update_password/1,
- fun should_cleanup_cache_after_userdoc_deletion/1,
- fun should_restore_cache_after_userdoc_recreation/1,
- fun should_drop_cache_on_auth_db_change/1,
- fun should_restore_cache_on_auth_db_change/1,
- fun should_recover_cache_after_shutdown/1,
- fun should_get_admin_from_config/1
- ]
- }
- }
- }.
-
-auth_vdu_test_() ->
- Cases = [
- %% Old , New , Result
- %% [Roles, Type] , [Roles, Type] ,
-
- %% Updating valid user doc with valid one
- {[custom, user], [custom, user], "ok"},
-
- %% Updating invalid doc (missing type or roles field) with valid one
- {[missing, missing], [custom, user], "ok"},
- {[missing, user], [custom, user], "ok"},
- {[custom, missing], [custom, user], "ok"},
-
- %% Updating invalid doc (wrong type) with valid one
- {[missing, other], [custom, user], "ok"},
- {[custom, other], [custom, user], "ok"},
-
- %% Updating valid document with invalid one
- {[custom, user], [missing, missing], "doc.type must be user"},
- {[custom, user], [missing, user], "doc.roles must exist"},
- {[custom, user], [custom, missing], "doc.type must be user"},
- {[custom, user], [missing, other], "doc.type must be user"},
- {[custom, user], [custom, other], "doc.type must be user"},
-
- %% Updating invalid doc with invalid one
- {[missing, missing], [missing, missing], "doc.type must be user"},
- {[missing, missing], [missing, user], "doc.roles must exist"},
- {[missing, missing], [custom, missing], "doc.type must be user"},
- {[missing, missing], [missing, other], "doc.type must be user"},
- {[missing, missing], [custom, other], "doc.type must be user"},
-
- {[missing, user], [missing, missing], "doc.type must be user"},
- {[missing, user], [missing, user], "doc.roles must exist"},
- {[missing, user], [custom, missing], "doc.type must be user"},
- {[missing, user], [missing, other], "doc.type must be user"},
- {[missing, user], [custom, other], "doc.type must be user"},
-
- {[missing, other], [missing, missing], "doc.type must be user"},
- {[missing, other], [missing, user], "doc.roles must exist"},
- {[missing, other], [custom, missing], "doc.type must be user"},
- {[missing, other], [missing, other], "doc.type must be user"},
- {[missing, other], [custom, other], "doc.type must be user"},
-
- {[custom, missing], [missing, missing], "doc.type must be user"},
- {[custom, missing], [missing, user], "doc.roles must exist"},
- {[custom, missing], [custom, missing], "doc.type must be user"},
- {[custom, missing], [missing, other], "doc.type must be user"},
- {[custom, missing], [custom, other], "doc.type must be user"},
-
- {[custom, other], [missing, missing], "doc.type must be user"},
- {[custom, other], [missing, user], "doc.roles must exist"},
- {[custom, other], [custom, missing], "doc.type must be user"},
- {[custom, other], [missing, other], "doc.type must be user"},
- {[custom, other], [custom, other], "doc.type must be user"}
- ],
-
- %% Make sure we covered all combinations
- AllPossibleDocs = couch_tests_combinatorics:product([
- [missing, custom],
- [missing, user, other]
- ]),
- AllPossibleCases = couch_tests_combinatorics:product(
- [AllPossibleDocs, AllPossibleDocs]
- ),
- ?assertEqual([], AllPossibleCases -- [[A, B] || {A, B, _} <- Cases]),
-
- {
- "Check User doc validation",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- [make_validate_test(Case) || Case <- Cases]
- }
- }.
-
-should_get_nil_on_missed_cache(_) ->
- ?_assertEqual(nil, couch_auth_cache:get_user_creds("joe")).
-
-should_get_right_password_hash(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(
- PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds)
- )
- end).
-
-should_ensure_doc_hash_equals_cached_one(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- CachedHash = couch_util:get_value(<<"password_sha">>, Creds),
- StoredHash = get_user_doc_password_sha(DbName, "joe"),
- ?assertEqual(StoredHash, CachedHash)
- end).
-
-should_update_password(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass2"),
- {ok, Rev} = update_user_doc(DbName, "joe", "pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass2", Rev),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(
- PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds)
- )
- end).
-
-should_cleanup_cache_after_userdoc_deletion(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- delete_user_doc(DbName, "joe"),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
- end).
-
-should_restore_cache_after_userdoc_recreation(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass5"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- delete_user_doc(DbName, "joe"),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")),
-
- {ok, _} = update_user_doc(DbName, "joe", "pass5"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- ?assertEqual(
- PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds)
- )
- end).
-
-should_drop_cache_on_auth_db_change(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- config:set(
- "couch_httpd_auth",
- "authentication_db",
- ?b2l(?tempdb()),
- false
- ),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
- end).
-
-should_restore_cache_on_auth_db_change(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- DbName1 = ?tempdb(),
- config:set(
- "couch_httpd_auth",
- "authentication_db",
- ?b2l(DbName1),
- false
- ),
-
- {ok, _} = update_user_doc(DbName1, "joe", "pass5"),
-
- config:set(
- "couch_httpd_auth",
- "authentication_db",
- ?b2l(DbName),
- false
- ),
-
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(
- PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds)
- )
- end).
-
-should_recover_cache_after_shutdown(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass2"),
- {ok, Rev0} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Rev1} = update_user_doc(DbName, "joe", "pass2", Rev0),
- shutdown_db(DbName),
- {ok, Rev1} = get_doc_rev(DbName, "joe"),
- ?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe"))
- end).
-
-should_get_admin_from_config(_DbName) ->
- ?_test(begin
- config:set("admins", "testadmin", "password", false),
- Creds = test_util:wait(fun() ->
- case couch_auth_cache:get_user_creds("testadmin") of
- {ok, Creds0, _} -> Creds0;
- nil -> wait
- end
- end),
- Roles = couch_util:get_value(<<"roles">>, Creds),
- ?assertEqual([<<"_admin">>], Roles)
- end).
-
-update_user_doc(DbName, UserName, Password) ->
- update_user_doc(DbName, UserName, Password, nil).
-
-update_user_doc(DbName, UserName, Password, Rev) ->
- ok = couch_auth_cache:ensure_users_db_exists(),
- User = iolist_to_binary(UserName),
- Doc = couch_doc:from_json_obj({
- [
- {<<"_id">>, <<"org.couchdb.user:", User/binary>>},
- {<<"name">>, User},
- {<<"type">>, <<"user">>},
- {<<"salt">>, ?SALT},
- {<<"password_sha">>, hash_password(Password)},
- {<<"roles">>, []}
- ] ++
- case Rev of
- nil -> [];
- _ -> [{<<"_rev">>, Rev}]
- end
- }),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []),
- ok = couch_db:close(AuthDb),
- {ok, couch_doc:rev_to_str(NewRev)}.
-
-hash_password(Password) ->
- ?l2b(couch_util:to_hex(crypto:hash(sha, iolist_to_binary([Password, ?SALT])))).
-
-shutdown_db(DbName) ->
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(AuthDb),
- couch_util:shutdown_sync(couch_db:get_pid(AuthDb)),
- ok = timer:sleep(1000).
-
-get_doc_rev(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- UpdateRev =
- case couch_db:open_doc(AuthDb, DocId, []) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"_rev">>, Props);
- {not_found, missing} ->
- nil
- end,
- ok = couch_db:close(AuthDb),
- {ok, UpdateRev}.
-
-get_user_doc_password_sha(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
- ok = couch_db:close(AuthDb),
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"password_sha">>, Props).
-
-delete_user_doc(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
- {Props} = couch_doc:to_json_obj(Doc, []),
- DeletedDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DocId},
- {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)},
- {<<"_deleted">>, true}
- ]}
- ),
- {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []),
- ok = couch_db:close(AuthDb).
-
-make_validate_test({Old, New, "ok"} = Case) ->
- {test_id(Case), ?_assertEqual(ok, validate(doc(Old), doc(New)))};
-make_validate_test({Old, New, Reason} = Case) ->
- Failure = ?l2b(Reason),
- {test_id(Case), ?_assertThrow({forbidden, Failure}, validate(doc(Old), doc(New)))}.
-
-test_id({[OldRoles, OldType], [NewRoles, NewType], Result}) ->
- lists:flatten(
- io_lib:format(
- "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"",
- [OldRoles, OldType, NewRoles, NewType, Result]
- )
- ).
-
-doc([Roles, Type]) ->
- couch_doc:from_json_obj({
- [
- {<<"_id">>, <<"org.couchdb.user:foo">>},
- {<<"_rev">>, <<"1-281c81adb1bf10927a6160f246dc0468">>},
- {<<"name">>, <<"foo">>},
- {<<"password_scheme">>, <<"simple">>},
- {<<"salt">>, <<"00000000000000000000000000000000">>},
- {<<"password_sha">>, <<"111111111111111111111111111111111111">>}
- ] ++
- type(Type) ++ roles(Roles)
- }).
-
-roles(custom) -> [{<<"roles">>, [<<"custom">>]}];
-roles(missing) -> [].
-
-type(user) -> [{<<"type">>, <<"user">>}];
-type(other) -> [{<<"type">>, <<"other">>}];
-type(missing) -> [].
-
-validate(DiskDoc, NewDoc) ->
- JSONCtx =
- {[
- {<<"db">>, <<"foo/bar">>},
- {<<"name">>, <<"foo">>},
- {<<"roles">>, [<<"_admin">>]}
- ]},
- validate(DiskDoc, NewDoc, JSONCtx).
-
-validate(DiskDoc, NewDoc, JSONCtx) ->
- {ok, DDoc0} = couch_auth_cache:auth_design_doc(<<"_design/anything">>),
- DDoc = DDoc0#doc{revs = {1, [<<>>]}},
- couch_query_servers:validate_doc_update(DDoc, NewDoc, DiskDoc, JSONCtx, []).
diff --git a/src/couch/test/eunit/couch_base32_tests.erl b/src/couch/test/eunit/couch_base32_tests.erl
deleted file mode 100644
index 7e4d59a09..000000000
--- a/src/couch/test/eunit/couch_base32_tests.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_base32_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-base32_test() ->
- roundtrip(<<"">>, <<"">>),
- roundtrip(<<"f">>, <<"MY======">>),
- roundtrip(<<"fo">>, <<"MZXQ====">>),
- roundtrip(<<"foo">>, <<"MZXW6===">>),
- roundtrip(<<"foob">>, <<"MZXW6YQ=">>),
- roundtrip(<<"fooba">>, <<"MZXW6YTB">>),
- roundtrip(<<"foobar">>, <<"MZXW6YTBOI======">>).
-
-roundtrip(Plain, Encoded) ->
- ?assertEqual(Plain, couch_base32:decode(Encoded)),
- ?assertEqual(Encoded, couch_base32:encode(Plain)).
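These are the RFC 4648 base32 test vectors. The same roundtrip property should hold for arbitrary binaries; a quick shell check using only the two functions exercised above might look like:

    Bin = <<"foobar">>,
    Bin = couch_base32:decode(couch_base32:encode(Bin)).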
diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl b/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl
deleted file mode 100644
index 72b780a7f..000000000
--- a/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl
+++ /dev/null
@@ -1,101 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor_ev).
-
--export([
- init/0,
- terminate/0,
- clear/0,
-
- set_wait/1,
- set_crash/1,
-
- event/1
-]).
-
--define(TAB, couch_db_updater_ev_tab).
-
-init() ->
- ets:new(?TAB, [set, public, named_table]).
-
-terminate() ->
- ets:delete(?TAB).
-
-clear() ->
- ets:delete_all_objects(?TAB).
-
-set_wait(Event) ->
- Self = self(),
- WaitFun = fun(_) ->
- receive
- {Self, go} ->
- Self ! {self(), ok}
- end,
- ets:delete(?TAB, Event)
- end,
- ContinueFun = fun(Pid) ->
- Pid ! {Self, go},
- receive
- {Pid, ok} -> ok
- end
- end,
- ets:insert(?TAB, {Event, WaitFun}),
- {ok, ContinueFun}.
-
-set_crash(Event) ->
- Reason = {couch_db_updater_ev_crash, Event},
- CrashFun = fun(_) -> exit(Reason) end,
- ets:insert(?TAB, {Event, CrashFun}),
- {ok, Reason}.
-
-event(Event) ->
- NewEvent =
- case Event of
- seq_init ->
- put(?MODULE, 0),
- Event;
- seq_copy ->
- Count = get(?MODULE),
- put(?MODULE, Count + 1),
- {seq_copy, Count};
- id_init ->
- put(?MODULE, 0),
- Event;
- id_copy ->
- Count = get(?MODULE),
- put(?MODULE, Count + 1),
- {id_copy, Count};
- md_copy_init ->
- put(?MODULE, 0),
- Event;
- md_copy_row ->
- Count = get(?MODULE),
- put(?MODULE, Count + 1),
- {md_copy_row, Count};
- _ ->
- Event
- end,
- handle_event(NewEvent).
-
-handle_event(Event) ->
- try
- case ets:lookup(?TAB, Event) of
- [{Event, ActionFun}] ->
- ActionFun(Event);
- [] ->
- ok
- end
- catch
- error:badarg ->
- ok
- end.
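A rough sketch of how these hooks are driven from a test (it mirrors run_static/2 in couch_bt_engine_compactor_ev_tests, whose diff follows), assuming the compactor has been compiled with calls to couch_bt_engine_compactor_ev:event/1 at the named points:

    {ok, Continue} = couch_bt_engine_compactor_ev:set_wait(init),
    {ok, Reason} = couch_bt_engine_compactor_ev:set_crash(seq_done),
    {ok, Db} = couch_db:open_int(DbName, []),
    Ref = couch_db:monitor(Db),
    {ok, CPid} = couch_db:start_compact(Db),
    %% release the compactor past 'init'; it should exit(Reason) at 'seq_done'
    Continue(CPid),
    receive {'DOWN', Ref, _, _, Reason} -> ok end.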
diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl
deleted file mode 100644
index 007c74d06..000000000
--- a/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl
+++ /dev/null
@@ -1,336 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor_ev_tests).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/src/couch_server_int.hrl").
-
--define(TIMEOUT_EUNIT, 60).
--define(EV_MOD, couch_bt_engine_compactor_ev).
--define(INIT_DOCS, 2500).
--define(WRITE_DOCS, 20).
-
-% The idea behind the tests in this module is to attempt to
-% cover the number of restart/recopy events during compaction
-% so that we can be as sure as possible that the compactor
-% is resilient to errors in the face of external conditions
-% (e.g., the VM rebooting). The single linear pass is easy enough
-% to prove; however, restarting is important enough that we don't
-% want to waste work if a VM happens to bounce a lot.
-%
-% To cover as many restart situations as possible, we have created
-% a number of events in the compactor code that are present in a
-% test-compiled version of the module. These events can then
-% be used (via meck) to introduce errors and coordinate writes
-% to the database while compaction is in progress.
-
-% This list of events is where we'll insert our errors.
-
-events() ->
- [
- % The compactor process is spawned
- init,
- % After compaction files have opened
- files_opened,
-
- % Just before apply purge changes
- purge_init,
- % Just after finish purge updates
- purge_done,
-
-    % The first phase is when we copy all document body and attachment
-    % data to the new database file in order of update sequence so
-    % that we can resume on crash.
-
- % Before the first change is copied
- seq_init,
- % After change N is copied
- {seq_copy, 0},
- {seq_copy, ?INIT_DOCS div 2},
- {seq_copy, ?INIT_DOCS - 2},
- % After last change is copied
- seq_done,
-
- % The id copy phases come in two flavors. Before a compaction
- % swap is attempted they're copied from the id_tree in the
- % database being compacted. After a swap attempt they are
- % stored in an emsort file on disk. Thus the two sets of
- % related events here.
-
- % Just before metadata sort starts
- md_sort_init,
-    % Just after metadata sort has finished
- md_sort_done,
- % Just before metadata copy starts
- md_copy_init,
- % After docid N is copied
- {md_copy_row, 0},
- {md_copy_row, ?INIT_DOCS div 2},
- {md_copy_row, ?INIT_DOCS - 2},
- % Just after the last docid is copied
- md_copy_done,
-
- % And then the final steps before we finish
-
- % Just before final sync
- before_final_sync,
- % Just after the final sync
- after_final_sync,
- % Just before the final notification
- before_notify
- ].
-
-% Mark which events only happen when documents are present
-
-requires_docs({seq_copy, _}) -> true;
-requires_docs(md_sort_init) -> true;
-requires_docs(md_sort_done) -> true;
-requires_docs(md_copy_init) -> true;
-requires_docs({md_copy_row, _}) -> true;
-requires_docs(md_copy_done) -> true;
-requires_docs(_) -> false.
-
-% Mark which events only happen when there's write activity during
-% a compaction.
-
-requires_write(md_sort_init) -> true;
-requires_write(md_sort_done) -> true;
-requires_write(md_copy_init) -> true;
-requires_write({md_copy_row, _}) -> true;
-requires_write(md_copy_done) -> true;
-requires_write(_) -> false.
-
-setup() ->
- purge_module(),
- ?EV_MOD:init(),
- test_util:start_couch().
-
-teardown(Ctx) ->
- test_util:stop_couch(Ctx),
- ?EV_MOD:terminate().
-
-start_empty_db_test(_Event) ->
- ?EV_MOD:clear(),
- DbName = ?tempdb(),
- {ok, _} = couch_db:create(DbName, [?ADMIN_CTX]),
- DbName.
-
-start_populated_db_test(Event) ->
- DbName = start_empty_db_test(Event),
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- populate_db(Db, ?INIT_DOCS)
- after
- couch_db:close(Db)
- end,
- DbName.
-
-stop_test(_Event, DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]).
-
-static_empty_db_test_() ->
- FiltFun = fun(E) ->
- not (requires_docs(E) or requires_write(E))
- end,
- Events = lists:filter(FiltFun, events()) -- [init],
- {
- "Idle empty database",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- {
- foreachx,
- fun start_empty_db_test/1,
- fun stop_test/2,
- [{Event, fun run_static_init/2} || Event <- Events]
- }
- ]
- }
- }.
-
-static_populated_db_test_() ->
- FiltFun = fun(E) -> not requires_write(E) end,
- Events = lists:filter(FiltFun, events()) -- [init],
- {
- "Idle populated database",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- {
- foreachx,
- fun start_populated_db_test/1,
- fun stop_test/2,
- [{Event, fun run_static_init/2} || Event <- Events]
- }
- ]
- }
- }.
-
-dynamic_empty_db_test_() ->
- FiltFun = fun(E) -> not requires_docs(E) end,
- Events = lists:filter(FiltFun, events()) -- [init],
- {
- "Writes to empty database",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- {
- foreachx,
- fun start_empty_db_test/1,
- fun stop_test/2,
- [{Event, fun run_dynamic_init/2} || Event <- Events]
- }
- ]
- }
- }.
-
-dynamic_populated_db_test_() ->
- Events = events() -- [init],
- {
- "Writes to populated database",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- {
- foreachx,
- fun start_populated_db_test/1,
- fun stop_test/2,
- [{Event, fun run_dynamic_init/2} || Event <- Events]
- }
- ]
- }
- }.
-
-run_static_init(Event, DbName) ->
- Name = lists:flatten(io_lib:format("~p", [Event])),
- Test = {timeout, ?TIMEOUT_EUNIT, ?_test(run_static(Event, DbName))},
- {Name, Test}.
-
-run_static(Event, DbName) ->
- {ok, ContinueFun} = ?EV_MOD:set_wait(init),
- {ok, Reason} = ?EV_MOD:set_crash(Event),
- {ok, Db} = couch_db:open_int(DbName, []),
- Ref = couch_db:monitor(Db),
- {ok, CPid} = couch_db:start_compact(Db),
- ContinueFun(CPid),
- receive
- {'DOWN', Ref, _, _, Reason} ->
- wait_db_cleared(Db)
- end,
- run_successful_compaction(DbName),
- couch_db:close(Db).
-
-run_dynamic_init(Event, DbName) ->
- Name = lists:flatten(io_lib:format("~p", [Event])),
- Test = {timeout, ?TIMEOUT_EUNIT, ?_test(run_dynamic(Event, DbName))},
- {Name, Test}.
-
-run_dynamic(Event, DbName) ->
- {ok, ContinueFun} = ?EV_MOD:set_wait(init),
- {ok, Reason} = ?EV_MOD:set_crash(Event),
- {ok, Db} = couch_db:open_int(DbName, []),
- Ref = couch_db:monitor(Db),
- {ok, CPid} = couch_db:start_compact(Db),
- ok = populate_db(Db, 10),
- ContinueFun(CPid),
- receive
- {'DOWN', Ref, _, _, Reason} ->
- wait_db_cleared(Db)
- end,
- run_successful_compaction(DbName),
- couch_db:close(Db).
-
-run_successful_compaction(DbName) ->
- ?EV_MOD:clear(),
- {ok, ContinueFun} = ?EV_MOD:set_wait(init),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, CPid} = couch_db:start_compact(Db),
- Ref = erlang:monitor(process, CPid),
- ContinueFun(CPid),
- receive
- {'DOWN', Ref, _, _, normal} -> ok
- end,
- Pid = couch_db:get_pid(Db),
- {ok, NewDb} = gen_server:call(Pid, get_db),
- validate_compaction(NewDb),
- couch_db:close(Db).
-
-wait_db_cleared(Db) ->
- wait_db_cleared(Db, 5).
-
-wait_db_cleared(Db, N) when N < 0 ->
- erlang:error({db_clear_timeout, couch_db:name(Db)});
-wait_db_cleared(Db, N) ->
- Tab = couch_server:couch_dbs(couch_db:name(Db)),
- case ets:lookup(Tab, couch_db:name(Db)) of
- [] ->
- ok;
- [#entry{db = NewDb}] ->
- OldPid = couch_db:get_pid(Db),
- NewPid = couch_db:get_pid(NewDb),
- if
- NewPid /= OldPid ->
- ok;
- true ->
- timer:sleep(100),
- wait_db_cleared(Db, N - 1)
- end
- end.
-
-populate_db(_Db, NumDocs) when NumDocs =< 0 ->
- ok;
-populate_db(Db, NumDocs) ->
- String = [$a || _ <- lists:seq(1, erlang:min(NumDocs, 500))],
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, couch_uuids:random()},
- {<<"string">>, list_to_binary(String)}
- ]}
- )
- end,
-        lists:seq(1, erlang:min(NumDocs, 500))
- ),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_db(Db, NumDocs - 500).
-
-validate_compaction(Db) ->
- {ok, DocCount} = couch_db:get_doc_count(Db),
- {ok, DelDocCount} = couch_db:get_del_doc_count(Db),
- NumChanges = couch_db:count_changes_since(Db, 0),
- FoldFun = fun(FDI, {PrevId, CountAcc}) ->
- ?assert(FDI#full_doc_info.id > PrevId),
- {ok, {FDI#full_doc_info.id, CountAcc + 1}}
- end,
- {ok, {_, LastCount}} = couch_db:fold_docs(Db, FoldFun, {<<>>, 0}),
- ?assertEqual(DocCount + DelDocCount, LastCount),
- ?assertEqual(NumChanges, LastCount).
-
-purge_module() ->
- case code:which(couch_db_updater) of
- cover_compiled ->
- ok;
- _ ->
- code:delete(couch_db_updater),
- code:purge(couch_db_updater)
- end.
diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
deleted file mode 100644
index 73428b0a9..000000000
--- a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
+++ /dev/null
@@ -1,124 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DELAY, 100).
--define(WAIT_DELAY_COUNT, 50).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_docs(DbName),
- DbName.
-
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-compaction_resume_test_() ->
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun compaction_resume/1
- ]
- }
- }.
-
-compaction_resume(DbName) ->
- ?_test(begin
- check_db_validity(DbName),
- compact_db(DbName),
- check_db_validity(DbName),
-
- % Force an error when copying document ids
- with_mecked_emsort(fun() ->
- compact_db(DbName)
- end),
-
- check_db_validity(DbName),
- compact_db(DbName),
- check_db_validity(DbName)
- end).
-
-check_db_validity(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:count_changes_since(Db, 0))
- end).
-
-with_mecked_emsort(Fun) ->
- meck:new(couch_emsort, [passthrough]),
- meck:expect(couch_emsort, iter, fun(_) -> erlang:error(kaboom) end),
- try
- Fun()
- after
- meck:unload()
- end.
-
-create_docs(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- Doc1 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
- ]}
- ),
- Doc2 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
- ]}
- ),
- Doc3 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
- ]}
- ),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3])
- end).
-
-compact_db(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
-
-wait_db_compact_done(_DbName, 0) ->
- Failure = [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}
- ],
- erlang:error({assertion_failed, Failure});
-wait_db_compact_done(DbName, N) ->
- IsDone = couch_util:with_db(DbName, fun(Db) ->
- not is_pid(couch_db:get_compactor_pid(Db))
- end),
- if
- IsDone ->
- ok;
- true ->
- timer:sleep(?DELAY),
- wait_db_compact_done(DbName, N - 1)
- end.
diff --git a/src/couch/test/eunit/couch_bt_engine_tests.erl b/src/couch/test/eunit/couch_bt_engine_tests.erl
deleted file mode 100644
index 56d18d3a4..000000000
--- a/src/couch/test/eunit/couch_bt_engine_tests.erl
+++ /dev/null
@@ -1,18 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-couch_bt_engine_test_() ->
- cpse_util:create_tests(couch, couch_bt_engine, "couch").
diff --git a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
deleted file mode 100644
index 62f128a4f..000000000
--- a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
+++ /dev/null
@@ -1,255 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_upgrade_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-% seconds
--define(TIMEOUT, 60).
-
-setup(_) ->
- Ctx = test_util:start_couch(),
- DbDir = config:get("couchdb", "database_dir"),
- DbFileNames = [
- "db_v6_without_purge_req.couch",
- "db_v6_with_1_purge_req.couch",
- "db_v6_with_2_purge_req.couch",
- "db_v6_with_1_purge_req_for_2_docs.couch",
- "db_v7_without_purge_req.couch",
- "db_v7_with_1_purge_req.couch",
- "db_v7_with_2_purge_req.couch",
- "db_v7_with_1_purge_req_for_2_docs.couch"
- ],
- NewPaths = lists:map(
- fun(DbFileName) ->
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
- NewDbFilePath = filename:join([DbDir, DbFileName]),
- ok = filelib:ensure_dir(NewDbFilePath),
- file:delete(NewDbFilePath),
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
- NewDbFilePath
- end,
- DbFileNames
- ),
- {Ctx, NewPaths}.
-
-teardown(_, {Ctx, Paths}) ->
- test_util:stop_couch(Ctx),
- lists:foreach(
- fun(Path) ->
- file:delete(Path)
- end,
- Paths
- ).
-
-upgrade_test_() ->
- From = [6, 7],
- {
- "Couch Bt Engine Upgrade tests",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{F, fun t_upgrade_without_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From]
- }
- }.
-
-t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- % There are three documents in the fixture
- % db with zero purge entries
- DbName = ?l2b(
- "db_v" ++ integer_to_list(VersionFrom) ++
- "_without_purge_req"
- ),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(0, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
- end),
- ?assertEqual([], UpgradedPurged),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- {ok, Rev} = save_doc(
- DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
- ),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)),
- ?assertEqual(0, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(1, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- % There are two documents in the fixture database
- % with a single purge entry
- DbName = ?l2b(
- "db_v" ++ integer_to_list(VersionFrom) ++
- "_with_1_purge_req"
- ),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(1, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{1, <<"doc1">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(
- DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
- ),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(1, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(2, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
-            % There is one document left in the fixture database,
-            % along with two docs that have been purged
- DbName = ?l2b(
- "db_v" ++ integer_to_list(VersionFrom) ++
- "_with_2_purge_req"
- ),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(2, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{2, <<"doc2">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(2, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- % There are two documents (Doc4 and Doc5) in the fixture database
- % with three docs (Doc1, Doc2 and Doc3) that have been purged, and
-            % with one purge req for Doc1 and another purge req for Doc2 and Doc3
- DbName = ?l2b(
- "db_v" ++ integer_to_list(VersionFrom) ++
- "_with_1_purge_req_for_2_docs"
- ),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(3, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{3, <<"doc2">>}, {2, <<"doc3">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc6">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(4, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-save_doc(DbName, Json) ->
- Doc = couch_doc:from_json_obj(Json),
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:update_doc(Db, Doc, [])
- end).
-
-fold_fun({PSeq, _UUID, Id, _Revs}, Acc) ->
- {ok, [{PSeq, Id} | Acc]}.
-
-get_disk_version_from_header(DbFileName) ->
- DbDir = config:get("couchdb", "database_dir"),
- DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]),
- {ok, Fd} = couch_file:open(DbFilePath, []),
- {ok, Header} = couch_file:read_header(Fd),
-    DiskVersion = couch_bt_engine_header:disk_version(Header),
-    couch_file:close(Fd),
-    DiskVersion.
diff --git a/src/couch/test/eunit/couch_btree_tests.erl b/src/couch/test/eunit/couch_btree_tests.erl
deleted file mode 100644
index 1c9ba7771..000000000
--- a/src/couch/test/eunit/couch_btree_tests.erl
+++ /dev/null
@@ -1,693 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(ROWS, 1000).
-% seconds
--define(TIMEOUT, 60).
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [
- {compression, none},
- {reduce, fun reduce_fun/2}
- ]),
- {Fd, Btree}.
-
-setup_kvs(_) ->
- setup().
-
-setup_red() ->
- {_, EvenOddKVs} = lists:foldl(
- fun(Idx, {Key, Acc}) ->
- case Key of
- "even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
- _ -> {"even", [{{Key, Idx}, 1} | Acc]}
- end
- end,
- {"odd", []},
- lists:seq(1, ?ROWS)
- ),
- {Fd, Btree} = setup(),
- {ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []),
- {Fd, Btree1}.
-setup_red(_) ->
- setup_red().
-
-teardown(Fd) when is_pid(Fd) ->
- ok = couch_file:close(Fd);
-teardown({Fd, _}) ->
- teardown(Fd).
-teardown(_, {Fd, _}) ->
- teardown(Fd).
-
-kvs_test_funs() ->
- [
- fun should_set_fd_correctly/2,
- fun should_set_root_correctly/2,
- fun should_create_zero_sized_btree/2,
- fun should_set_reduce_option/2,
- fun should_fold_over_empty_btree/2,
- fun should_add_all_keys/2,
- fun should_continuously_add_new_kv/2,
- fun should_continuously_remove_keys/2,
- fun should_insert_keys_in_reversed_order/2,
- fun should_add_every_odd_key_remove_every_even/2,
-        fun should_add_every_even_key_remove_every_odd/2
- ].
-
-red_test_funs() ->
- [
- fun should_reduce_whole_range/2,
- fun should_reduce_first_half/2,
- fun should_reduce_second_half/2
- ].
-
-btree_open_test_() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]),
- {
- "Ensure that created btree is really a btree record",
- ?_assert(is_record(Btree, btree))
- }.
-
-sorted_kvs_test_() ->
- Funs = kvs_test_funs(),
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- {
- "BTree with sorted keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end,
- fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1,
- fun teardown/2,
- [{Sorted, Fun} || Fun <- Funs]
- }
- }
- }.
-
-rsorted_kvs_test_() ->
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- Funs = kvs_test_funs(),
-    Reversed = lists:reverse(Sorted),
- {
- "BTree with backward sorted keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end,
- fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1,
- fun teardown/2,
- [{Reversed, Fun} || Fun <- Funs]
- }
- }
- }.
-
-shuffled_kvs_test_() ->
- Funs = kvs_test_funs(),
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- Shuffled = shuffle(Sorted),
- {
- "BTree with shuffled keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end,
- fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1,
- fun teardown/2,
- [{Shuffled, Fun} || Fun <- Funs]
- }
- }
- }.
-
-reductions_test_() ->
- {
- "BTree reductions",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end,
- fun test_util:stop/1,
- [
- {
- "Common tests",
- {
- foreach,
- fun setup_red/0,
- fun teardown/1,
- [
- fun should_reduce_without_specified_direction/1,
- fun should_reduce_forward/1,
- fun should_reduce_backward/1
- ]
- }
- },
- {
- "Range requests",
- [
- {
- "Forward direction",
- {
- foreachx,
- fun setup_red/1,
- fun teardown/2,
- [{fwd, F} || F <- red_test_funs()]
- }
- },
- {
- "Backward direction",
- {
- foreachx,
- fun setup_red/1,
- fun teardown/2,
- [{rev, F} || F <- red_test_funs()]
- }
- }
- ]
- }
- ]
- }
- }.
-
-should_set_fd_correctly(_, {Fd, Btree}) ->
- ?_assertMatch(Fd, Btree#btree.fd).
-
-should_set_root_correctly(_, {_, Btree}) ->
- ?_assertMatch(nil, Btree#btree.root).
-
-should_create_zero_sized_btree(_, {_, Btree}) ->
- ?_assertMatch(0, couch_btree:size(Btree)).
-
-should_set_reduce_option(_, {_, Btree}) ->
- ReduceFun = fun reduce_fun/2,
- Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]),
- ?_assertMatch(ReduceFun, Btree1#btree.reduce).
-
-should_fold_over_empty_btree(_, {_, Btree}) ->
- {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X + 1} end, 0),
- ?_assertEqual(EmptyRes, 0).
-
-should_add_all_keys(KeyValues, {Fd, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- [
- should_return_complete_btree_on_adding_all_keys(KeyValues, Btree1),
- should_have_non_zero_size(Btree1),
- should_have_lesser_size_than_file(Fd, Btree1),
- should_keep_root_pointer_to_kp_node(Fd, Btree1),
- should_remove_all_keys(KeyValues, Btree1)
- ].
-
-should_return_complete_btree_on_adding_all_keys(KeyValues, Btree) ->
- ?_assert(test_btree(Btree, KeyValues)).
-
-should_have_non_zero_size(Btree) ->
- ?_assert(couch_btree:size(Btree) > 0).
-
-should_have_lesser_size_than_file(Fd, Btree) ->
- ?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))).
-
-should_keep_root_pointer_to_kp_node(Fd, Btree) ->
- ?_assertMatch(
- {ok, {kp_node, _}},
- couch_file:pread_term(Fd, element(1, Btree#btree.root))
- ).
-
-should_remove_all_keys(KeyValues, Btree) ->
- Keys = keys(KeyValues),
- {ok, Btree1} = couch_btree:add_remove(Btree, [], Keys),
- {
- "Should remove all the keys",
- [
- should_produce_valid_btree(Btree1, []),
- should_be_empty(Btree1)
- ]
- }.
-
-should_continuously_add_new_kv(KeyValues, {_, Btree}) ->
- {Btree1, _} = lists:foldl(
- fun(KV, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- ?assert(couch_btree:size(BtAcc2) > PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end,
- {Btree, couch_btree:size(Btree)},
- KeyValues
- ),
- {
- "Should continuously add key-values to btree",
- [
- should_produce_valid_btree(Btree1, KeyValues),
- should_not_be_empty(Btree1)
- ]
- }.
-
-should_continuously_remove_keys(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {Btree2, _} = lists:foldl(
- fun({K, _}, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
- ?assert(couch_btree:size(BtAcc2) < PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end,
- {Btree1, couch_btree:size(Btree1)},
- KeyValues
- ),
- {
- "Should continuously remove keys from btree",
- [
- should_produce_valid_btree(Btree2, []),
- should_be_empty(Btree2)
- ]
- }.
-
-should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) ->
- KeyValuesRev = lists:reverse(KeyValues),
- {Btree1, _} = lists:foldl(
- fun(KV, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- ?assert(couch_btree:size(BtAcc2) > PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end,
- {Btree, couch_btree:size(Btree)},
- KeyValuesRev
- ),
- should_produce_valid_btree(Btree1, KeyValues).
-
-should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(
- fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true -> {Count + 1, [X | Left], Right};
- false -> {Count + 1, Left, [X | Right]}
- end
- end,
- {0, [], []},
- KeyValues
- ),
- {timeout, ?TIMEOUT, ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1))}.
-
-should_add_every_even_key_remove_every_odd(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(
- fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true -> {Count + 1, [X | Left], Right};
- false -> {Count + 1, Left, [X | Right]}
- end
- end,
- {0, [], []},
- KeyValues
- ),
- {timeout, ?TIMEOUT, ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0))}.
-
-should_reduce_without_specified_direction({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [])
- ).
-
-should_reduce_forward({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd}])
- ).
-
-should_reduce_backward({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev}])
- ).
-
-should_reduce_whole_range(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [
- {{"odd", 1}, ?ROWS div 2},
- {{"even", 2}, ?ROWS div 2}
- ]},
- fold_reduce(Btree, [
- {dir, fwd},
- {start_key, SK},
- {end_key, EK}
- ])
- )
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [
- {{"odd", 1}, (?ROWS div 2) - 1},
- {{"even", 2}, ?ROWS div 2}
- ]},
- fold_reduce(Btree, [
- {dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}
- ])
- )
- }
- ];
-should_reduce_whole_range(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", ?ROWS - 1}, {"even", 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [
- {{"even", ?ROWS}, ?ROWS div 2},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}
- ]},
- fold_reduce(Btree, [
- {dir, rev},
- {start_key, SK},
- {end_key, EK}
- ])
- )
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [
- {{"even", ?ROWS}, (?ROWS div 2) - 1},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}
- ]},
- fold_reduce(Btree, [
- {dir, rev},
- {start_key, SK},
- {end_key_gt, EK}
- ])
- )
- }
- ].
-
-should_reduce_first_half(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", 0}, {"odd", (?ROWS div 2) - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [
- {{"odd", 1}, ?ROWS div 4},
- {{"even", 2}, ?ROWS div 2}
- ]},
- fold_reduce(Btree, [
- {dir, fwd},
- {start_key, SK},
- {end_key, EK}
- ])
- )
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [
- {{"odd", 1}, (?ROWS div 4) - 1},
- {{"even", 2}, ?ROWS div 2}
- ]},
- fold_reduce(Btree, [
- {dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}
- ])
- )
- }
- ];
-should_reduce_first_half(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", ?ROWS - 1}, {"even", ?ROWS div 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [
- {{"even", ?ROWS}, (?ROWS div 4) + 1},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}
- ]},
- fold_reduce(Btree, [
- {dir, rev},
- {start_key, SK},
- {end_key, EK}
- ])
- )
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [
- {{"even", ?ROWS}, ?ROWS div 4},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}
- ]},
- fold_reduce(Btree, [
- {dir, rev},
- {start_key, SK},
- {end_key_gt, EK}
- ])
- )
- }
- ].
-
-should_reduce_second_half(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", ?ROWS div 2}, {"odd", ?ROWS - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [
- {{"odd", 1}, ?ROWS div 2},
- {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}
- ]},
- fold_reduce(Btree, [
- {dir, fwd},
- {start_key, SK},
- {end_key, EK}
- ])
- )
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [
- {{"odd", 1}, (?ROWS div 2) - 1},
- {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}
- ]},
- fold_reduce(Btree, [
- {dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}
- ])
- )
- }
- ];
-should_reduce_second_half(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", (?ROWS div 2) + 1}, {"even", 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [
- {{"even", ?ROWS}, ?ROWS div 2},
- {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}
- ]},
- fold_reduce(Btree, [
- {dir, rev},
- {start_key, SK},
- {end_key, EK}
- ])
- )
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [
- {{"even", ?ROWS}, (?ROWS div 2) - 1},
- {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}
- ]},
- fold_reduce(Btree, [
- {dir, rev},
- {start_key, SK},
- {end_key_gt, EK}
- ])
- )
- }
- ].
-
-should_produce_valid_btree(Btree, KeyValues) ->
- ?_assert(test_btree(Btree, KeyValues)).
-
-should_be_empty(Btree) ->
- ?_assertEqual(couch_btree:size(Btree), 0).
-
-should_not_be_empty(Btree) ->
- ?_assert(couch_btree:size(Btree) > 0).
-
-fold_reduce(Btree, Opts) ->
- GroupFun = fun({K1, _}, {K2, _}) ->
- K1 == K2
- end,
- FoldFun = fun(GroupedKey, Unreduced, Acc) ->
- {ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]}
- end,
- couch_btree:fold_reduce(
- Btree,
- FoldFun,
- [],
- [{key_group_fun, GroupFun}] ++ Opts
- ).
-
-keys(KVs) ->
- [K || {K, _} <- KVs].
-
-reduce_fun(reduce, KVs) ->
- length(KVs);
-reduce_fun(rereduce, Reds) ->
- lists:sum(Reds).
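With this reduce function, the final reduction of a fully built tree is simply the number of stored key/values: leaves contribute length(KVs) and inner nodes re-reduce by summing. A small sketch of the invariant the reduction tests above rely on (couch_btree:full_reduce/1 is assumed here rather than taken from this file):

    {ok, Total} = couch_btree:full_reduce(Btree),
    Total = ?ROWS.   % one count per key/value inserted in setup_red/0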
-
-shuffle(List) ->
- randomize(round(math:log(length(List)) + 0.5), List).
-
-randomize(1, List) ->
- randomize(List);
-randomize(T, List) ->
- lists:foldl(
- fun(_E, Acc) ->
- randomize(Acc)
- end,
- randomize(List),
- lists:seq(1, (T - 1))
- ).
-
-randomize(List) ->
- D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List),
- {_, D1} = lists:unzip(lists:keysort(1, D)),
- D1.
-
-test_btree(Btree, KeyValues) ->
- ok = test_key_access(Btree, KeyValues),
- ok = test_lookup_access(Btree, KeyValues),
- ok = test_final_reductions(Btree, KeyValues),
- ok = test_traversal_callbacks(Btree, KeyValues),
- true.
-
-test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
- Btree2 = lists:foldl(
- fun({K, _}, BtAcc) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
- BtAcc2
- end,
- Btree,
- OutKeyValues
- ),
- true = test_btree(Btree2, RemainingKeyValues),
-
- Btree3 = lists:foldl(
- fun(KV, BtAcc) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- BtAcc2
- end,
- Btree2,
- OutKeyValues
- ),
- true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
-
-test_key_access(Btree, List) ->
- FoldFun = fun(Element, {[HAcc | TAcc], Count}) ->
- case Element == HAcc of
- true -> {ok, {TAcc, Count + 1}};
- _ -> {ok, {TAcc, Count + 1}}
- end
- end,
- Length = length(List),
- Sorted = lists:sort(List),
- {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
- {ok, _, {[], Length}} = couch_btree:fold(
- Btree,
- FoldFun,
- {Sorted, 0},
- [{dir, rev}]
- ),
- ok.
-
-test_lookup_access(Btree, KeyValues) ->
- FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end,
- lists:foreach(
- fun({Key, Value}) ->
- [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
- {ok, _, true} = couch_btree:foldl(
- Btree,
- FoldFun,
- {Key, Value},
- [{start_key, Key}]
- )
- end,
- KeyValues
- ).
-
-test_final_reductions(Btree, KeyValues) ->
- KVLen = length(KeyValues),
- FoldLFun = fun(_X, LeadingReds, Acc) ->
- CountToStart = KVLen div 3 + Acc,
- CountToStart = couch_btree:final_reduce(Btree, LeadingReds),
- {ok, Acc + 1}
- end,
- FoldRFun = fun(_X, LeadingReds, Acc) ->
- CountToEnd = KVLen - KVLen div 3 + Acc,
- CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
- {ok, Acc + 1}
- end,
- {LStartKey, _} =
- case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
- end,
- {RStartKey, _} =
- case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
- end,
- {ok, _, FoldLRed} = couch_btree:foldl(
- Btree,
- FoldLFun,
- 0,
- [{start_key, LStartKey}]
- ),
- {ok, _, FoldRRed} = couch_btree:fold(
- Btree,
- FoldRFun,
- 0,
- [{dir, rev}, {start_key, RStartKey}]
- ),
- KVLen = FoldLRed + FoldRRed,
- ok.
-
-test_traversal_callbacks(Btree, _KeyValues) ->
- FoldFun = fun
- (visit, _GroupedKey, _Unreduced, Acc) ->
- {ok, Acc andalso false};
- (traverse, _LK, _Red, Acc) ->
- {skip, Acc andalso true}
- end,
- % With 250 items the root is a kp. Always skipping should reduce to true.
- {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]),
- ok.
diff --git a/src/couch/test/eunit/couch_changes_tests.erl b/src/couch/test/eunit/couch_changes_tests.erl
deleted file mode 100644
index 02b69f132..000000000
--- a/src/couch/test/eunit/couch_changes_tests.erl
+++ /dev/null
@@ -1,1093 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 6000).
--define(TEST_TIMEOUT, 10000).
-
--record(row, {
- id,
- seq,
- deleted = false,
- doc = nil
-}).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = create_db(DbName),
- Revs = [
- R
- || {ok, R} <- [
- save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc5">>}]})
- ]
- ],
- Rev = lists:nth(3, Revs),
- {ok, Db1} = couch_db:reopen(Db),
-
- {ok, Rev1} = save_doc(Db1, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}),
- Revs1 = Revs ++ [Rev1],
- Revs2 =
- Revs1 ++
- [
- R
- || {ok, R} <- [
- save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]})
- ]
- ],
- config:set(
- "native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist = false
- ),
- {DbName, list_to_tuple(Revs2)}.
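For orientation, this setup yields the update sequences the assertions below depend on: doc1..doc5 take seqs 1-5, the doc3 update moves it to seq 6, and doc6, _design/foo, doc7 and doc8 take seqs 7-10, leaving the database's update_seq at 10. A quick way one might confirm that in a shell:

    {ok, Db2} = couch_db:open_int(DbName, []),
    10 = couch_db:get_update_seq(Db2).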
-
-teardown({DbName, _}) ->
- config:delete("native_query_servers", "erlang", _Persist = false),
- delete_db(DbName),
- ok.
-
-changes_test_() ->
- {
- "Changes feed",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- [
- filter_by_selector(),
- filter_by_doc_id(),
- filter_by_design(),
- continuous_feed(),
- %%filter_by_custom_function()
- filter_by_filter_function(),
- filter_by_view()
- ]
- }
- }.
-
-filter_by_doc_id() ->
- {
- "Filter _doc_id",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_filter_by_specific_doc_ids/1,
- fun should_filter_by_specific_doc_ids_descending/1,
- fun should_filter_by_specific_doc_ids_with_since/1,
- fun should_filter_by_specific_doc_ids_no_result/1,
- fun should_handle_deleted_docs/1
- ]
- }
- }.
-
-filter_by_selector() ->
- {
- "Filter _selector",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_select_basic/1,
- fun should_select_with_since/1,
- fun should_select_when_no_result/1,
- fun should_select_with_deleted_docs/1,
- fun should_select_with_continuous/1,
- fun should_stop_selector_when_db_deleted/1,
- fun should_select_with_empty_fields/1,
- fun should_select_with_fields/1
- ]
- }
- }.
-
-filter_by_design() ->
- {
- "Filter _design",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_emit_only_design_documents/1
- ]
- }
- }.
-
-%% filter_by_custom_function() ->
-%% {
-%% "Filter function",
-%% {
-%% foreach,
-%% fun setup/0, fun teardown/1,
-%% [
-%% fun should_receive_heartbeats/1
-%% ]
-%% }
-%% }.
-
-filter_by_filter_function() ->
- {
- "Filter by filters",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_filter_by_doc_attribute/1,
- fun should_filter_by_user_ctx/1
- ]
- }
- }.
-
-filter_by_view() ->
- {
- "Filter _view",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_filter_by_view/1,
- fun should_filter_by_erlang_view/1
- ]
- }
- }.
-
-continuous_feed() ->
- {
- "Continuous Feed",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_filter_continuous_feed_by_specific_doc_ids/1,
- fun should_end_changes_when_db_deleted/1
- ]
- }
- }.
-
-should_filter_by_specific_doc_ids({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc4">>, Id1),
- ?assertEqual(4, Seq1),
- ?assertEqual(<<"doc3">>, Id2),
- ?assertEqual(6, Seq2),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_filter_by_specific_doc_ids_descending({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- dir = rev
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc3">>, Id1),
- ?assertEqual(6, Seq1),
- ?assertEqual(<<"doc4">>, Id2),
- ?assertEqual(4, Seq2),
- ?assertEqual(4, LastSeq)
- end
- ).
-
-should_filter_by_specific_doc_ids_with_since({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 5
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq1, id = Id1}] = Rows,
- ?assertEqual(<<"doc3">>, Id1),
- ?assertEqual(6, Seq1),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_filter_by_specific_doc_ids_no_result({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 6
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(0, length(Rows)),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_handle_deleted_docs({DbName, Revs}) ->
- ?_test(
- begin
- Rev3_2 = element(6, Revs),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"doc3">>},
- {<<"_deleted">>, true},
- {<<"_rev">>, Rev3_2}
- ]}
- ),
-
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 9
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- ?assertMatch(
- [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}],
- Rows
- ),
- ?assertEqual(11, LastSeq)
- end
- ).
-
-should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
- ?_test(
- begin
- {ok, Db} = couch_db:open_int(DbName, []),
- ChangesArgs = #changes_args{
- filter = "_doc_ids",
- feed = "continuous"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- reset_row_notifications(),
- Consumer = spawn_consumer(DbName, ChangesArgs, Req),
- ?assertEqual(ok, wait_row_notifications(2)),
- ok = pause(Consumer),
-
- Rows = get_rows(Consumer),
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc4">>, Id1),
- ?assertEqual(4, Seq1),
- ?assertEqual(<<"doc3">>, Id2),
- ?assertEqual(6, Seq2),
-
- clear_rows(Consumer),
- {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
- {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
- ok = unpause(Consumer),
- timer:sleep(100),
- ok = pause(Consumer),
- ?assertEqual([], get_rows(Consumer)),
-
- Rev4 = element(4, Revs),
- Rev3_2 = element(6, Revs),
- {ok, Rev4_2} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4}
- ]}
- ),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
- {ok, _} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4_2}
- ]}
- ),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
- {ok, Rev3_3} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"doc3">>},
- {<<"_rev">>, Rev3_2}
- ]}
- ),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(2)),
- ok = pause(Consumer),
-
- NewRows = get_rows(Consumer),
- ?assertEqual(2, length(NewRows)),
- [Row14, Row16] = NewRows,
- ?assertEqual(<<"doc4">>, Row14#row.id),
- ?assertEqual(15, Row14#row.seq),
- ?assertEqual(<<"doc3">>, Row16#row.id),
- ?assertEqual(17, Row16#row.seq),
-
- clear_rows(Consumer),
- {ok, _Rev3_4} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"doc3">>},
- {<<"_rev">>, Rev3_3}
- ]}
- ),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
-
- FinalRows = get_rows(Consumer),
-
- ok = unpause(Consumer),
- stop_consumer(Consumer),
-
- ?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows)
- end
- ).
-
-should_end_changes_when_db_deleted({DbName, _Revs}) ->
- ?_test(begin
- {ok, _Db} = couch_db:open_int(DbName, []),
- ChangesArgs = #changes_args{
- filter = "_doc_ids",
- feed = "continuous"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- Consumer = spawn_consumer(DbName, ChangesArgs, Req),
- ok = pause(Consumer),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok = unpause(Consumer),
- {_Rows, _LastSeq} = wait_finished(Consumer),
- stop_consumer(Consumer),
- ok
- end).
-
-should_select_basic({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_select_with_since({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", since = 9},
- GteDoc2 = {[{<<"$gte">>, <<"doc1">>}]},
- Selector = {[{<<"_id">>, GteDoc2}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc8">>, Id),
- ?assertEqual(10, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_select_when_no_result({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"nopers">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(0, length(Rows)),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_select_with_deleted_docs({DbName, Revs}) ->
- ?_test(
- begin
- Rev3_2 = element(6, Revs),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"doc3">>},
- {<<"_deleted">>, true},
- {<<"_rev">>, Rev3_2}
- ]}
- ),
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
- ?assertMatch(
- [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}],
- Rows
- ),
- ?assertEqual(11, LastSeq)
- end
- ).
-
-should_select_with_continuous({DbName, Revs}) ->
- ?_test(
- begin
- {ok, Db} = couch_db:open_int(DbName, []),
- ChArgs = #changes_args{filter = "_selector", feed = "continuous"},
- GteDoc8 = {[{<<"$gte">>, <<"doc8">>}]},
- Selector = {[{<<"_id">>, GteDoc8}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- reset_row_notifications(),
- Consumer = spawn_consumer(DbName, ChArgs, Req),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
- Rows = get_rows(Consumer),
- ?assertMatch(
- [#row{seq = 10, id = <<"doc8">>, deleted = false}],
- Rows
- ),
- clear_rows(Consumer),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc01">>}]}),
- ok = unpause(Consumer),
- timer:sleep(100),
- ok = pause(Consumer),
- ?assertEqual([], get_rows(Consumer)),
- Rev4 = element(4, Revs),
- Rev8 = element(10, Revs),
- {ok, _} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"doc8">>},
- {<<"_rev">>, Rev8}
- ]}
- ),
- {ok, _} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4}
- ]}
- ),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
- NewRows = get_rows(Consumer),
- ?assertMatch(
- [#row{seq = _, id = <<"doc8">>, deleted = false}],
- NewRows
- )
- end
- ).
-
-should_stop_selector_when_db_deleted({DbName, _Revs}) ->
- ?_test(
- begin
- {ok, _Db} = couch_db:open_int(DbName, []),
- ChArgs = #changes_args{filter = "_selector", feed = "continuous"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- Consumer = spawn_consumer(DbName, ChArgs, Req),
- ok = pause(Consumer),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok = unpause(Consumer),
- {_Rows, _LastSeq} = wait_finished(Consumer),
- stop_consumer(Consumer),
- ok
- end
- ).
-
-should_select_with_empty_fields({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", include_docs = true},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req =
- {json_req,
- {[
- {<<"selector">>, Selector},
- {<<"fields">>, []}
- ]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id, doc = Doc}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq),
- ?assertMatch({[{_K1, _V1}, {_K2, _V2}]}, Doc)
- end
- ).
-
-should_select_with_fields({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", include_docs = true},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req =
- {json_req,
- {[
- {<<"selector">>, Selector},
- {<<"fields">>, [<<"_id">>, <<"nope">>]}
- ]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id, doc = Doc}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq),
- ?assertMatch(Doc, {[{<<"_id">>, <<"doc3">>}]})
- end
- ).
-
-should_emit_only_design_documents({DbName, Revs}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_design"
- },
- Req = {json_req, null},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- ?assertEqual(UpSeq, LastSeq),
- ?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows),
-
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, _} = save_doc(
- Db,
- {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"_rev">>, element(8, Revs)},
- {<<"_deleted">>, true}
- ]}
- ),
-
- couch_db:close(Db),
- {Rows2, LastSeq2, _} = run_changes_query(DbName, ChArgs, Req),
-
- UpSeq2 = UpSeq + 1,
-
- ?assertEqual(1, length(Rows2)),
- ?assertEqual(UpSeq2, LastSeq2),
- ?assertEqual(
- [
- #row{
- seq = 11,
- id = <<"_design/foo">>,
- deleted = true
- }
- ],
- Rows2
- )
- end
- ).
-
-%% should_receive_heartbeats(_) ->
-%% {timeout, ?TEST_TIMEOUT div 1000,
-%% ?_test(
-%% begin
-%% DbName = ?tempdb(),
-%% Timeout = 100,
-%% {ok, Db} = create_db(DbName),
-
-%% {ok, _} = save_doc(Db, {[
-%% {<<"_id">>, <<"_design/filtered">>},
-%% {<<"language">>, <<"javascript">>},
-%% {<<"filters">>, {[
-%% {<<"foo">>, <<"function(doc) {
-%% return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">>
-%% }]}}
-%% ]}),
-
-%% ChangesArgs = #changes_args{
-%% filter = "filtered/foo",
-%% feed = "continuous",
-%% timeout = 10000,
-%% heartbeat = 1000
-%% },
-%% Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
-
-%% {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
-
-%% Heartbeats = get_heartbeats(Consumer),
-%% ?assert(Heartbeats > 0),
-
-%% {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
-
-%% Heartbeats2 = get_heartbeats(Consumer),
-%% ?assert(Heartbeats2 > Heartbeats),
-
-%% Rows = get_rows(Consumer),
-%% ?assertEqual(3, length(Rows)),
-
-%% {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}),
-%% timer:sleep(Timeout),
-
-%% Heartbeats3 = get_heartbeats(Consumer),
-%% ?assert(Heartbeats3 > Heartbeats2)
-%% end)}.
-
-should_filter_by_doc_attribute({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>,
- {[
- {<<"valid">>, <<
- "function(doc, req) {"
- " if (doc._id == 'doc3') {"
- " return true; "
- "} }"
- >>}
- ]}}
- ]}
- ),
- ChArgs = #changes_args{filter = "app/valid"},
- Req = {json_req, null},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_filter_by_user_ctx({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>,
- {[
- {<<"valid">>, <<
- "function(doc, req) {"
- " if (req.userCtx.name == doc._id) {"
- " return true; "
- "} }"
- >>}
- ]}}
- ]}
- ),
- ChArgs = #changes_args{filter = "app/valid"},
- UserCtx = #user_ctx{name = <<"doc3">>, roles = []},
- {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx),
- Req =
- {json_req,
- {[
- {
- <<"userCtx">>, couch_util:json_user_ctx(DbRec)
- }
- ]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_filter_by_view({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"views">>,
- {[
- {<<"valid">>,
- {[
- {<<"map">>, <<
- "function(doc) {"
- " if (doc._id == 'doc3') {"
- " emit(doc); "
- "} }"
- >>}
- ]}}
- ]}}
- ]}
- ),
- ChArgs = #changes_args{filter = "_view"},
- Req =
- {json_req,
- {[
- {
- <<"query">>,
- {[
- {<<"view">>, <<"app/valid">>}
- ]}
- }
- ]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-should_filter_by_erlang_view({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"erlang">>},
- {<<"views">>,
- {[
- {<<"valid">>,
- {[
- {<<"map">>, <<
- "fun({Doc}) ->"
- " case lists:keyfind(<<\"_id\">>, 1, Doc) of"
- " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); "
- " false -> ok"
- " end "
- "end."
- >>}
- ]}}
- ]}}
- ]}
- ),
- ChArgs = #changes_args{filter = "_view"},
- Req =
- {json_req,
- {[
- {
- <<"query">>,
- {[
- {<<"view">>, <<"app/valid">>}
- ]}
- }
- ]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end
- ).
-
-update_ddoc(DbName, DDoc) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db).
-
-run_changes_query(DbName, ChangesArgs, Opts) ->
- Consumer = spawn_consumer(DbName, ChangesArgs, Opts),
- {Rows, LastSeq} = wait_finished(Consumer),
- {ok, Db} = couch_db:open_int(DbName, []),
- UpSeq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- stop_consumer(Consumer),
- {Rows, LastSeq, UpSeq}.
-
-save_doc(Db, Json) ->
- Doc = couch_doc:from_json_obj(Json),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- {ok, couch_doc:rev_to_str(Rev)}.
-
-get_rows({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {get_rows, Ref},
- Resp =
- receive
- {rows, Ref, Rows} ->
- Rows
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-%% get_heartbeats({Consumer, _}) ->
-%% Ref = make_ref(),
-%% Consumer ! {get_heartbeats, Ref},
-%% Resp = receive
-%%         {heartbeats, Ref, HeartBeats} ->
-%% HeartBeats
-%% after ?TIMEOUT ->
-%% timeout
-%% end,
-%% ?assertNotEqual(timeout, Resp),
-%% Resp.
-
-clear_rows({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {reset, Ref},
- Resp =
- receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-stop_consumer({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {stop, Ref},
- Resp =
- receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-pause({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {pause, Ref},
- Resp =
- receive
- {paused, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-unpause({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {continue, Ref},
- Resp =
- receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-wait_finished({_, ConsumerRef}) ->
- receive
- {consumer_finished, Rows, LastSeq} ->
- {Rows, LastSeq};
- {'DOWN', ConsumerRef, _, _, Msg} when Msg == normal; Msg == ok ->
- ok;
- {'DOWN', ConsumerRef, _, _, Msg} ->
- erlang:error(
- {consumer_died, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, Msg}
- ]}
- )
- after ?TIMEOUT ->
- erlang:error(
- {consumer_died, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, timeout}
- ]}
- )
- end.
-
-reset_row_notifications() ->
- receive
- row ->
- reset_row_notifications()
- after 0 ->
- ok
- end.
-
-wait_row_notifications(N) ->
- receive
- row when N == 1 ->
- ok;
- row when N > 1 ->
- wait_row_notifications(N - 1)
- after ?TIMEOUT ->
- timeout
- end.
-
-spawn_consumer(DbName, ChangesArgs0, Req) ->
- Parent = self(),
- spawn_monitor(fun() ->
- put(heartbeat_count, 0),
- Callback = fun
- ({change, {Change}, _}, _, Acc) ->
- Id = couch_util:get_value(<<"id">>, Change),
- Seq = couch_util:get_value(<<"seq">>, Change),
- Del = couch_util:get_value(<<"deleted">>, Change, false),
- Doc = couch_util:get_value(doc, Change, nil),
- Parent ! row,
- [#row{id = Id, seq = Seq, deleted = Del, doc = Doc} | Acc];
- ({stop, LastSeq}, _, Acc) ->
- Parent ! {consumer_finished, lists:reverse(Acc), LastSeq},
- stop_loop(Parent, Acc);
- (timeout, _, Acc) ->
- put(heartbeat_count, get(heartbeat_count) + 1),
- maybe_pause(Parent, Acc);
- (_, _, Acc) ->
- maybe_pause(Parent, Acc)
- end,
- {ok, Db} = couch_db:open_int(DbName, []),
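-        % If the test did not set a timeout or heartbeat, use short defaults so the
-        % feed wakes up regularly and the consumer can process control messages.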
- ChangesArgs =
- case
- (ChangesArgs0#changes_args.timeout =:= undefined) andalso
- (ChangesArgs0#changes_args.heartbeat =:= undefined)
- of
- true ->
- ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100};
- false ->
- ChangesArgs0
- end,
- FeedFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db),
- try
- FeedFun({Callback, []})
- catch
- throw:{stop, _} -> ok;
- _:Error -> exit(Error)
- after
- couch_db:close(Db)
- end
- end).
-
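-% Non-blocking check for control messages from the test process: report rows or
-% heartbeats, reset the accumulator, pause, or stop; otherwise return right away.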
-maybe_pause(Parent, Acc) ->
- receive
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- maybe_pause(Parent, Acc);
- {get_heartbeats, Ref} ->
-            Parent ! {heartbeats, Ref, get(heartbeat_count)},
- maybe_pause(Parent, Acc);
- {reset, Ref} ->
- Parent ! {ok, Ref},
- maybe_pause(Parent, []);
- {pause, Ref} ->
- Parent ! {paused, Ref},
- pause_loop(Parent, Acc);
- {stop, Ref} ->
- Parent ! {ok, Ref},
- throw({stop, Acc});
- V when V /= updated ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, V},
- {reason, "Received unexpected message"}
- ]}
- )
- after 0 ->
- Acc
- end.
-
-pause_loop(Parent, Acc) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref},
- throw({stop, Acc});
- {reset, Ref} ->
- Parent ! {ok, Ref},
- pause_loop(Parent, []);
- {continue, Ref} ->
- Parent ! {ok, Ref},
- Acc;
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- pause_loop(Parent, Acc)
- end.
-
-stop_loop(Parent, Acc) ->
- receive
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- stop_loop(Parent, Acc);
- {stop, Ref} ->
- Parent ! {ok, Ref},
- Acc
- end.
-
-create_db(DbName) ->
- couch_db:create(DbName, [?ADMIN_CTX, overwrite]).
-
-delete_db(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]).
diff --git a/src/couch/test/eunit/couch_db_doc_tests.erl b/src/couch/test/eunit/couch_db_doc_tests.erl
deleted file mode 100644
index dc1ac79e6..000000000
--- a/src/couch/test/eunit/couch_db_doc_tests.erl
+++ /dev/null
@@ -1,117 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_doc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-start() ->
- test_util:start_couch([ioq]).
-
-setup() ->
- DbName = ?tempdb(),
- config:set("couchdb", "stem_interactive_updates", "false", false),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
- DbName.
-
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-couch_db_doc_test_() ->
- {
- "CouchDB doc tests",
- {
- setup,
- fun start/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_truncate_number_of_revisions/1,
- fun should_raise_bad_request_on_invalid_rev/1,
- fun should_allow_access_in_doc_keys_test/1
- ]
- }
- }
- }.
-
-should_truncate_number_of_revisions(DbName) ->
- DocId = <<"foo">>,
- Db = open_db(DbName),
- couch_db:set_revs_limit(Db, 5),
- Rev = create_doc(Db, DocId),
- Rev10 = add_revisions(Db, DocId, Rev, 10),
- {ok, [{ok, #doc{revs = {11, Revs}}}]} = open_doc_rev(Db, DocId, Rev10),
- ?_assertEqual(5, length(Revs)).
-
-should_raise_bad_request_on_invalid_rev(DbName) ->
- DocId = <<"foo">>,
- InvalidRev1 = <<"foo">>,
- InvalidRev2 = <<"a-foo">>,
- InvalidRev3 = <<"1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>,
- Expect = {bad_request, <<"Invalid rev format">>},
- Db = open_db(DbName),
- create_doc(Db, DocId),
- [
- {InvalidRev1, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))},
- {InvalidRev2, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))},
- {InvalidRev3, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))}
- ].
-
-should_allow_access_in_doc_keys_test(_DbName) ->
- Json = <<"{\"_id\":\"foo\",\"_access\":[\"test\"]}">>,
- EJson = couch_util:json_decode(Json),
- Expected = {[{<<"_id">>, <<"foo">>}, {<<"_access">>, [<<"test">>]}]},
- EJson = Expected,
- Doc = couch_doc:from_json_obj(EJson),
- NewEJson = couch_doc:to_json_obj(Doc, []),
- ?_assertEqual(NewEJson, Expected).
-
-open_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- Db.
-
-create_doc(Db, DocId) ->
- add_revision(Db, DocId, undefined).
-
-open_doc_rev(Db0, DocId, Rev) ->
- {ok, Db} = couch_db:reopen(Db0),
- couch_db:open_doc_revs(Db, DocId, [couch_doc:parse_rev(Rev)], []).
-
-add_revision(Db, DocId, undefined) ->
- add_revision(Db, DocId, []);
-add_revision(Db, DocId, Rev) when is_binary(Rev) ->
- add_revision(Db, DocId, [{<<"_rev">>, Rev}]);
-add_revision(Db0, DocId, Rev) ->
- {ok, Db} = couch_db:reopen(Db0),
- Doc = couch_doc:from_json_obj({
- [
- {<<"_id">>, DocId},
- {<<"value">>, DocId}
- ] ++ Rev
- }),
- {ok, NewRev} = couch_db:update_doc(Db, Doc, []),
- couch_doc:rev_to_str(NewRev).
-
-add_revisions(Db, DocId, Rev, N) ->
- lists:foldl(
- fun(_, OldRev) ->
- add_revision(Db, DocId, OldRev)
- end,
- Rev,
- lists:seq(1, N)
- ).
diff --git a/src/couch/test/eunit/couch_db_mpr_tests.erl b/src/couch/test/eunit/couch_db_mpr_tests.erl
deleted file mode 100644
index 3a9577a0d..000000000
--- a/src/couch/test/eunit/couch_db_mpr_tests.erl
+++ /dev/null
@@ -1,127 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_mpr_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 30).
-
--define(USER, "couch_db_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(JSON_BODY, "{\"foo\": \"bar\"}").
--define(CONTENT_MULTI_RELATED,
- {"Content-Type", "multipart/related;boundary=\"bound\""}
-).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- TmpDb = ?tempdb(),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- Url.
-
-teardown(Url) ->
- catch delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-create_doc(Url, Id, Body, Type) ->
- test_request:put(Url ++ "/" ++ Id, [Type, ?AUTH], Body).
-
-delete_doc(Url, Id, Rev) ->
- test_request:delete(Url ++ "/" ++ Id ++ "?rev=" ++ ?b2l(Rev)).
-
-couch_db_mpr_test_() ->
- {
- "multi-part attachment tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun recreate_with_mpr/1
- ]
- }
- }
- }.
-
-recreate_with_mpr(Url) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocId1 = "foo",
- DocId2 = "bar",
-
- create_db(Url),
- create_and_delete_doc(Url, DocId1),
- Rev1 = create_with_mpr(Url, DocId1),
- delete_db(Url),
-
- create_db(Url),
- create_and_delete_doc(Url, DocId1),
- % We create a second unrelated doc to change the
- % position on disk where the attachment is written
- % so that we can assert that the position on disk
- % is not included when calculating a revision.
- create_and_delete_doc(Url, DocId2),
- Rev2 = create_with_mpr(Url, DocId1),
- delete_db(Url),
-
- ?assertEqual(Rev1, Rev2)
- end)}.
-
-create_and_delete_doc(Url, DocId) ->
- {ok, _, _, Resp} = create_doc(Url, DocId, ?JSON_BODY, ?CONTENT_JSON),
- {Props} = ?JSON_DECODE(Resp),
- Rev = couch_util:get_value(<<"rev">>, Props, undefined),
- ?assert(is_binary(Rev)),
- {ok, _, _, _} = delete_doc(Url, DocId, Rev).
-
-create_with_mpr(Url, DocId) ->
- {ok, _, _, Resp} = create_doc(Url, DocId, mpr(), ?CONTENT_MULTI_RELATED),
- {Props} = ?JSON_DECODE(Resp),
- Rev = couch_util:get_value(<<"rev">>, Props, undefined),
- ?assert(is_binary(Rev)),
- Rev.
-
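-% A minimal multipart/related body with one 21-byte text/plain attachment.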
-mpr() ->
- lists:concat([
- "--bound\r\n",
- "Content-Type: application/json\r\n\r\n",
- "{",
- "\"body\":\"stuff\","
- "\"_attachments\":",
- "{\"foo.txt\":{",
- "\"follows\":true,",
- "\"content_type\":\"text/plain\","
- "\"length\":21",
- "}}"
- "}",
- "\r\n--bound\r\n\r\n",
- "this is 21 chars long",
- "\r\n--bound--epilogue"
- ]).
diff --git a/src/couch/test/eunit/couch_db_plugin_tests.erl b/src/couch/test/eunit/couch_db_plugin_tests.erl
deleted file mode 100644
index bcfbffb05..000000000
--- a/src/couch/test/eunit/couch_db_plugin_tests.erl
+++ /dev/null
@@ -1,233 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_plugin_tests).
-
--export([
- validate_dbname/2,
- before_doc_update/3,
- after_doc_read/2,
- validate_docid/1,
- check_is_admin/1,
- on_delete/2
-]).
-
-%% couch_epi_plugin behaviour
--export([
- app/0,
- providers/0,
- services/0,
- data_providers/0,
- data_subscriptions/0,
- processes/0,
- notify/3
-]).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% couch_epi_plugin behaviour
-
-app() -> test_app.
-providers() -> [{couch_db, ?MODULE}].
-services() -> [].
-data_providers() -> [].
-data_subscriptions() -> [].
-processes() -> [].
-notify(_, _, _) -> ok.
-fake_db() -> test_util:fake_db([]).
-
-setup() ->
- couch_tests:setup([
- couch_epi_dispatch:dispatch(chttpd, ?MODULE)
- ]).
-
-teardown(Ctx) ->
- couch_tests:teardown(Ctx).
-
-validate_dbname({true, _Db}, _) -> {decided, true};
-validate_dbname({false, _Db}, _) -> {decided, false};
-validate_dbname({fail, _Db}, _) -> throw(validate_dbname);
-validate_dbname({pass, _Db}, _) -> no_decision.
-
-before_doc_update({fail, _Doc}, _Db, interactive_edit) ->
- throw(before_doc_update);
-before_doc_update({true, Doc}, Db, interactive_edit) ->
- [{true, [before_doc_update | Doc]}, Db, interactive_edit];
-before_doc_update({false, Doc}, Db, interactive_edit) ->
- [{false, Doc}, Db, interactive_edit].
-
-after_doc_read({fail, _Doc}, _Db) -> throw(after_doc_read);
-after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read | Doc]}, Db];
-after_doc_read({false, Doc}, Db) -> [{false, Doc}, Db].
-
-validate_docid({true, _Id}) -> true;
-validate_docid({false, _Id}) -> false;
-validate_docid({fail, _Id}) -> throw(validate_docid).
-
-check_is_admin({true, _Db}) -> true;
-check_is_admin({false, _Db}) -> false;
-check_is_admin({fail, _Db}) -> throw(check_is_admin).
-
-on_delete(true, _Opts) -> true;
-on_delete(false, _Opts) -> false;
-on_delete(fail, _Opts) -> throw(on_delete).
-
-callback_test_() ->
- {
- "callback tests",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- {"validate_dbname_match", fun validate_dbname_match/0},
- {"validate_dbname_no_match", fun validate_dbname_no_match/0},
- {"validate_dbname_throw", fun validate_dbname_throw/0},
- {"validate_dbname_pass", fun validate_dbname_pass/0},
-
- {"before_doc_update_match", fun before_doc_update_match/0},
- {"before_doc_update_no_match", fun before_doc_update_no_match/0},
- {"before_doc_update_throw", fun before_doc_update_throw/0},
-
- {"after_doc_read_match", fun after_doc_read_match/0},
- {"after_doc_read_no_match", fun after_doc_read_no_match/0},
- {"after_doc_read_throw", fun after_doc_read_throw/0},
-
- {"validate_docid_match", fun validate_docid_match/0},
- {"validate_docid_no_match", fun validate_docid_no_match/0},
- {"validate_docid_throw", fun validate_docid_throw/0},
-
- {"check_is_admin_match", fun check_is_admin_match/0},
- {"check_is_admin_no_match", fun check_is_admin_no_match/0},
- {"check_is_admin_throw", fun check_is_admin_throw/0},
-
- {"on_delete_match", fun on_delete_match/0},
- {"on_delete_no_match", fun on_delete_no_match/0},
- {"on_delete_throw", fun on_delete_throw/0}
- ]
- }
- }.
-
-validate_dbname_match() ->
- ?assert(
- couch_db_plugin:validate_dbname(
- {true, [db]}, db, fun(_, _) -> pass end
- )
- ).
-
-validate_dbname_no_match() ->
- ?assertNot(
- couch_db_plugin:validate_dbname(
- {false, [db]}, db, fun(_, _) -> pass end
- )
- ).
-
-validate_dbname_throw() ->
- ?assertThrow(
- validate_dbname,
- couch_db_plugin:validate_dbname(
- {fail, [db]}, db, fun(_, _) -> pass end
- )
- ).
-
-validate_dbname_pass() ->
- ?assertEqual(
- pass,
- couch_db_plugin:validate_dbname(
- {pass, [db]}, db, fun(_, _) -> pass end
- )
- ).
-
-before_doc_update_match() ->
- ?assertMatch(
- {true, [before_doc_update, doc]},
- couch_db_plugin:before_doc_update(
- fake_db(), {true, [doc]}, interactive_edit
- )
- ).
-
-before_doc_update_no_match() ->
- ?assertMatch(
- {false, [doc]},
- couch_db_plugin:before_doc_update(
- fake_db(), {false, [doc]}, interactive_edit
- )
- ).
-
-before_doc_update_throw() ->
- ?assertThrow(
- before_doc_update,
- couch_db_plugin:before_doc_update(
- fake_db(), {fail, [doc]}, interactive_edit
- )
- ).
-
-after_doc_read_match() ->
- ?assertMatch(
- {true, [after_doc_read, doc]},
- couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})
- ).
-
-after_doc_read_no_match() ->
- ?assertMatch(
- {false, [doc]},
- couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})
- ).
-
-after_doc_read_throw() ->
- ?assertThrow(
- after_doc_read,
- couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})
- ).
-
-validate_docid_match() ->
- ?assert(couch_db_plugin:validate_docid({true, [doc]})).
-
-validate_docid_no_match() ->
- ?assertNot(couch_db_plugin:validate_docid({false, [doc]})).
-
-validate_docid_throw() ->
- ?assertThrow(
- validate_docid,
- couch_db_plugin:validate_docid({fail, [doc]})
- ).
-
-check_is_admin_match() ->
- ?assert(couch_db_plugin:check_is_admin({true, [db]})).
-
-check_is_admin_no_match() ->
- ?assertNot(couch_db_plugin:check_is_admin({false, [db]})).
-
-check_is_admin_throw() ->
- ?assertThrow(
- check_is_admin,
- couch_db_plugin:check_is_admin({fail, [db]})
- ).
-
-on_delete_match() ->
- ?assertMatch(
- [true],
- couch_db_plugin:on_delete(true, [])
- ).
-
-on_delete_no_match() ->
- ?assertMatch(
- [false],
- couch_db_plugin:on_delete(false, [])
- ).
-
-on_delete_throw() ->
- ?assertThrow(
- on_delete,
- couch_db_plugin:on_delete(fail, [])
- ).
diff --git a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
deleted file mode 100644
index 5ca658129..000000000
--- a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
+++ /dev/null
@@ -1,77 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_props_upgrade_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-setup() ->
- DbName = <<"test">>,
- DbFileName = "test.couch",
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
-
- DbDir = config:get("couchdb", "database_dir"),
- NewDbFilePath = filename:join([DbDir, DbFileName]),
-
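-    % Copy the old-format fixture database into the current data directory.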
- file:delete(NewDbFilePath),
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
-
- DbName.
-
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-old_db_info_test_() ->
- {
- "Old database versions work",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun can_get_props/1,
- fun can_get_db_info/1,
- fun can_compact_db/1
- ]
- }
- }
- }.
-
-can_get_props(DbName) ->
- ?_test(begin
- {ok, Db} = couch_db:open_int(DbName, []),
- Props = couch_db_engine:get_props(Db),
- ?assert(is_list(Props))
- end).
-
-can_get_db_info(DbName) ->
- ?_test(begin
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- Props = couch_util:get_value(props, Info),
- ?assertEqual({[]}, Props)
- end).
-
-can_compact_db(DbName) ->
- ?_test(begin
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:start_compact(Db),
- couch_db:wait_for_compaction(Db)
- end)
- end).
diff --git a/src/couch/test/eunit/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl
deleted file mode 100644
index f0baa920b..000000000
--- a/src/couch/test/eunit/couch_db_split_tests.erl
+++ /dev/null
@@ -1,358 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_split_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(RINGTOP, 2 bsl 31).
-% seconds
--define(TIMEOUT, 60).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-teardown(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- FilePath = couch_db:get_filepath(Db),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath).
-
-split_test_() ->
- Cases = [
- {"Should split an empty shard", 0, 2},
- {"Should split shard in half", 100, 2},
- {"Should split shard in three", 99, 3},
- {"Should split shard in four", 100, 4}
- ],
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop/1,
- [
- {
- foreachx,
- fun(_) -> setup() end,
- fun(_, St) -> teardown(St) end,
- [{Case, fun should_split_shard/2} || Case <- Cases]
- },
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_fail_on_missing_source/1,
- fun should_fail_on_existing_target/1,
- fun should_fail_on_invalid_target_name/1,
- fun should_crash_on_invalid_tmap/1,
- fun should_fail_on_opened_target/1
- ]
- }
- ]
- }.
-
-should_split_shard({Desc, TotalDocs, Q}, DbName) ->
- {ok, ExpectSeq} = create_docs(DbName, TotalDocs),
- Ranges = make_ranges(Q),
- TMap = make_targets(Ranges),
- DocsPerRange = TotalDocs div Q,
- PickFun = make_pickfun(DocsPerRange),
- {Desc, timeout, ?TIMEOUT,
- ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- maps:map(
- fun(Range, Name) ->
- {ok, Db} = couch_db:open_int(Name, []),
- FilePath = couch_db:get_filepath(Db),
- %% target actually exists
- ?assertMatch({ok, _}, file:read_file_info(FilePath)),
- %% target's update seq is the same as source's update seq
- USeq = couch_db:get_update_seq(Db),
- ?assertEqual(ExpectSeq, USeq),
-                    %% target shard has all the expected docs in its range
- {ok, DocsInShard} = couch_db:fold_docs(
- Db,
- fun(FDI, Acc) ->
- DocId = FDI#full_doc_info.id,
- ExpectedRange = PickFun(DocId, Ranges, undefined),
- ?assertEqual(ExpectedRange, Range),
- {ok, Acc + 1}
- end,
- 0
- ),
- ?assertEqual(DocsPerRange, DocsInShard),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath)
- end,
- TMap
- )
- end)}.
-
-should_fail_on_missing_source(_DbName) ->
- DbName = ?tempdb(),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertEqual({error, missing_source}, Response).
-
-should_fail_on_existing_target(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(
- fun(_, TName) ->
- % We create the target but make sure to remove it from the cache so we
-            % hit the eexist error instead of already_opened
- {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
- Pid = couch_db:get_pid(Db),
- ok = couch_db:close(Db),
- exit(Pid, kill),
- test_util:wait(fun() ->
- case ets:lookup(couch_server:couch_dbs(DbName), TName) of
- [] -> ok;
- [_ | _] -> wait
- end
- end),
- TName
- end,
- make_targets(Ranges)
- ),
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertMatch({error, {target_create_error, _, eexist}}, Response).
-
-should_fail_on_invalid_target_name(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(
- fun([B, _], _) ->
- iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)])
- end,
- make_targets(Ranges)
- ),
- Expect =
- {error, {target_create_error, <<"_$00000000">>, {illegal_database_name, <<"_$00000000">>}}},
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertMatch(Expect, Response).
-
-should_crash_on_invalid_tmap(DbName) ->
- Ranges = make_ranges(1),
- TMap = make_targets(Ranges),
- ?_assertError(
- function_clause,
- couch_db_split:split(DbName, TMap, fun fake_pickfun/3)
- ).
-
-should_fail_on_opened_target(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(
- fun(_, TName) ->
- % We create and keep the target open but delete
- % its file on disk so we don't fail with eexist
- {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
- FilePath = couch_db:get_filepath(Db),
- ok = file:delete(FilePath),
- TName
- end,
- make_targets(Ranges)
- ),
- ?_assertMatch(
- {error, {target_create_error, _, already_opened}},
- couch_db_split:split(DbName, TMap, fun fake_pickfun/3)
- ).
-
-copy_local_docs_test_() ->
- Cases = [
- {"Should work with no docs", 0, 2},
- {"Should copy local docs after split in two", 100, 2},
- {"Should copy local docs after split in three", 99, 3},
- {"Should copy local docs after split in four", 100, 4}
- ],
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop/1,
- [
- {
- foreachx,
- fun(_) -> setup() end,
- fun(_, St) -> teardown(St) end,
- [{Case, fun should_copy_local_docs/2} || Case <- Cases]
- },
- {"Should return error on missing source",
- fun should_fail_copy_local_on_missing_source/0}
- ]
- }.
-
-should_copy_local_docs({Desc, TotalDocs, Q}, DbName) ->
- {ok, ExpectSeq} = create_docs(DbName, TotalDocs),
- Ranges = make_ranges(Q),
- TMap = make_targets(Ranges),
- DocsPerRange = TotalDocs div Q,
- PickFun = make_pickfun(DocsPerRange),
- {Desc, timeout, ?TIMEOUT,
- ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
- ?assertEqual(ok, Response),
- maps:map(
- fun(Range, Name) ->
- {ok, Db} = couch_db:open_int(Name, []),
- FilePath = couch_db:get_filepath(Db),
-                    %% target shard has all the expected docs in its range
- {ok, DocsInShard} = couch_db:fold_local_docs(
- Db,
- fun(Doc, Acc) ->
- DocId = Doc#doc.id,
- ExpectedRange = PickFun(DocId, Ranges, undefined),
- ?assertEqual(ExpectedRange, Range),
- {ok, Acc + 1}
- end,
- 0,
- []
- ),
- ?assertEqual(DocsPerRange, DocsInShard),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath)
- end,
- TMap
- )
- end)}.
-
-should_fail_copy_local_on_missing_source() ->
- DbName = ?tempdb(),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- PickFun = fun fake_pickfun/3,
- Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
- ?assertEqual({error, missing_source}, Response).
-
-cleanup_target_test_() ->
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop/1,
- [
- {
- setup,
- fun setup/0,
- fun teardown/1,
- fun should_delete_existing_targets/1
- },
- {"Should return error on missing source",
- fun should_fail_cleanup_target_on_missing_source/0}
- ]
- }.
-
-should_delete_existing_targets(SourceName) ->
- {ok, ExpectSeq} = create_docs(SourceName, 100),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- PickFun = make_pickfun(50),
- ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(SourceName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- maps:map(
- fun(_Range, TargetName) ->
- FilePath = couch_util:with_db(TargetName, fun(Db) ->
- couch_db:get_filepath(Db)
- end),
- ?assertMatch({ok, _}, file:read_file_info(FilePath)),
- Response = couch_db_split:cleanup_target(SourceName, TargetName),
- ?assertEqual(ok, Response),
- ?assertEqual({error, enoent}, file:read_file_info(FilePath))
- end,
- TMap
- )
- end).
-
-should_fail_cleanup_target_on_missing_source() ->
- SourceName = ?tempdb(),
- TargetName = ?tempdb(),
- Response = couch_db_split:cleanup_target(SourceName, TargetName),
- ?assertEqual({error, missing_source}, Response).
-
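-% Build a pick function that maps numeric doc ids onto target ranges:
-% ids 1..DocsPerRange land in the first range, the next batch in the second, etc.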
-make_pickfun(DocsPerRange) ->
- fun(DocId, Ranges, _HashFun) ->
- Id = docid_to_integer(DocId),
- case {Id div DocsPerRange, Id rem DocsPerRange} of
- {N, 0} ->
- lists:nth(N, Ranges);
- {N, _} ->
- lists:nth(N + 1, Ranges)
- end
- end.
-
-fake_pickfun(_, Ranges, _) ->
- hd(Ranges).
-
-make_targets([]) ->
- maps:new();
-make_targets(Ranges) ->
- Targets = lists:map(
- fun(Range) ->
- {Range, ?tempdb()}
- end,
- Ranges
- ),
- maps:from_list(Targets).
-
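-% Split the [0, ?RINGTOP) hash ring into Q contiguous [Begin, End] ranges.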
-make_ranges(Q) when Q > 0 ->
- Incr = (2 bsl 31) div Q,
- lists:map(
- fun
- (End) when End >= ?RINGTOP - 1 ->
- [End - Incr, ?RINGTOP - 1];
- (End) ->
- [End - Incr, End - 1]
- end,
- lists:seq(Incr, ?RINGTOP, Incr)
- );
-make_ranges(_) ->
- [].
-
-create_docs(DbName, 0) ->
- couch_util:with_db(DbName, fun(Db) ->
- UpdateSeq = couch_db:get_update_seq(Db),
- {ok, UpdateSeq}
- end);
-create_docs(DbName, DocNum) ->
- Docs = lists:foldl(
- fun(I, Acc) ->
- [create_doc(I), create_local_doc(I) | Acc]
- end,
- [],
- lists:seq(DocNum, 1, -1)
- ),
- couch_util:with_db(DbName, fun(Db) ->
- {ok, _Result} = couch_db:update_docs(Db, Docs),
- {ok, Db1} = couch_db:reopen(Db),
- UpdateSeq = couch_db:get_update_seq(Db1),
- {ok, UpdateSeq}
- end).
-
-create_doc(I) ->
- create_prefix_id_doc(I, "").
-
-create_local_doc(I) ->
- create_prefix_id_doc(I, "_local/").
-
-create_prefix_id_doc(I, Prefix) ->
- Id = iolist_to_binary(io_lib:format(Prefix ++ "~3..0B", [I])),
- couch_doc:from_json_obj({[{<<"_id">>, Id}, {<<"value">>, I}]}).
-
-docid_to_integer(<<"_local/", DocId/binary>>) ->
- docid_to_integer(DocId);
-docid_to_integer(DocId) ->
- list_to_integer(binary_to_list(DocId)).
diff --git a/src/couch/test/eunit/couch_db_tests.erl b/src/couch/test/eunit/couch_db_tests.erl
deleted file mode 100644
index 82137dc40..000000000
--- a/src/couch/test/eunit/couch_db_tests.erl
+++ /dev/null
@@ -1,212 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(TIMEOUT, 120).
-
-create_delete_db_test_() ->
- {
- "Database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> ?tempdb() end,
- [
- fun should_create_db/1,
- fun should_delete_db/1
- ]
- }
- }
- }.
-
-create_delete_multiple_dbs_test_() ->
- {
- "Multiple database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end,
- [
- fun should_create_multiple_dbs/1,
- fun should_delete_multiple_dbs/1
- ]
- }
- }
- }.
-
-create_delete_database_continuously_test_() ->
- {
- "Continious database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreachx,
- fun(_) -> ?tempdb() end,
- [
- {10, fun should_create_delete_database_continuously/2},
- {100, fun should_create_delete_database_continuously/2}
- ]
- }
- }
- }.
-
-open_db_test_() ->
- {
- "Database open tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> ?tempdb() end,
- [
- fun should_create_db_if_missing/1,
- fun should_open_db_if_exists/1,
- fun locking_should_work/1
- ]
- }
- }
- }.
-
-should_create_db(DbName) ->
- ?_test(begin
- {ok, Before} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, Before)),
- ?assert(create_db(DbName)),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
-should_delete_db(DbName) ->
- ?_test(begin
- ?assert(create_db(DbName)),
- {ok, Before} = couch_server:all_databases(),
- ?assert(lists:member(DbName, Before)),
- couch_server:delete(DbName, []),
- {ok, After} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, After))
- end).
-
-should_create_multiple_dbs(DbNames) ->
- ?_test(begin
- [
- gen_server:call(couch_server:couch_server(N), {set_max_dbs_open, 3})
- || N <- lists:seq(1, couch_server:num_servers())
- ],
- {ok, Before} = couch_server:all_databases(),
- [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames],
- [?assert(create_db(DbName)) || DbName <- DbNames],
- {ok, After} = couch_server:all_databases(),
- [?assert(lists:member(DbName, After)) || DbName <- DbNames]
- end).
-
-should_delete_multiple_dbs(DbNames) ->
- ?_test(begin
- [?assert(create_db(DbName)) || DbName <- DbNames],
- {ok, Before} = couch_server:all_databases(),
- [?assert(lists:member(DbName, Before)) || DbName <- DbNames],
- [?assert(delete_db(DbName)) || DbName <- DbNames],
- {ok, After} = couch_server:all_databases(),
- [?assertNot(lists:member(DbName, After)) || DbName <- DbNames]
- end).
-
-should_create_delete_database_continuously(Times, DbName) ->
- {
- lists:flatten(io_lib:format("~b times", [Times])),
- {timeout, ?TIMEOUT,
- ?_test(begin
- ?assert(create_db(DbName)),
- lists:foreach(
- fun(_) ->
- ?assert(delete_db(DbName)),
- ?assert(create_db(DbName))
- end,
- lists:seq(1, Times)
- )
- end)}
- }.
-
-should_create_db_if_missing(DbName) ->
- ?_test(begin
- {ok, Before} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, Before)),
- {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
-should_open_db_if_exists(DbName) ->
- ?_test(begin
- ?assert(create_db(DbName)),
- {ok, Before} = couch_server:all_databases(),
- ?assert(lists:member(DbName, Before)),
- {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
-locking_should_work(DbName) ->
- ?_test(begin
- ?assertEqual(ok, couch_server:lock(DbName, <<"x">>)),
- ?assertEqual({error, {locked, <<"x">>}}, couch_db:create(DbName, [])),
- ?assertEqual(ok, couch_server:unlock(DbName)),
- {ok, Db} = couch_db:create(DbName, []),
- ?assertEqual(
- {error, already_opened},
- couch_server:lock(DbName, <<>>)
- ),
-
- ok = couch_db:close(Db),
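-        % Kill the db process and wait for couch_server to drop it from its table
-        % so the database can be locked again below.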
- catch exit(couch_db:get_pid(Db), kill),
- test_util:wait(fun() ->
- case ets:lookup(couch_server:couch_dbs(DbName), DbName) of
- [] -> ok;
- [_ | _] -> wait
- end
- end),
-
- ?assertEqual(ok, couch_server:lock(DbName, <<"y">>)),
- ?assertEqual(
- {error, {locked, <<"y">>}},
- couch_db:open(DbName, [])
- ),
-
- couch_server:unlock(DbName),
- {ok, Db1} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db1)
- end).
-
-create_db(DbName) ->
- create_db(DbName, []).
-
-create_db(DbName, Opts) ->
- {ok, Db} = couch_db:create(DbName, Opts),
- ok = couch_db:close(Db),
- true.
-
-delete_db(DbName) ->
- ok = couch_server:delete(DbName, []),
- true.
diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl
deleted file mode 100644
index a004ed8fd..000000000
--- a/src/couch/test/eunit/couch_doc_json_tests.erl
+++ /dev/null
@@ -1,526 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_doc_json_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup() ->
- mock(couch_log),
- mock(config),
- mock(couch_db_plugin),
- ok.
-
-teardown(_) ->
- meck:unload(couch_log),
- meck:unload(config),
- meck:unload(couch_db_plugin),
- ok.
-
-mock(couch_db_plugin) ->
- ok = meck:new(couch_db_plugin, [passthrough]),
- ok = meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end),
- ok;
-mock(couch_log) ->
- ok = meck:new(couch_log, [passthrough]),
- ok = meck:expect(couch_log, debug, fun(_, _) -> ok end),
- ok;
-mock(config) ->
- meck:new(config, [passthrough]),
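-    % Cap max_document_size at 1024 bytes so the "Document too large" case triggers.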
- meck:expect(
- config,
- get_integer,
- fun("couchdb", "max_document_size", 8000000) -> 1024 end
- ),
- meck:expect(config, get, fun(_, _) -> undefined end),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- ok.
-
-json_doc_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- fun(_) ->
- [
- {"Document from JSON", [
- from_json_with_dbname_error_cases(),
- from_json_with_db_name_success_cases(),
- from_json_success_cases(),
- from_json_error_cases()
- ]},
- {"Document to JSON", [
- to_json_success_cases()
- ]}
- ]
- end
- }.
-
-from_json_success_cases() ->
- Cases = [
- {
- {[]},
- #doc{},
- "Return an empty document for an empty JSON object."
- },
- {
- {[{<<"_id">>, <<"zing!">>}]},
- #doc{id = <<"zing!">>},
- "Parses document ids."
- },
- {
- {[{<<"_id">>, <<"_design/foo">>}]},
- #doc{id = <<"_design/foo">>},
- "_design/document ids."
- },
- {
- {[{<<"_id">>, <<"_local/bam">>}]},
- #doc{id = <<"_local/bam">>},
- "_local/document ids."
- },
- {
- {[{<<"_rev">>, <<"4-230234">>}]},
- #doc{revs = {4, [<<"230234">>]}},
- "_rev stored in revs."
- },
- {
- {[{<<"soap">>, 35}]},
- #doc{body = {[{<<"soap">>, 35}]}},
- "Non underscore prefixed fields stored in body."
- },
- {
- {[
- {<<"_attachments">>,
- {[
- {<<"my_attachment.fu">>,
- {[
- {<<"stub">>, true},
- {<<"content_type">>, <<"application/awesome">>},
- {<<"length">>, 45}
- ]}},
- {<<"noahs_private_key.gpg">>,
- {[
- {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
- {<<"content_type">>, <<"application/pgp-signature">>}
- ]}}
- ]}}
- ]},
- #doc{
- atts = [
- couch_att:new([
- {name, <<"my_attachment.fu">>},
- {data, stub},
- {type, <<"application/awesome">>},
- {att_len, 45},
- {disk_len, 45},
- {revpos, undefined}
- ]),
- couch_att:new([
- {name, <<"noahs_private_key.gpg">>},
- {data, <<"I have a pet fish!">>},
- {type, <<"application/pgp-signature">>},
- {att_len, 18},
- {disk_len, 18},
- {revpos, 0}
- ])
- ]
- },
- "Attachments are parsed correctly."
- },
- {
- {[{<<"_deleted">>, true}]},
- #doc{deleted = true},
- "_deleted controls the deleted field."
- },
- {
- {[{<<"_deleted">>, false}]},
- #doc{},
- "{\"_deleted\": false} is ok."
- },
- {
- {[
- {<<"_revisions">>,
- {[
- {<<"start">>, 4},
- {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}
- ]}},
- {<<"_rev">>, <<"6-something">>}
- ]},
- #doc{revs = {4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}},
- "_revisions attribute are preferred to _rev."
- },
- {
- {[{<<"_revs_info">>, dropping}]},
- #doc{},
- "Drops _revs_info."
- },
- {
- {[{<<"_local_seq">>, dropping}]},
- #doc{},
- "Drops _local_seq."
- },
- {
- {[{<<"_conflicts">>, dropping}]},
- #doc{},
- "Drops _conflicts."
- },
- {
- {[{<<"_deleted_conflicts">>, dropping}]},
- #doc{},
- "Drops _deleted_conflicts."
- }
- ],
- lists:map(
- fun({EJson, Expect, Msg}) ->
- {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson))}
- end,
- Cases
- ).
-
-from_json_with_db_name_success_cases() ->
- Cases = [
- {
- {[]},
- <<"_dbs">>,
- #doc{},
- "DbName _dbs is acceptable with no docid"
- },
- {
- {[{<<"_id">>, <<"zing!">>}]},
- <<"_dbs">>,
- #doc{id = <<"zing!">>},
- "DbName _dbs is acceptable with a normal docid"
- },
- {
- {[{<<"_id">>, <<"_users">>}]},
- <<"_dbs">>,
- #doc{id = <<"_users">>},
- "_dbs/_users is acceptable"
- },
- {
- {[{<<"_id">>, <<"_replicator">>}]},
- <<"_dbs">>,
- #doc{id = <<"_replicator">>},
- "_dbs/_replicator is acceptable"
- },
- {
- {[{<<"_id">>, <<"_global_changes">>}]},
- <<"_dbs">>,
- #doc{id = <<"_global_changes">>},
- "_dbs/_global_changes is acceptable"
- }
- ],
- lists:map(
- fun({EJson, DbName, Expect, Msg}) ->
- {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))}
- end,
- Cases
- ).
-
-from_json_error_cases() ->
- Cases = [
- {
- [],
- {bad_request, "Document must be a JSON object"},
- "arrays are invalid"
- },
- {
- 4,
- {bad_request, "Document must be a JSON object"},
- "integers are invalid"
- },
- {
- true,
- {bad_request, "Document must be a JSON object"},
- "literals are invalid"
- },
- {
- {[{<<"_id">>, {[{<<"foo">>, 5}]}}]},
- {illegal_docid, <<"Document id must be a string">>},
- "Document id must be a string."
- },
- {
- {[{<<"_id">>, <<"_random">>}]},
- {illegal_docid, <<"Only reserved document ids may start with underscore.">>},
- "Disallow arbitrary underscore prefixed docids."
- },
- {
- {[{<<"_rev">>, 5}]},
- {bad_request, <<"Invalid rev format">>},
- "_rev must be a string"
- },
- {
- {[{<<"_rev">>, "foobar"}]},
- {bad_request, <<"Invalid rev format">>},
- "_rev must be %d-%s"
- },
- {
- {[{<<"_rev">>, "foo-bar"}]},
- "Error if _rev's integer expection is broken."
- },
- {
- {[{<<"_revisions">>, {[{<<"start">>, true}]}}]},
- {doc_validation, "_revisions.start isn't an integer."},
- "_revisions.start must be an integer."
- },
- {
- {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, 5}]}}]},
- {doc_validation, "_revisions.ids isn't a array."},
- "_revions.ids must be a list."
- },
- {
- {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, [5]}]}}]},
- {doc_validation, "RevId isn't a string"},
- "Revision ids must be strings."
- },
- {
- {[
- {<<"_revisions">>,
- {[
- {<<"start">>, 0},
- {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]}
- ]}}
- ]},
- {doc_validation, "RevId isn't a valid hexadecimal"},
- "Revision ids must be a valid hex."
- },
- {
- {[{<<"_something">>, 5}]},
- {doc_validation, <<"Bad special document member: _something">>},
- "Underscore prefix fields are reserved."
- },
- {
- fun() ->
- {[
- {<<"_id">>, <<"large_doc">>},
- {<<"x">>, <<<<"x">> || _ <- lists:seq(1, 1025)>>}
- ]}
- end,
- {request_entity_too_large, <<"large_doc">>},
- "Document too large."
- }
- ],
-
- lists:map(
- fun
- ({Fun, Expect, Msg}) when is_function(Fun, 0) ->
- {Msg, ?_assertThrow(Expect, couch_doc:from_json_obj_validate(Fun()))};
- ({EJson, Expect, Msg}) ->
- {Msg, ?_assertThrow(Expect, couch_doc:from_json_obj_validate(EJson))};
- ({EJson, Msg}) ->
- {Msg, ?_assertThrow(_, couch_doc:from_json_obj_validate(EJson))}
- end,
- Cases
- ).
-
-from_json_with_dbname_error_cases() ->
- Cases = [
- {
- {[{<<"_id">>, <<"_random">>}]},
- <<"_dbs">>,
- {illegal_docid, <<"Only reserved document ids may start with underscore.">>},
- "Disallow non-system-DB underscore prefixed docids in _dbs database."
- },
- {
- {[{<<"_id">>, <<"_random">>}]},
- <<"foobar">>,
- {illegal_docid, <<"Only reserved document ids may start with underscore.">>},
- "Disallow arbitrary underscore prefixed docids in regular database."
- },
- {
- {[{<<"_id">>, <<"_users">>}]},
- <<"foobar">>,
- {illegal_docid, <<"Only reserved document ids may start with underscore.">>},
- "Disallow system-DB docid _users in regular database."
- }
- ],
-
- lists:map(
- fun({EJson, DbName, Expect, Msg}) ->
- Error = (catch couch_doc:from_json_obj_validate(EJson, DbName)),
- {Msg, ?_assertMatch(Expect, Error)}
- end,
- Cases
- ).
-
-to_json_success_cases() ->
- Cases = [
- {
- #doc{},
- {[{<<"_id">>, <<"">>}]},
- "Empty docs are {\"_id\": \"\"}"
- },
- {
- #doc{id = <<"foo">>},
- {[{<<"_id">>, <<"foo">>}]},
- "_id is added."
- },
- {
- #doc{revs = {5, ["foo"]}},
- {[{<<"_id">>, <<>>}, {<<"_rev">>, <<"5-foo">>}]},
- "_rev is added."
- },
- {
- [revs],
- #doc{revs = {5, [<<"first">>, <<"second">>]}},
- {[
- {<<"_id">>, <<>>},
- {<<"_rev">>, <<"5-first">>},
- {<<"_revisions">>,
- {[
- {<<"start">>, 5},
- {<<"ids">>, [<<"first">>, <<"second">>]}
- ]}}
- ]},
- "_revisions include with revs option"
- },
- {
- #doc{body = {[{<<"foo">>, <<"bar">>}]}},
- {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}]},
- "Arbitrary fields are added."
- },
- {
- #doc{deleted = true, body = {[{<<"foo">>, <<"bar">>}]}},
- {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}, {<<"_deleted">>, true}]},
- "Deleted docs no longer drop body members."
- },
- {
- #doc{
- meta = [
- {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
- ]
- },
- {[
- {<<"_id">>, <<>>},
- {<<"_revs_info">>, [
- {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
- {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
- ]}
- ]},
- "_revs_info field is added correctly."
- },
- {
- #doc{meta = [{local_seq, 5}]},
- {[{<<"_id">>, <<>>}, {<<"_local_seq">>, 5}]},
- "_local_seq is added as an integer."
- },
- {
- #doc{meta = [{conflicts, [{3, <<"yep">>}, {1, <<"snow">>}]}]},
- {[
- {<<"_id">>, <<>>},
- {<<"_conflicts">>, [<<"3-yep">>, <<"1-snow">>]}
- ]},
- "_conflicts is added as an array of strings."
- },
- {
- #doc{meta = [{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
- {[
- {<<"_id">>, <<>>},
- {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
- ]},
- "_deleted_conflicsts is added as an array of strings."
- },
- {
- #doc{
- atts = [
- couch_att:new([
- {name, <<"big.xml">>},
- {type, <<"xml/sucks">>},
- {data, fun() -> ok end},
- {revpos, 1},
- {att_len, 400},
- {disk_len, 400}
- ]),
- couch_att:new([
- {name, <<"fast.json">>},
- {type, <<"json/ftw">>},
- {data, <<"{\"so\": \"there!\"}">>},
- {revpos, 1},
- {att_len, 16},
- {disk_len, 16}
- ])
- ]
- },
- {[
- {<<"_id">>, <<>>},
- {<<"_attachments">>,
- {[
- {<<"big.xml">>,
- {[
- {<<"content_type">>, <<"xml/sucks">>},
- {<<"revpos">>, 1},
- {<<"length">>, 400},
- {<<"stub">>, true}
- ]}},
- {<<"fast.json">>,
- {[
- {<<"content_type">>, <<"json/ftw">>},
- {<<"revpos">>, 1},
- {<<"length">>, 16},
- {<<"stub">>, true}
- ]}}
- ]}}
- ]},
- "Attachments attached as stubs only include a length."
- },
- {
- [attachments],
- #doc{
- atts = [
- couch_att:new([
- {name, <<"stuff.txt">>},
- {type, <<"text/plain">>},
- {data, fun() -> <<"diet pepsi">> end},
- {revpos, 1},
- {att_len, 10},
- {disk_len, 10}
- ]),
- couch_att:new([
- {name, <<"food.now">>},
- {type, <<"application/food">>},
- {revpos, 1},
- {data, <<"sammich">>}
- ])
- ]
- },
- {[
- {<<"_id">>, <<>>},
- {<<"_attachments">>,
- {[
- {<<"stuff.txt">>,
- {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"revpos">>, 1},
- {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
- ]}},
- {<<"food.now">>,
- {[
- {<<"content_type">>, <<"application/food">>},
- {<<"revpos">>, 1},
- {<<"data">>, <<"c2FtbWljaA==">>}
- ]}}
- ]}}
- ]},
- "Attachments included inline with attachments option."
- }
- ],
-
- lists:map(
- fun
- ({Doc, EJson, Msg}) ->
- {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))};
- ({Options, Doc, EJson, Msg}) ->
- {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))}
- end,
- Cases
- ).
diff --git a/src/couch/test/eunit/couch_doc_tests.erl b/src/couch/test/eunit/couch_doc_tests.erl
deleted file mode 100644
index 5a6e11ab2..000000000
--- a/src/couch/test/eunit/couch_doc_tests.erl
+++ /dev/null
@@ -1,179 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_doc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(REQUEST_FIXTURE,
- filename:join([?FIXTURESDIR, "multipart.http"])
-).
-
-parse_rev_test() ->
- ?assertEqual({1, <<"123">>}, couch_doc:parse_rev("1-123")),
- ?assertEqual({1, <<"123">>}, couch_doc:parse_rev(<<"1-123">>)),
- ?assertException(throw, {bad_request, _}, couch_doc:parse_rev("1f-123")),
- ?assertException(throw, {bad_request, _}, couch_doc:parse_rev("bar")).
-
-doc_from_multi_part_stream_test() ->
- ContentType = "multipart/related;boundary=multipart_related_boundary~~~~~~~~~~~~~~~~~~~~",
- DataFun = fun() -> request(start) end,
-
- mock_config(),
- {ok, #doc{id = <<"doc0">>, atts = [_]}, _Fun, _Parser} =
- couch_doc:doc_from_multi_part_stream(ContentType, DataFun),
- meck:unload(config),
- ok.
-
-doc_to_multi_part_stream_test() ->
- Boundary = <<"multipart_related_boundary~~~~~~~~~~~~~~~~~~~~">>,
- JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>,
- AttData = <<"Hello my important document">>,
- AttLength = size(AttData),
- Atts = [
- couch_att:new([
- {name, <<"test">>},
- {data, AttData},
- {type, <<"text/plain">>},
- {att_len, AttLength},
- {disk_len, AttLength}
- ])
- ],
- couch_doc:doc_to_multi_part_stream(Boundary, JsonBytes, Atts, fun send/1, true),
- AttLengthStr = integer_to_binary(AttLength),
- BoundaryLen = size(Boundary),
- [
- <<"--", Boundary/binary>>,
- <<"Content-Type: application/json">>,
- <<>>,
- JsonBytes,
- <<"--", Boundary/binary>>,
- <<"Content-Disposition: attachment; filename=\"test\"">>,
- <<"Content-Type: text/plain">>,
- <<"Content-Length: ", AttLengthStr/binary>>,
- <<>>,
- AttData,
- <<"--", Boundary:BoundaryLen/binary, "--">>
- ] = collected(),
- ok.
-
-len_doc_to_multi_part_stream_test() ->
- Boundary = <<"simple_boundary">>,
- JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>,
- ContentType = <<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
- AttData = <<"Hello my important document">>,
- AttLength = size(AttData),
- Atts = [
- couch_att:new([
- {name, <<"test">>},
- {data, AttData},
- {type, <<"text/plain">>},
- {att_len, AttLength},
- {disk_len, AttLength}
- ])
- ],
-    %% 258 is the expected size of the document
- {ContentType, 258} =
- couch_doc:len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, true),
- ok.
-
-validate_docid_test_() ->
- {setup,
- fun() ->
- mock_config(),
- ok = meck:new(couch_db_plugin, [passthrough]),
- meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end)
- end,
- fun(_) ->
- meck:unload(config),
- meck:unload(couch_db_plugin)
- end,
- [
- ?_assertEqual(ok, couch_doc:validate_docid(<<"idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_design/idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_local/idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(large_id(1024))),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(<<>>)
- ),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(<<16#80>>)
- ),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(<<"_idx">>)
- ),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(<<"_">>)
- ),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(<<"_design/">>)
- ),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(<<"_local/">>)
- ),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(large_id(1025))
- ),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(<<"_users">>, <<"foo">>)
- ),
- ?_assertThrow(
- {illegal_docid, _},
- couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>)
- )
- ]}.
-
-large_id(N) ->
- <<<<"x">> || _ <- lists:seq(1, N)>>.
-
-request(start) ->
- {ok, Doc} = file:read_file(?REQUEST_FIXTURE),
- {Doc, fun() -> request(stop) end};
-request(stop) ->
- {"", fun() -> request(stop) end}.
-
-send(Data) ->
- send(Data, get(data)).
-send(Data, undefined) ->
- send(Data, []);
-send(Data, Acc) ->
- put(data, [Acc | Data]).
-
-collected() ->
- B = binary:replace(iolist_to_binary(get(data)), <<"\r\n">>, <<0>>, [global]),
- binary:split(B, [<<0>>], [global]).
-
-mock_config() ->
- ok = meck:new(config, [passthrough]),
- meck:expect(
- config,
- get,
- fun
- ("couchdb", "max_document_id_length", "infinity") -> "1024";
- ("couchdb", "max_attachment_size", "infinity") -> "infinity";
- ("couchdb", "max_attachment_size", 1073741824) -> 1073741824;
- ("mem3", "shards_db", "_dbs") -> "_dbs";
- (Key, Val, Default) -> meck:passthrough([Key, Val, Default])
- end
- ).
diff --git a/src/couch/test/eunit/couch_ejson_compare_tests.erl b/src/couch/test/eunit/couch_ejson_compare_tests.erl
deleted file mode 100644
index ae4a5ff7c..000000000
--- a/src/couch/test/eunit/couch_ejson_compare_tests.erl
+++ /dev/null
@@ -1,289 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_ejson_compare_tests).
-
--define(MAX_UNICODE_STRING, <<255, 255, 255, 255>>).
-
-% See mango_idx_view.hrl
--define(MAX_JSON_OBJ, {?MAX_UNICODE_STRING}).
-
--define(TEST_VALUES, [
- null,
- false,
- true,
- -2,
- -0.1,
- 0,
- 0.1,
- 1,
- 2,
- 3.0,
- 4,
- <<"a">>,
- <<"A">>,
- <<"aa">>,
- <<"b">>,
- <<"B">>,
- <<"ba">>,
- <<"bb">>,
- % Highest sorting unicode value. Special case in the nif
- ?MAX_UNICODE_STRING,
- [<<"a">>],
- [<<"b">>],
- [<<"b">>, <<"c">>],
- [<<"b">>, <<"d">>],
- [<<"b">>, <<"d">>, <<"e">>],
- {[{<<"a">>, 1}]},
- {[{<<"a">>, 2}]},
- {[{<<"b">>, 1}]},
- {[{<<"b">>, 2}]},
- {[{<<"b">>, 2}, {<<"a">>, 1}]},
- {[{<<"b">>, 2}, {<<"c">>, 2}]}
-]).
-
-% Property tests
-
--ifdef(WITH_PROPER).
-
--include_lib("couch/include/couch_eunit_proper.hrl").
-
-property_test_() ->
- ?EUNIT_QUICKCHECK(60, 400).
-
-% Properties
-
-% The main, nif-based comparison sorts the test values correctly
-prop_nif_sorts_correctly() ->
- Positions = get_positions(?TEST_VALUES),
- ?FORALL(
- A,
- oneof(?TEST_VALUES),
- ?FORALL(B, oneof(?TEST_VALUES), begin
- expected_less(A, B, Positions) =:= less_nif(A, B)
- end)
- ).
-
-% The erlang fallback comparison sorts the test values correctly
-prop_erlang_sorts_correctly() ->
- Positions = get_positions(?TEST_VALUES),
- ?FORALL(
- A,
- oneof(?TEST_VALUES),
- ?FORALL(B, oneof(?TEST_VALUES), begin
- expected_less(A, B, Positions) =:= less_erl(A, B)
- end)
- ).
-
-% Zero width unicode chars are ignored
-prop_equivalent_unicode_values() ->
- ?FORALL({Prefix, Suffix}, {zero_width_list(), zero_width_list()}, begin
- Binary = unicode:characters_to_binary(Prefix ++ [$a] ++ Suffix),
- less(<<"a">>, Binary) =:= 0
- end).
-
-% Every test value sorts less than the special ?MAX_JSON_OBJ
-prop_test_values_are_less_than_max_json() ->
- ?FORALL(V, oneof(?TEST_VALUES), begin
- less(V, ?MAX_JSON_OBJ) =:= -1
- end).
-
-% Any json value sorts less than the special ?MAX_JSON_OBJ
-prop_any_json_is_less_than_max_json() ->
- ?FORALL(V, json(), begin
- less(V, ?MAX_JSON_OBJ) =:= -1
- end).
-
-% In general, for any json, the nif collator matches the erlang collator
-prop_nif_matches_erlang() ->
- ?FORALL(
- A,
- json(),
- ?FORALL(B, json(), begin
- less_nif(A, B) =:= less_erl(A, B)
- end)
- ).
-
-% Generators
-
-json() ->
- ?SIZED(Size, json(Size)).
-
-json(0) ->
- oneof([
- null,
- true,
- false,
- json_number(),
- json_string(),
- [],
- {[]}
- ]);
-json(Size) ->
- frequency([
- {1, null},
- {1, true},
- {1, false},
- {2, json_number()},
- {3, json_string()},
- {4, []},
- {4, {[]}},
- {5, ?LAZY(json_array(Size))},
- {5, ?LAZY(json_object(Size))}
- ]).
-
-json_number() ->
- oneof([largeint(), int(), real()]).
-
-json_string() ->
- utf8().
-
-json_array(0) ->
- [];
-json_array(Size) ->
- vector(Size div 2, json(Size div 2)).
-
-json_object(0) ->
- {[]};
-json_object(Size) ->
- {vector(Size div 2, {json_string(), json(Size div 2)})}.
-
-zero_width_list() ->
- ?SIZED(Size, vector(Size, zero_width_chars())).
-
-zero_width_chars() ->
- oneof([16#200B, 16#200C, 16#200D]).
-
--endif.
-
-% Regular EUnit tests
-
-get_icu_version_test() ->
- Ver = couch_ejson_compare:get_icu_version(),
- ?assertMatch({_, _, _, _}, Ver),
- {V1, V2, V3, V4} = Ver,
- ?assert(is_integer(V1) andalso V1 > 0),
- ?assert(is_integer(V2) andalso V2 >= 0),
- ?assert(is_integer(V3) andalso V3 >= 0),
- ?assert(is_integer(V4) andalso V4 >= 0).
-
-get_uca_version_test() ->
- Ver = couch_ejson_compare:get_uca_version(),
- ?assertMatch({_, _, _, _}, Ver),
- {V1, V2, V3, V4} = Ver,
- ?assert(is_integer(V1) andalso V1 > 0),
- ?assert(is_integer(V2) andalso V2 >= 0),
- ?assert(is_integer(V3) andalso V3 >= 0),
- ?assert(is_integer(V4) andalso V4 >= 0).
-
-get_collator_version_test() ->
- Ver = couch_ejson_compare:get_collator_version(),
- ?assertMatch({_, _, _, _}, Ver),
- {V1, V2, V3, V4} = Ver,
- ?assert(is_integer(V1) andalso V1 > 0),
- ?assert(is_integer(V2) andalso V2 >= 0),
- ?assert(is_integer(V3) andalso V3 >= 0),
- ?assert(is_integer(V4) andalso V4 >= 0).
-
-max_depth_error_list_test() ->
- % NIF can handle terms with depth <= 9
- Nested9 = nest_list(<<"val">>, 9),
- ?assertEqual(0, less_nif(Nested9, Nested9)),
-
- % At depth >= 10 it will throw a max_depth_error
- Nested10 = nest_list(<<"val">>, 10),
- ?assertError(max_depth_error, less_nif(Nested10, Nested10)),
-
- % Then it should transparently jump to erlang land
- ?assertEqual(0, less(Nested10, Nested10)).
-
-max_depth_error_obj_test() ->
- % NIF can handle terms with depth <= 9
- Nested9 = nest_obj(<<"k">>, <<"v">>, 9),
- ?assertEqual(0, less_nif(Nested9, Nested9)),
-
- % At depth >= 10 it will throw a max_depth_error
- Nested10 = nest_obj(<<"k">>, <<"v">>, 10),
- ?assertError(max_depth_error, less_nif(Nested10, Nested10)),
-
- % Then it should transparently jump to erlang land
- ?assertEqual(0, less(Nested10, Nested10)).
-
-compare_strings_nif_test() ->
- ?assertEqual(-1, compare_strings(<<"a">>, <<"b">>)),
- ?assertEqual(0, compare_strings(<<"a">>, <<"a">>)),
- ?assertEqual(1, compare_strings(<<"b">>, <<"a">>)),
-
- LargeBin1 = <<<<"x">> || _ <- lists:seq(1, 1000000)>>,
- LargeBin2 = <<LargeBin1/binary, "x">>,
- ?assertEqual(-1, compare_strings(LargeBin1, LargeBin2)),
- ?assertEqual(1, compare_strings(LargeBin2, LargeBin1)),
- ?assertEqual(0, compare_strings(LargeBin1, LargeBin1)),
-
- ?assertError(badarg, compare_strings(42, <<"a">>)),
- ?assertError(badarg, compare_strings(<<"a">>, 42)),
- ?assertError(badarg, compare_strings(42, 42)).
-
-% Helper functions
-
-less(A, B) ->
- cmp_norm(couch_ejson_compare:less(A, B)).
-
-less_nif(A, B) ->
- cmp_norm(couch_ejson_compare:less_nif(A, B)).
-
-less_erl(A, B) ->
- cmp_norm(couch_ejson_compare:less_erl(A, B)).
-
-compare_strings(A, B) ->
- couch_ejson_compare:compare_strings_nif(A, B).
-
-nest_list(Val, 0) ->
- Val;
-nest_list(Val, Depth) when is_integer(Depth), Depth > 0 ->
- [nest_list(Val, Depth - 1)].
-
-nest_obj(K, V, 1) ->
- {[{K, V}]};
-nest_obj(K, V, Depth) when is_integer(Depth), Depth > 1 ->
- {[{K, nest_obj(K, V, Depth - 1)}]}.
-
-% Build a map of #{Val => PositionIndex} for the test values so that when any
-% two are compared we can verify their positions in the test list match the
-% comparison result
-get_positions(TestValues) ->
- lists:foldl(
- fun(Val, Acc) ->
- Acc#{Val => map_size(Acc)}
- end,
- #{},
- TestValues
- ).
-
-% When two values are compared, check the test values' position index to ensure
-% that their order in the test values list matches the comparison result
-expected_less(A, B, Positions) ->
- #{A := PosA, B := PosB} = Positions,
- if
- PosA =:= PosB -> 0;
- PosA < PosB -> -1;
- PosA > PosB -> 1
- end.
-
-% Since collation functions can return magnitudes > 1, for example when
-% comparing atoms A - B, we need to normalize the result to -1, 0, and 1.
-cmp_norm(Cmp) when is_number(Cmp) ->
- if
- Cmp == 0 -> 0;
- Cmp < 0 -> -1;
- Cmp > 0 -> 1
- end.
diff --git a/src/couch/test/eunit/couch_ejson_size_tests.erl b/src/couch/test/eunit/couch_ejson_size_tests.erl
deleted file mode 100644
index 27803d8b7..000000000
--- a/src/couch/test/eunit/couch_ejson_size_tests.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_ejson_size_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-% 4 byte utf8 encoding
--define(HWAIR, $\x{10348}).
-% 3 byte utf8 encoding
--define(EURO, $\x{20ac}).
-% 2 byte utf8 encoding
--define(CENT, $\x{a2}).
-
-ejson_size_test_() ->
- [
- ?_assertEqual(R, couch_ejson_size:encoded_size(Input))
- || {R, Input} <- [
- {1, 1},
- {1, 1},
- {2, -1},
- {1, 9},
- {2, 10},
- {3, -10},
- {2, 11},
- {2, 99},
- {3, 100},
- {3, 999},
- {4, 1000},
- {4, 9999},
- {5, 10000},
-
- {3, 0.0},
- {3, 0.1},
- {3, 1.0},
- {4, -1.0},
- {3, 1.0e9},
- {4, 1.0e10},
- {5, 1.0e-10},
- {5, 1.0e-99},
- {6, 1.0e-100},
- {3, 1.0e-323},
-
- {2, arr_nested(0)},
- {22, arr_nested(10)},
- {2002, arr_nested(1000)},
- {9, obj_nested(0)},
- {69, obj_nested(10)},
- {6009, obj_nested(1000)},
-
- {4, null},
- {4, true},
- {5, false},
-
- {3, str(1, $x)},
- {4, str(1, ?CENT)},
- {5, str(1, ?EURO)},
- {6, str(1, ?HWAIR)},
- {3, str(1, $\x{1})},
- {12, str(10, $x)},
- {22, str(10, ?CENT)},
- {32, str(10, ?EURO)},
- {42, str(10, ?HWAIR)},
- {12, str(10, $\x{1})}
- ]
- ].
-
-%% Helper functions
-
-arr_nested(MaxDepth) ->
- arr_nested(MaxDepth, 0).
-
-obj_nested(MaxDepth) ->
- obj_nested(MaxDepth, 0).
-
-obj(N, K, V) ->
- {[{K, V} || _ <- lists:seq(1, N)]}.
-
-str(N, C) ->
- unicode:characters_to_binary([C || _ <- lists:seq(1, N)]).
-
-arr_nested(MaxDepth, MaxDepth) ->
- [];
-arr_nested(MaxDepth, Depth) ->
- [arr_nested(MaxDepth, Depth + 1)].
-
-obj_nested(MaxDepth, MaxDepth) ->
- obj(1, <<"k">>, <<"v">>);
-obj_nested(MaxDepth, Depth) ->
- {[{<<"k">>, obj_nested(MaxDepth, Depth + 1)}]}.
diff --git a/src/couch/test/eunit/couch_etag_tests.erl b/src/couch/test/eunit/couch_etag_tests.erl
deleted file mode 100644
index 72db6008a..000000000
--- a/src/couch/test/eunit/couch_etag_tests.erl
+++ /dev/null
@@ -1,31 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_etag_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-local_with_empty_body_test() ->
- Etag = couch_httpd:doc_etag(<<"_local/local-and-empty">>, {[]}, {0, <<"1">>}),
- ?assertEqual(Etag, <<"\"5ZVXQYO7VLEOU0TL9VXDNP5PV\"">>).
-
-local_with_body_test() ->
- DocBody = {[{<<"hello">>, <<"world">>}, {<<"relax">>, true}]},
- Etag = couch_httpd:doc_etag(<<"_local/local-with-body">>, DocBody, {0, <<"1">>}),
- ?assertEqual(Etag, <<"\"CEFXP6WH8OKYIWO1GLGBHKCCA\"">>).
-
-normal_doc_uses_rev_test() ->
- DocBody = {[{<<"hello">>, <<"world">>}, {<<"relax">>, true}]},
- Etag = couch_httpd:doc_etag(
- <<"nomal-doc">>, DocBody, {1, <<"efda11e34e88ebe31a2f83e84a0435b6">>}
- ),
- ?assertEqual(Etag, <<"\"1-efda11e34e88ebe31a2f83e84a0435b6\"">>).
diff --git a/src/couch/test/eunit/couch_file_tests.erl b/src/couch/test/eunit/couch_file_tests.erl
deleted file mode 100644
index 1b54cd70e..000000000
--- a/src/couch/test/eunit/couch_file_tests.erl
+++ /dev/null
@@ -1,553 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_file_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(BLOCK_SIZE, 4096).
--define(setup(F), {setup, fun setup/0, fun teardown/1, F}).
--define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}).
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- Fd.
-
-teardown(Fd) ->
- case is_process_alive(Fd) of
- true -> ok = couch_file:close(Fd);
- false -> ok
- end.
-
-open_close_test_() ->
- {
- "Test for proper file open and close",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end,
- fun test_util:stop/1,
- [
- should_return_enoent_if_missed(),
- should_ignore_invalid_flags_with_open(),
- ?setup(fun should_return_pid_on_file_open/1),
- should_close_file_properly(),
- ?setup(fun should_create_empty_new_files/1)
- ]
- }
- }.
-
-should_return_enoent_if_missed() ->
- ?_assertEqual({error, enoent}, couch_file:open("not a real file")).
-
-should_ignore_invalid_flags_with_open() ->
- ?_assertMatch(
- {ok, _},
- couch_file:open(?tempfile(), [create, invalid_option])
- ).
-
-should_return_pid_on_file_open(Fd) ->
- ?_assert(is_pid(Fd)).
-
-should_close_file_properly() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- ok = couch_file:close(Fd),
- ?_assert(true).
-
-should_create_empty_new_files(Fd) ->
- ?_assertMatch({ok, 0}, couch_file:bytes(Fd)).
-
-read_write_test_() ->
- {
- "Common file read/write tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end,
- fun test_util:stop/1,
- ?foreach([
- fun should_increase_file_size_on_write/1,
- fun should_return_current_file_size_on_write/1,
- fun should_write_and_read_term/1,
- fun should_write_and_read_binary/1,
- fun should_write_and_read_large_binary/1,
- fun should_return_term_as_binary_for_reading_binary/1,
- fun should_read_term_written_as_binary/1,
- fun should_read_iolist/1,
- fun should_fsync/1,
- fun should_not_read_beyond_eof/1,
- fun should_truncate/1
- ])
- }
- }.
-
-should_increase_file_size_on_write(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- ?_assert(Size > 0).
-
-should_return_current_file_size_on_write(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- ?_assertMatch({ok, Size, _}, couch_file:append_term(Fd, bar)).
-
-should_write_and_read_term(Fd) ->
- {ok, Pos, _} = couch_file:append_term(Fd, foo),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
-
-should_write_and_read_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, <<"fancy!">>),
- ?_assertMatch({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Pos)).
-
-should_return_term_as_binary_for_reading_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_term(Fd, foo),
- Foo = couch_compress:compress(foo, snappy),
- ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
-
-should_read_term_written_as_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, <<131, 100, 0, 3, 102, 111, 111>>),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
-
-should_write_and_read_large_binary(Fd) ->
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, Pos, _} = couch_file:append_binary(Fd, BigBin),
- ?_assertMatch({ok, BigBin}, couch_file:pread_binary(Fd, Pos)).
-
-should_read_iolist(Fd) ->
- %% append_binary == append_iolist?
- %% Possible bug in pread_iolist or iolist() -> append_binary
- {ok, Pos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
- {ok, IoList} = couch_file:pread_iolist(Fd, Pos),
- ?_assertMatch(<<"foombam">>, iolist_to_binary(IoList)).
-
-should_fsync(Fd) ->
- {"How does on test fsync?", ?_assertMatch(ok, couch_file:sync(Fd))}.
-
-should_not_read_beyond_eof(Fd) ->
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- DoubleBin = round(byte_size(BigBin) * 2),
- {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
- {_, Filepath} = couch_file:process_info(Fd),
- %% corrupt db file
- {ok, Io} = file:open(Filepath, [read, write, binary]),
- ok = file:pwrite(Io, Pos, <<0:1/integer, DoubleBin:31/integer>>),
- file:close(Io),
- unlink(Fd),
- ExpectedError = {badmatch, {'EXIT', {bad_return_value, {read_beyond_eof, Filepath}}}},
- ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
-
-should_truncate(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, _, _} = couch_file:append_binary(Fd, BigBin),
- ok = couch_file:truncate(Fd, Size),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, 0)).
-
-pread_limit_test_() ->
- {
- "Read limit tests",
- {
- setup,
- fun() ->
- Ctx = test_util:start(?MODULE),
- config:set("couchdb", "max_pread_size", "50000"),
- Ctx
- end,
- fun(Ctx) ->
- config:delete("couchdb", "max_pread_size"),
- test_util:stop(Ctx)
- end,
- ?foreach([
- fun should_increase_file_size_on_write/1,
- fun should_return_current_file_size_on_write/1,
- fun should_write_and_read_term/1,
- fun should_write_and_read_binary/1,
- fun should_not_read_more_than_pread_limit/1
- ])
- }
- }.
-
-should_not_read_more_than_pread_limit(Fd) ->
- {_, Filepath} = couch_file:process_info(Fd),
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
- unlink(Fd),
- ExpectedError = {badmatch, {'EXIT', {bad_return_value, {exceed_pread_limit, Filepath, 50000}}}},
- ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
-
-header_test_() ->
- {
- "File header read/write tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end,
- fun test_util:stop/1,
- [
- ?foreach([
- fun should_write_and_read_atom_header/1,
- fun should_write_and_read_tuple_header/1,
- fun should_write_and_read_second_header/1,
- fun should_truncate_second_header/1,
- fun should_produce_same_file_size_on_rewrite/1,
- fun should_save_headers_larger_than_block_size/1
- ]),
- should_recover_header_marker_corruption(),
- should_recover_header_size_corruption(),
- should_recover_header_md5sig_corruption(),
- should_recover_header_data_corruption()
- ]
- }
- }.
-
-should_write_and_read_atom_header(Fd) ->
- ok = couch_file:write_header(Fd, hello),
- ?_assertMatch({ok, hello}, couch_file:read_header(Fd)).
-
-should_write_and_read_tuple_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
-
-should_write_and_read_second_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ?_assertMatch({ok, [foo, <<"more">>]}, couch_file:read_header(Fd)).
-
-should_truncate_second_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- {ok, Size} = couch_file:bytes(Fd),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ok = couch_file:truncate(Fd, Size),
- ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
-
-should_produce_same_file_size_on_rewrite(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- {ok, Size1} = couch_file:bytes(Fd),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- {ok, Size2} = couch_file:bytes(Fd),
- ok = couch_file:truncate(Fd, Size1),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ?_assertMatch({ok, Size2}, couch_file:bytes(Fd)).
-
-should_save_headers_larger_than_block_size(Fd) ->
- Header = erlang:make_tuple(5000, <<"CouchDB">>),
- couch_file:write_header(Fd, Header),
- {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}.
-
-should_recover_header_marker_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- file:pwrite(RawFd, HeaderPos, <<0>>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end
- )
- ).
-
-should_recover_header_size_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +1 for 0x1 byte marker
- file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end
- )
- ).
-
-should_recover_header_md5sig_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +5 = +1 for 0x1 byte and +4 for term size.
- file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end
- )
- ).
-
-should_recover_header_data_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
- file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end
- )
- ).
-
-check_header_recovery(CheckFun) ->
- Path = ?tempfile(),
- {ok, Fd} = couch_file:open(Path, [create, overwrite]),
- {ok, RawFd} = file:open(Path, [read, write, raw, binary]),
-
- {ok, _} = write_random_data(Fd),
- ExpectHeader = {some_atom, <<"a binary">>, 756},
- ok = couch_file:write_header(Fd, ExpectHeader),
-
- {ok, HeaderPos} = write_random_data(Fd),
- ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
-
- CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
-
- ok = file:close(RawFd),
- ok = couch_file:close(Fd),
- ok.
-
-write_random_data(Fd) ->
- write_random_data(Fd, 100 + couch_rand:uniform(1000)).
-
-write_random_data(Fd, 0) ->
- {ok, Bytes} = couch_file:bytes(Fd),
- {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE};
-write_random_data(Fd, N) ->
- Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
- Term = lists:nth(couch_rand:uniform(4) + 1, Choices),
- {ok, _, _} = couch_file:append_term(Fd, Term),
- write_random_data(Fd, N - 1).
-
-delete_test_() ->
- {
- "File delete tests",
- {
- setup,
- fun() ->
- meck:new(config, [passthrough])
- end,
- fun(_) ->
- meck:unload()
- end,
- {
- foreach,
- fun() ->
- meck:reset([config]),
- File = ?tempfile() ++ ".couch",
- RootDir = filename:dirname(File),
- ok = couch_file:init_delete_dir(RootDir),
- ok = file:write_file(File, <<>>),
- {RootDir, File}
- end,
- fun({_, File}) ->
- file:delete(File)
- end,
- [
- fun(Cfg) ->
- {"enable_database_recovery = false, context = delete",
- make_enable_recovery_test_case(Cfg, false, delete)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true, context = delete",
- make_enable_recovery_test_case(Cfg, true, delete)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = false, context = compaction",
- make_enable_recovery_test_case(Cfg, false, compaction)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true, context = compaction",
- make_enable_recovery_test_case(Cfg, true, compaction)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = true",
- make_delete_after_rename_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = false",
- make_delete_after_rename_test_case(Cfg, false)}
- end
- ]
- }
- }
- }.
-
-make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> EnableRecovery;
- ("couchdb", "delete_after_rename", _) -> false
- end),
- FileExistsBefore = filelib:is_regular(File),
- couch_file:delete(RootDir, File, [{context, Context}]),
- FileExistsAfter = filelib:is_regular(File),
- RenamedFiles = filelib:wildcard(filename:rootname(File) ++ "*.deleted.*"),
- DeletedFiles = filelib:wildcard(RootDir ++ "/.delete/*"),
- {ExpectRenamedCount, ExpectDeletedCount} =
- if
- EnableRecovery andalso Context =:= delete -> {1, 0};
- true -> {0, 1}
- end,
- [
- ?_assert(FileExistsBefore),
- ?_assertNot(FileExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)),
- ?_assertEqual(ExpectDeletedCount, length(DeletedFiles))
- ].
-
-make_delete_after_rename_test_case({RootDir, File}, DeleteAfterRename) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> false;
- ("couchdb", "delete_after_rename", _) -> DeleteAfterRename
- end),
- FileExistsBefore = filelib:is_regular(File),
- couch_file:delete(RootDir, File),
- FileExistsAfter = filelib:is_regular(File),
- RenamedFiles = filelib:wildcard(filename:join([RootDir, ".delete", "*"])),
- ExpectRenamedCount =
- if
- DeleteAfterRename -> 0;
- true -> 1
- end,
- [
- ?_assert(FileExistsBefore),
- ?_assertNot(FileExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles))
- ].
-
-nuke_dir_test_() ->
- {
- "Nuke directory tests",
- {
- setup,
- fun() ->
- meck:new(config, [passthrough])
- end,
- fun(_) ->
- meck:unload()
- end,
- {
- foreach,
- fun() ->
- meck:reset([config]),
- File0 = ?tempfile() ++ ".couch",
- RootDir = filename:dirname(File0),
- BaseName = filename:basename(File0),
- Seed = couch_rand:uniform(8999999999) + 999999999,
- DDocDir = io_lib:format("db.~b_design", [Seed]),
- ViewDir = filename:join([RootDir, DDocDir]),
- file:make_dir(ViewDir),
- File = filename:join([ViewDir, BaseName]),
- file:rename(File0, File),
- ok = couch_file:init_delete_dir(RootDir),
- ok = file:write_file(File, <<>>),
- {RootDir, ViewDir}
- end,
- fun({RootDir, ViewDir}) ->
- remove_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- case filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext) of
- [DelDir] -> remove_dir(DelDir);
- _ -> ok
- end
- end,
- [
- fun(Cfg) ->
- {"enable_database_recovery = false", make_rename_dir_test_case(Cfg, false)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true", make_rename_dir_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = true", make_delete_dir_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = false", make_delete_dir_test_case(Cfg, false)}
- end
- ]
- }
- }
- }.
-
-make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> EnableRecovery;
- ("couchdb", "delete_after_rename", _) -> true;
- (_, _, Default) -> Default
- end),
- DirExistsBefore = filelib:is_dir(ViewDir),
- couch_file:nuke_dir(RootDir, ViewDir),
- DirExistsAfter = filelib:is_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext),
- ExpectRenamedCount =
- if
- EnableRecovery -> 1;
- true -> 0
- end,
- [
- ?_assert(DirExistsBefore),
- ?_assertNot(DirExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedDirs))
- ].
-
-make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> false;
- ("couchdb", "delete_after_rename", _) -> DeleteAfterRename;
- (_, _, Default) -> Default
- end),
- DirExistsBefore = filelib:is_dir(ViewDir),
- couch_file:nuke_dir(RootDir, ViewDir),
- DirExistsAfter = filelib:is_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext),
- RenamedFiles = filelib:wildcard(RootDir ++ "/.delete/*"),
- ExpectRenamedCount =
- if
- DeleteAfterRename -> 0;
- true -> 1
- end,
- [
- ?_assert(DirExistsBefore),
- ?_assertNot(DirExistsAfter),
- ?_assertEqual(0, length(RenamedDirs)),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles))
- ].
-
-remove_dir(Dir) ->
- [file:delete(File) || File <- filelib:wildcard(filename:join([Dir, "*"]))],
- file:del_dir(Dir).
-
-fsync_error_test_() ->
- {
- "Test fsync raises errors",
- {
- setup,
- fun() ->
- test_util:start(?MODULE, [ioq])
- end,
- fun(Ctx) ->
- test_util:stop(Ctx)
- end,
- [
- fun fsync_raises_errors/0
- ]
- }
- }.
-
-fsync_raises_errors() ->
- Fd = spawn(fun() -> fake_fsync_fd() end),
- ?assertError({fsync_error, eio}, couch_file:sync(Fd)).
-
-fake_fsync_fd() ->
- % Mocking gen_server did not go very
- % well so faking the couch_file pid
- % will have to do.
- receive
- {'$gen_call', From, sync} ->
- gen:reply(From, {error, eio})
- end.
diff --git a/src/couch/test/eunit/couch_flags_config_tests.erl b/src/couch/test/eunit/couch_flags_config_tests.erl
deleted file mode 100644
index 05707718b..000000000
--- a/src/couch/test/eunit/couch_flags_config_tests.erl
+++ /dev/null
@@ -1,147 +0,0 @@
--module(couch_flags_config_tests).
--include_lib("eunit/include/eunit.hrl").
-
-%% value copied from couch_flags_config
--define(MAX_FLAG_NAME_LENGTH, 256).
-
-setup() ->
- meck:new(couch_log),
- meck:expect(couch_log, error, ['_', '_'], meck:val(ok)),
- ok.
-
-teardown(_) ->
- meck:unload().
-
-couch_flags_config_test_() ->
- {
- "test couch_flags_config",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [fun all_combinations_return_same_result/0] ++
- latest_overide_wins() ++
- [
- {"rules_are_sorted", fun rules_are_sorted/0}
- ]
- }
- }.
-
-all_combinations_return_same_result() ->
- Config = [
- {"foo, bar||*", "true"},
- {"baz, qux||*", "false"},
- {"baz||shards/test*", "true"},
- {"baz||shards/blacklist*", "false"},
- {"bar||shards/test*", "false"},
- {"bar||shards/test/blacklist*", "true"}
- ],
- Expected = [
- {{<<"shards/test/blacklist*">>}, {<<"shards/test/blacklist*">>, 22, [bar, baz, foo]}},
- {{<<"shards/test*">>}, {<<"shards/test*">>, 12, [baz, foo]}},
- {{<<"shards/blacklist*">>}, {<<"shards/blacklist*">>, 17, [bar, foo]}},
- {{<<"*">>}, {<<"*">>, 1, [bar, foo]}}
- ],
- Combinations = couch_tests_combinatorics:permutations(Config),
- lists:foreach(
- fun(Items) ->
- ?assertEqual(Expected, couch_flags_config:data(Items))
- end,
- Combinations
- ).
-
-rules_are_sorted() ->
- Expected = [
- {{<<"shards/test/exact">>}, {<<"shards/test/exact">>, 17, [baz, flag_bar, flag_foo]}},
- {{<<"shards/test/blacklist*">>}, {<<"shards/test/blacklist*">>, 22, [flag_foo]}},
- {{<<"shards/test*">>}, {<<"shards/test*">>, 12, [baz, flag_bar, flag_foo]}},
- {{<<"shards/exact">>}, {<<"shards/exact">>, 12, [flag_bar, flag_foo]}},
- {{<<"shards/blacklist*">>}, {<<"shards/blacklist*">>, 17, []}},
- {{<<"*">>}, {<<"*">>, 1, [flag_foo]}}
- ],
- ?assertEqual(Expected, couch_flags_config:data(test_config())).
-
-latest_overide_wins() ->
- Cases = [
- {
- [
- {"flag||*", "false"},
- {"flag||a*", "true"},
- {"flag||ab*", "true"},
- {"flag||abc*", "true"}
- ],
- true
- },
- {
- [
- {"flag||*", "true"},
- {"flag||a*", "false"},
- {"flag||ab*", "true"},
- {"flag||abc*", "false"}
- ],
- false
- }
- ],
- [
- {
- test_id(Rules, Expected),
- ?_assertEqual(
- Expected,
- lists:member(
- flag,
- flags(hd(couch_flags_config:data(Rules)))
- )
- )
- }
- || {Rules, Expected} <- Cases
- ].
-
-flags({{_Pattern}, {_Pattern, _Size, Flags}}) ->
- Flags.
-
-test_id(Items, ExpectedResult) ->
- lists:flatten(io_lib:format("~p -> ~p", [[P || {P, _} <- Items], ExpectedResult])).
-
-test_config() ->
- [
- {"flag_foo||*", "true"},
- {"flag_bar||*", "false"},
- {"flag_bar||shards/test*", "true"},
- {"flag_foo||shards/blacklist*", "false"},
- {"baz||shards/test*", "true"},
- {"baz||shards/test/blacklist*", "false"},
- {"flag_bar||shards/exact", "true"},
- {"flag_bar||shards/test/exact", "true"}
- ].
-
-parse_flags_term_test_() ->
- LongBinary = binary:copy(<<"a">>, ?MAX_FLAG_NAME_LENGTH + 1),
- ExpectedError = {error, {"Cannot parse list of tags: ~n~p", [{too_long, LongBinary}]}},
- ExpectedUnknownError =
- {error, {"Cannot parse list of tags: ~n~p", [{invalid_flag, <<"dddddddd">>}]}},
- [
- {"empty binary",
- ?_assertEqual(
- [], couch_flags_config:parse_flags_term(<<>>)
- )},
- {"single flag",
- ?_assertEqual(
- [fff], couch_flags_config:parse_flags_term(<<"fff">>)
- )},
- {"sorted",
- ?_assertEqual(
- [aaa, bbb, fff], couch_flags_config:parse_flags_term(<<"fff,aaa,bbb">>)
- )},
- {"whitespace",
- ?_assertEqual(
- [aaa, bbb, fff], couch_flags_config:parse_flags_term(<<"fff , aaa, bbb ">>)
- )},
- {"error",
- ?_assertEqual(
- ExpectedError, couch_flags_config:parse_flags_term(LongBinary)
- )},
- {"unknown_flag",
- ?_assertEqual(
- ExpectedUnknownError, couch_flags_config:parse_flags_term(<<"dddddddd">>)
- )}
- ].
diff --git a/src/couch/test/eunit/couch_flags_tests.erl b/src/couch/test/eunit/couch_flags_tests.erl
deleted file mode 100644
index e3635e9f2..000000000
--- a/src/couch/test/eunit/couch_flags_tests.erl
+++ /dev/null
@@ -1,158 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_flags_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-%% couch_epi_plugin behaviour callbacks
--export([
- app/0,
- providers/0,
- services/0,
- data_providers/0,
- data_subscriptions/0,
- processes/0,
- notify/3
-]).
-
--export([
- rules/0
-]).
-
-app() ->
- test_app.
-
-providers() ->
- [{feature_flags, ?MODULE}].
-
-services() ->
- [].
-
-data_providers() ->
- [].
-
-data_subscriptions() ->
- [].
-
-processes() ->
- [].
-
-notify(_, _, _) ->
- ok.
-
-rules() ->
- test_config().
-
-setup() ->
- %% FIXME after we upgrade couch_epi
-
- % in case it's already running from other tests...
- application:stop(couch_epi),
- application:unload(couch_epi),
-
- application:load(couch_epi),
- application:set_env(couch_epi, plugins, [couch_db_epi, ?MODULE]),
- meck:expect(config, get, 1, []),
-
- Ctx = test_util:start_couch([couch_epi]),
- Ctx.
-
-teardown(Ctx) ->
- test_util:stop_couch(Ctx),
- ok = application:unload(couch_epi),
- meck:unload(),
- ok.
-
-couch_flags_test_() ->
- {
- "test couch_flags",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- enabled_flags_tests() ++
- is_enabled()
- %% ++ match_performance()
- }
- }.
-
-enabled_flags_tests() ->
- [
- {"enabled_flags_tests", [
- {"flags_default_rule",
- ?_assertEqual(
- [foo], couch_flags:enabled("something")
- )},
- {"flags_wildcard_rule",
- ?_assertEqual(
- [bar, baz, foo],
- couch_flags:enabled("shards/test/something")
- )},
- {"flags_exact_rule",
- ?_assertEqual(
- [bar, baz, foo],
- couch_flags:enabled("shards/test/exact")
- )},
- {"flags_blacklist_rule",
- ?_assertEqual(
- [],
- couch_flags:enabled("shards/blacklist/4")
- )}
- ]}
- ].
-
-is_enabled() ->
- [
- {"is_enabled_tests", [
- {"flags_default_rule [enabled]", ?_assert(couch_flags:is_enabled(foo, "something"))},
- {"flags_default_rule [disabled]",
- ?_assertNot(couch_flags:is_enabled(baz, "something"))},
- {"flags_default_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "something"))},
-
- {"flags_wildcard_rule [enabled]",
- ?_assert(couch_flags:is_enabled(bar, "shards/test/something"))},
- {"flags_wildcard_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/something"))},
-
- {"flags_exact_rule [overide_disbled]",
- ?_assert(couch_flags:is_enabled(bar, "shards/test/exact"))},
- {"flags_exact_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/exact"))},
-
- {"flags_blacklist_rule [overide_enabled]",
- ?_assertNot(couch_flags:is_enabled(foo, "shards/blacklist/4"))},
- {"flags_blacklist_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))}
- ]}
- ].
-
-%% match_performance() ->
-%% [{"match_performance", [
-%% ?_test(begin
-%% ?debugTime("1 million of operations took", lists:foreach(fun(_) ->
-%% couch_flags:is_enabled(bar, "shards/test/exact")
-%% end, lists:seq(1, 1000000)))
-%% end)
-%% ]}].
-
-test_config() ->
- [
- {"foo||/*", "true"},
- {"bar||/*", "false"},
- {"bar||/shards/test*", "true"},
- {"foo||/shards/blacklist*", "false"},
- {"baz||/shards/test*", "true"},
- {"bar||/shards/exact", "true"},
- {"bar||/shards/test/exact", "true"}
- ].
diff --git a/src/couch/test/eunit/couch_hotp_tests.erl b/src/couch/test/eunit/couch_hotp_tests.erl
deleted file mode 100644
index fee10ff5e..000000000
--- a/src/couch/test/eunit/couch_hotp_tests.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_hotp_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-hotp_test() ->
- Key = <<"12345678901234567890">>,
- ?assertEqual(755224, couch_hotp:generate(sha, Key, 0, 6)),
- ?assertEqual(287082, couch_hotp:generate(sha, Key, 1, 6)),
- ?assertEqual(359152, couch_hotp:generate(sha, Key, 2, 6)),
- ?assertEqual(969429, couch_hotp:generate(sha, Key, 3, 6)),
- ?assertEqual(338314, couch_hotp:generate(sha, Key, 4, 6)),
- ?assertEqual(254676, couch_hotp:generate(sha, Key, 5, 6)),
- ?assertEqual(287922, couch_hotp:generate(sha, Key, 6, 6)),
- ?assertEqual(162583, couch_hotp:generate(sha, Key, 7, 6)),
- ?assertEqual(399871, couch_hotp:generate(sha, Key, 8, 6)),
- ?assertEqual(520489, couch_hotp:generate(sha, Key, 9, 6)).
diff --git a/src/couch/test/eunit/couch_index_tests.erl b/src/couch/test/eunit/couch_index_tests.erl
deleted file mode 100644
index 368f7a059..000000000
--- a/src/couch/test/eunit/couch_index_tests.erl
+++ /dev/null
@@ -1,273 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("stdlib/include/ms_transform.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- tracer_new(),
- DbName.
-
-teardown(DbName) ->
- tracer_delete(),
- couch_server:delete(DbName, [?ADMIN_CTX]).
-
-couch_index_ioq_priority_test_() ->
- {
- "Test ioq_priority for views",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun check_io_priority_for_updater/1,
- fun check_io_priority_for_compactor/1
- ]
- }
- }
- }.
-
-check_io_priority_for_updater(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>
- ),
- CouchIndexUpdaterPid = updater_pid(IndexerPid),
- tracer_record(CouchIndexUpdaterPid),
-
- create_docs(DbName),
-
- CommittedSeq = couch_util:with_db(DbName, fun(Db) -> couch_db:get_update_seq(Db) end),
- couch_index:get_state(IndexerPid, CommittedSeq),
- [UpdaterPid] = wait_spawn_event_for_pid(CouchIndexUpdaterPid),
-
- [UpdaterMapProcess] = wait_spawn_by_anonymous_fun(
- UpdaterPid, '-start_update/4-fun-0-'
- ),
-
- ?assert(
- wait_set_io_priority(
- UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>}
- )
- ),
-
- [UpdaterWriterProcess] = wait_spawn_by_anonymous_fun(
- UpdaterPid, '-start_update/4-fun-1-'
- ),
- ?assert(
- wait_set_io_priority(
- UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>}
- )
- ),
-
- ok
- end).
-
-check_io_priority_for_compactor(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>
- ),
- {ok, CompactorPid} = couch_index:get_compactor_pid(IndexerPid),
- tracer_record(CompactorPid),
-
- create_docs(DbName),
-
- couch_index:compact(IndexerPid),
- wait_spawn_event_for_pid(CompactorPid),
-
- [CompactorProcess] = wait_spawn_by_anonymous_fun(
- CompactorPid, '-handle_call/3-fun-0-'
- ),
- ?assert(
- wait_set_io_priority(
- CompactorProcess, {view_compact, DbName, <<"_design/foo">>}
- )
- ),
- ok
- end).
-
-create_docs(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
- ]}
- ),
- Doc2 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
- ]}
- ),
- Doc3 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
- ]}
- ),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
- couch_db:close(Db).
-
-create_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>,
- {[
- {ViewName,
- {[
- {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
- ]}}
- ]}}
- ]}
- ),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-wait_set_io_priority(Pid, IOPriority) ->
- test_util:wait_value(
- fun() ->
- does_process_set_io_priority(Pid, IOPriority)
- end,
- true
- ).
-
-does_process_set_io_priority(Pid, IOPriority) ->
- PutCallsArgs = find_calls_to_fun(Pid, {erlang, put, 2}),
- lists:any(fun([_, Priority]) -> Priority =:= IOPriority end, PutCallsArgs).
-
-wait_events(MatchSpec) ->
- test_util:wait_other_value(fun() -> select(MatchSpec) end, []).
-
-find_spawned_by_anonymous_fun(ParentPid, Name) ->
- AnonymousFuns = select(
- ets:fun2ms(fun({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]}) when
- is_function(Fun) andalso PPid =:= ParentPid
- ->
- {Pid, Fun}
- end)
- ),
- lists:filtermap(
- fun({Pid, Fun}) ->
- case erlang:fun_info(Fun, name) of
- {name, Name} -> {true, Pid};
- _ -> false
- end
- end,
- AnonymousFuns
- ).
-
-find_calls_to_fun(Pid, {Module, Function, Arity}) ->
- select(
- ets:fun2ms(fun({call, P, _TS, _Name, _Dict, [{M, F, Args}]}) when
- length(Args) =:= Arity andalso
- M =:= Module andalso
- F =:= Function andalso
- P =:= Pid
- ->
- Args
- end)
- ).
-
-wait_spawn_event_for_pid(ParentPid) ->
- wait_events(
- ets:fun2ms(fun({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid end)
- ).
-
-wait_spawn_by_anonymous_fun(ParentPid, Name) ->
- test_util:wait_other_value(
- fun() ->
- find_spawned_by_anonymous_fun(ParentPid, Name)
- end,
- []
- ).
-
-updater_pid(IndexerPid) ->
- {links, Links} = process_info(IndexerPid, links),
- [Pid] = select_process_by_name_prefix(Links, "couch_index_updater:init/1"),
- Pid.
-
-select_process_by_name_prefix(Pids, Name) ->
- lists:filter(
- fun(Pid) ->
- Key = couch_debug:process_name(Pid),
- string:str(Key, Name) =:= 1
- end,
- Pids
- ).
-
-select(MatchSpec) ->
- lists:filtermap(
- fun(Event) ->
- case ets:test_ms(Event, MatchSpec) of
- {ok, false} -> false;
- {ok, Result} -> {true, Result};
- _ -> false
- end
- end,
- tracer_events()
- ).
-
-%% ========================
-%% Tracer related functions
-%% ------------------------
-tracer_new() ->
- ets:new(?MODULE, [public, named_table]),
- {ok, _Tracer} = dbg:tracer(process, {fun tracer_collector/2, 0}),
- ok.
-
-tracer_delete() ->
- dbg:stop_clear(),
- (catch ets:delete(?MODULE)),
- ok.
-
-tracer_record(Pid) ->
- {ok, _} = dbg:tp(erlang, put, x),
- {ok, _} = dbg:p(Pid, [c, p, sos]),
- ok.
-
-tracer_events() ->
- Events = [{Idx, E} || [Idx, E] <- ets:match(?MODULE, {{trace, '$1'}, '$2'})],
- {_, Sorted} = lists:unzip(lists:keysort(1, Events)),
- Sorted.
-
-tracer_collector(Msg, Seq) ->
- ets:insert(?MODULE, {{trace, Seq}, normalize_trace_msg(Msg)}),
- Seq + 1.
-
-normalize_trace_msg(TraceMsg) ->
- case tuple_to_list(TraceMsg) of
- [trace_ts, Pid, Type | Info] ->
- {TraceInfo, [Timestamp]} = lists:split(length(Info) - 1, Info),
- {Type, Pid, Timestamp, couch_debug:process_name(Pid), process_info(Pid), TraceInfo};
- [trace, Pid, Type | TraceInfo] ->
- {Type, Pid, os:timestamp(), couch_debug:process_name(Pid), process_info(Pid), TraceInfo}
- end.
diff --git a/src/couch/test/eunit/couch_js_tests.erl b/src/couch/test/eunit/couch_js_tests.erl
deleted file mode 100644
index 1079678da..000000000
--- a/src/couch/test/eunit/couch_js_tests.erl
+++ /dev/null
@@ -1,200 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_js_tests).
--include_lib("eunit/include/eunit.hrl").
-
-couch_js_test_() ->
- {
- "Test couchjs",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- [
- fun should_create_sandbox/0,
- fun should_roundtrip_utf8/0,
- fun should_roundtrip_modified_utf8/0,
- fun should_replace_broken_utf16/0,
- fun should_allow_js_string_mutations/0,
- {timeout, 60000, fun should_exit_on_oom/0}
- ]
- }
- }.
-
-should_create_sandbox() ->
- % Try and detect whether we can see out of the
- % sandbox or not.
- Src = <<
- "function(doc) {\n"
- " try {\n"
- " emit(false, typeof(Couch.compile_function));\n"
- " } catch (e) {\n"
- " emit(true, e.message);\n"
- " }\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]),
- ?assertEqual([[[true, <<"Couch is not defined">>]]], Result).
-
-should_roundtrip_utf8() ->
- % Try round tripping UTF-8 both directions through
- % couchjs. These tests use hex encoded values of
- % Ä (C384) and Ü (C39C) so as to avoid odd editor/Erlang encoding
- % strangeness.
- Src = <<
- "function(doc) {\n"
- " emit(doc.value, \"",
- 16#C3,
- 16#9C,
- "\");\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc =
- {[
- {<<"value">>, <<16#C3, 16#84>>}
- ]},
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
- ?assertEqual([[[<<16#C3, 16#84>>, <<16#C3, 16#9C>>]]], Result).
-
-should_roundtrip_modified_utf8() ->
-    % Mimicking the test case from the mailing list
- Src = <<
- "function(doc) {\n"
- " emit(doc.value.toLowerCase(), \"",
- 16#C3,
- 16#9C,
- "\");\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc =
- {[
- {<<"value">>, <<16#C3, 16#84>>}
- ]},
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
- ?assertEqual([[[<<16#C3, 16#A4>>, <<16#C3, 16#9C>>]]], Result).
-
-should_replace_broken_utf16() ->
-    % This test reverses the surrogate pair of
- % the Boom emoji U+1F4A5
- Src = <<
- "function(doc) {\n"
- " emit(doc.value.split(\"\").reverse().join(\"\"), 1);\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc =
- {[
- {<<"value">>, list_to_binary(xmerl_ucs:to_utf8([16#1F4A5]))}
- ]},
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
- % Invalid UTF-8 gets replaced with the 16#FFFD replacement
- % marker
- Markers = list_to_binary(xmerl_ucs:to_utf8([16#FFFD, 16#FFFD])),
- ?assertEqual([[[Markers, 1]]], Result).
-
-should_allow_js_string_mutations() ->
- % This binary corresponds to this string: мама мыла раму
- % Which I'm told translates to: "mom was washing the frame"
- MomWashedTheFrame = <<
- 16#D0,
- 16#BC,
- 16#D0,
- 16#B0,
- 16#D0,
- 16#BC,
- 16#D0,
- 16#B0,
- 16#20,
- 16#D0,
- 16#BC,
- 16#D1,
- 16#8B,
- 16#D0,
- 16#BB,
- 16#D0,
- 16#B0,
- 16#20,
- 16#D1,
- 16#80,
- 16#D0,
- 16#B0,
- 16#D0,
- 16#BC,
- 16#D1,
- 16#83
- >>,
- Mom = <<16#D0, 16#BC, 16#D0, 16#B0, 16#D0, 16#BC, 16#D0, 16#B0>>,
- Washed = <<16#D0, 16#BC, 16#D1, 16#8B, 16#D0, 16#BB, 16#D0, 16#B0>>,
- Src1 = <<
- "function(doc) {\n"
- " emit(\"length\", doc.value.length);\n"
- "}\n"
- >>,
- Src2 = <<
- "function(doc) {\n"
- " emit(\"substring\", doc.value.substring(5, 9));\n"
- "}\n"
- >>,
- Src3 = <<
- "function(doc) {\n"
- " emit(\"slice\", doc.value.slice(0, 4));\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src1]),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src2]),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src3]),
- Doc = {[{<<"value">>, MomWashedTheFrame}]},
- Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
- Expect = [
- [[<<"length">>, 14]],
- [[<<"substring">>, Washed]],
- [[<<"slice">>, Mom]]
- ],
- ?assertEqual(Expect, Result).
-
-should_exit_on_oom() ->
- Src = <<
- "var state = [];\n"
- "function(doc) {\n"
- " var val = \"0123456789ABCDEF\";\n"
- " for(var i = 0; i < 665535; i++) {\n"
- " state.push([val, val]);\n"
- " emit(null, null);\n"
- " }\n"
- "}\n"
- >>,
- Proc = couch_query_servers:get_os_process(<<"javascript">>),
- true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- trigger_oom(Proc).
-
-trigger_oom(Proc) ->
- Status =
- try
- couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]),
- continue
- catch
- throw:{os_process_error, {exit_status, 1}} ->
- done
- end,
- case Status of
- continue -> trigger_oom(Proc);
- done -> ok
- end.
diff --git a/src/couch/test/eunit/couch_key_tree_prop_tests.erl b/src/couch/test/eunit/couch_key_tree_prop_tests.erl
deleted file mode 100644
index d6ed26553..000000000
--- a/src/couch/test/eunit/couch_key_tree_prop_tests.erl
+++ /dev/null
@@ -1,531 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_key_tree_prop_tests).
-
--ifdef(WITH_PROPER).
-
--include_lib("couch/include/couch_eunit_proper.hrl").
-
-% How much to reduce size with tree depth.
--define(SIZE_REDUCTION, 3).
-% Maximum number of branches.
--define(MAX_BRANCHES, 4).
--define(RAND_SIZE, 1 bsl 64).
-
-property_test_() ->
- ?EUNIT_QUICKCHECK(60).
-
-%
-% Properties
-%
-
-% Merge random paths from a revtree into itself. Check that no revisions have
-% been lost in the process and that the result is one of the 3 expected values.
-%
-prop_revtree_merge_with_subset_of_own_nodes() ->
- ?FORALL(
- Revs,
- g_revs(),
- ?FORALL(
- {RevTree, Branch},
- {g_revtree(Revs), g_revtree(Revs, 1)},
- ?IMPLIES(
- length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
- begin
- {Merged, Result} = couch_key_tree:merge(RevTree, hd(Branch)),
- lists:member(Result, [new_leaf, new_branch, internal_node]) andalso
- same_keys(RevTree ++ Branch, Merged) andalso
- valid_revtree(Merged)
- end
- )
- )
- ).
-
-% Merge random trees into a revtree.
-%
-prop_revtree_merge_random_nodes() ->
- ?FORALL(
- {RevTree, Branch},
- {g_revtree(), g_revtree([], 1)},
- ?IMPLIES(
- length(Branch) > 0,
- begin
- {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
- valid_revtree(Merged)
- end
- )
- ).
-
-% Merge a mix of random and existing revtree paths into a revtree
-%
-prop_revtree_merge_some_existing_some_new() ->
- ?FORALL(
- RevTree,
- g_revtree(),
- ?FORALL(
- Branch,
- begin
- KeyList = keylist(RevTree),
- Half = lists:sublist(KeyList, length(KeyList) div 2),
- g_revtree(Half, 1)
- end,
- ?IMPLIES(
- length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
- begin
- {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
- valid_revtree(Merged)
- end
- )
- )
- ).
-
-% Stem deeper than the current max level. Expect no changes to the revtree
-%
-prop_no_change_stemming_deeper_than_current_depth() ->
- ?FORALL(
- RevTree,
- g_revtree(),
- begin
- StemDepth = depth(RevTree) + 1,
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- StemmedKeys = lists:usort(keylist(Stemmed)),
- InputKeys = lists:usort(keylist(RevTree)),
- StemmedKeys == InputKeys
- end
- ).
-
-% Stem at a random small depth, make sure that the resulting tree has
-% unique revisions and the same number of revisions as the input or fewer
-%
-prop_stemming_results_in_same_or_less_total_revs() ->
- ?FORALL(
- {RevTree, StemDepth},
- {g_revtree(), choose(1, 20)},
- begin
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- OldRealDepth = real_depth(RevTree),
- StemmedKeys = keylist(Stemmed),
- UniqueStemmedKeys = lists:usort(StemmedKeys),
- UniqueInputKeys = lists:usort(keylist(RevTree)),
- NewRealDepth = real_depth(Stemmed),
- length(StemmedKeys) == length(UniqueStemmedKeys) andalso
- length(UniqueStemmedKeys) =< length(UniqueInputKeys) andalso
- OldRealDepth >= NewRealDepth
- end
- ).
-
-% Generate a longer path (a revtree with no branches), then stem it.
-% Always expect it to shrink to the stemmed depth.
-prop_stem_path_expect_size_to_get_smaller() ->
- ?FORALL(
- {RevTree, StemDepth},
- {
- ?SIZED(Size, g_revtree(Size * 10, [], 1)),
- choose(1, 3)
- },
- ?IMPLIES(
- real_depth(RevTree) > 3,
- begin
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- StemmedKeys = lists:usort(keylist(Stemmed)),
- InputKeys = lists:usort(keylist(RevTree)),
- length(InputKeys) > length(StemmedKeys) andalso
- real_depth(Stemmed) == StemDepth
- end
- )
- ).
-
-% After stemming, all leaves are still present
-prop_after_stemming_all_leaves_are_present() ->
- ?FORALL(
- {RevTree, StemDepth},
- {g_revtree(), choose(1, 20)},
- begin
- OldRealDepth = real_depth(RevTree),
- OldLeaves = leaves(RevTree),
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- NewRealDepth = real_depth(Stemmed),
- NewLeaves = leaves(Stemmed),
- valid_revtree(Stemmed) andalso
- OldRealDepth >= NewRealDepth andalso
- OldLeaves == NewLeaves
- end
- ).
-
-% After stemming, paths to the root didn't get longer
-prop_after_stemming_paths_are_shorter() ->
- ?FORALL(
- {StemDepth, RevTree},
- {choose(2, 10), g_revtree()},
- begin
- OldPaths = paths(RevTree),
- Stemmed = couch_key_tree:stem(RevTree, StemDepth),
- NewPaths = paths(Stemmed),
- GrowingPaths = orddict:fold(
- fun(Rev, Path, Acc) ->
- OldPath = orddict:fetch(Rev, OldPaths),
- case length(Path) > length(OldPath) of
- true ->
- [{Rev, Path, OldPath} | Acc];
- false ->
- Acc
- end
- end,
- [],
- NewPaths
- ),
- valid_revtree(Stemmed) andalso GrowingPaths == []
- end
- ).
-
-% Check leaf count
-prop_leaf_count() ->
- ?FORALL(
- RevTree,
- g_revtree(),
- length(leaves(RevTree)) == couch_key_tree:count_leafs(RevTree)
- ).
-
-% Check get leafs
-prop_get_leafs() ->
- ?FORALL(
- RevTree,
- g_revtree(),
- begin
- LeafsFull = couch_key_tree:get_all_leafs(RevTree),
- lists:usort([Rev || {_V, {_D, [Rev | _]}} <- LeafsFull]) == leaves(RevTree)
- end
- ).
-
-%
-% Generators
-%
-
-% Generate a full rev tree. Most of the forms are just there to set up default
-% parameters; g_revtree/3 does all the heavy lifting.
-%
-
-g_revtree() ->
- ?SIZED(Size, g_revtree(Size)).
-
-g_revtree(Size) when is_integer(Size) ->
- g_revtree(Size, [], ?MAX_BRANCHES);
-g_revtree(Revs) when is_list(Revs) ->
- ?SIZED(Size, g_revtree(Size, Revs, ?MAX_BRANCHES)).
-
-g_revtree(Size, Revs) when is_integer(Size), is_list(Revs) ->
- g_revtree(Size, Revs, ?MAX_BRANCHES);
-g_revtree(Revs, MaxBranches) when is_list(Revs), is_integer(MaxBranches) ->
- ?SIZED(Size, g_revtree(Size, Revs, MaxBranches)).
-
-g_revtree(0, _Revs, _MaxBranches) ->
- [];
-g_revtree(Size, ERevs, MaxBranches) ->
- ?LET(
- {Depth, Revs},
- {g_stem_depth(Size), g_revs(Size, ERevs)},
- [{Depth, g_treenode(Size, Revs, MaxBranches)}]
- ).
-
-% Generate a tree node and then recursively generate its children.
-%
-g_treenode(0, Revs, _) ->
- {elements(Revs), x, []};
-g_treenode(Size, Revs, MaxBranches) ->
- ?LAZY(
- ?LET(
- N,
- choose(0, MaxBranches),
- begin
- [Rev | ChildRevs] = Revs,
- {Rev, x, g_nodes(Size div ?SIZE_REDUCTION, N, ChildRevs, MaxBranches)}
- end
- )
- ).
-
-% Generate a list of child nodes. Depending on how many children there are,
-% the pre-generated revision list is split into that many sublists.
-%
-g_nodes(0, _N, _Revs, _MaxBranches) ->
- [];
-g_nodes(_Size, 0, _Revs, _MaxBranches) ->
- [];
-g_nodes(Size, ChildCount, Revs, MaxBranches) ->
- ?LETSHRINK(
- ChildNodes,
- begin
- ChildRevList = child_revs(ChildCount, Revs, Size, MaxBranches),
- [g_treenode(Size, ChildRevs, MaxBranches) || ChildRevs <- ChildRevList]
- end,
- ordered_nodes(ChildNodes)
- ).
-
-% Generate each subtree's stem depth
-%
-
-g_stem_depth(Size) ->
- choose(0, expected_height(Size, ?SIZE_REDUCTION) div 2).
-
-% Shuffles the input list. The unshuffled list is
-% used as the shrink value.
-%
-g_shuffle([]) ->
- [];
-g_shuffle(L) when is_list(L) ->
- ?LET(X, elements(L), [X | g_shuffle(lists:delete(X, L))]).
-
-% Wrapper to make a list shuffling generator that doesn't shrink
-%
-g_shuffle_noshrink(L) when is_list(L) ->
- proper_types:noshrink(g_shuffle(L)).
-
-% Generate shuffled sublists up to N items long from a list.
-%
-g_shuffled_sublists(L, N) ->
- ?LET(Shuffled, g_shuffle_noshrink(L), lists:sublist(Shuffled, N)).
-
-% Generate revision lists.
-%
-g_revs() ->
- ?SIZED(Size, g_revs(Size)).
-
-g_revs(Size) when is_integer(Size) ->
- g_revs(Size, []).
-
-g_revs(Size, Existing) when is_integer(Size), is_list(Existing) ->
- Expected = keys_needed(Size, ?SIZE_REDUCTION, ?MAX_BRANCHES),
- Revs = revs(Expected, Existing),
- case length(Revs) > Expected of
- % have extra, try various sublists
- true ->
- g_shuffled_sublists(Revs, Expected);
- false ->
- proper_types:return(Revs)
- end.
-
-%
-% Helper functions
-%
-
-valid_revtree(RevTree) ->
- repeating_revs(levels(RevTree)) == [] andalso children_sorted(RevTree).
-
-same_keys(RevTree1, RevTree2) ->
- Keys1 = lists:usort(keylist(RevTree1)),
- Keys2 = lists:usort(keylist(RevTree2)),
- Keys1 == Keys2.
-
-all(L) ->
- lists:all(fun(E) -> E end, L).
-
-% Generate a list of relatively unique large random numbers
-rand_list(N) when N =< 0 ->
- [];
-rand_list(N) ->
- [rand:uniform(?RAND_SIZE) || _ <- lists:seq(1, N)].
-
-% Generate a list of revisions to be used as keys in revision trees. Expected
-% must be the maximum expected number of nodes in a revision tree. Existing is an
-% optional list of revisions which must be included in the result. The output
-% list is sorted.
-revs(0, _Existing) ->
- [];
-revs(Expected, Existing) when is_integer(Expected), is_list(Existing) ->
- Need = Expected - length(Existing),
- lists:usort(lists:append(Existing, rand_list(Need))).
-
-% Get the list of all the keys in a revision tree. The input can also be
-% an individual tree (tagged with the depth to the virtual root) or a node.
-% Yes, this is not tail recursive but the idea is to keep it simple.
-%
-keylist({_D, Node}) when is_tuple(Node) ->
- keylist(Node);
-keylist({K, _V, Nodes}) ->
- [K | keylist(Nodes)];
-keylist(Nodes) ->
- lists:append([keylist(Node) || Node <- Nodes]).
-
-% Get the list of leaves from a revision tree.
-leaves([]) ->
- [];
-leaves({_D, Node}) when is_tuple(Node) ->
- leaves(Node);
-leaves({K, _V, []}) ->
- [K];
-leaves({_K, _V, Nodes}) ->
- leaves(Nodes);
-leaves(Nodes) ->
- lists:usort(lists:append([leaves(N) || N <- Nodes])).
-
-% Get paths from leaf to root. Result is an orddict of [{LeafRev, [Rev]}]
-%
-paths([]) ->
- orddict:new();
-paths(RevTree) when is_list(RevTree) ->
- paths_merge_dicts([paths(T) || T <- RevTree]);
-paths({_Depth, Node}) when is_tuple(Node) ->
- paths(Node);
-paths({K, _V, []}) ->
- orddict:store(K, [], orddict:new());
-paths({K, _V, Nodes}) ->
- CombinedDict = paths_merge_dicts([paths(N) || N <- Nodes]),
- orddict:map(fun(_LeafKey, Path) -> Path ++ [K] end, CombinedDict).
-
-paths_merge_dicts(Dicts) ->
- lists:foldl(
- fun(D, AccD) ->
- orddict:merge(
- fun(K, V1, V2) ->
- throw({found_duplicates, K, V1, V2})
- end,
- D,
- AccD
- )
- end,
- orddict:new(),
- Dicts
- ).
-
-% Get lists of all the keys at each depth level. Result is an orddict that
-% looks like [{depth, [key]}]. The depth used here is the "virtual" depth as
-% indicated by the stemmed depth tag that goes with every top level subtree.
-%
-levels([]) ->
- orddict:new();
-levels(RevTree) when is_list(RevTree) ->
- lists:foldl(fun(T, Dict) -> levels(T, Dict) end, orddict:new(), RevTree).
-
-levels({Depth, Node}, Dict) when is_tuple(Node) ->
- levels(Node, Depth, Dict).
-
-levels({K, _V, Nodes}, Depth, Dict) ->
- Dict1 =
- case orddict:is_key(Depth, Dict) of
- true -> orddict:append(Depth, K, Dict);
- false -> orddict:store(Depth, [K], Dict)
- end,
- levels(Nodes, Depth + 1, Dict1);
-levels(Nodes, Depth, Dict) ->
- lists:foldl(
- fun(Node, AccDict) ->
- levels(Node, Depth, AccDict)
- end,
- Dict,
- Nodes
- ).
-
-% Using the output of levels/1 as input, return any repeating revisions if
-% there are any at a particular level. Levels with no repeated revisions are
-% not returned.
-%
-repeating_revs(Dict) ->
- orddict:filter(
- fun(_Depth, Revs) ->
- length(lists:usort(Revs)) =/= length(Revs)
- end,
- Dict
- ).
-
-% Check that children of all nodes are sorted
-children_sorted([]) ->
- true;
-children_sorted(Nodes) when is_list(Nodes) ->
- all([children_sorted(N) || N <- Nodes]);
-children_sorted({_D, Node}) when is_tuple(Node) ->
- children_sorted(Node);
-children_sorted({_K, _V, Nodes}) ->
- children_sorted(Nodes).
-
-% Get the maximum depth of a revtree. The depth is "virtual" as it takes into
-% account the distance to the now stemmed root node as indicated by the top
-% level subtrees.
-%
-depth([]) ->
- 0;
-depth(RevTree) when is_list(RevTree) ->
- lists:max([depth(T) || T <- RevTree]);
-depth({Depth, Node}) when is_tuple(Node) ->
- depth(Node, Depth - 1).
-
-depth({_K, _V, Nodes}, Depth) ->
- depth(Nodes, Depth + 1);
-depth([], Depth) ->
- Depth;
-depth(Nodes, Depth) ->
- lists:max([depth(Node, Depth) || Node <- Nodes]).
-
-% Get the "real" tree depth, not the virtual one. As revtrees gets stemmed they
-% will keep their virtual depth but the actual number of nodes in the tree
-% could be reduced.
-%
-real_depth([]) ->
- 0;
-real_depth(RevTree) when is_list(RevTree) ->
- lists:max([real_depth(T) || T <- RevTree]);
-real_depth({_Depth, Node}) when is_tuple(Node) ->
- % Note from here on use the depth/3 function
- depth(Node, 0).
-
-% Return an ordered list of revtree nodes. When sorting, only immediate keys
-% (revisions) are looked at and the comparison doesn't descend into the tree.
-%
-ordered_nodes(Nodes) ->
- lists:sort(fun({K1, _, _}, {K2, _, _}) -> K1 =< K2 end, Nodes).
-
-% Calculate the maximum number of rev tree nodes needed for a tree of a given
-% height and branchiness. Height is derived from Size and LevelReductionFactor,
-% that is, how big the sample should be and how quickly the size parameter
-% shrinks on each level.
-%
-keys_needed(0, _, _) ->
- 0;
-keys_needed(Size, LevelReductionFactor, 1) ->
- expected_height(Size, LevelReductionFactor);
-keys_needed(Size, LevelReductionFactor, Branches) ->
- Height = expected_height(Size, LevelReductionFactor),
- trunc(math:pow(Branches, Height + 1)) + 1.
-
-% Calculate expected tree height for a given sample size and branchiness.
-% At each step the size is divided by the reduction factor.
-expected_height(Size, LevelReductionFactor) ->
- trunc(log(LevelReductionFactor, Size)) + 1.
-
-log(B, X) ->
- math:log(X) / math:log(B).
-
-% Distribute items in a list into roughly equal chunks of a given size.
-%
-distribute(_ChunkSize, []) ->
- [];
-distribute(ChunkSize, L) when ChunkSize >= length(L) ->
- [L];
-distribute(ChunkSize, L) ->
- {L1, L2} = lists:split(ChunkSize, L),
- [L1 | distribute(ChunkSize, L2)].
-
-% Split a single (parent) revision list into chunks (sub-lists), one for each
-% child. Also, for safety, double-check that at this point in the process the
-% list of revisions is sufficiently large. If it isn't, something went wrong and
-% a specific exception is thrown ({not_enough_revisions, Got, Needed}).
-%
-child_revs(ChildCount, Revs, Size, MaxBranches) ->
- NeedKeys = keys_needed(Size, ?SIZE_REDUCTION, MaxBranches),
- case length(Revs) >= NeedKeys of
- true ->
- ChunkSize = trunc(length(Revs) / ChildCount) + 1,
- distribute(ChunkSize, Revs);
- false ->
- throw({not_enough_revisions, length(Revs), NeedKeys})
- end.
-
--endif.
diff --git a/src/couch/test/eunit/couch_key_tree_tests.erl b/src/couch/test/eunit/couch_key_tree_tests.erl
deleted file mode 100644
index d191541d4..000000000
--- a/src/couch/test/eunit/couch_key_tree_tests.erl
+++ /dev/null
@@ -1,576 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_key_tree_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(DEPTH, 10).
-
-key_tree_merge_test_() ->
- {
- "Key tree merge",
- [
- should_merge_with_empty_tree(),
- should_merge_reflexive(),
- should_merge_prefix_of_a_tree_with_tree(),
- should_produce_conflict_on_merge_with_unrelated_branch(),
- should_merge_reflexive_for_child_nodes(),
- should_merge_tree_to_itself(),
- should_merge_tree_of_odd_length(),
- should_merge_tree_with_stem(),
- should_merge_with_stem_at_deeper_level(),
- should_merge_with_stem_at_deeper_level_with_deeper_paths(),
- should_merge_single_tree_with_deeper_stem(),
- should_merge_tree_with_large_stem(),
- should_merge_stems(),
- should_create_conflicts_on_merge(),
- should_create_no_conflicts_on_merge(),
- should_ignore_conflicting_branch()
- ]
- }.
-
-key_tree_missing_leaves_test_() ->
- {
- "Missing tree leaves",
- [
- should_not_find_missing_leaves(),
- should_find_missing_leaves()
- ]
- }.
-
-key_tree_remove_leaves_test_() ->
- {
- "Remove tree leaves",
- [
- should_have_no_effect_on_removing_no_leaves(),
- should_have_no_effect_on_removing_non_existant_branch(),
- should_remove_leaf(),
- should_produce_empty_tree_on_removing_all_leaves(),
- should_have_no_effect_on_removing_non_existant_node(),
- should_produce_empty_tree_on_removing_last_leaf()
- ]
- }.
-
-key_tree_get_leaves_test_() ->
- {
- "Leaves retrieving",
- [
- should_extract_subtree(),
- should_extract_subsubtree(),
- should_gather_non_existant_leaf(),
- should_gather_leaf(),
- shoul_gather_multiple_leaves(),
- should_gather_single_leaf_for_multiple_revs(),
- should_gather_multiple_for_multiple_revs(),
- should_retrieve_full_key_path(),
- should_retrieve_full_key_path_for_node(),
- should_retrieve_leaves_with_parent_node(),
- should_retrieve_all_leaves()
- ]
- }.
-
-key_tree_leaf_counting_test_() ->
- {
- "Leaf counting",
- [
- should_have_no_leaves_for_empty_tree(),
- should_have_single_leaf_for_tree_with_single_node(),
- should_have_two_leaves_for_tree_with_chindler_siblings(),
- should_not_affect_on_leaf_counting_for_stemmed_tree()
- ]
- }.
-
-key_tree_stemming_test_() ->
- {
- "Stemming",
- [
- should_have_no_effect_for_stemming_more_levels_than_exists(),
- should_return_one_deepest_node(),
- should_return_two_deepest_nodes(),
- should_not_use_excessive_memory_when_stemming()
- ]
- }.
-
-should_merge_with_empty_tree() ->
- One = {1, {"1", "foo", []}},
- ?_assertEqual(
- {[One], new_leaf},
- merge_and_stem([], One)
- ).
-
-should_merge_reflexive() ->
- One = {1, {"1", "foo", []}},
- ?_assertEqual(
- {[One], internal_node},
- merge_and_stem([One], One)
- ).
-
-should_merge_prefix_of_a_tree_with_tree() ->
- One = {1, {"1", "foo", []}},
- TwoSibs = [
- {1, {"1", "foo", []}},
- {1, {"2", "foo", []}}
- ],
- ?_assertEqual(
- {TwoSibs, internal_node},
- merge_and_stem(TwoSibs, One)
- ).
-
-should_produce_conflict_on_merge_with_unrelated_branch() ->
- TwoSibs = [
- {1, {"1", "foo", []}},
- {1, {"2", "foo", []}}
- ],
- Three = {1, {"3", "foo", []}},
- ThreeSibs = [
- {1, {"1", "foo", []}},
- {1, {"2", "foo", []}},
- {1, {"3", "foo", []}}
- ],
- ?_assertEqual(
- {ThreeSibs, new_branch},
- merge_and_stem(TwoSibs, Three)
- ).
-
-should_merge_reflexive_for_child_nodes() ->
- TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual(
- {[TwoChild], internal_node},
- merge_and_stem([TwoChild], TwoChild)
- ).
-
-should_merge_tree_to_itself() ->
- TwoChildSibs =
- {1,
- {"1", "foo", [
- {"1a", "bar", []},
- {"1b", "bar", []}
- ]}},
- Leafs = couch_key_tree:get_all_leafs([TwoChildSibs]),
- Paths = lists:map(fun leaf_to_path/1, Leafs),
- FinalTree = lists:foldl(
- fun(Path, TreeAcc) ->
- {NewTree, internal_node} = merge_and_stem(TreeAcc, Path),
- NewTree
- end,
- [TwoChildSibs],
- Paths
- ),
- ?_assertEqual([TwoChildSibs], FinalTree).
-
-leaf_to_path({Value, {Start, Keys}}) ->
- [Branch] = to_branch(Value, lists:reverse(Keys)),
- {Start - length(Keys) + 1, Branch}.
-
-to_branch(Value, [Key]) ->
- [{Key, Value, []}];
-to_branch(Value, [Key | RestKeys]) ->
- [{Key, [], to_branch(Value, RestKeys)}].
-
-should_merge_tree_of_odd_length() ->
- TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- TwoChildSibs =
- {1,
- {"1", "foo", [
- {"1a", "bar", []},
- {"1b", "bar", []}
- ]}},
- TwoChildPlusSibs =
- {1,
- {"1", "foo", [
- {"1a", "bar", [{"1aa", "bar", []}]},
- {"1b", "bar", []}
- ]}},
- ?_assertEqual(
- {[TwoChildPlusSibs], new_leaf},
- merge_and_stem([TwoChildSibs], TwoChild)
- ).
-
-should_merge_tree_with_stem() ->
- Stemmed = {2, {"1a", "bar", []}},
- TwoChildSibs =
- {1,
- {"1", "foo", [
- {"1a", "bar", []},
- {"1b", "bar", []}
- ]}},
-
- ?_assertEqual(
- {[TwoChildSibs], internal_node},
- merge_and_stem([TwoChildSibs], Stemmed)
- ).
-
-should_merge_with_stem_at_deeper_level() ->
- Stemmed = {3, {"1bb", "boo", []}},
- TwoChildSibs =
- {1,
- {"1", "foo", [
- {"1a", "bar", []},
- {"1b", "bar", [{"1bb", "boo", []}]}
- ]}},
- ?_assertEqual(
- {[TwoChildSibs], internal_node},
- merge_and_stem([TwoChildSibs], Stemmed)
- ).
-
-should_merge_with_stem_at_deeper_level_with_deeper_paths() ->
- Stemmed = {3, {"1bb", "boo", []}},
- StemmedTwoChildSibs = [
- {2, {"1a", "bar", []}},
- {2, {"1b", "bar", [{"1bb", "boo", []}]}}
- ],
- ?_assertEqual(
- {StemmedTwoChildSibs, internal_node},
- merge_and_stem(StemmedTwoChildSibs, Stemmed)
- ).
-
-should_merge_single_tree_with_deeper_stem() ->
- Stemmed = {3, {"1aa", "bar", []}},
- TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual(
- {[TwoChild], internal_node},
- merge_and_stem([TwoChild], Stemmed)
- ).
-
-should_merge_tree_with_large_stem() ->
- Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
- TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual(
- {[TwoChild], internal_node},
- merge_and_stem([TwoChild], Stemmed)
- ).
-
-should_merge_stems() ->
- StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
- StemmedB = {3, {"1aa", "bar", []}},
- ?_assertEqual(
- {[StemmedA], internal_node},
- merge_and_stem([StemmedA], StemmedB)
- ).
-
-should_create_conflicts_on_merge() ->
- OneChild = {1, {"1", "foo", [{"1a", "bar", []}]}},
- Stemmed = {3, {"1aa", "bar", []}},
- ?_assertEqual(
- {[OneChild, Stemmed], new_branch},
- merge_and_stem([OneChild], Stemmed)
- ).
-
-should_create_no_conflicts_on_merge() ->
- OneChild = {1, {"1", "foo", [{"1a", "bar", []}]}},
- Stemmed = {3, {"1aa", "bar", []}},
- TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual(
- {[TwoChild], new_leaf},
- merge_and_stem([OneChild, Stemmed], TwoChild)
- ).
-
-should_ignore_conflicting_branch() ->
- %% this test is based on couch-902-test-case2.py
- %% foo has conflicts from replication at depth two
- %% foo3 is the current value
- Foo =
- {1,
- {"foo", "val1", [
- {"foo2", "val2", []},
- {"foo3", "val3", []}
- ]}},
- %% foo now has an attachment added, which leads to foo4 and val4
- %% off foo3
- Bar = {1, {"foo", [], [{"foo3", [], [{"foo4", "val4", []}]}]}},
- %% this is what the merge returns
- %% note that it ignores the conflicting branch as there's no match
- FooBar =
- {1,
- {"foo", "val1", [
- {"foo2", "val2", []},
- {"foo3", "val3", [{"foo4", "val4", []}]}
- ]}},
- {
- "COUCHDB-902",
- ?_assertEqual(
- {[FooBar], new_leaf},
- merge_and_stem([Foo], Bar)
- )
- }.
-
-should_not_find_missing_leaves() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- [],
- couch_key_tree:find_missing(
- TwoChildSibs,
- [{0, "1"}, {1, "1a"}]
- )
- ).
-
-should_find_missing_leaves() ->
- Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- Stemmed2 = [{2, {"1aa", "bar", []}}],
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- [
- ?_assertEqual(
- [{0, "10"}, {100, "x"}],
- couch_key_tree:find_missing(
- TwoChildSibs,
- [{0, "1"}, {0, "10"}, {1, "1a"}, {100, "x"}]
- )
- ),
- ?_assertEqual(
- [{0, "1"}, {100, "x"}],
- couch_key_tree:find_missing(
- Stemmed1,
- [{0, "1"}, {1, "1a"}, {100, "x"}]
- )
- ),
- ?_assertEqual(
- [{0, "1"}, {1, "1a"}, {100, "x"}],
- couch_key_tree:find_missing(
- Stemmed2,
- [{0, "1"}, {1, "1a"}, {100, "x"}]
- )
- )
- ].
-
-should_have_no_effect_on_removing_no_leaves() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {TwoChildSibs, []},
- couch_key_tree:remove_leafs(
- TwoChildSibs,
- []
- )
- ).
-
-should_have_no_effect_on_removing_non_existant_branch() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {TwoChildSibs, []},
- couch_key_tree:remove_leafs(
- TwoChildSibs,
- [{0, "1"}]
- )
- ).
-
-should_remove_leaf() ->
- OneChild = [{0, {"1", "foo", [{"1a", "bar", []}]}}],
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {OneChild, [{1, "1b"}]},
- couch_key_tree:remove_leafs(
- TwoChildSibs,
- [{1, "1b"}]
- )
- ).
-
-should_produce_empty_tree_on_removing_all_leaves() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {[], [{1, "1b"}, {1, "1a"}]},
- couch_key_tree:remove_leafs(
- TwoChildSibs,
- [{1, "1b"}, {1, "1a"}]
- )
- ).
-
-should_have_no_effect_on_removing_non_existant_node() ->
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- ?_assertEqual(
- {Stemmed, []},
- couch_key_tree:remove_leafs(
- Stemmed,
- [{1, "1a"}]
- )
- ).
-
-should_produce_empty_tree_on_removing_last_leaf() ->
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- ?_assertEqual(
- {[], [{2, "1aa"}]},
- couch_key_tree:remove_leafs(
- Stemmed,
- [{2, "1aa"}]
- )
- ).
-
-should_extract_subtree() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {[{"foo", {0, ["1"]}}], []},
- couch_key_tree:get(TwoChildSibs, [{0, "1"}])
- ).
-
-should_extract_subsubtree() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {[{"bar", {1, ["1a", "1"]}}], []},
- couch_key_tree:get(TwoChildSibs, [{1, "1a"}])
- ).
-
-should_gather_non_existant_leaf() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {[], [{0, "x"}]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])
- ).
-
-should_gather_leaf() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {[{"bar", {1, ["1a", "1"]}}], []},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])
- ).
-
-shoul_gather_multiple_leaves() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {[{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}], []},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])
- ).
-
-should_gather_single_leaf_for_multiple_revs() ->
- OneChild = [{0, {"1", "foo", [{"1a", "bar", []}]}}],
- ToFind = [{0, "1"}, {1, "1a"}],
- ?_assertEqual(
- {[{"bar", {1, ["1a", "1"]}}], []},
- couch_key_tree:get_key_leafs(OneChild, ToFind)
- ).
-
-should_gather_multiple_for_multiple_revs() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ToFind = [{0, "1"}, {1, "1a"}],
- ?_assertEqual(
- {[{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}], []},
- couch_key_tree:get_key_leafs(TwoChildSibs, ToFind)
- ).
-
-should_retrieve_full_key_path() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {[{0, [{"1", "foo"}]}], []},
- couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])
- ).
-
-should_retrieve_full_key_path_for_node() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(
- {[{1, [{"1a", "bar"}, {"1", "foo"}]}], []},
- couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])
- ).
-
-should_retrieve_leaves_with_parent_node() ->
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- [
- ?_assertEqual(
- [{2, [{"1aa", "bar"}, {"1a", "bar"}]}],
- couch_key_tree:get_all_leafs_full(Stemmed)
- ),
- ?_assertEqual(
- [
- {1, [{"1a", "bar"}, {"1", "foo"}]},
- {1, [{"1b", "bar"}, {"1", "foo"}]}
- ],
- couch_key_tree:get_all_leafs_full(TwoChildSibs)
- )
- ].
-
-should_retrieve_all_leaves() ->
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- [
- ?_assertEqual(
- [{"bar", {2, ["1aa", "1a"]}}],
- couch_key_tree:get_all_leafs(Stemmed)
- ),
- ?_assertEqual(
- [{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}],
- couch_key_tree:get_all_leafs(TwoChildSibs)
- )
- ].
-
-should_have_no_leaves_for_empty_tree() ->
- ?_assertEqual(0, couch_key_tree:count_leafs([])).
-
-should_have_single_leaf_for_tree_with_single_node() ->
- ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1", "foo", []}}])).
-
-should_have_two_leaves_for_tree_with_chindler_siblings() ->
- TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)).
-
-should_not_affect_on_leaf_counting_for_stemmed_tree() ->
- ?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])).
-
-should_have_no_effect_for_stemming_more_levels_than_exists() ->
- TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
- ?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)).
-
-should_return_one_deepest_node() ->
- TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
- Stemmed = [{2, {"1aa", "bar", []}}],
- ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)).
-
-should_return_two_deepest_nodes() ->
- TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
- Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)).
-
-merge_and_stem(RevTree, Tree) ->
- {Merged, Result} = couch_key_tree:merge(RevTree, Tree),
- {couch_key_tree:stem(Merged, ?DEPTH), Result}.
-
-should_not_use_excessive_memory_when_stemming() ->
- ?_test(begin
- % This is to preserve determinism
- Seed = {1647, 841737, 351137},
- Tree = generate_rev_tree(1000, 0.006, Seed),
- % Without the optimization #91de482fd66f4773b3b8583039c6bcaf1c5727ec
- % stemming would consume about 18_000_000 words. With it, it consumes
- % 6_000_000. So, use 13_000_000 as a threshold.
- Opts = [
- monitor,
- {max_heap_size, #{
- size => 13000000,
- error_logger => false,
- kill => true
- }}
- ],
- {_Pid, Ref} = spawn_opt(couch_key_tree, stem, [Tree, 1000], Opts),
- % When it uses too much memory exit would be `killed`
- Exit =
- receive
- {'DOWN', Ref, _, _, Res} -> Res
- end,
- ?assertEqual(normal, Exit)
- end).
-
-generate_rev_tree(Depth, BranchChance, Seed) ->
- rand:seed(exrop, Seed),
- [{1, revnode(Depth, BranchChance)}].
-
-revnode(0, _) ->
- {rev(), x, []};
-revnode(Depth, BranchChance) ->
- case rand:uniform() < BranchChance of
- true ->
- {rev(), x, [
- revnode(Depth - 1, BranchChance),
- revnode(Depth - 1, BranchChance)
- ]};
- false ->
- {rev(), x, [revnode(Depth - 1, BranchChance)]}
- end.
-
-rev() ->
- crypto:strong_rand_bytes(16).
diff --git a/src/couch/test/eunit/couch_passwords_tests.erl b/src/couch/test/eunit/couch_passwords_tests.erl
deleted file mode 100644
index 6b67a99e3..000000000
--- a/src/couch/test/eunit/couch_passwords_tests.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_passwords_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-pbkdf2_test_() ->
- {"PBKDF2", [
- {"Iterations: 1, length: 20",
- ?_assertEqual(
- {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20)
- )},
-
- {"Iterations: 2, length: 20",
- ?_assertEqual(
- {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20)
- )},
-
- {"Iterations: 4096, length: 20",
- ?_assertEqual(
- {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20)
- )},
-
- {"Iterations: 4096, length: 25",
- ?_assertEqual(
- {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
- couch_passwords:pbkdf2(
- <<"passwordPASSWORDpassword">>,
- <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
- 4096,
- 25
- )
- )},
- {"Null byte",
- ?_assertEqual(
- {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
- couch_passwords:pbkdf2(
- <<"pass\0word">>,
- <<"sa\0lt">>,
- 4096,
- 16
- )
- )},
-
- %% this may run too long on slow hosts
- {timeout, 600,
- {"Iterations: 16777216 - this may take some time",
- ?_assertEqual(
- {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
- )}}
- ]}.
diff --git a/src/couch/test/eunit/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl
deleted file mode 100644
index 01631ba28..000000000
--- a/src/couch/test/eunit/couch_query_servers_tests.erl
+++ /dev/null
@@ -1,154 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_query_servers_tests).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
-setup() ->
- meck:new([config, couch_log]).
-
-teardown(_) ->
- meck:unload().
-
-setup_oom() ->
- test_util:start_couch([ioq]).
-
-teardown_oom(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-sum_overflow_test_() ->
- {
- "Test overflow detection in the _sum reduce function",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_error_on_overflow/0,
- fun should_return_object_on_log/0,
- fun should_return_object_on_false/0
- ]
- }
- }.
-
-filter_oom_test_() ->
- {
- "Test recovery from oom in filters",
- {
- setup,
- fun setup_oom/0,
- fun teardown_oom/1,
- [
- fun should_split_large_batches/0
- ]
- }
- }.
-
-should_return_error_on_overflow() ->
- meck:reset([config, couch_log]),
- meck:expect(
- config,
- get,
- ["query_server_config", "reduce_limit", "true"],
- "true"
- ),
- meck:expect(couch_log, error, ['_', '_'], ok),
- KVs = gen_sum_kvs(),
- {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
- ?assertMatch({[{<<"error">>, <<"builtin_reduce_error">>} | _]}, Result),
- ?assert(meck:called(config, get, '_')),
- ?assert(meck:called(couch_log, error, '_')).
-
-should_return_object_on_log() ->
- meck:reset([config, couch_log]),
- meck:expect(
- config,
- get,
- ["query_server_config", "reduce_limit", "true"],
- "log"
- ),
- meck:expect(couch_log, error, ['_', '_'], ok),
- KVs = gen_sum_kvs(),
- {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
- ?assertMatch({[_ | _]}, Result),
- Keys = [K || {K, _} <- element(1, Result)],
- ?assert(not lists:member(<<"error">>, Keys)),
- ?assert(meck:called(config, get, '_')),
- ?assert(meck:called(couch_log, error, '_')).
-
-should_return_object_on_false() ->
- meck:reset([config, couch_log]),
- meck:expect(
- config,
- get,
- ["query_server_config", "reduce_limit", "true"],
- "false"
- ),
- meck:expect(couch_log, error, ['_', '_'], ok),
- KVs = gen_sum_kvs(),
- {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
- ?assertMatch({[_ | _]}, Result),
- Keys = [K || {K, _} <- element(1, Result)],
- ?assert(not lists:member(<<"error">>, Keys)),
- ?assert(meck:called(config, get, '_')),
- ?assertNot(meck:called(couch_log, error, '_')).
-
-should_split_large_batches() ->
- Req = {json_req, {[]}},
- Db = undefined,
- DDoc = #doc{
- id = <<"_design/foo">>,
- revs = {0, [<<"bork bork bork">>]},
- body =
- {[
- {<<"filters">>,
- {[
- {<<"bar">>, <<"function(req, doc) {return true;}">>}
- ]}}
- ]}
- },
- FName = <<"bar">>,
- Docs = [
- #doc{id = <<"a">>, body = {[]}},
- #doc{id = <<"b">>, body = {[]}}
- ],
- meck:new(couch_os_process, [passthrough]),
- meck:expect(couch_os_process, prompt, fun(Pid, Data) ->
- case Data of
- [<<"ddoc">>, _, [<<"filters">>, <<"bar">>], [[_, _], _]] ->
- throw({os_process_error, {exit_status, 1}});
- [<<"ddoc">>, _, [<<"filters">>, <<"bar">>], [[_], _]] ->
- [true, [split_batch]];
- _ ->
- meck:passthrough([Pid, Data])
- end
- end),
- {ok, Ret} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
- ?assertEqual([split_batch, split_batch], Ret).
-
-gen_sum_kvs() ->
- lists:map(
- fun(I) ->
- Props = lists:map(
- fun(_) ->
- K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)),
- {K, 1}
- end,
- lists:seq(1, 20)
- ),
- [I, {Props}]
- end,
- lists:seq(1, 10)
- ).
diff --git a/src/couch/test/eunit/couch_server_tests.erl b/src/couch/test/eunit/couch_server_tests.erl
deleted file mode 100644
index a43106d89..000000000
--- a/src/couch/test/eunit/couch_server_tests.erl
+++ /dev/null
@@ -1,290 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_server_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--include("../src/couch_db_int.hrl").
--include("../src/couch_server_int.hrl").
-
-start() ->
- Ctx = test_util:start_couch(),
- config:set("log", "include_sasl", "false", false),
- Ctx.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, []),
- Db.
-
-setup(rename) ->
- config:set("couchdb", "enable_database_recovery", "true", false),
- setup();
-setup(_) ->
- setup().
-
-teardown(Db) ->
- FilePath = couch_db:get_filepath(Db),
- (catch couch_db:close(Db)),
- (catch file:delete(FilePath)).
-
-teardown(rename, Db) ->
- config:set("couchdb", "enable_database_recovery", "false", false),
- teardown(Db);
-teardown(_, Db) ->
- teardown(Db).
-
-delete_db_test_() ->
- {
- "Test for proper deletion of db file",
- {
- setup,
- fun start/0,
- fun test_util:stop/1,
- [
- make_test_case(rename, [fun should_rename_on_delete/2]),
- make_test_case(delete, [fun should_delete/2])
- ]
- }
- }.
-
-make_test_case(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
- }.
-
-should_rename_on_delete(_, Db) ->
- DbName = couch_db:name(Db),
- Origin = couch_db:get_filepath(Db),
- ?_test(begin
- ?assert(filelib:is_regular(Origin)),
- ?assertMatch(ok, couch_server:delete(DbName, [])),
- ?assertNot(filelib:is_regular(Origin)),
- DeletedFiles = deleted_files(Origin),
- ?assertMatch([_], DeletedFiles),
- [Renamed] = DeletedFiles,
- ?assertEqual(
- filename:extension(Origin), filename:extension(Renamed)
- ),
- ?assert(filelib:is_regular(Renamed))
- end).
-
-should_delete(_, Db) ->
- DbName = couch_db:name(Db),
- Origin = couch_db:get_filepath(Db),
- ?_test(begin
- ?assert(filelib:is_regular(Origin)),
- ?assertMatch(ok, couch_server:delete(DbName, [])),
- ?assertNot(filelib:is_regular(Origin)),
- ?assertMatch([], deleted_files(Origin))
- end).
-
-deleted_files(ViewFile) ->
- filelib:wildcard(filename:rootname(ViewFile) ++ "*.deleted.*").
-
-bad_engine_option_test_() ->
- {
- setup,
- fun start/0,
- fun test_util:stop/1,
- [
- fun t_bad_engine_option/0
- ]
- }.
-
-t_bad_engine_option() ->
- Resp = couch_server:create(?tempdb(), [{engine, <<"cowabunga!">>}]),
- ?assertEqual(Resp, {error, {invalid_engine_extension, <<"cowabunga!">>}}).
-
-get_engine_path_test_() ->
- {
- setup,
- fun start/0,
- fun test_util:stop/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_engine_path/1,
- fun should_return_invalid_engine_error/1
- ]
- }
- }.
-
-should_return_engine_path(Db) ->
- DbName = couch_db:name(Db),
- Engine = couch_db_engine:get_engine(Db),
- Resp = couch_server:get_engine_path(DbName, Engine),
- FilePath = couch_db:get_filepath(Db),
- ?_assertMatch({ok, FilePath}, Resp).
-
-should_return_invalid_engine_error(Db) ->
- DbName = couch_db:name(Db),
- Engine = fake_engine,
- Resp = couch_server:get_engine_path(DbName, Engine),
- ?_assertMatch({error, {invalid_engine, Engine}}, Resp).
-
-interleaved_requests_test_() ->
- {
- setup,
- fun start_interleaved/0,
- fun stop_interleaved/1,
- fun make_interleaved_requests/1
- }.
-
-start_interleaved() ->
- TestDbName = ?tempdb(),
- meck:new(couch_db, [passthrough]),
- meck:expect(couch_db, start_link, fun(Engine, DbName, Filename, Options) ->
- case DbName of
- TestDbName ->
- receive
- go -> ok
- end,
- Res = meck:passthrough([Engine, DbName, Filename, Options]),
- % We're unlinking and sending a delayed
- % EXIT signal so that we can mimic a specific
- % message order in couch_server. On a test machine
- % this is a big race condition which affects the
- % ability to induce the bug.
- case Res of
- {ok, Db} ->
- DbPid = couch_db:get_pid(Db),
- unlink(DbPid),
- Msg = {'EXIT', DbPid, killed},
- erlang:send_after(2000, whereis(couch_server:couch_server(DbName)), Msg);
- _ ->
- ok
- end,
- Res;
- _ ->
- meck:passthrough([Engine, DbName, Filename, Options])
- end
- end),
- {test_util:start_couch(), TestDbName}.
-
-stop_interleaved({Ctx, TestDbName}) ->
- couch_server:delete(TestDbName, [?ADMIN_CTX]),
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-make_interleaved_requests({_, TestDbName}) ->
- [
- fun() -> t_interleaved_create_delete_open(TestDbName) end
- ].
-
-t_interleaved_create_delete_open(DbName) ->
- {CrtRef, OpenRef} = {make_ref(), make_ref()},
- CrtMsg = {'$gen_call', {self(), CrtRef}, {create, DbName, [?ADMIN_CTX]}},
- FakePid = spawn(fun() -> ok end),
- OpenResult = {open_result, DbName, {ok, #db{main_pid = FakePid}}},
- OpenResultMsg = {'$gen_call', {self(), OpenRef}, OpenResult},
-
- % Get the current couch_server pid so we're sure
- % to not end up messaging two different pids
- CouchServer = whereis(couch_server:couch_server(DbName)),
-
- % Start our first instance that will succeed in
- % an invalid state. Notice that the opener pid
- % spawned by couch_server:open_async/5 will halt
- % in our meck expect function waiting for a message.
- %
- % We're using raw message passing here so that we don't
- % have to coordinate multiple processes for this test.
- CouchServer ! CrtMsg,
- {ok, Opener} = get_opener_pid(DbName),
-
- % We have to suspend couch_server so that we can enqueue
- % our next requests and let the opener finish processing.
- erlang:suspend_process(CouchServer),
-
- % We queue a confused open_result message in front of
- % the correct response from the opener.
- CouchServer ! OpenResultMsg,
-
- % Release the opener pid so it can continue
- Opener ! go,
-
- % Wait for the '$gen_call' message from OpenerPid to arrive
- % in couch_server's mailbox
- ok = wait_for_open_async_result(CouchServer, Opener),
-
- % Now monitor and resume the couch_server and assert that
- % couch_server does not crash while processing OpenResultMsg
- CSRef = erlang:monitor(process, CouchServer),
- erlang:resume_process(CouchServer),
- check_monitor_not_triggered(CSRef),
-
- % Our open_result message was processed and ignored
- ?assertEqual({OpenRef, ok}, get_next_message()),
-
- % Our create request was processed normally after we
- % ignored the spurious open_result
- ?assertMatch({CrtRef, {ok, _}}, get_next_message()),
-
- % And finally assert that couch_server is still
- % alive.
- ?assert(is_process_alive(CouchServer)),
- check_monitor_not_triggered(CSRef).
-
-get_opener_pid(DbName) ->
- WaitFun = fun() ->
- case ets:lookup(couch_server:couch_dbs(DbName), DbName) of
- [#entry{pid = Pid}] ->
- {ok, Pid};
- [] ->
- wait
- end
- end,
- test_util:wait(WaitFun).
-
-wait_for_open_async_result(CouchServer, Opener) ->
- WaitFun = fun() ->
- {_, Messages} = erlang:process_info(CouchServer, messages),
- Found = lists:foldl(
- fun(Msg, Acc) ->
- case Msg of
- {'$gen_call', {Opener, _}, {open_result, _, {ok, _}}} ->
- true;
- _ ->
- Acc
- end
- end,
- false,
- Messages
- ),
- if
- Found -> ok;
- true -> wait
- end
- end,
- test_util:wait(WaitFun).
-
-check_monitor_not_triggered(Ref) ->
- receive
- {'DOWN', Ref, _, _, Reason0} ->
- erlang:error({monitor_triggered, Reason0})
- after 100 ->
- ok
- end.
-
-get_next_message() ->
- receive
- Msg ->
- Msg
- after 5000 ->
- erlang:error(timeout)
- end.
diff --git a/src/couch/test/eunit/couch_stream_tests.erl b/src/couch/test/eunit/couch_stream_tests.erl
deleted file mode 100644
index 4146a9139..000000000
--- a/src/couch/test/eunit/couch_stream_tests.erl
+++ /dev/null
@@ -1,128 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stream_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(ENGINE(FdVar), {couch_bt_engine_stream, {FdVar, []}}).
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Stream} = couch_stream:open(?ENGINE(Fd), []),
- {Fd, Stream}.
-
-teardown({Fd, _}) ->
- ok = couch_file:close(Fd).
-
-stream_test_() ->
- {
- "CouchDB stream tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end,
- fun test_util:stop/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_write/1,
- fun should_write_consecutive/1,
- fun should_write_empty_binary/1,
- fun should_return_file_pointers_on_close/1,
- fun should_return_stream_size_on_close/1,
- fun should_return_valid_pointers/1,
- fun should_recall_last_pointer_position/1,
- fun should_stream_more_with_4K_chunk_size/1,
- fun should_stop_on_normal_exit_of_stream_opener/1
- ]
- }
- }
- }.
-
-should_write({_, Stream}) ->
- ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)).
-
-should_write_consecutive({_, Stream}) ->
- couch_stream:write(Stream, <<"food">>),
- ?_assertEqual(ok, couch_stream:write(Stream, <<"foob">>)).
-
-should_write_empty_binary({_, Stream}) ->
- ?_assertEqual(ok, couch_stream:write(Stream, <<>>)).
-
-should_return_file_pointers_on_close({_, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {NewEngine, _, _, _, _} = couch_stream:close(Stream),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- ?_assertEqual([{0, 8}], Ptrs).
-
-should_return_stream_size_on_close({_, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {_, Length, _, _, _} = couch_stream:close(Stream),
- ?_assertEqual(8, Length).
-
-should_return_valid_pointers({_Fd, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {NewEngine, _, _, _, _} = couch_stream:close(Stream),
- ?_assertEqual(<<"foodfoob">>, read_all(NewEngine)).
-
-should_recall_last_pointer_position({Fd, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {_, _, _, _, _} = couch_stream:close(Stream),
- {ok, ExpPtr} = couch_file:bytes(Fd),
- {ok, Stream2} = couch_stream:open(?ENGINE(Fd)),
- ZeroBits = <<0:(8 * 10)>>,
- OneBits = <<1:(8 * 10)>>,
- ok = couch_stream:write(Stream2, OneBits),
- ok = couch_stream:write(Stream2, ZeroBits),
- {NewEngine, 20, _, _, _} = couch_stream:close(Stream2),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- [{ExpPtr, 20}] = Ptrs,
- AllBits = iolist_to_binary([OneBits, ZeroBits]),
- ?_assertEqual(AllBits, read_all(NewEngine)).
-
-should_stream_more_with_4K_chunk_size({Fd, _}) ->
- {ok, Stream} = couch_stream:open(?ENGINE(Fd), [{buffer_size, 4096}]),
- lists:foldl(
- fun(_, Acc) ->
- Data = <<"a1b2c">>,
- couch_stream:write(Stream, Data),
- [Data | Acc]
- end,
- [],
- lists:seq(1, 1024)
- ),
- {NewEngine, Length, _, _, _} = couch_stream:close(Stream),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120}, {Ptrs, Length}).
-
-should_stop_on_normal_exit_of_stream_opener({Fd, _}) ->
- RunnerPid = self(),
- OpenerPid = spawn(
- fun() ->
- {ok, StreamPid} = couch_stream:open(?ENGINE(Fd)),
- RunnerPid ! {pid, StreamPid}
- end
- ),
- StreamPid =
- receive
- {pid, StreamPid0} -> StreamPid0
- end,
- % Confirm the validity of the test by verifying the stream opener has died
- ?assertNot(is_process_alive(OpenerPid)),
- % Verify the stream itself has also died
- ?_assertNot(is_process_alive(StreamPid)).
-
-read_all(Engine) ->
- Data = couch_stream:foldl(Engine, fun(Bin, Acc) -> [Bin, Acc] end, []),
- iolist_to_binary(Data).
diff --git a/src/couch/test/eunit/couch_task_status_tests.erl b/src/couch/test/eunit/couch_task_status_tests.erl
deleted file mode 100644
index f888dd596..000000000
--- a/src/couch/test/eunit/couch_task_status_tests.erl
+++ /dev/null
@@ -1,243 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_task_status_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]),
- {ok, TaskStatusPid} = couch_task_status:start_link(),
- TaskUpdaterPid = spawn(fun() -> loop() end),
- {TaskStatusPid, TaskUpdaterPid, Ctx}.
-
-teardown({TaskStatusPid, _, Ctx}) ->
- test_util:stop_sync_throw(
- TaskStatusPid,
- fun() ->
- couch_task_status:stop()
- end,
- timeout_error,
- ?TIMEOUT
- ),
- test_util:stop(Ctx).
-
-couch_task_status_test_() ->
- {
- "CouchDB task status updates",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_register_task/1,
- fun should_set_task_startup_time/1,
- fun should_have_update_time_as_startup_before_any_progress/1,
- fun should_set_task_type/1,
- fun should_not_register_multiple_tasks_for_same_pid/1,
- fun should_set_task_progress/1,
- fun should_update_task_progress/1,
- fun should_update_time_changes_on_task_progress/1,
- %% fun should_control_update_frequency/1,
- fun should_reset_control_update_frequency/1,
- fun should_track_multiple_tasks/1,
- fun should_finish_task/1
- ]
- }
- }.
-
-should_register_task({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(1, length(couch_task_status:all())).
-
-should_set_task_startup_time({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assert(is_integer(get_task_prop(Pid, started_on))).
-
-should_have_update_time_as_startup_before_any_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- StartTime = get_task_prop(Pid, started_on),
- ?_assertEqual(StartTime, get_task_prop(Pid, updated_on)).
-
-should_set_task_type({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(replication, get_task_prop(Pid, type)).
-
-should_not_register_multiple_tasks_for_same_pid({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(
- {add_task_error, already_registered},
- call(Pid, add, [{type, compaction}, {progress, 0}])
- ).
-
-should_set_task_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(0, get_task_prop(Pid, progress)).
-
-should_update_task_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- call(Pid, update, [{progress, 25}]),
- ?_assertEqual(25, get_task_prop(Pid, progress)).
-
-should_update_time_changes_on_task_progress({_, Pid, _Ctx}) ->
- ?_assert(
- begin
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- % sleep a while so that the update time differs from the start time
- ok = timer:sleep(1000),
- call(Pid, update, [{progress, 25}]),
- get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on)
- end
- ).
-
-%%should_control_update_frequency({_, Pid, _Ctx}) ->
-%% ?_assertEqual(66,
-%% begin
-%% ok = call(Pid, add, [{type, replication}, {progress, 0}]),
-%% call(Pid, update, [{progress, 50}]),
-%% call(Pid, update_frequency, 500),
-%% call(Pid, update, [{progress, 66}]),
-%% call(Pid, update, [{progress, 77}]),
-%% get_task_prop(Pid, progress)
-%% end).
-
-should_reset_control_update_frequency({_, Pid, _Ctx}) ->
- ?_assertEqual(
- 87,
- begin
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- call(Pid, update, [{progress, 50}]),
- call(Pid, update_frequency, 500),
- call(Pid, update, [{progress, 66}]),
- call(Pid, update, [{progress, 77}]),
- call(Pid, update_frequency, 0),
- call(Pid, update, [{progress, 87}]),
- get_task_prop(Pid, progress)
- end
- ).
-
-should_track_multiple_tasks(_) ->
- ?_assert(run_multiple_tasks()).
-
-should_finish_task({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?assertEqual(1, length(couch_task_status:all())),
- ok = call(Pid, done),
- ?_assertEqual(0, length(couch_task_status:all())).
-
-run_multiple_tasks() ->
- Pid1 = spawn(fun() -> loop() end),
- Pid2 = spawn(fun() -> loop() end),
- Pid3 = spawn(fun() -> loop() end),
- call(Pid1, add, [{type, replication}, {progress, 0}]),
- call(Pid2, add, [{type, compaction}, {progress, 0}]),
- call(Pid3, add, [{type, indexer}, {progress, 0}]),
-
- ?assertEqual(3, length(couch_task_status:all())),
- ?assertEqual(replication, get_task_prop(Pid1, type)),
- ?assertEqual(compaction, get_task_prop(Pid2, type)),
- ?assertEqual(indexer, get_task_prop(Pid3, type)),
-
- call(Pid2, update, [{progress, 33}]),
- call(Pid3, update, [{progress, 42}]),
- call(Pid1, update, [{progress, 11}]),
- ?assertEqual(42, get_task_prop(Pid3, progress)),
- call(Pid1, update, [{progress, 72}]),
- ?assertEqual(72, get_task_prop(Pid1, progress)),
- ?assertEqual(33, get_task_prop(Pid2, progress)),
-
- call(Pid1, done),
- ?assertEqual(2, length(couch_task_status:all())),
- call(Pid3, done),
- ?assertEqual(1, length(couch_task_status:all())),
- call(Pid2, done),
- ?assertEqual(0, length(couch_task_status:all())),
-
- true.
-
-loop() ->
- receive
- {add, Props, From} ->
- Resp = couch_task_status:add_task(Props),
- From ! {ok, self(), Resp},
- loop();
- {update, Props, From} ->
- Resp = couch_task_status:update(Props),
- From ! {ok, self(), Resp},
- loop();
- {update_frequency, Msecs, From} ->
- Resp = couch_task_status:set_update_frequency(Msecs),
- From ! {ok, self(), Resp},
- loop();
- {done, From} ->
- From ! {ok, self(), ok}
- end.
-
-call(Pid, done) ->
- Ref = erlang:monitor(process, Pid),
- Pid ! {done, self()},
- Res = wait(Pid),
- receive
- {'DOWN', Ref, _Type, Pid, _Info} ->
- Res
- after ?TIMEOUT ->
- throw(timeout_error)
- end;
-call(Pid, Command) ->
- Pid ! {Command, self()},
- wait(Pid).
-
-call(Pid, Command, Arg) ->
- Pid ! {Command, Arg, self()},
- wait(Pid).
-
-wait(Pid) ->
- receive
- {ok, Pid, Msg} ->
- Msg
- after ?TIMEOUT ->
- throw(timeout_error)
- end.
-
-get_task_prop(Pid, Prop) ->
- From = list_to_binary(pid_to_list(Pid)),
- Element = lists:foldl(
- fun(PropList, Acc) ->
- case couch_util:get_value(pid, PropList) of
- From ->
- [PropList | Acc];
- _ ->
- Acc
- end
- end,
- [],
- couch_task_status:all()
- ),
- case couch_util:get_value(Prop, hd(Element), nil) of
- nil ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- "Could not get property '" ++
- couch_util:to_list(Prop) ++
- "' for task " ++
- pid_to_list(Pid)}
- ]}
- );
- Value ->
- Value
- end.
diff --git a/src/couch/test/eunit/couch_totp_tests.erl b/src/couch/test/eunit/couch_totp_tests.erl
deleted file mode 100644
index 6817a092a..000000000
--- a/src/couch/test/eunit/couch_totp_tests.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_totp_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-totp_sha_test() ->
- Key = <<"12345678901234567890">>,
- ?assertEqual(94287082, couch_totp:generate(sha, Key, 59, 30, 8)),
- ?assertEqual(07081804, couch_totp:generate(sha, Key, 1111111109, 30, 8)),
- ?assertEqual(14050471, couch_totp:generate(sha, Key, 1111111111, 30, 8)),
- ?assertEqual(89005924, couch_totp:generate(sha, Key, 1234567890, 30, 8)),
- ?assertEqual(69279037, couch_totp:generate(sha, Key, 2000000000, 30, 8)),
- ?assertEqual(65353130, couch_totp:generate(sha, Key, 20000000000, 30, 8)).
-
-totp_sha256_test() ->
- Key = <<"12345678901234567890123456789012">>,
- case sha_256_512_supported() of
- true ->
- ?assertEqual(46119246, couch_totp:generate(sha256, Key, 59, 30, 8)),
- ?assertEqual(68084774, couch_totp:generate(sha256, Key, 1111111109, 30, 8)),
- ?assertEqual(67062674, couch_totp:generate(sha256, Key, 1111111111, 30, 8)),
- ?assertEqual(91819424, couch_totp:generate(sha256, Key, 1234567890, 30, 8)),
- ?assertEqual(90698825, couch_totp:generate(sha256, Key, 2000000000, 30, 8)),
- ?assertEqual(77737706, couch_totp:generate(sha256, Key, 20000000000, 30, 8));
- false ->
- ?debugMsg("sha256 not supported, tests skipped")
- end.
-
-totp_sha512_test() ->
- Key = <<"1234567890123456789012345678901234567890123456789012345678901234">>,
- case sha_256_512_supported() of
- true ->
- ?assertEqual(90693936, couch_totp:generate(sha512, Key, 59, 30, 8)),
- ?assertEqual(25091201, couch_totp:generate(sha512, Key, 1111111109, 30, 8)),
- ?assertEqual(99943326, couch_totp:generate(sha512, Key, 1111111111, 30, 8)),
- ?assertEqual(93441116, couch_totp:generate(sha512, Key, 1234567890, 30, 8)),
- ?assertEqual(38618901, couch_totp:generate(sha512, Key, 2000000000, 30, 8)),
- ?assertEqual(47863826, couch_totp:generate(sha512, Key, 20000000000, 30, 8));
- false ->
- ?debugMsg("sha512 not supported, tests skipped")
- end.
-
-sha_256_512_supported() ->
- erlang:function_exported(crypto, hmac, 3).
diff --git a/src/couch/test/eunit/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl
deleted file mode 100644
index c07ddc093..000000000
--- a/src/couch/test/eunit/couch_util_tests.erl
+++ /dev/null
@@ -1,152 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_util_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-validate_callback_exists_test_() ->
- {
- "validate_callback_exists tests",
- [
- fun should_succeed_for_existent_cb/0,
- should_fail_for_missing_cb()
- ]
- }.
-
-to_existed_atom_test() ->
- ?assert(couch_util:to_existing_atom(true)),
- ?assertMatch(foo, couch_util:to_existing_atom(<<"foo">>)),
- ?assertMatch(foobarbaz, couch_util:to_existing_atom("foobarbaz")).
-
-implode_test() ->
- ?assertEqual([1, 38, 2, 38, 3], couch_util:implode([1, 2, 3], "&")).
-
-trim_test() ->
- lists:map(
- fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end,
- [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]
- ).
-
-abs_pathname_test() ->
- {ok, Cwd} = file:get_cwd(),
- ?assertEqual(Cwd ++ "/foo", couch_util:abs_pathname("./foo")).
-
-flush_test() ->
- ?assertNot(couch_util:should_flush()),
- AcquireMem = fun() ->
- _IntsToAGazillion = lists:seq(1, 200000),
- _LotsOfData = lists:map(
- fun(_) -> <<"foobar">> end,
- lists:seq(1, 500000)
- ),
- _ = list_to_binary(_LotsOfData),
-
-        %% Allocating 200K tuples puts us above the memory threshold.
-        %% Originally, the assertion here would be:
-        %% ?assertNot(should_flush())
-        %% however, unlike in the etap test, GC collects all allocated bits,
-        %% making that condition fail. So we have to invert the condition,
-        %% since GC works, cleans the memory and everything is fine.
- ?assertNot(couch_util:should_flush())
- end,
- AcquireMem(),
-
- %% Checking to flush invokes GC
- ?assertNot(couch_util:should_flush()).
-
-verify_test() ->
- ?assert(couch_util:verify("It4Vooya", "It4Vooya")),
- ?assertNot(couch_util:verify("It4VooyaX", "It4Vooya")),
- ?assert(couch_util:verify(<<"ahBase3r">>, <<"ahBase3r">>)),
- ?assertNot(couch_util:verify(<<"ahBase3rX">>, <<"ahBase3r">>)),
- ?assertNot(couch_util:verify(nil, <<"ahBase3r">>)).
-
-find_in_binary_test_() ->
- Cases = [
- {<<"foo">>, <<"foobar">>, {exact, 0}},
- {<<"foo">>, <<"foofoo">>, {exact, 0}},
- {<<"foo">>, <<"barfoo">>, {exact, 3}},
- {<<"foo">>, <<"barfo">>, {partial, 3}},
- {<<"f">>, <<"fobarfff">>, {exact, 0}},
- {<<"f">>, <<"obarfff">>, {exact, 4}},
- {<<"f">>, <<"obarggf">>, {exact, 6}},
- {<<"f">>, <<"f">>, {exact, 0}},
- {<<"f">>, <<"g">>, not_found},
- {<<"foo">>, <<"f">>, {partial, 0}},
- {<<"foo">>, <<"g">>, not_found},
- {<<"foo">>, <<"">>, not_found},
- {<<"fofo">>, <<"foofo">>, {partial, 3}},
- {<<"foo">>, <<"gfobarfo">>, {partial, 6}},
- {<<"foo">>, <<"gfobarf">>, {partial, 6}},
- {<<"foo">>, <<"gfobar">>, not_found},
- {<<"fog">>, <<"gbarfogquiz">>, {exact, 4}},
- {<<"ggg">>, <<"ggg">>, {exact, 0}},
- {<<"ggg">>, <<"ggggg">>, {exact, 0}},
- {<<"ggg">>, <<"bggg">>, {exact, 1}},
- {<<"ggg">>, <<"bbgg">>, {partial, 2}},
- {<<"ggg">>, <<"bbbg">>, {partial, 3}},
- {<<"ggg">>, <<"bgbggbggg">>, {exact, 6}},
- {<<"ggg">>, <<"bgbggb">>, not_found}
- ],
- lists:map(
- fun({Needle, Haystack, Result}) ->
- Msg = lists:flatten(
- io_lib:format(
- "Looking for ~s in ~s",
- [Needle, Haystack]
- )
- ),
- {Msg,
- ?_assertMatch(
- Result,
- couch_util:find_in_binary(Needle, Haystack)
- )}
- end,
- Cases
- ).
-
-should_succeed_for_existent_cb() ->
- ?_assert(couch_util:validate_callback_exists(lists, any, 2)).
-
-should_fail_for_missing_cb() ->
- Cases = [
- {unknown_module, any, 1},
- {erlang, unknown_function, 1},
- {erlang, whereis, 100}
- ],
- lists:map(
- fun({M, F, A} = MFA) ->
- Name = lists:flatten(io_lib:format("~w:~w/~w", [M, F, A])),
- {Name,
- ?_assertThrow(
- {error, {undefined_callback, Name, MFA}},
- couch_util:validate_callback_exists(M, F, A)
- )}
- end,
- Cases
- ).
-
-to_hex_test_() ->
- [
- ?_assertEqual("", couch_util:to_hex([])),
- ?_assertEqual("010203faff", couch_util:to_hex([1, 2, 3, 250, 255])),
- ?_assertEqual("", couch_util:to_hex(<<>>)),
- ?_assertEqual("010203faff", couch_util:to_hex(<<1, 2, 3, 250, 255>>))
- ].
-
-json_decode_test_() ->
- [
- ?_assertEqual({[]}, couch_util:json_decode(<<"{}">>)),
- ?_assertEqual({[]}, couch_util:json_decode(<<"{}">>, [])),
- ?_assertEqual(#{}, couch_util:json_decode(<<"{}">>, [return_maps]))
- ].
diff --git a/src/couch/test/eunit/couch_uuids_tests.erl b/src/couch/test/eunit/couch_uuids_tests.erl
deleted file mode 100644
index 6546779bb..000000000
--- a/src/couch/test/eunit/couch_uuids_tests.erl
+++ /dev/null
@@ -1,109 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_uuids_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(TIMEOUT, 20).
-
-setup_all() ->
- test_util:start_applications([config]),
- couch_uuids:start().
-
-teardown_all(_) ->
- couch_uuids:stop(),
- test_util:stop_applications([config]).
-
-uuids_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- {timeout, ?TIMEOUT, fun default_algorithm/0},
- {timeout, ?TIMEOUT, fun sequential_algorithm/0},
- {timeout, ?TIMEOUT, fun utc_algorithm/0},
- {timeout, ?TIMEOUT, fun utc_id_suffix_algorithm/0}
- ]
- }.
-
-default_algorithm() ->
- config:delete("uuids", "algorithm", false),
- check_unique().
-
-sequential_algorithm() ->
- config:set("uuids", "algorithm", "sequential", false),
- check_unique(),
- check_increment_monotonically(),
- check_rollover().
-
-utc_algorithm() ->
- config:set("uuids", "algorithm", "utc_random", false),
- check_unique(),
- check_increment_monotonically().
-
-utc_id_suffix_algorithm() ->
- config:set("uuids", "algorithm", "utc_id", false),
- config:set("uuids", "utc_id_suffix", "bozo", false),
- check_unique(),
- check_increment_monotonically(),
- check_preserve_suffix().
-
-check_unique() ->
-    %% this one may really run for too long on slow hosts
- ?assert(test_unique(10000, [couch_uuids:new()])).
-
-check_increment_monotonically() ->
- ?assert(couch_uuids:new() < couch_uuids:new()).
-
-check_rollover() ->
- UUID = binary_to_list(couch_uuids:new()),
- Prefix = element(1, lists:split(26, UUID)),
- N = gen_until_pref_change(Prefix, 0),
- ?assert(N >= 5000 andalso N =< 11000).
-
-check_preserve_suffix() ->
- UUID = binary_to_list(couch_uuids:new()),
- Suffix = get_suffix(UUID),
- ?assert(test_same_suffix(10000, Suffix)).
-
-test_unique(0, _) ->
- true;
-test_unique(N, UUIDs) ->
- UUID = couch_uuids:new(),
- ?assertNot(lists:member(UUID, UUIDs)),
- test_unique(N - 1, [UUID | UUIDs]).
-
-gen_until_pref_change(_, Count) when Count > 8251 ->
- Count;
-gen_until_pref_change(Prefix, N) ->
- case get_prefix(couch_uuids:new()) of
- Prefix -> gen_until_pref_change(Prefix, N + 1);
- _ -> N
- end.
-
-test_same_suffix(0, _) ->
- true;
-test_same_suffix(N, Suffix) ->
- case get_suffix(couch_uuids:new()) of
- Suffix -> test_same_suffix(N - 1, Suffix);
- _ -> false
- end.
-
-get_prefix(UUID) ->
- element(1, lists:split(26, binary_to_list(UUID))).
-
-get_suffix(UUID) when is_binary(UUID) ->
- get_suffix(binary_to_list(UUID));
-get_suffix(UUID) ->
- element(2, lists:split(14, UUID)).
diff --git a/src/couch/test/eunit/couch_work_queue_tests.erl b/src/couch/test/eunit/couch_work_queue_tests.erl
deleted file mode 100644
index acf0e45dc..000000000
--- a/src/couch/test/eunit/couch_work_queue_tests.erl
+++ /dev/null
@@ -1,416 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_work_queue_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(TIMEOUT, 100).
-
-setup(Opts) ->
- {ok, Q} = couch_work_queue:new(Opts),
- Producer = spawn_producer(Q),
- Consumer = spawn_consumer(Q),
- {Q, Producer, Consumer}.
-
-setup_max_items() ->
- setup([{max_items, 3}]).
-
-setup_max_size() ->
- setup([{max_size, 160}]).
-
-setup_max_items_and_size() ->
- setup([{max_size, 160}, {max_items, 3}]).
-
-setup_multi_workers() ->
- {Q, Producer, Consumer1} = setup([
- {max_size, 160},
- {max_items, 3},
- {multi_workers, true}
- ]),
- Consumer2 = spawn_consumer(Q),
- Consumer3 = spawn_consumer(Q),
- {Q, Producer, [Consumer1, Consumer2, Consumer3]}.
-
-teardown({Q, Producer, Consumers}) when is_list(Consumers) ->
- % consume all to unblock and let producer/consumer stop without timeout
- [consume(Consumer, all) || Consumer <- Consumers],
-
- ok = close_queue(Q),
- ok = stop(Producer, "producer"),
- R = [stop(Consumer, "consumer") || Consumer <- Consumers],
- R = [ok || _ <- Consumers],
- ok;
-teardown({Q, Producer, Consumer}) ->
- teardown({Q, Producer, [Consumer]}).
-
-single_consumer_test_() ->
- {
- "Single producer and consumer",
- [
- {
- "Queue with 3 max items",
- {
- foreach,
- fun setup_max_items/0,
- fun teardown/1,
- single_consumer_max_item_count() ++ common_cases()
- }
- },
- {
- "Queue with max size of 160 bytes",
- {
- foreach,
- fun setup_max_size/0,
- fun teardown/1,
- single_consumer_max_size() ++ common_cases()
- }
- },
- {
- "Queue with max size of 160 bytes and 3 max items",
- {
- foreach,
- fun setup_max_items_and_size/0,
- fun teardown/1,
- single_consumer_max_items_and_size() ++ common_cases()
- }
- }
- ]
- }.
-
-multiple_consumers_test_() ->
- {
- "Single producer and multiple consumers",
- [
- {
- "Queue with max size of 160 bytes and 3 max items",
- {
- foreach,
- fun setup_multi_workers/0,
- fun teardown/1,
- common_cases() ++ multiple_consumers()
- }
- }
- ]
- }.
-
-common_cases() ->
- [
- fun should_block_consumer_on_dequeue_from_empty_queue/1,
- fun should_consume_right_item/1,
- fun should_timeout_on_close_non_empty_queue/1,
- fun should_not_block_producer_for_non_empty_queue_after_close/1,
- fun should_be_closed/1
- ].
-
-single_consumer_max_item_count() ->
- [
- fun should_have_no_items_for_new_queue/1,
- fun should_block_producer_on_full_queue_count/1,
- fun should_receive_first_queued_item/1,
- fun should_consume_multiple_items/1,
- fun should_consume_all/1
- ].
-
-single_consumer_max_size() ->
- [
- fun should_have_zero_size_for_new_queue/1,
- fun should_block_producer_on_full_queue_size/1,
- fun should_increase_queue_size_on_produce/1,
- fun should_receive_first_queued_item/1,
- fun should_consume_multiple_items/1,
- fun should_consume_all/1
- ].
-
-single_consumer_max_items_and_size() ->
- single_consumer_max_item_count() ++ single_consumer_max_size().
-
-multiple_consumers() ->
- [
- fun should_have_zero_size_for_new_queue/1,
- fun should_have_no_items_for_new_queue/1,
- fun should_increase_queue_size_on_produce/1
- ].
-
-should_have_no_items_for_new_queue({Q, _, _}) ->
- ?_assertEqual(0, couch_work_queue:item_count(Q)).
-
-should_have_zero_size_for_new_queue({Q, _, _}) ->
- ?_assertEqual(0, couch_work_queue:size(Q)).
-
-should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumers}) when is_list(Consumers) ->
- [consume(C, 2) || C <- Consumers],
- Pongs = [ping(C) || C <- Consumers],
- ?_assertEqual([timeout, timeout, timeout], Pongs);
-should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumer}) ->
- consume(Consumer, 1),
- Pong = ping(Consumer),
- ?_assertEqual(timeout, Pong).
-
-should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) ->
- [consume(C, 3) || C <- Consumers],
-
- Item1 = produce(Q, Producer, 10, false),
- ok = ping(Producer),
- ?assertEqual(0, couch_work_queue:item_count(Q)),
- ?assertEqual(0, couch_work_queue:size(Q)),
-
- Item2 = produce(Q, Producer, 10, false),
- ok = ping(Producer),
- ?assertEqual(0, couch_work_queue:item_count(Q)),
- ?assertEqual(0, couch_work_queue:size(Q)),
-
- Item3 = produce(Q, Producer, 10, false),
- ok = ping(Producer),
- ?assertEqual(0, couch_work_queue:item_count(Q)),
- ?assertEqual(0, couch_work_queue:size(Q)),
-
- R = [
- {ping(C), Item}
- || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])
- ],
-
- ?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R);
-should_consume_right_item({Q, Producer, Consumer}) ->
- consume(Consumer, 1),
- Item = produce(Q, Producer, 10, false),
- produce(Q, Producer, 20, true),
- ok = ping(Producer),
- ok = ping(Consumer),
- {ok, Items} = last_consumer_items(Consumer),
- ?_assertEqual([Item], Items).
-
-should_increase_queue_size_on_produce({Q, Producer, _}) ->
- produce(Q, Producer, 50, true),
- ok = ping(Producer),
- Count1 = couch_work_queue:item_count(Q),
- Size1 = couch_work_queue:size(Q),
-
- produce(Q, Producer, 10, true),
- Count2 = couch_work_queue:item_count(Q),
- Size2 = couch_work_queue:size(Q),
-
- ?_assertEqual([{Count1, Size1}, {Count2, Size2}], [{1, 50}, {2, 60}]).
-
-should_block_producer_on_full_queue_count({Q, Producer, _}) ->
- produce(Q, Producer, 10, true),
- ?assertEqual(1, couch_work_queue:item_count(Q)),
- ok = ping(Producer),
-
- produce(Q, Producer, 15, true),
- ?assertEqual(2, couch_work_queue:item_count(Q)),
- ok = ping(Producer),
-
- produce(Q, Producer, 20, true),
- ?assertEqual(3, couch_work_queue:item_count(Q)),
- Pong = ping(Producer),
-
- ?_assertEqual(timeout, Pong).
-
-should_block_producer_on_full_queue_size({Q, Producer, _}) ->
- produce(Q, Producer, 100, true),
- ok = ping(Producer),
- ?assertEqual(1, couch_work_queue:item_count(Q)),
- ?assertEqual(100, couch_work_queue:size(Q)),
-
- produce(Q, Producer, 110, false),
- Pong = ping(Producer),
- ?assertEqual(2, couch_work_queue:item_count(Q)),
- ?assertEqual(210, couch_work_queue:size(Q)),
-
- ?_assertEqual(timeout, Pong).
-
-should_consume_multiple_items({Q, Producer, Consumer}) ->
- Item1 = produce(Q, Producer, 10, true),
- ok = ping(Producer),
-
- Item2 = produce(Q, Producer, 15, true),
- ok = ping(Producer),
-
- consume(Consumer, 2),
-
- {ok, Items} = last_consumer_items(Consumer),
- ?_assertEqual([Item1, Item2], Items).
-
-should_receive_first_queued_item({Q, Producer, Consumer}) ->
- consume(Consumer, 100),
- timeout = ping(Consumer),
-
- Item = produce(Q, Producer, 11, false),
- ok = ping(Producer),
-
- ok = ping(Consumer),
- ?assertEqual(0, couch_work_queue:item_count(Q)),
-
- {ok, Items} = last_consumer_items(Consumer),
- ?_assertEqual([Item], Items).
-
-should_consume_all({Q, Producer, Consumer}) ->
- Item1 = produce(Q, Producer, 10, true),
- Item2 = produce(Q, Producer, 15, true),
- Item3 = produce(Q, Producer, 20, true),
-
- consume(Consumer, all),
-
- {ok, Items} = last_consumer_items(Consumer),
- ?_assertEqual([Item1, Item2, Item3], Items).
-
-should_timeout_on_close_non_empty_queue({Q, Producer, _}) ->
- produce(Q, Producer, 1, true),
- Status = close_queue(Q),
-
- ?_assertEqual(timeout, Status).
-
-should_not_block_producer_for_non_empty_queue_after_close({Q, Producer, _}) ->
- produce(Q, Producer, 1, true),
- close_queue(Q),
- Pong = ping(Producer),
- Size = couch_work_queue:size(Q),
- Count = couch_work_queue:item_count(Q),
-
- ?_assertEqual({ok, 1, 1}, {Pong, Size, Count}).
-
-should_be_closed({Q, _, Consumers}) when is_list(Consumers) ->
- ok = close_queue(Q),
-
- [consume(C, 1) || C <- Consumers],
-
- LastConsumerItems = [last_consumer_items(C) || C <- Consumers],
- ItemsCount = couch_work_queue:item_count(Q),
- Size = couch_work_queue:size(Q),
-
- ?_assertEqual(
- {[closed, closed, closed], closed, closed},
- {LastConsumerItems, ItemsCount, Size}
- );
-should_be_closed({Q, _, Consumer}) ->
- ok = close_queue(Q),
-
- consume(Consumer, 1),
-
- LastConsumerItems = last_consumer_items(Consumer),
- ItemsCount = couch_work_queue:item_count(Q),
- Size = couch_work_queue:size(Q),
-
- ?_assertEqual(
- {closed, closed, closed},
- {LastConsumerItems, ItemsCount, Size}
- ).
-
-close_queue(Q) ->
- test_util:stop_sync(
- Q,
- fun() ->
- ok = couch_work_queue:close(Q)
- end,
- ?TIMEOUT
- ).
-
-spawn_consumer(Q) ->
- Parent = self(),
- spawn(fun() -> consumer_loop(Parent, Q, nil) end).
-
-consumer_loop(Parent, Q, PrevItem) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref};
- {ping, Ref} ->
- Parent ! {pong, Ref},
- consumer_loop(Parent, Q, PrevItem);
- {last_item, Ref} ->
- Parent ! {item, Ref, PrevItem},
- consumer_loop(Parent, Q, PrevItem);
- {consume, N} ->
- Result = couch_work_queue:dequeue(Q, N),
- consumer_loop(Parent, Q, Result)
- end.
-
-spawn_producer(Q) ->
- Parent = self(),
- spawn(fun() -> producer_loop(Parent, Q) end).
-
-producer_loop(Parent, Q) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref};
- {ping, Ref} ->
- Parent ! {pong, Ref},
- producer_loop(Parent, Q);
- {produce, Ref, Size} ->
- Item = crypto:strong_rand_bytes(Size),
- Parent ! {item, Ref, Item},
- ok = couch_work_queue:queue(Q, Item),
- producer_loop(Parent, Q)
- end.
-
-consume(Consumer, N) ->
- Consumer ! {consume, N}.
-
-last_consumer_items(Consumer) ->
- Ref = make_ref(),
- Consumer ! {last_item, Ref},
- receive
- {item, Ref, Items} ->
- Items
- after ?TIMEOUT ->
- timeout
- end.
-
-produce(Q, Producer, Size, Wait) ->
- Ref = make_ref(),
- ItemsCount = couch_work_queue:item_count(Q),
- Producer ! {produce, Ref, Size},
- receive
- {item, Ref, Item} when Wait ->
- ok = wait_increment(Q, ItemsCount),
- Item;
- {item, Ref, Item} ->
- Item
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout asking producer to produce an item"}
- ]}
- )
- end.
-
-ping(Pid) ->
- Ref = make_ref(),
- Pid ! {ping, Ref},
- receive
- {pong, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-stop(Pid, Name) ->
- Ref = make_ref(),
- Pid ! {stop, Ref},
- receive
- {ok, Ref} -> ok
- after ?TIMEOUT ->
- ?debugMsg("Timeout stopping " ++ Name),
- timeout
- end.
-
-wait_increment(Q, ItemsCount) ->
- test_util:wait(fun() ->
- case couch_work_queue:item_count(Q) > ItemsCount of
- true ->
- ok;
- false ->
- wait
- end
- end).
diff --git a/src/couch/test/eunit/couchdb_attachments_tests.erl b/src/couch/test/eunit/couchdb_attachments_tests.erl
deleted file mode 100644
index 376553985..000000000
--- a/src/couch/test/eunit/couchdb_attachments_tests.erl
+++ /dev/null
@@ -1,851 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_attachments_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(COMPRESSION_LEVEL, 8).
--define(ATT_BIN_NAME, <<"logo.png">>).
--define(ATT_TXT_NAME, <<"file.erl">>).
--define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])).
--define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
--define(TIMEOUT, 5000).
--define(TIMEOUT_EUNIT, 100).
--define(TIMEWAIT, 1000).
--define(i2l(I), integer_to_list(I)).
-
-start() ->
- Ctx = test_util:start_couch(),
-    % ensure default compression settings for attachments_compression_tests
- config:set(
- "attachments",
- "compression_level",
- ?i2l(?COMPRESSION_LEVEL),
- false
- ),
- config:set("attachments", "compressible_types", "text/*", false),
- Ctx.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, []),
- ok = couch_db:close(Db),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- Host = Addr ++ ":" ++ ?i2l(Port),
- {Host, ?b2l(DbName)}.
-
-setup({binary, standalone}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG);
-setup({text, standalone}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT);
-setup({binary, inline}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_inline_png_att/2, Host, DbName, ?FIXTURE_PNG);
-setup({text, inline}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_inline_text_att/2, Host, DbName, ?FIXTURE_TXT);
-setup(compressed) ->
- {Host, DbName} = setup(),
- setup_att(fun create_already_compressed_att/2, Host, DbName, ?FIXTURE_TXT).
-setup_att(Fun, Host, DbName, File) ->
- HttpHost = "http://" ++ Host,
- AttUrl = Fun(HttpHost, DbName),
- {ok, Data} = file:read_file(File),
- DocUrl = string:join([HttpHost, DbName, "doc"], "/"),
- Helpers = {DbName, DocUrl, AttUrl},
- {Data, Helpers}.
-
-teardown(_, {_, {DbName, _, _}}) ->
- teardown(DbName).
-
-teardown({_, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(?l2b(DbName), []),
- ok.
-
-attachments_test_() ->
- {
- "Attachments tests",
- {
- setup,
- fun start/0,
- fun test_util:stop_couch/1,
- [
- attachments_md5_tests(),
- attachments_compression_tests()
- ]
- }
- }.
-
-attachments_md5_tests() ->
- {
- "Attachments MD5 tests",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_upload_attachment_without_md5/1,
- fun should_upload_attachment_by_chunks_without_md5/1,
- fun should_upload_attachment_with_valid_md5_header/1,
- fun should_upload_attachment_by_chunks_with_valid_md5_header/1,
- fun should_upload_attachment_by_chunks_with_valid_md5_trailer/1,
- fun should_reject_attachment_with_invalid_md5/1,
- fun should_reject_chunked_attachment_with_invalid_md5/1,
- fun should_reject_chunked_attachment_with_invalid_md5_trailer/1
- ]
- }
- }.
-
-attachments_compression_tests() ->
- Funs = [
- fun should_get_att_without_accept_gzip_encoding/2,
- fun should_get_att_with_accept_gzip_encoding/2,
- fun should_get_att_with_accept_deflate_encoding/2,
- fun should_return_406_response_on_unsupported_encoding/2,
- fun should_get_doc_with_att_data/2,
- fun should_get_doc_with_att_data_stub/2
- ],
- {
- "Attachments compression tests",
- [
- {
- "Created via Attachments API",
- created_attachments_compression_tests(standalone, Funs)
- },
- {
- "Created inline via Document API",
- created_attachments_compression_tests(inline, Funs)
- },
- {
- "Created already been compressed via Attachments API",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{compressed, Fun} || Fun <- Funs]
- }
- },
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_not_create_compressed_att_with_deflate_encoding/1,
- fun should_not_create_compressed_att_with_compress_encoding/1,
- fun should_create_compressible_att_with_ctype_params/1
- ]
- }
- ]
- }.
-
-created_attachments_compression_tests(Mod, Funs) ->
- [
- {
- "Compressiable attachments",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{{text, Mod}, Fun} || Fun <- Funs]
- }
- },
- {
- "Uncompressiable attachments",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{{binary, Mod}, Fun} || Fun <- Funs]
- }
- }
- ].
-
-should_upload_attachment_without_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_without_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Transfer-Encoding", "chunked"},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_with_valid_md5_header({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(Body)))},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_with_valid_md5_header({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(AttData)))},
- {"Host", Host},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [
- chunked_body([Part1, Part2]),
- "Content-MD5: ",
- base64:encode(couch_hash:md5_hash(AttData)),
- "\r\n\r\n"
- ],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Host", Host},
- {"Trailer", "Content-MD5"},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_reject_attachment_with_invalid_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(
- <<"content_md5_mismatch">>,
- get_json(Json, [<<"error">>])
- )
- end).
-
-should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
- {"Host", Host},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(
- <<"content_md5_mismatch">>,
- get_json(Json, [<<"error">>])
- )
- end).
-
-should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [
- chunked_body([Part1, Part2]),
- "Content-MD5: ",
- base64:encode(<<"foobar!">>),
- "\r\n\r\n"
- ],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Host", Host},
- {"Trailer", "Content-MD5"},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>, get_json(Json, [<<"error">>]))
- end).
-
-should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(AttUrl),
- ?assertEqual(200, Code),
- ?assertNot(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]
- ),
- ?assertEqual(200, Code),
- ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
- end);
-should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]
- ),
- ?assertEqual(200, Code),
- ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
- end);
-should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]
- ),
- ?assertEqual(200, Code),
- ?assertEqual(
- undefined,
- couch_util:get_value("Content-Encoding", Headers)
- ),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "deflate"}]
- ),
- ?assertEqual(200, Code),
- ?assertEqual(
- undefined,
- couch_util:get_value("Content-Encoding", Headers)
- ),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) ->
- ?_assertEqual(
- 406,
- begin
- {ok, Code, _, _} = test_request:get(
- AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]
- ),
- Code
- end
- ).
-
-should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]
- ),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]
- ),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]
- ),
- ?assertEqual(
- <<"text/plain">>,
- couch_util:get_nested_json_value(AttJson, [<<"content_type">>])
- ),
- ?assertEqual(Data, base64:decode(AttData))
- end);
-should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]
- ),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]
- ),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]
- ),
- ?assertEqual(
- <<"text/plain">>,
- couch_util:get_nested_json_value(AttJson, [<<"content_type">>])
- ),
- ?assertEqual(Data, base64:decode(AttData))
- end);
-should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]
- ),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_BIN_NAME]
- ),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]
- ),
- ?assertEqual(
- <<"image/png">>,
- couch_util:get_nested_json_value(AttJson, [<<"content_type">>])
- ),
- ?assertEqual(Data, base64:decode(AttData))
- end).
-
-should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]
- ),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]
- ),
- ?assertEqual(
- <<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)
- ),
- AttLength = couch_util:get_value(<<"length">>, AttJson),
- EncLength = couch_util:get_value(<<"encoded_length">>, AttJson),
- ?assertEqual(AttLength, EncLength),
- ?assertEqual(iolist_size(zlib:gzip(Data)), AttLength)
- end);
-should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]
- ),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]
- ),
- ?assertEqual(
- <<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)
- ),
- AttEncLength = iolist_size(gzip(Data)),
- ?assertEqual(
- AttEncLength,
- couch_util:get_value(<<"encoded_length">>, AttJson)
- ),
- ?assertEqual(
- byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson)
- )
- end);
-should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]
- ),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_BIN_NAME]
- ),
- ?assertEqual(
- undefined,
- couch_util:get_value(<<"encoding">>, AttJson)
- ),
- ?assertEqual(
- undefined,
- couch_util:get_value(<<"encoded_length">>, AttJson)
- ),
- ?assertEqual(
- byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson)
- )
- end).
-
-should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) ->
- ?_assertEqual(
- 415,
- begin
- HttpHost = "http://" ++ Host,
- AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Body = zlib:compress(Data),
- Headers = [
- {"Content-Encoding", "deflate"},
- {"Content-Type", "text/plain"}
- ],
- {ok, Code, _, _} = test_request:put(AttUrl, Headers, Body),
- Code
- end
- ).
-
-should_not_create_compressed_att_with_compress_encoding({Host, DbName}) ->
- % Note: As of OTP R13B04, it seems there's no LZW compression
- % (i.e. UNIX compress utility implementation) lib in OTP.
- % However there's a simple working Erlang implementation at:
- % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php
- ?_assertEqual(
- 415,
- begin
- HttpHost = "http://" ++ Host,
- AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Headers = [
- {"Content-Encoding", "compress"},
- {"Content-Type", "text/plain"}
- ],
- {ok, Code, _, _} = test_request:put(AttUrl, Headers, Data),
- Code
- end
- ).
-
-should_create_compressible_att_with_ctype_params({Host, DbName}) ->
- {timeout, ?TIMEOUT_EUNIT,
- ?_test(begin
- HttpHost = "http://" ++ Host,
- DocUrl = string:join([HttpHost, DbName, ?docid()], "/"),
- AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Headers = [{"Content-Type", "text/plain; charset=UTF-8"}],
- {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data),
- ?assertEqual(201, Code0),
-
- {ok, Code1, _, Body} = test_request:get(
- DocUrl ++ "?att_encoding_info=true"
- ),
- ?assertEqual(200, Code1),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]
- ),
- ?assertEqual(
- <<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)
- ),
- AttEncLength = iolist_size(gzip(Data)),
- ?assertEqual(
- AttEncLength,
- couch_util:get_value(<<"encoded_length">>, AttJson)
- ),
- ?assertEqual(
- byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson)
- )
- end)}.
-
-compact_after_lowering_attachment_size_limit_test_() ->
- {
- "Compact after lowering attachment size limit",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch(),
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- {Ctx, DbName}
- end,
- fun({Ctx, DbName}) ->
- config:delete("couchdb", "max_attachment_size"),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx)
- end,
- [
- fun should_compact_after_lowering_attachment_size_limit/1
- ]
- }
- }.
-
-should_compact_after_lowering_attachment_size_limit({_Ctx, DbName}) ->
- {timeout, ?TIMEOUT_EUNIT,
- ?_test(begin
- {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = #doc{id = <<"doc1">>, atts = att(1000)},
- {ok, _} = couch_db:update_doc(Db1, Doc1, []),
- couch_db:close(Db1),
- config:set("couchdb", "max_attachment_size", "1", _Persist = false),
- compact_db(DbName),
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>),
- couch_db:close(Db2),
- [Att] = Doc2#doc.atts,
- ?assertEqual(1000, couch_att:fetch(att_len, Att))
- end)}.
-
-att(Size) when is_integer(Size), Size >= 1 ->
- [
- couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, fun(_Bytes) ->
- <<<<"x">> || _ <- lists:seq(1, Size)>>
- end}
- ])
- ].
-
-compact_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db).
-
-wait_compaction(DbName, Kind, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, ?TIMEOUT) of
- timeout ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, Line},
- {reason,
- "Timeout waiting for " ++
- Kind ++
-                            " compaction"}
- ]}
- );
- _ ->
- ok
- end.
-
-is_compaction_running(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- couch_db:close(Db),
- couch_util:get_value(compact_running, DbInfo) =:= true.
-
-internal_replication_after_lowering_attachment_size_limit_test_() ->
- {
- "Internal replication after lowering max attachment size",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch([mem3]),
- SrcName = ?tempdb(),
- {ok, SrcDb} = couch_db:create(SrcName, [?ADMIN_CTX]),
- ok = couch_db:close(SrcDb),
- TgtName = ?tempdb(),
- {ok, TgtDb} = couch_db:create(TgtName, [?ADMIN_CTX]),
- ok = couch_db:close(TgtDb),
- {Ctx, SrcName, TgtName}
- end,
- fun({Ctx, SrcName, TgtName}) ->
- config:delete("couchdb", "max_attachment_size"),
- ok = couch_server:delete(SrcName, [?ADMIN_CTX]),
- ok = couch_server:delete(TgtName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx)
- end,
- [
- fun should_replicate_after_lowering_attachment_size/1
- ]
- }
- }.
-
-should_replicate_after_lowering_attachment_size({_Ctx, SrcName, TgtName}) ->
- {timeout, ?TIMEOUT_EUNIT,
- ?_test(begin
- {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]),
- SrcDoc = #doc{id = <<"doc">>, atts = att(1000)},
- {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []),
- couch_db:close(SrcDb),
- config:set("couchdb", "max_attachment_size", "1", _Persist = false),
- % Create a pair of "fake" shards
- SrcShard = #shard{name = SrcName, node = node()},
- TgtShard = #shard{name = TgtName, node = node()},
- mem3_rep:go(SrcShard, TgtShard, []),
- {ok, TgtDb} = couch_db:open_int(TgtName, []),
- {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>),
- couch_db:close(TgtDb),
- [Att] = TgtDoc#doc.atts,
- ?assertEqual(1000, couch_att:fetch(att_len, Att))
- end)}.
-
-get_json(Json, Path) ->
- couch_util:get_nested_json_value(Json, Path).
-
-to_hex(Val) ->
- to_hex(Val, []).
-
-to_hex(0, Acc) ->
- Acc;
-to_hex(Val, Acc) ->
- to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
-
-hex_char(V) when V < 10 -> $0 + V;
-hex_char(V) -> $A + V - 10.
-
-chunked_body(Chunks) ->
- chunked_body(Chunks, []).
-
-chunked_body([], Acc) ->
- iolist_to_binary(lists:reverse(Acc, "0\r\n"));
-chunked_body([Chunk | Rest], Acc) ->
- Size = to_hex(size(Chunk)),
- chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
-
-get_socket() ->
- Options = [binary, {packet, 0}, {active, false}],
- Port = mochiweb_socket_server:get(couch_httpd, port),
- {ok, Sock} = gen_tcp:connect(bind_address(), Port, Options),
- Sock.
-
-bind_address() ->
- case config:get("httpd", "bind_address") of
- undefined -> any;
- Address -> Address
- end.
-
-request(Method, Url, Headers, Body) ->
- RequestHead = [Method, " ", Url, " HTTP/1.1"],
- RequestHeaders = [
- [string:join([Key, Value], ": "), "\r\n"]
- || {Key, Value} <- Headers
- ],
- Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body],
- Sock = get_socket(),
- gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))),
- % must wait to receive complete response
- timer:sleep(?TIMEWAIT),
- {ok, R} = gen_tcp:recv(Sock, 0),
- gen_tcp:close(Sock),
- [Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]),
- {ok, {http_response, _, Code, _}, _} =
- erlang:decode_packet(http, Header, []),
- Json = jiffy:decode(Body1),
- {ok, Code, Json}.
-
-create_standalone_text_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "text/plain"}], Data
- ),
- ?assertEqual(201, Code),
- Url.
-
-create_standalone_png_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_PNG),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "image/png"}], Data
- ),
- ?assertEqual(201, Code),
- Url.
-
-create_inline_text_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc"], "/"),
- Doc =
- {[
- {<<"_attachments">>,
- {[
- {?ATT_TXT_NAME,
- {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"data">>, base64:encode(Data)}
- ]}}
- ]}}
- ]},
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)
- ),
- ?assertEqual(201, Code),
- string:join([Url, ?b2l(?ATT_TXT_NAME)], "/").
-
-create_inline_png_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_PNG),
- Url = string:join([Host, DbName, "doc"], "/"),
- Doc =
- {[
- {<<"_attachments">>,
- {[
- {?ATT_BIN_NAME,
- {[
- {<<"content_type">>, <<"image/png">>},
- {<<"data">>, base64:encode(Data)}
- ]}}
- ]}}
- ]},
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)
- ),
- ?assertEqual(201, Code),
- string:join([Url, ?b2l(?ATT_BIN_NAME)], "/").
-
-create_already_compressed_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url,
- [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}],
- zlib:gzip(Data)
- ),
- ?assertEqual(201, Code),
- Url.
-
-gzip(Data) ->
- Z = zlib:open(),
- ok = zlib:deflateInit(Z, ?COMPRESSION_LEVEL, deflated, 16 + 15, 8, default),
- Chunk = zlib:deflate(Z, Data),
- Last = zlib:deflate(Z, [], finish),
- ok = zlib:deflateEnd(Z),
- ok = zlib:close(Z),
- [Chunk, Last].
diff --git a/src/couch/test/eunit/couchdb_auth_tests.erl b/src/couch/test/eunit/couchdb_auth_tests.erl
deleted file mode 100644
index dfb22dc25..000000000
--- a/src/couch/test/eunit/couchdb_auth_tests.erl
+++ /dev/null
@@ -1,132 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_auth_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-setup(PortType) ->
- Hashed = couch_passwords:hash_admin_password("artischocko"),
- ok = config:set("admins", "rocko", binary_to_list(Hashed), _Persist = false),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- lists:concat(["http://", Addr, ":", port(PortType), "/_session"]).
-
-setup_require_valid_user(PortType) ->
- ok = config:set("chttpd", "require_valid_user", "true", _Persist = false),
- setup(PortType).
-
-teardown(_, _) ->
- ok.
-
-teardown_require_valid_user(_, _) ->
- config:set("chttpd", "require_valid_user", "false", _Persist = false).
-
-auth_test_() ->
- Tests = [
- fun should_return_username_on_post_to_session/2,
- fun should_not_return_authenticated_field/2,
- fun should_return_list_of_handlers/2
- ],
- RequireValidUserTests = [
- % See #1947 - this should work even with require_valid_user
- fun should_return_username_on_post_to_session/2
- ],
- {
- "Auth tests",
- {
- setup,
- fun() -> test_util:start_couch([chttpd]) end,
- fun test_util:stop_couch/1,
- [
- make_test_cases(clustered, Tests),
- make_test_cases(backdoor, Tests),
- make_require_valid_user_test_cases(clustered, RequireValidUserTests)
- ]
- }
- }.
-
-make_test_cases(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
- }.
-
-make_require_valid_user_test_cases(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s require_valid_user=true", [Mod])),
- {foreachx, fun setup_require_valid_user/1, fun teardown_require_valid_user/2, [
- {Mod, Fun}
- || Fun <- Funs
- ]}
- }.
-
-should_return_username_on_post_to_session(_PortType, Url) ->
- ?_assertEqual(
- <<"rocko">>,
- begin
- Hashed = couch_passwords:hash_admin_password(<<"artischocko">>),
- ok = config:set("admins", "rocko", binary_to_list(Hashed), false),
- {ok, _, _, Body} = test_request:post(
- Url,
- [{"Content-Type", "application/json"}],
- "{\"name\":\"rocko\", \"password\":\"artischocko\"}"
- ),
- {Json} = jiffy:decode(Body),
- proplists:get_value(<<"name">>, Json)
- end
- ).
-
-should_not_return_authenticated_field(_PortType, Url) ->
- ?_assertThrow(
- {not_found, _},
- begin
- couch_util:get_nested_json_value(session(Url), [
- <<"info">>, <<"authenticated">>
- ])
- end
- ).
-
-should_return_list_of_handlers(backdoor, Url) ->
- ?_assertEqual(
- [<<"cookie">>, <<"default">>],
- begin
- couch_util:get_nested_json_value(session(Url), [
- <<"info">>, <<"authentication_handlers">>
- ])
- end
- );
-should_return_list_of_handlers(clustered, Url) ->
- ?_assertEqual(
- [<<"cookie">>, <<"default">>],
- begin
- couch_util:get_nested_json_value(session(Url), [
- <<"info">>, <<"authentication_handlers">>
- ])
- end
- ).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-session(Url) ->
- {ok, _, _, Body} = test_request:get(
- Url,
- [{"Content-Type", "application/json"}],
- "{\"name\":\"rocko\", \"password\":\"artischocko\"}"
- ),
- jiffy:decode(Body).
-
-port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
diff --git a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
deleted file mode 100755
index 17c41dafe..000000000
--- a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_cookie_domain_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "cookie_domain_test_admin").
--define(PASS, "pass").
-
-setup() ->
- Ctx = test_util:start_couch([chttpd]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = ?l2b(io_lib:format("http://~s:~b/_session", [Addr, Port])),
- ContentType = [{"Content-Type", "application/json"}],
- Payload = jiffy:encode({[{name, ?l2b(?USER)}, {password, ?l2b(?PASS)}]}),
- {ok, ?b2l(Url), ContentType, ?b2l(Payload), Ctx}.
-
-teardown({ok, _, _, _, Ctx}) ->
- ok = config:delete("admins", ?USER, _Persist = false),
- test_util:stop_couch(Ctx).
-
-cookie_test_() ->
- {
- "Cookie domain tests",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- fun({ok, Url, ContentType, Payload, _}) ->
- [
- should_set_cookie_domain(Url, ContentType, Payload),
- should_not_set_cookie_domain(Url, ContentType, Payload),
- should_delete_cookie_domain(Url, ContentType, Payload)
- ]
- end
- }
- }.
-
-should_set_cookie_domain(Url, ContentType, Payload) ->
- ?_test(begin
- ok = config:set(
- "couch_httpd_auth",
- "cookie_domain",
- "example.com",
- false
- ),
- {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload),
- ?assertEqual(200, Code),
- Cookie = proplists:get_value("Set-Cookie", Headers),
- ?assert(string:str(Cookie, "; Domain=example.com") > 0)
- end).
-
-should_not_set_cookie_domain(Url, ContentType, Payload) ->
- ?_test(begin
- ok = config:set("couch_httpd_auth", "cookie_domain", "", false),
- {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload),
- ?assertEqual(200, Code),
- Cookie = proplists:get_value("Set-Cookie", Headers),
- ?assertEqual(0, string:str(Cookie, "; Domain="))
- end).
-
-should_delete_cookie_domain(Url, ContentType, Payload) ->
- ?_test(begin
- ok = config:set(
- "couch_httpd_auth",
- "cookie_domain",
- "example.com",
- false
- ),
- {ok, Code, Headers, _} = test_request:delete(Url, ContentType, Payload),
- ?assertEqual(200, Code),
- Cookie = proplists:get_value("Set-Cookie", Headers),
- ?assert(string:str(Cookie, "; Domain=example.com") > 0)
- end).
diff --git a/src/couch/test/eunit/couchdb_cors_tests.erl b/src/couch/test/eunit/couchdb_cors_tests.erl
deleted file mode 100644
index dce07fd28..000000000
--- a/src/couch/test/eunit/couchdb_cors_tests.erl
+++ /dev/null
@@ -1,431 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_cors_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--include_lib("chttpd/include/chttpd_cors.hrl").
-
--define(TIMEOUT, 1000).
-
--define(_assertEqualLists(A, B),
- ?_assertEqual(lists:usort(A), lists:usort(B))
-).
-
--define(assertEqualLists(A, B),
- ?assertEqual(lists:usort(A), lists:usort(B))
-).
-
-start() ->
- Ctx = test_util:start_couch([ioq]),
- ok = config:set("chttpd", "enable_cors", "true", false),
- ok = config:set("vhosts", "example.com", "/", false),
- Ctx.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
-
- config:set("cors", "credentials", "false", false),
- config:set("cors", "origins", "http://example.com", false),
-
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- Host = "http://" ++ Addr ++ ":" ++ Port,
- {Host, ?b2l(DbName)}.
-
-setup({Mod, VHost}) ->
- {Host, DbName} = setup(),
- Url =
- case Mod of
- server ->
- Host;
- db ->
- Host ++ "/" ++ DbName
- end,
- DefaultHeaders =
- [{"Origin", "http://example.com"}] ++
- maybe_append_vhost(VHost),
- {Host, DbName, Url, DefaultHeaders}.
-
-teardown(DbName) when is_list(DbName) ->
- ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
- ok;
-teardown({_, DbName}) ->
- teardown(DbName).
-
-teardown(_, {_, DbName, _, _}) ->
- teardown(DbName).
-
-cors_test_() ->
- Funs = [
- fun should_not_allow_origin/2,
- fun should_not_allow_origin_with_port_mismatch/2,
- fun should_not_allow_origin_with_scheme_mismatch/2,
- fun should_not_all_origin_due_case_mismatch/2,
- fun should_make_simple_request/2,
- fun should_make_preflight_request/2,
- fun should_make_prefligh_request_with_port/2,
- fun should_make_prefligh_request_with_scheme/2,
- fun should_make_prefligh_request_with_wildcard_origin/2,
- fun should_make_request_with_credentials/2,
- fun should_make_origin_request_with_auth/2,
- fun should_make_preflight_request_with_auth/2
- ],
- {
- "CORS (COUCHDB-431)",
- {
- setup,
- fun start/0,
- fun test_util:stop_couch/1,
- [
- cors_tests(Funs),
- vhost_cors_tests(Funs),
- headers_tests()
- ]
- }
- }.
-
-headers_tests() ->
- {
- "Various headers tests",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_not_return_cors_headers_for_invalid_origin/1,
- fun should_not_return_cors_headers_for_invalid_origin_preflight/1,
- fun should_make_request_against_attachment/1,
- fun should_make_range_request_against_attachment/1,
- fun should_make_request_with_if_none_match_header/1
- ]
- }
- }.
-
-cors_tests(Funs) ->
- {
- "CORS tests",
- [
- make_test_case(server, false, Funs),
- make_test_case(db, false, Funs)
- ]
- }.
-
-vhost_cors_tests(Funs) ->
- {
- "Virtual Host CORS",
- [
- make_test_case(server, true, Funs),
- make_test_case(db, true, Funs)
- ]
- }.
-
-make_test_case(Mod, UseVhost, Funs) ->
- {
- case Mod of
- server -> "Server";
- db -> "Database"
- end,
- {foreachx, fun setup/1, fun teardown/2, [
- {{Mod, UseVhost}, Fun}
- || Fun <- Funs
- ]}
- }.
-
-should_not_allow_origin(_, {_, _, Url, Headers0}) ->
- ?_assertEqual(
- undefined,
- begin
- config:delete("cors", "origins", false),
- Headers1 = proplists:delete("Origin", Headers0),
- Headers =
- [{"Origin", "http://127.0.0.1"}] ++
- Headers1,
- {ok, _, Resp, _} = test_request:get(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_not_allow_origin_with_port_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(
- undefined,
- begin
- Headers =
- [
- {"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}
- ] ++
- maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_not_allow_origin_with_scheme_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(
- undefined,
- begin
- Headers =
- [
- {"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}
- ] ++
- maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_not_allow_origin_due_to_case_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(
- undefined,
- begin
- Headers =
- [
- {"Origin", "http://ExAmPlE.CoM"},
- {"Access-Control-Request-Method", "GET"}
- ] ++
- maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_make_simple_request(_, {_, _, Url, DefaultHeaders}) ->
- ?_test(begin
- {ok, _, Resp, _} = test_request:get(Url, DefaultHeaders),
- ?assertEqual(
- undefined,
- proplists:get_value("Access-Control-Allow-Credentials", Resp)
- ),
- ?assertEqual(
- "http://example.com",
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- ),
- ?assertEqualLists(
- ?COUCH_HEADERS ++ list_simple_headers(Resp),
- split_list(proplists:get_value("Access-Control-Expose-Headers", Resp))
- )
- end).
-
-should_make_preflight_request(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqualLists(
- ?SUPPORTED_METHODS,
- begin
- Headers =
- DefaultHeaders ++
- [{"Access-Control-Request-Method", "GET"}],
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- split_list(proplists:get_value("Access-Control-Allow-Methods", Resp))
- end
- ).
-
-should_make_preflight_request_with_port({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(
- "http://example.com:5984",
- begin
- config:set(
- "cors",
- "origins",
- "http://example.com:5984",
- false
- ),
- Headers =
- [
- {"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}
- ] ++
- maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_make_preflight_request_with_scheme({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(
- "https://example.com:5984",
- begin
- config:set(
- "cors",
- "origins",
- "https://example.com:5984",
- false
- ),
- Headers =
- [
- {"Origin", "https://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}
- ] ++
- maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_make_preflight_request_with_wildcard_origin({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(
- "https://example.com:5984",
- begin
- config:set("cors", "origins", "*", false),
- Headers =
- [
- {"Origin", "https://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}
- ] ++
- maybe_append_vhost(VHost),
- {ok, _, Resp, _} = test_request:options(Url, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_make_request_with_credentials(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqual(
- "true",
- begin
- ok = config:set("cors", "credentials", "true", false),
- {ok, _, Resp, _} = test_request:options(Url, DefaultHeaders),
- proplists:get_value("Access-Control-Allow-Credentials", Resp)
- end
- ).
-
-should_make_origin_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqual(
- "http://example.com",
- begin
- Hashed = couch_passwords:hash_admin_password(<<"test">>),
- config:set("admins", "test", ?b2l(Hashed), false),
- {ok, _, Resp, _} = test_request:get(
- Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]
- ),
- config:delete("admins", "test", false),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_make_preflight_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqualLists(
- ?SUPPORTED_METHODS,
- begin
- Hashed = couch_passwords:hash_admin_password(<<"test">>),
- config:set("admins", "test", ?b2l(Hashed), false),
- Headers =
- DefaultHeaders ++
- [{"Access-Control-Request-Method", "GET"}],
- {ok, _, Resp, _} = test_request:options(
- Url, Headers, [{basic_auth, {"test", "test"}}]
- ),
- config:delete("admins", "test", false),
- split_list(proplists:get_value("Access-Control-Allow-Methods", Resp))
- end
- ).
-
-should_not_return_cors_headers_for_invalid_origin({Host, _}) ->
- ?_assertEqual(
- undefined,
- begin
- Headers = [{"Origin", "http://127.0.0.1"}],
- {ok, _, Resp, _} = test_request:get(Host, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_not_return_cors_headers_for_invalid_origin_preflight({Host, _}) ->
- ?_assertEqual(
- undefined,
- begin
- Headers = [
- {"Origin", "http://127.0.0.1"},
- {"Access-Control-Request-Method", "GET"}
- ],
- {ok, _, Resp, _} = test_request:options(Host, Headers),
- proplists:get_value("Access-Control-Allow-Origin", Resp)
- end
- ).
-
-should_make_request_against_attachment({Host, DbName}) ->
- {"COUCHDB-1689",
- ?_assertEqual(
- 200,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, _, _} = test_request:put(
- Url ++ "/doc/file.txt",
- [{"Content-Type", "text/plain"}],
- "hello, couch!"
- ),
- ?assert(Code0 =:= 201),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc?attachments=true",
- [{"Origin", "http://example.com"}]
- ),
- Code
- end
- )}.
-
-should_make_range_request_against_attachment({Host, DbName}) ->
- {"COUCHDB-1689",
- ?_assertEqual(
- 206,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, _, _} = test_request:put(
- Url ++ "/doc/file.txt",
- [{"Content-Type", "application/octet-stream"}],
- "hello, couch!"
- ),
- ?assert(Code0 =:= 201),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc/file.txt", [
- {"Origin", "http://example.com"},
- {"Range", "bytes=0-6"}
- ]
- ),
- Code
- end
- )}.
-
-should_make_request_with_if_none_match_header({Host, DbName}) ->
- {"COUCHDB-1697",
- ?_assertEqual(
- 304,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, Headers0, _} = test_request:put(
- Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"
- ),
- ?assert(Code0 =:= 201),
- ETag = proplists:get_value("ETag", Headers0),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc", [
- {"Origin", "http://example.com"},
- {"If-None-Match", ETag}
- ]
- ),
- Code
- end
- )}.
-
-maybe_append_vhost(true) ->
- [{"Host", "http://example.com"}];
-maybe_append_vhost(false) ->
- [].
-
-split_list(S) ->
- re:split(S, "\\s*,\\s*", [trim, {return, list}]).
-
-list_simple_headers(Headers) ->
- LCHeaders = [string:to_lower(K) || {K, _V} <- Headers],
- lists:filter(fun(H) -> lists:member(H, ?SIMPLE_HEADERS) end, LCHeaders).
diff --git a/src/couch/test/eunit/couchdb_db_tests.erl b/src/couch/test/eunit/couchdb_db_tests.erl
deleted file mode 100644
index 2f6993576..000000000
--- a/src/couch/test/eunit/couchdb_db_tests.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_db_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-setup() ->
- DbName = ?b2l(?tempdb()),
- fabric:create_db(DbName),
- DbName.
-
-teardown(DbName) ->
- (catch fabric:delete_db(DbName)),
- ok.
-
-clustered_db_test_() ->
- {
- "Checking clustered db API",
- {
- setup,
- fun() -> test_util:start_couch([ddoc_cache, mem3]) end,
- fun test_util:stop/1,
- [
- {
- "DB deletion",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_close_deleted_db/1,
- fun should_kill_caller_from_load_validation_funs_for_deleted_db/1
- ]
- }
- }
- ]
- }
- }.
-
-should_close_deleted_db(DbName) ->
- ?_test(begin
- [#shard{name = ShardName} | _] = mem3:shards(DbName),
- {ok, Db} = couch_db:open(ShardName, []),
-
- MonitorRef = couch_db:monitor(Db),
- fabric:delete_db(DbName),
- receive
- {'DOWN', MonitorRef, _Type, _Pid, _Info} ->
- ok
- after 2000 ->
- throw(timeout_error)
- end,
- test_util:wait(fun() ->
- case ets:lookup(couch_server:couch_dbs(DbName), DbName) of
- [] -> ok;
- _ -> wait
- end
- end),
- ?assertEqual([], ets:lookup(couch_server:couch_dbs(DbName), DbName))
- end).
-
-should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) ->
- ?_test(begin
- [#shard{name = ShardName} | _] = mem3:shards(DbName),
- {ok, Db} = couch_db:open(ShardName, []),
-
- MonitorRef = couch_db:monitor(Db),
- fabric:delete_db(DbName),
- receive
- {'DOWN', MonitorRef, _Type, _Pid, _Info} ->
- ok
- after 2000 ->
- throw(timeout_error)
- end,
- ?assertError(database_does_not_exist, couch_db:load_validation_funs(Db))
- end).
diff --git a/src/couch/test/eunit/couchdb_design_doc_tests.erl b/src/couch/test/eunit/couchdb_design_doc_tests.erl
deleted file mode 100644
index c51d56f0b..000000000
--- a/src/couch/test/eunit/couchdb_design_doc_tests.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_design_doc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_design_doc(DbName, <<"_design/foo">>),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- BaseUrl = "http://" ++ Addr ++ ":" ++ Port,
- {?b2l(DbName), BaseUrl}.
-
-teardown({DbName, _}) ->
- couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
- ok.
-
-design_list_test_() ->
- {
- "Check _list functionality",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_empty_when_plain_return/1,
- fun should_return_empty_when_no_docs/1
- ]
- }
- }
- }.
-
-should_return_empty_when_plain_return({DbName, BaseUrl}) ->
- ?_test(begin
- ?assertEqual(
- <<>>,
- query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view")
- )
- end).
-
-should_return_empty_when_no_docs({DbName, BaseUrl}) ->
- ?_test(begin
- ?assertEqual(
- <<>>,
- query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view")
- )
- end).
-
-create_design_doc(DbName, DDName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>,
- {[
- {<<"simple_view">>,
- {[
- {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">>},
- {<<"reduce">>,
- <<"function (key, values, rereduce) {return sum(values);}">>}
- ]}}
- ]}},
- {<<"lists">>,
- {[
- {<<"plain_return">>, <<"function(head, req) {return;}">>},
- {<<"simple_render">>,
- <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>}
- ]}}
- ]}
- ),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-query_text(BaseUrl, DbName, DDoc, Path) ->
- {ok, Code, _Headers, Body} = test_request:get(
- BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path
- ),
- ?assertEqual(200, Code),
- Body.
diff --git a/src/couch/test/eunit/couchdb_file_compression_tests.erl b/src/couch/test/eunit/couchdb_file_compression_tests.erl
deleted file mode 100644
index 75bf18a12..000000000
--- a/src/couch/test/eunit/couchdb_file_compression_tests.erl
+++ /dev/null
@@ -1,251 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_file_compression_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DDOC_ID, <<"_design/test">>).
--define(DOCS_COUNT, 1000).
--define(TIMEOUT, 60).
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- config:set("couchdb", "file_compression", "none", false),
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = populate_db(Db, ?DOCS_COUNT),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?DDOC_ID},
- {<<"language">>, <<"javascript">>},
- {<<"views">>,
- {[
- {<<"by_id">>,
- {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>}
- ]}}
- ]}}
- ]}
- ),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- ok = couch_db:close(Db),
- {Ctx, DbName}.
-
-teardown_all({Ctx, DbName}) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx).
-
-couch_file_compression_test_() ->
- {
- "CouchDB file compression tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {with, [
- fun should_use_none/1,
- fun should_use_deflate_1/1,
- fun should_use_deflate_9/1,
- fun should_use_snappy/1,
- fun should_compare_compression_methods/1
- ]}
- }
- }.
-
-should_use_none({_, DbName}) -> run_test(DbName, "none").
-should_use_deflate_1({_, DbName}) -> run_test(DbName, "deflate_1").
-should_use_deflate_9({_, DbName}) -> run_test(DbName, "deflate_9").
-should_use_snappy({_, DbName}) -> run_test(DbName, "snappy").
-
-should_compare_compression_methods({_, DbName}) ->
- TestDb = setup_db(DbName),
- Name = "none > snappy > deflate_1 > deflate_9",
- try
- {Name, {timeout, ?TIMEOUT, ?_test(compare_methods(TestDb))}}
- after
- couch_server:delete(TestDb, [?ADMIN_CTX])
- end.
-
-run_test(DbName, Comp) ->
- config:set("couchdb", "file_compression", Comp, false),
- Timeout = 5 + ?TIMEOUT,
- TestDb = setup_db(DbName),
- Tests = [
- {"compact database", {timeout, Timeout, ?_test(compact_db(DbName))}},
- {"compact view", {timeout, Timeout, ?_test(compact_view(DbName))}}
- ],
- try
- {"Use compression: " ++ Comp, Tests}
- after
- ok = couch_server:delete(TestDb, [?ADMIN_CTX])
- end.
-
-compare_methods(DbName) ->
- config:set("couchdb", "file_compression", "none", false),
- ExternalSizePreCompact = db_external_size(DbName),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeNone = db_disk_size(DbName),
- ViewSizeNone = view_disk_size(DbName),
- ExternalSizeNone = db_external_size(DbName),
- ViewExternalSizeNone = view_external_size(DbName),
-
- config:set("couchdb", "file_compression", "snappy", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeSnappy = db_disk_size(DbName),
- ViewSizeSnappy = view_disk_size(DbName),
- ExternalSizeSnappy = db_external_size(DbName),
- ViewExternalSizeSnappy = view_external_size(DbName),
-
- ?assert(DbSizeNone > DbSizeSnappy),
- ?assert(ViewSizeNone > ViewSizeSnappy),
-
- config:set("couchdb", "file_compression", "deflate_1", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeDeflate1 = db_disk_size(DbName),
- ViewSizeDeflate1 = view_disk_size(DbName),
-
- ?assert(DbSizeSnappy > DbSizeDeflate1),
- ?assert(ViewSizeSnappy > ViewSizeDeflate1),
-
- config:set("couchdb", "file_compression", "deflate_9", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeDeflate9 = db_disk_size(DbName),
- ViewSizeDeflate9 = view_disk_size(DbName),
- ExternalSizeDeflate9 = db_external_size(DbName),
- ViewExternalSizeDeflate9 = view_external_size(DbName),
-
- ?assert(DbSizeDeflate1 > DbSizeDeflate9),
- ?assert(ViewSizeDeflate1 > ViewSizeDeflate9),
- ?assert(ExternalSizePreCompact >= ExternalSizeNone),
- ?assert(ExternalSizeNone =:= ExternalSizeSnappy),
- ?assert(ExternalSizeNone =:= ExternalSizeDeflate9),
- ?assert(ViewExternalSizeNone =:= ViewExternalSizeSnappy),
- ?assert(ViewExternalSizeNone =:= ViewExternalSizeDeflate9).
-
-populate_db(_Db, NumDocs) when NumDocs =< 0 ->
- ok;
-populate_db(Db, NumDocs) ->
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, couch_uuids:random()},
- {<<"string">>, ?l2b(lists:duplicate(1000, $X))}
- ]}
- )
- end,
- lists:seq(1, 500)
- ),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_db(Db, NumDocs - 500).
-
-setup_db(SrcDbName) ->
- TgtDbName = ?tempdb(),
- TgtDbFileName = binary_to_list(TgtDbName) ++ ".couch",
- couch_util:with_db(SrcDbName, fun(Db) ->
- OldPath = couch_db:get_filepath(Db),
- NewPath = filename:join(filename:dirname(OldPath), TgtDbFileName),
- {ok, _} = file:copy(OldPath, NewPath)
- end),
- refresh_index(TgtDbName),
- TgtDbName.
-
-refresh_index(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- couch_mrview:query_view(Db, DDoc, <<"by_id">>, [{update, true}]),
- ok = couch_db:close(Db).
-
-compact_db(DbName) ->
- DiskSizeBefore = db_disk_size(DbName),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db),
- DiskSizeAfter = db_disk_size(DbName),
- ?assert(DiskSizeBefore > DiskSizeAfter).
-
-compact_view(DbName) ->
- DiskSizeBefore = view_disk_size(DbName),
- {ok, _MonRef} = couch_mrview:compact(DbName, ?DDOC_ID, [monitor]),
- wait_compaction(DbName, "view group", ?LINE),
- DiskSizeAfter = view_disk_size(DbName),
- ?assert(DiskSizeBefore > DiskSizeAfter).
-
-db_disk_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- active_size(Info).
-
-db_external_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- external_size(Info).
-
-view_disk_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- ok = couch_db:close(Db),
- active_size(Info).
-
-view_external_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- ok = couch_db:close(Db),
- external_size(Info).
-
-active_size(Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, active]).
-
-external_size(Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, external]).
-
-wait_compaction(DbName, Kind, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, ?TIMEOUT) of
- timeout ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, Line},
- {reason,
- "Timeout waiting for " ++
- Kind ++
- " database compaction"}
- ]}
- );
- _ ->
- ok
- end.
-
-is_compaction_running(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- {ok, ViewInfo} = couch_mrview:get_info(Db, ?DDOC_ID),
- couch_db:close(Db),
- (couch_util:get_value(compact_running, ViewInfo) =:= true) orelse
- (couch_util:get_value(compact_running, DbInfo) =:= true).
diff --git a/src/couch/test/eunit/couchdb_location_header_tests.erl b/src/couch/test/eunit/couchdb_location_header_tests.erl
deleted file mode 100644
index 08870f8c8..000000000
--- a/src/couch/test/eunit/couchdb_location_header_tests.erl
+++ /dev/null
@@ -1,83 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_location_header_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
-
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- Host = "http://" ++ Addr ++ ":" ++ Port,
- {Host, ?b2l(DbName)}.
-
-teardown({_, DbName}) ->
- ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
- ok.
-
-header_test_() ->
- {
- "CouchDB Location Header Tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_work_with_newlines_in_docs/1,
- fun should_work_with_newlines_in_attachments/1
- ]
- }
- }
- }.
-
-should_work_with_newlines_in_docs({Host, DbName}) ->
- Url = Host ++ "/" ++ DbName ++ "/docid%0A",
- {"COUCHDB-708",
- ?_assertEqual(
- Url,
- begin
- {ok, _, Headers, _} = test_request:put(
- Url,
- [{"Content-Type", "application/json"}],
- "{}"
- ),
- proplists:get_value("Location", Headers)
- end
- )}.
-
-should_work_with_newlines_in_attachments({Host, DbName}) ->
- Url = Host ++ "/" ++ DbName,
- AttUrl = Url ++ "/docid%0A/readme.txt",
- {"COUCHDB-708",
- ?_assertEqual(
- AttUrl,
- begin
- Body = "We all live in a yellow submarine!",
- Headers0 = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"}
- ],
- {ok, _, Headers, _} = test_request:put(AttUrl, Headers0, Body),
- proplists:get_value("Location", Headers)
- end
- )}.
diff --git a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
deleted file mode 100644
index 9822542f3..000000000
--- a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
+++ /dev/null
@@ -1,142 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_mrview_cors_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DDOC,
- {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"shows">>,
- {[
- {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
- ]}}
- ]}
-).
-
--define(USER, "mrview_cors_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
-start() ->
- Ctx = test_util:start_couch([chttpd]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- ok = config:set("chttpd", "enable_cors", "true", false),
- ok = config:set("vhosts", "example.com", "/", false),
- Ctx.
-
-setup(PortType) ->
- DbName = ?tempdb(),
- ok = create_db(PortType, DbName),
-
- config:set("cors", "credentials", "false", false),
- config:set("cors", "origins", "http://example.com", false),
-
- Host = host_url(PortType),
- upload_ddoc(Host, ?b2l(DbName)),
- {Host, ?b2l(DbName)}.
-
-teardown(Ctx) ->
- ok = config:delete("admins", ?USER, _Persist = false),
- test_util:stop_couch(Ctx).
-
-teardown(PortType, {_Host, DbName}) ->
- delete_db(PortType, ?l2b(DbName)),
- ok.
-
-cors_test_() ->
- {
- "CORS for mrview",
- {
- setup,
- fun start/0,
- fun teardown/1,
- [show_tests()]
- }
- }.
-
-show_tests() ->
- {
- "Check CORS for show",
- [
- make_test_case(clustered, [fun should_make_shows_request/2]),
- make_test_case(backdoor, [fun should_make_shows_request/2])
- ]
- }.
-
-make_test_case(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
- }.
-
-should_make_shows_request(_, {Host, DbName}) ->
- ?_test(begin
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar",
- Headers = [
- {"Origin", "http://example.com"},
- {"Access-Control-Request-Method", "GET"},
- ?AUTH
- ],
- {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers),
- Origin = proplists:get_value("Access-Control-Allow-Origin", Resp),
- ?assertEqual("http://example.com", Origin),
- ?assertEqual(<<"<h1>wosh</h1>">>, Body)
- end).
-
-create_db(backdoor, DbName) ->
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db);
-create_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""),
- assert_success(create_db, Status),
- ok.
-
-delete_db(backdoor, DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]);
-delete_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]),
- assert_success(delete_db, Status),
- ok.
-
-assert_success(create_db, Status) ->
- true = lists:member(Status, [201, 202]);
-assert_success(delete_db, Status) ->
- true = lists:member(Status, [200, 202]).
-
-host_url(PortType) ->
- "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType).
-
-bind_address(PortType) ->
- config:get(section(PortType), "bind_address", "127.0.0.1").
-
-section(backdoor) -> "http";
-section(clustered) -> "chttpd".
-
-db_url(DbName) when is_binary(DbName) ->
- db_url(binary_to_list(DbName));
-db_url(DbName) when is_list(DbName) ->
- host_url(clustered) ++ "/" ++ DbName.
-
-port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
-
-upload_ddoc(Host, DbName) ->
- Url = Host ++ "/" ++ DbName ++ "/_design/foo",
- Body = couch_util:json_encode(?DDOC),
- {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body),
- ok.
diff --git a/src/couch/test/eunit/couchdb_mrview_tests.erl b/src/couch/test/eunit/couchdb_mrview_tests.erl
deleted file mode 100644
index 606c9c39a..000000000
--- a/src/couch/test/eunit/couchdb_mrview_tests.erl
+++ /dev/null
@@ -1,272 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_mrview_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DDOC,
- {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"shows">>,
- {[
- {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
- ]}},
- {<<"updates">>,
- {[
- {<<"report">>, <<
- "function(doc, req) {"
- "var data = JSON.parse(req.body); "
- "return ['test', data];"
- "}"
- >>}
- ]}},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>}
- ]}}
- ]}}
- ]}
-).
-
--define(USER, "admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
-setup_all() ->
- Ctx = test_util:start_couch([chttpd]),
- ok = meck:new(mochiweb_socket, [passthrough]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- ok = config:delete("admins", ?USER, _Persist = false),
- test_util:stop_couch(Ctx).
-
-setup(PortType) ->
- meck:reset([mochiweb_socket]),
- ok = meck:expect(mochiweb_socket, recv, fun mochiweb_socket_recv/3),
-
- DbName = ?tempdb(),
- ok = create_db(PortType, DbName),
-
- Host = host_url(PortType),
- upload_ddoc(Host, ?b2l(DbName)),
- {Host, ?b2l(DbName)}.
-
-teardown(PortType, {_Host, DbName}) ->
- delete_db(PortType, ?l2b(DbName)),
- ok.
-
-mrview_show_test_() ->
- {
- "Check show functionality",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_return_invalid_request_body/2]),
- make_test_case(backdoor, [fun should_return_invalid_request_body/2])
- ]
- }
- }.
-
-mrview_query_test_() ->
- {
- "Check view query functionality",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_return_400_for_wrong_order_of_keys/2]),
- make_test_case(backdoor, [fun should_return_400_for_wrong_order_of_keys/2])
- ]
- }
- }.
-
-mrview_cleanup_index_files_test_() ->
- {
- "Check index files cleanup",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_cleanup_index_files/2])
- ]
- }
- }.
-
-make_test_case(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Mod, Fun} || Fun <- Funs]
- }
- }.
-
-should_return_invalid_request_body(PortType, {Host, DbName}) ->
- ?_test(begin
- ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}),
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id",
- {ok, Status, _Headers, Body} =
- test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>),
- {Props} = jiffy:decode(Body),
- ?assertEqual(
- <<"bad_request">>, couch_util:get_value(<<"error">>, Props)
- ),
- ?assertEqual(
- <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props)
- ),
- ?assertEqual(400, Status),
- ok
- end).
-
-should_return_400_for_wrong_order_of_keys(_PortType, {Host, DbName}) ->
- Args = [{start_key, "\"bbb\""}, {end_key, "\"aaa\""}],
- ?_test(begin
- ReqUrl =
- Host ++ "/" ++ DbName ++
- "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args),
- {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]),
- {Props} = jiffy:decode(Body),
- ?assertEqual(
- <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props)
- ),
- ?assertEqual(
- <<"No rows can match your key range, reverse your start_key and end_key or set descending=true">>,
- couch_util:get_value(<<"reason">>, Props)
- ),
- ?assertEqual(400, Status),
- ok
- end).
-
-should_cleanup_index_files(_PortType, {Host, DbName}) ->
- ?_test(begin
- IndexWildCard = [
- config:get("couchdb", "view_index_dir"),
- "/.shards/*/",
- DbName,
- ".[0-9]*_design/mrview/*"
- ],
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_view/view1",
- {ok, _Status0, _Headers0, _Body0} = test_request:get(ReqUrl, [?AUTH]),
- FileList0 = filelib:wildcard(IndexWildCard),
- ?assertNotEqual([], FileList0),
-
- % It is hard to simulate an inactive view, since couch_mrview:cleanup
- % is called on view definition change. That's why we just create
- % extra files in place.
- ToDelete = lists:map(
- fun(FilePath) ->
- ViewFile = filename:join([
- filename:dirname(FilePath),
- "11111111111111111111111111111111.view"
- ]),
- file:write_file(ViewFile, <<>>),
- ViewFile
- end,
- FileList0
- ),
- FileList1 = filelib:wildcard(IndexWildCard),
- ?assertEqual([], lists:usort(FileList1 -- (FileList0 ++ ToDelete))),
-
- CleanupUrl = Host ++ "/" ++ DbName ++ "/_view_cleanup",
- {ok, _Status1, _Headers1, _Body1} = test_request:post(
- CleanupUrl, [], <<>>, [?AUTH]
- ),
- test_util:wait(fun() ->
- IndexFiles = filelib:wildcard(IndexWildCard),
- case lists:usort(FileList0) == lists:usort(IndexFiles) of
- false -> wait;
- true -> ok
- end
- end),
- ok
- end).
-
-create_doc(backdoor, DbName, Id, Body) ->
- JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body),
- Doc = couch_doc:from_json_obj(JsonDoc),
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- {ok, _} = couch_db:update_docs(Db, [Doc]),
- couch_db:close(Db);
-create_doc(clustered, DbName, Id, Body) ->
- JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body),
- Doc = couch_doc:from_json_obj(JsonDoc),
- {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
- ok.
-
-create_db(backdoor, DbName) ->
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db);
-create_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""),
- assert_success(create_db, Status),
- ok.
-
-delete_db(backdoor, DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]);
-delete_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]),
- assert_success(delete_db, Status),
- ok.
-
-assert_success(create_db, Status) ->
- ?assert(lists:member(Status, [201, 202]));
-assert_success(delete_db, Status) ->
- ?assert(lists:member(Status, [200, 202])).
-
-host_url(PortType) ->
- "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType).
-
-bind_address(PortType) ->
- config:get(section(PortType), "bind_address", "127.0.0.1").
-
-section(backdoor) -> "http";
-section(clustered) -> "chttpd".
-
-db_url(DbName) when is_binary(DbName) ->
- db_url(binary_to_list(DbName));
-db_url(DbName) when is_list(DbName) ->
- host_url(clustered) ++ "/" ++ DbName.
-
-port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
-
-upload_ddoc(Host, DbName) ->
- Url = Host ++ "/" ++ DbName ++ "/_design/foo",
- Body = couch_util:json_encode(?DDOC),
- {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body),
- ok.
-
-mochiweb_socket_recv(Sock, Len, Timeout) ->
- case meck:passthrough([Sock, Len, Timeout]) of
- {ok, <<"{truncated}">>} ->
- {error, closed};
- {ok, Data} ->
- {ok, Data};
- Else ->
- Else
- end.
diff --git a/src/couch/test/eunit/couchdb_os_proc_pool.erl b/src/couch/test/eunit/couchdb_os_proc_pool.erl
deleted file mode 100644
index 620265b32..000000000
--- a/src/couch/test/eunit/couchdb_os_proc_pool.erl
+++ /dev/null
@@ -1,390 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_os_proc_pool).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- ok = couch_proc_manager:reload(),
- meck:new(couch_os_process, [passthrough]),
- ok = setup_config().
-
-teardown(_) ->
- meck:unload(),
- ok.
-
-os_proc_pool_test_() ->
- {
- "OS processes pool tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- should_block_new_proc_on_full_pool(),
- should_free_slot_on_proc_unexpected_exit(),
- should_reuse_known_proc(),
- % should_process_waiting_queue_as_fifo(),
- should_reduce_pool_on_idle_os_procs(),
- should_not_return_broken_process_to_the_pool()
- ]
- }
- }
- }.
-
-should_block_new_proc_on_full_pool() ->
- ?_test(begin
- Client1 = spawn_client(),
- Client2 = spawn_client(),
- Client3 = spawn_client(),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
- ?assertEqual(ok, ping_client(Client3)),
-
- Proc1 = get_client_proc(Client1, "1"),
- Proc2 = get_client_proc(Client2, "2"),
- Proc3 = get_client_proc(Client3, "3"),
-
- ?assertNotEqual(Proc1, Proc2),
- ?assertNotEqual(Proc2, Proc3),
- ?assertNotEqual(Proc3, Proc1),
-
- Client4 = spawn_client(),
- ?assertEqual(timeout, ping_client(Client4)),
-
- ?assertEqual(ok, stop_client(Client1)),
- ?assertEqual(ok, ping_client(Client4)),
-
- Proc4 = get_client_proc(Client4, "4"),
-
- ?assertEqual(Proc1#proc.pid, Proc4#proc.pid),
- ?assertNotEqual(Proc1#proc.client, Proc4#proc.client),
-
- lists:map(
- fun(C) ->
- ?assertEqual(ok, stop_client(C))
- end,
- [Client2, Client3, Client4]
- )
- end).
-
-should_free_slot_on_proc_unexpected_exit() ->
- ?_test(begin
- Client1 = spawn_client(),
- Client2 = spawn_client(),
- Client3 = spawn_client(),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
- ?assertEqual(ok, ping_client(Client3)),
-
- Proc1 = get_client_proc(Client1, "1"),
- Proc2 = get_client_proc(Client2, "2"),
- Proc3 = get_client_proc(Client3, "3"),
-
- ?assertNotEqual(Proc1#proc.pid, Proc2#proc.pid),
- ?assertNotEqual(Proc1#proc.client, Proc2#proc.client),
- ?assertNotEqual(Proc2#proc.pid, Proc3#proc.pid),
- ?assertNotEqual(Proc2#proc.client, Proc3#proc.client),
- ?assertNotEqual(Proc3#proc.pid, Proc1#proc.pid),
- ?assertNotEqual(Proc3#proc.client, Proc1#proc.client),
-
- ?assertEqual(ok, kill_client(Client1)),
-
- Client4 = spawn_client(),
- ?assertEqual(ok, ping_client(Client4)),
-
- Proc4 = get_client_proc(Client4, "4"),
-
- ?assertEqual(Proc4#proc.pid, Proc1#proc.pid),
- ?assertNotEqual(Proc4#proc.client, Proc1#proc.client),
- ?assertNotEqual(Proc2#proc.pid, Proc4#proc.pid),
- ?assertNotEqual(Proc2#proc.client, Proc4#proc.client),
- ?assertNotEqual(Proc3#proc.pid, Proc4#proc.pid),
- ?assertNotEqual(Proc3#proc.client, Proc4#proc.client),
-
- lists:map(
- fun(C) ->
- ?assertEqual(ok, stop_client(C))
- end,
- [Client2, Client3, Client4]
- )
- end).
-
-should_reuse_known_proc() ->
- ?_test(begin
- Client1 = spawn_client(<<"ddoc1">>),
- Client2 = spawn_client(<<"ddoc2">>),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
-
- Proc1 = get_client_proc(Client1, "1"),
- Proc2 = get_client_proc(Client2, "2"),
- ?assertNotEqual(Proc1#proc.pid, Proc2#proc.pid),
-
- ?assertEqual(ok, stop_client(Client1)),
- ?assertEqual(ok, stop_client(Client2)),
- ?assert(is_process_alive(Proc1#proc.pid)),
- ?assert(is_process_alive(Proc2#proc.pid)),
-
- Client1Again = spawn_client(<<"ddoc1">>),
- ?assertEqual(ok, ping_client(Client1Again)),
- Proc1Again = get_client_proc(Client1Again, "1-again"),
- ?assertEqual(Proc1#proc.pid, Proc1Again#proc.pid),
- ?assertNotEqual(Proc1#proc.client, Proc1Again#proc.client),
- ?assertEqual(ok, stop_client(Client1Again))
- end).
-
-%should_process_waiting_queue_as_fifo() ->
-% ?_test(begin
-% Client1 = spawn_client(<<"ddoc1">>),
-% Client2 = spawn_client(<<"ddoc2">>),
-% Client3 = spawn_client(<<"ddoc3">>),
-% Client4 = spawn_client(<<"ddoc4">>),
-% timer:sleep(100),
-% Client5 = spawn_client(<<"ddoc5">>),
-%
-% ?assertEqual(ok, ping_client(Client1)),
-% ?assertEqual(ok, ping_client(Client2)),
-% ?assertEqual(ok, ping_client(Client3)),
-% ?assertEqual(timeout, ping_client(Client4)),
-% ?assertEqual(timeout, ping_client(Client5)),
-%
-% Proc1 = get_client_proc(Client1, "1"),
-% ?assertEqual(ok, stop_client(Client1)),
-% ?assertEqual(ok, ping_client(Client4)),
-% Proc4 = get_client_proc(Client4, "4"),
-%
-% ?assertNotEqual(Proc4#proc.client, Proc1#proc.client),
-% ?assertEqual(Proc1#proc.pid, Proc4#proc.pid),
-% ?assertEqual(timeout, ping_client(Client5)),
-%
-% ?assertEqual(ok, stop_client(Client2)),
-% ?assertEqual(ok, stop_client(Client3)),
-% ?assertEqual(ok, stop_client(Client4)),
-% ?assertEqual(ok, stop_client(Client5))
-% end).
-
-should_reduce_pool_on_idle_os_procs() ->
- ?_test(begin
- %% os_process_idle_limit is in seconds
- config:set(
- "query_server_config",
- "os_process_idle_limit",
- "1",
- false
- ),
- ok = confirm_config("os_process_idle_limit", "1"),
-
- Client1 = spawn_client(<<"ddoc1">>),
- Client2 = spawn_client(<<"ddoc2">>),
- Client3 = spawn_client(<<"ddoc3">>),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
- ?assertEqual(ok, ping_client(Client3)),
-
- ?assertEqual(3, couch_proc_manager:get_proc_count()),
-
- ?assertEqual(ok, stop_client(Client1)),
- ?assertEqual(ok, stop_client(Client2)),
- ?assertEqual(ok, stop_client(Client3)),
-
- timer:sleep(1200),
- ?assertEqual(1, couch_proc_manager:get_proc_count())
- end).
-
-should_not_return_broken_process_to_the_pool() ->
- ?_test(begin
- config:set(
- "query_server_config",
- "os_process_soft_limit",
- "1",
- false
- ),
- ok = confirm_config("os_process_soft_limit", "1"),
-
- config:set(
- "query_server_config",
- "os_process_limit",
- "1",
- false
- ),
- ok = confirm_config("os_process_limit", "1"),
-
- DDoc1 = ddoc(<<"_design/ddoc1">>),
-
- meck:reset(couch_os_process),
-
- ?assertEqual(0, couch_proc_manager:get_proc_count()),
- ok = couch_query_servers:with_ddoc_proc(DDoc1, fun(_) -> ok end),
- ?assertEqual(0, meck:num_calls(couch_os_process, stop, 1)),
- ?assertEqual(1, couch_proc_manager:get_proc_count()),
-
- ?assertError(
- bad,
- couch_query_servers:with_ddoc_proc(DDoc1, fun(_) ->
- error(bad)
- end)
- ),
- ?assertEqual(1, meck:num_calls(couch_os_process, stop, 1)),
-
- WaitFun = fun() ->
- case couch_proc_manager:get_proc_count() of
- 0 -> ok;
- N when is_integer(N), N > 0 -> wait
- end
- end,
- case test_util:wait(WaitFun, 5000) of
- timeout -> error(timeout);
- _ -> ok
- end,
- ?assertEqual(0, couch_proc_manager:get_proc_count()),
-
- DDoc2 = ddoc(<<"_design/ddoc2">>),
- ok = couch_query_servers:with_ddoc_proc(DDoc2, fun(_) -> ok end),
- ?assertEqual(1, meck:num_calls(couch_os_process, stop, 1)),
- ?assertEqual(1, couch_proc_manager:get_proc_count())
- end).
-
-ddoc(DDocId) ->
- #doc{
- id = DDocId,
- revs = {1, [<<"abc">>]},
- body =
- {[
- {<<"language">>, <<"javascript">>},
- {<<"views">>,
- {[
- {<<"v1">>,
- {[
- {<<"map">>, <<"function(doc) {emit(doc.value,1);}">>}
- ]}}
- ]}}
- ]}
- }.
-
-setup_config() ->
- config:set("native_query_servers", "enable_erlang_query_server", "true", false),
- config:set("query_server_config", "os_process_limit", "3", false),
- config:set("query_server_config", "os_process_soft_limit", "2", false),
- ok = confirm_config("os_process_soft_limit", "2").
-
-confirm_config(Key, Value) ->
- confirm_config(Key, Value, 0).
-
-confirm_config(Key, Value, Count) ->
- case config:get("query_server_config", Key) of
- Value ->
- ok;
- _ when Count > 10 ->
- erlang:error(
- {config_setup, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, timeout}
- ]}
- );
- _ ->
- %% we need to wait to let gen_server:cast finish
- timer:sleep(10),
- confirm_config(Key, Value, Count + 1)
- end.
-
-spawn_client() ->
- Parent = self(),
- Ref = make_ref(),
- Pid = spawn(fun() ->
- Proc = couch_query_servers:get_os_process(<<"erlang">>),
- loop(Parent, Ref, Proc)
- end),
- {Pid, Ref}.
-
-spawn_client(DDocId) ->
- Parent = self(),
- Ref = make_ref(),
- Pid = spawn(fun() ->
- DDocKey = {DDocId, <<"1-abcdefgh">>},
- DDoc = #doc{body = {[{<<"language">>, <<"erlang">>}]}},
- Proc = couch_query_servers:get_ddoc_process(DDoc, DDocKey),
- loop(Parent, Ref, Proc)
- end),
- {Pid, Ref}.
-
-ping_client({Pid, Ref}) ->
- Pid ! ping,
- receive
- {pong, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-get_client_proc({Pid, Ref}, ClientName) ->
- Pid ! get_proc,
- receive
- {proc, Ref, Proc} -> Proc
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- "Timeout getting client " ++
- ClientName ++ " proc"}
- ]}
- )
- end.
-
-stop_client({Pid, Ref}) ->
- Pid ! stop,
- receive
- {stop, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-kill_client({Pid, Ref}) ->
- Pid ! die,
- receive
- {die, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-loop(Parent, Ref, Proc) ->
- receive
- ping ->
- Parent ! {pong, Ref},
- loop(Parent, Ref, Proc);
- get_proc ->
- Parent ! {proc, Ref, Proc},
- loop(Parent, Ref, Proc);
- stop ->
- couch_query_servers:ret_os_process(Proc),
- Parent ! {stop, Ref};
- die ->
- Parent ! {die, Ref},
- exit(some_error)
- end.
diff --git a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
deleted file mode 100644
index a7d449a2d..000000000
--- a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
+++ /dev/null
@@ -1,348 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_update_conflicts_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(i2l(I), integer_to_list(I)).
--define(DOC_ID, <<"foobar">>).
--define(LOCAL_DOC_ID, <<"_local/foobar">>).
--define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]).
--define(TIMEOUT, 20000).
-
-start() ->
- test_util:start_couch().
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, overwrite]),
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?DOC_ID},
- {<<"value">>, 0}
- ]}
- ),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- ok = couch_db:close(Db),
- RevStr = couch_doc:rev_to_str(Rev),
- {DbName, RevStr}.
-setup(_) ->
- setup().
-
-teardown({DbName, _}) ->
- ok = couch_server:delete(DbName, []),
- ok.
-teardown(_, {DbName, _RevStr}) ->
- teardown({DbName, _RevStr}).
-
-view_indexes_cleanup_test_() ->
- {
- "Update conflicts",
- {
- setup,
- fun start/0,
- fun test_util:stop_couch/1,
- [
- concurrent_updates(),
- bulk_docs_updates()
- ]
- }
- }.
-
-concurrent_updates() ->
- {
- "Concurrent updates",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {NumClients, fun should_concurrently_update_doc/2}
- || NumClients <- ?NUM_CLIENTS
- ]
- }
- }.
-
-bulk_docs_updates() ->
- {
- "Bulk docs updates",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_bulk_create_delete_doc/1,
- fun should_bulk_create_local_doc/1,
- fun should_ignore_invalid_local_doc/1
- ]
- }
- }.
-
-should_concurrently_update_doc(NumClients, {DbName, InitRev}) ->
- {
- ?i2l(NumClients) ++ " clients",
- {inorder, [
- {"update doc",
- {timeout, ?TIMEOUT div 1000,
- ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
- {"ensure in single leaf", ?_test(ensure_in_single_revision_leaf(DbName))}
- ]}
- }.
-
-should_bulk_create_delete_doc({DbName, InitRev}) ->
- ?_test(bulk_delete_create(DbName, InitRev)).
-
-should_bulk_create_local_doc({DbName, _}) ->
- ?_test(bulk_create_local_doc(DbName)).
-
-should_ignore_invalid_local_doc({DbName, _}) ->
- ?_test(ignore_invalid_local_doc(DbName)).
-
-concurrent_doc_update(NumClients, DbName, InitRev) ->
- Clients = lists:map(
- fun(Value) ->
- ClientDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?DOC_ID},
- {<<"_rev">>, InitRev},
- {<<"value">>, Value}
- ]}
- ),
- Pid = spawn_client(DbName, ClientDoc),
- {Value, Pid, erlang:monitor(process, Pid)}
- end,
- lists:seq(1, NumClients)
- ),
-
- lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),
-
- {NumConflicts, SavedValue} = lists:foldl(
- fun({Value, Pid, MonRef}, {AccConflicts, AccValue}) ->
- receive
- {'DOWN', MonRef, process, Pid, {ok, _NewRev}} ->
- {AccConflicts, Value};
- {'DOWN', MonRef, process, Pid, conflict} ->
- {AccConflicts + 1, AccValue};
- {'DOWN', MonRef, process, Pid, Error} ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- "Client " ++ ?i2l(Value) ++
- " got update error: " ++
- couch_util:to_list(Error)}
- ]}
- )
- after ?TIMEOUT div 2 ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- "Timeout waiting for client " ++
- ?i2l(Value) ++ " to die"}
- ]}
- )
- end
- end,
- {0, nil},
- Clients
- ),
- ?assertEqual(NumClients - 1, NumConflicts),
-
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
- ok = couch_db:close(Db),
- ?assertEqual(1, length(Leaves)),
-
- [{ok, Doc2}] = Leaves,
- {JsonDoc} = couch_doc:to_json_obj(Doc2, []),
- ?assertEqual(SavedValue, couch_util:get_value(<<"value">>, JsonDoc)).
-
-ensure_in_single_revision_leaf(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
- ok = couch_db:close(Db),
- [{ok, Doc}] = Leaves,
-
- %% FIXME: server restart won't work from test side
- %% stop(ok),
- %% start(),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, Leaves2} = couch_db:open_doc_revs(Db2, ?DOC_ID, all, []),
- ok = couch_db:close(Db2),
- ?assertEqual(1, length(Leaves2)),
-
- [{ok, Doc2}] = Leaves2,
- ?assertEqual(Doc, Doc2).
-
-bulk_delete_create(DbName, InitRev) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- DeletedDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?DOC_ID},
- {<<"_rev">>, InitRev},
- {<<"_deleted">>, true}
- ]}
- ),
- NewDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?DOC_ID},
- {<<"value">>, 666}
- ]}
- ),
-
- {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
- ok = couch_db:close(Db),
-
- ?assertEqual(2, length([ok || {ok, _} <- Results])),
- [{ok, Rev1}, {ok, Rev2}] = Results,
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
- Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]
- ),
- {ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
- Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]
- ),
- ok = couch_db:close(Db2),
-
- {Doc1Props} = couch_doc:to_json_obj(Doc1, []),
- {Doc2Props} = couch_doc:to_json_obj(Doc2, []),
-
- %% Document was deleted
- ?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)),
- %% New document not flagged as deleted
- ?assertEqual(
- undefined,
- couch_util:get_value(
- <<"_deleted">>,
- Doc2Props
- )
- ),
- %% New leaf revision has the right value
- ?assertEqual(
- 666,
- couch_util:get_value(
- <<"value">>,
- Doc2Props
- )
- ),
- %% Deleted document has no conflicts
- ?assertEqual(
- undefined,
- couch_util:get_value(
- <<"_conflicts">>,
- Doc1Props
- )
- ),
- %% Deleted document has no deleted conflicts
- ?assertEqual(
- undefined,
- couch_util:get_value(
- <<"_deleted_conflicts">>,
- Doc1Props
- )
- ),
- %% New leaf revision doesn't have conflicts
- ?assertEqual(
- undefined,
- couch_util:get_value(
- <<"_conflicts">>,
- Doc2Props
- )
- ),
- %% New leaf revision doesn't have deleted conflicts
- ?assertEqual(
- undefined,
- couch_util:get_value(
- <<"_deleted_conflicts">>,
- Doc2Props
- )
- ),
-
- %% Deleted revision has position 2
- ?assertEqual(2, element(1, Rev1)),
- %% New leaf revision has position 3
- ?assertEqual(3, element(1, Rev2)).
-
-bulk_create_local_doc(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- LocalDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?LOCAL_DOC_ID},
- {<<"_rev">>, <<"0-1">>}
- ]}
- ),
-
- {ok, Results} = couch_db:update_docs(
- Db,
- [LocalDoc],
- [],
- replicated_changes
- ),
- ok = couch_db:close(Db),
- ?assertEqual([], Results),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, LocalDoc1} = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []),
- ok = couch_db:close(Db2),
- ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id),
- ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs).
-
-ignore_invalid_local_doc(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- LocalDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?LOCAL_DOC_ID},
- {<<"_rev">>, <<"0-abcdef">>}
- ]}
- ),
-
- {ok, Results} = couch_db:update_docs(
- Db,
- [LocalDoc],
- [],
- replicated_changes
- ),
- ok = couch_db:close(Db),
- ?assertEqual([], Results),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- Result2 = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []),
- ok = couch_db:close(Db2),
- ?assertEqual({not_found, missing}, Result2).
-
-spawn_client(DbName, Doc) ->
- spawn(fun() ->
- {ok, Db} = couch_db:open_int(DbName, []),
- receive
- go -> ok
- end,
- erlang:yield(),
- Result =
- try
- couch_db:update_doc(Db, Doc, [])
- catch
- _:Error ->
- Error
- end,
- ok = couch_db:close(Db),
- exit(Result)
- end).
diff --git a/src/couch/test/eunit/couchdb_vhosts_tests.erl b/src/couch/test/eunit/couchdb_vhosts_tests.erl
deleted file mode 100644
index d1b758914..000000000
--- a/src/couch/test/eunit/couchdb_vhosts_tests.erl
+++ /dev/null
@@ -1,346 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_vhosts_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
--define(iofmt(S, A), lists:flatten(io_lib:format(S, A))).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 666}
- ]}
- ),
-
- Doc1 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/doc1">>},
- {<<"shows">>,
- {[
- {<<"test">>, <<
- "function(doc, req) {\n"
- " return { json: {\n"
- " requested_path: '/' + req.requested_path.join('/'),\n"
- " path: '/' + req.path.join('/')}};}"
- >>}
- ]}},
- {<<"rewrites">>, [
- {[
- {<<"from">>, <<"/">>},
- {<<"to">>, <<"_show/test">>}
- ]}
- ]}
- ]}
- ),
- {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
- couch_db:close(Db),
-
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- Url = "http://" ++ Addr ++ ":" ++ Port,
- {Url, ?b2l(DbName)}.
-
-teardown({_, DbName}) ->
- ok = couch_server:delete(?l2b(DbName), []),
- ok.
-
-vhosts_test_() ->
- {
- "Virtual Hosts rewrite tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_database_info/1,
- fun should_return_revs_info/1,
- fun should_return_virtual_request_path_field_in_request/1,
- fun should_return_real_request_path_field_in_request/1,
- fun should_match_wildcard_vhost/1,
- fun should_return_db_info_for_wildcard_vhost_for_custom_db/1,
- fun should_replace_rewrite_variables_for_db_and_doc/1,
- fun should_return_db_info_for_vhost_with_resource/1,
- fun should_return_revs_info_for_vhost_with_resource/1,
- fun should_return_db_info_for_vhost_with_wildcard_resource/1,
- fun should_return_path_for_vhost_with_wildcard_host/1
- ]
- }
- }
- }.
-
-should_return_database_info({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
- case test_request:get(Url, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_return_revs_info({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
- case
- test_request:get(
- Url ++ "/doc1?revs_info=true",
- [],
- [{host_header, "example.com"}]
- )
- of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_return_virtual_request_path_field_in_request({Url, DbName}) ->
- ?_test(begin
- ok = config:set(
- "vhosts",
- "example1.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite/",
- false
- ),
- case test_request:get(Url, [], [{host_header, "example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- ?assertEqual(
- <<"/">>,
- proplists:get_value(<<"requested_path">>, Json)
- );
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_return_real_request_path_field_in_request({Url, DbName}) ->
- ?_test(begin
- ok = config:set(
- "vhosts",
- "example1.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite/",
- false
- ),
- case test_request:get(Url, [], [{host_header, "example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_match_wildcard_vhost({Url, DbName}) ->
- ?_test(begin
- ok = config:set(
- "vhosts",
- "*.example.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite",
- false
- ),
- case test_request:get(Url, [], [{host_header, "test.example.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) ->
- ?_test(begin
- ok = config:set(
- "vhosts",
- ":dbname.example1.com",
- "/:dbname",
- false
- ),
- Host = DbName ++ ".example1.com",
- case test_request:get(Url, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
- ?_test(begin
- ok = config:set(
- "vhosts",
- ":appname.:dbname.example1.com",
- "/:dbname/_design/:appname/_rewrite/",
- false
- ),
- Host = "doc1." ++ DbName ++ ".example1.com",
- case test_request:get(Url, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_return_db_info_for_vhost_with_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set(
- "vhosts",
- "example.com/test",
- "/" ++ DbName,
- false
- ),
- ReqUrl = Url ++ "/test",
- case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_return_revs_info_for_vhost_with_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set(
- "vhosts",
- "example.com/test",
- "/" ++ DbName,
- false
- ),
- ReqUrl = Url ++ "/test/doc1?revs_info=true",
- case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "*.example2.com/test", "/*", false),
- ReqUrl = Url ++ "/test",
- Host = DbName ++ ".example2.com",
- case test_request:get(ReqUrl, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
-
-should_return_path_for_vhost_with_wildcard_host({Url, DbName}) ->
- ?_test(begin
- ok = config:set(
- "vhosts",
- "*/test1",
- "/" ++ DbName ++ "/_design/doc1/_show/test",
- false
- ),
- case test_request:get(Url ++ "/test1") of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}
- ]}
- )
- end
- end).
diff --git a/src/couch/test/eunit/couchdb_views_tests.erl b/src/couch/test/eunit/couchdb_views_tests.erl
deleted file mode 100644
index 0d32d7fcf..000000000
--- a/src/couch/test/eunit/couchdb_views_tests.erl
+++ /dev/null
@@ -1,1125 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_views_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(DELAY, 100).
--define(TIMEOUT, 1000).
--define(WAIT_DELAY_COUNT, 40).
--define(OLD_COLLATOR_VERSION, [1, 1, 1, 1]).
--define(HEADER_WRITE_WAIT_TIMEOUT, 4500).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- FooRev = create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- query_view(DbName, "foo", "bar"),
- BooRev = create_design_doc(DbName, <<"_design/boo">>, <<"baz">>),
- query_view(DbName, "boo", "baz"),
- {DbName, {FooRev, BooRev}}.
-
-setup_with_docs() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_docs(DbName),
- create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- DbName.
-
-% See src/couch/test/eunit/fixtures for fixture files
-%
-setup_legacy_2x() ->
- % see src/couch/test/eunit/fixtures folder
- DbName = "test",
- OldViewName = "6cf2c2f766f87b618edf6630b00f8736.view",
- NewViewName = "a1c5929f912aca32f13446122cc6ce50.view",
- setup_legacy(DbName, OldViewName, NewViewName).
-
-setup_legacy_3_2_1() ->
- DbName = "db321",
- ViewName = "15a5cb17365a99cd9ddc7327c82bbd0d.view",
- % View signature stays the same
- setup_legacy(DbName, ViewName, ViewName).
-
-setup_collator_test1() ->
- DbName = "colltest1",
- ViewName = "1f2c24bc334d701c2048f85e7438eef1.view",
- % View signature stays the same
- setup_legacy(DbName, ViewName, ViewName).
-
-setup_legacy(DbName, OldViewName, NewViewName) when
- is_list(DbName), is_list(OldViewName), is_list(NewViewName)
-->
- DbFileName = DbName ++ ".couch",
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
- FixtureViewFilePath = filename:join([?FIXTURESDIR, OldViewName]),
-
- DbDir = config:get("couchdb", "database_dir"),
- ViewDir = config:get("couchdb", "view_index_dir"),
- OldViewFilePath = filename:join([
- ViewDir,
- "." ++ DbName ++ "_design",
- "mrview",
- OldViewName
- ]),
- NewViewFilePath = filename:join([
- ViewDir,
- "." ++ DbName ++ "_design",
- "mrview",
- NewViewName
- ]),
-
- NewDbFilePath = filename:join([DbDir, DbFileName]),
-
- Files = [NewDbFilePath, OldViewFilePath, NewViewFilePath],
-
-    %% make sure there are no leftover files
- lists:foreach(fun(File) -> file:delete(File) end, Files),
-
- % copy old db file into db dir
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
-
- % copy old view file into view dir
- ok = filelib:ensure_dir(OldViewFilePath),
-
- {ok, _} = file:copy(FixtureViewFilePath, OldViewFilePath),
-
- {?l2b(DbName), Files}.
-
-teardown({DbName, _}) ->
- teardown(DbName);
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown_legacy({_DbName, Files}) ->
- lists:foreach(fun(File) -> file:delete(File) end, Files).
-
-view_indexes_cleanup_test_() ->
- {
- "View indexes cleanup",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_have_two_indexes_alive_before_deletion/1,
- fun should_cleanup_index_file_after_ddoc_deletion/1,
- fun should_cleanup_all_index_files/1
- ]
- }
- }
- }.
-
-view_group_db_leaks_test_() ->
- {
- "View group db leaks",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_with_docs/0,
- fun teardown/1,
- [
- fun couchdb_1138/1,
- fun couchdb_1309/1
- ]
- }
- }
- }.
-
-view_group_shutdown_test_() ->
- {
- "View group shutdown",
- {
- setup,
- fun() ->
- meck:new(couch_mrview_index, [passthrough]),
- test_util:start_couch()
- end,
- fun(Ctx) ->
- test_util:stop_couch(Ctx),
- meck:unload()
- end,
- [couchdb_1283()]
- }
- }.
-
-backup_restore_test_() ->
- {
- "Upgrade and bugs related tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_with_docs/0,
- fun teardown/1,
- [
- fun should_not_remember_docs_in_index_after_backup_restore/1
- ]
- }
- }
- }.
-
-upgrade_2x_test_() ->
- {
- "Upgrade 2x tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_legacy_2x/0,
- fun teardown_legacy/1,
- [
- fun should_upgrade_legacy_2x_view_files/1
- ]
- }
- }
- }.
-
-upgrade_3_2_1_test_() ->
- {
- "Upgrade 3.2.1 tests",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch(),
- DbFiles = setup_legacy_3_2_1(),
- {Ctx, DbFiles}
- end,
- fun({Ctx, DbFiles}) ->
- teardown_legacy(DbFiles),
- test_util:stop_couch(Ctx)
- end,
- [
- fun should_upgrade_legacy_3_2_1_view_files/1,
- fun can_disable_auto_commit_on_view_upgrade/1
- ]
- }
- }.
-
-multiple_view_collators_test_() ->
- {
- "Test views with multiple collators",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch(),
- DbFiles = setup_collator_test1(),
- {Ctx, DbFiles}
- end,
- fun({Ctx, DbFiles}) ->
- teardown_legacy(DbFiles),
- test_util:stop_couch(Ctx)
- end,
- [
- fun can_read_views_with_old_collators/1,
- fun can_update_views_with_old_collators/1
- ]
- }
- }.
-
-autocompact_view_to_upgrade_collators_test_() ->
- {
- "Auto compactions triggered to update collators",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch([smoosh]),
- DbFiles = setup_collator_test1(),
- {Ctx, DbFiles}
- end,
- fun({Ctx, DbFiles}) ->
- teardown_legacy(DbFiles),
- test_util:stop_couch(Ctx)
- end,
- [
- fun view_collator_auto_upgrade_on_open/1,
- fun view_collator_auto_upgrade_on_update/1,
- fun view_collator_auto_upgrade_can_be_disabled/1
- ]
- }
- }.
-
-should_not_remember_docs_in_index_after_backup_restore(DbName) ->
- ?_test(begin
- %% COUCHDB-640
-
- ok = backup_db_file(DbName),
- create_doc(DbName, "doc666"),
-
- Rows0 = query_view(DbName, "foo", "bar"),
- ?assert(has_doc("doc1", Rows0)),
- ?assert(has_doc("doc2", Rows0)),
- ?assert(has_doc("doc3", Rows0)),
- ?assert(has_doc("doc666", Rows0)),
-
- ?assertEqual(ok, restore_backup_db_file(DbName)),
-
- Rows1 = query_view(DbName, "foo", "bar"),
- ?assert(has_doc("doc1", Rows1)),
- ?assert(has_doc("doc2", Rows1)),
- ?assert(has_doc("doc3", Rows1)),
- ?assertNot(has_doc("doc666", Rows1))
- end).
-
-should_upgrade_legacy_2x_view_files({DbName, Files}) ->
- ?_test(begin
- [_NewDbFilePath, OldViewFilePath, NewViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
-
- % ensure old header
- OldHeader = read_header(OldViewFilePath),
- ?assertEqual(6, tuple_size(OldHeader)),
- ?assertMatch(mrheader, element(1, OldHeader)),
-
- % query view for expected results
- Rows0 = query_view(DbName, "test", "test"),
- ?assertEqual(3, length(Rows0)),
-
- % ensure old file gone
- ?assertNot(filelib:is_regular(OldViewFilePath)),
-
- % add doc to trigger update
- DocUrl = db_url(DbName) ++ "/bar",
- {ok, _, _, _} = test_request:put(
- DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>
- ),
-
- % query view for expected results
- Rows1 = query_view(DbName, "test", "test"),
- ?assertEqual(4, length(Rows1)),
-
- % ensure new header
-
-        % have to wait for a while to upgrade the index
- wait_mrheader_record(NewViewFilePath),
- NewHeader = read_header(NewViewFilePath),
- ?assertMatch(#mrheader{}, NewHeader),
-
- % assert that 2.x header was upgraded with a view_info map
- ViewInfo = NewHeader#mrheader.view_info,
- ?assert(is_map(ViewInfo)),
- Ver = tuple_to_list(couch_ejson_compare:get_collator_version()),
- ?assertMatch(#{ucol_vs := [Ver]}, ViewInfo),
-
- NewViewStatus = hd(NewHeader#mrheader.view_states),
- ?assertEqual(5, tuple_size(NewViewStatus))
- end).
-
-should_upgrade_legacy_3_2_1_view_files({_, {DbName, Files}}) ->
- ?_test(begin
- [_NewDbFilePath, OldViewFilePath, NewViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
-
-        % preliminary assert that we expect the view signature and view file names
- % to stay exactly the same
- ?assertEqual(OldViewFilePath, NewViewFilePath),
-
- % ensure old header
- OldHeader = read_header(OldViewFilePath),
- ?assertEqual(5, tuple_size(OldHeader)),
- ?assertMatch(mrheader, element(1, OldHeader)),
-
- % query view for expected results
- Rows0 = query_view(DbName, "ddoc321", "view321"),
- ?assertEqual(2, length(Rows0)),
-
-        % have to wait for a while for the index header to be written;
-        % with [view_upgrade] commit_on_header_upgrade the commit should happen after open
- wait_mrheader_record(NewViewFilePath),
- NewHeader = read_header(NewViewFilePath),
- ?assertMatch(#mrheader{}, NewHeader),
-
- % assert that 3.2.1 header was upgraded with a view_info map
- ViewInfo = NewHeader#mrheader.view_info,
- ?assert(is_map(ViewInfo)),
- Ver = tuple_to_list(couch_ejson_compare:get_collator_version()),
- ?assertMatch(#{ucol_vs := [Ver]}, ViewInfo),
-
- NewViewStatus = hd(NewHeader#mrheader.view_states),
- ?assertEqual(5, tuple_size(NewViewStatus)),
-
- NewSig = get_signature(DbName, "ddoc321"),
- OldSig = filename:basename(OldViewFilePath, ".view"),
- ?assertEqual(OldSig, ?b2l(NewSig))
- end).
-
-can_disable_auto_commit_on_view_upgrade({_, {DbName, Files}}) ->
- ?_test(begin
- [_NewDbFilePath, OldViewFilePath, NewViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
- ok = config:set(
- "view_upgrade",
- "commit_on_header_upgrade",
- "false",
- false
- ),
-
-        % preliminary assert that we expect the view signature and view file names
- % to stay exactly the same
- ?assertEqual(OldViewFilePath, NewViewFilePath),
-
- % ensure old header
- OldHeader = read_header(OldViewFilePath),
- ?assertEqual(5, tuple_size(OldHeader)),
- ?assertMatch(mrheader, element(1, OldHeader)),
-
- % query view for expected results
- Rows0 = query_view(DbName, "ddoc321", "view321"),
- ?assertEqual(2, length(Rows0)),
-
- % ensure old header is still there after a query as we intend not to
- % auto-commit after header open
- AfterQueryHeader = read_header(NewViewFilePath),
- ?assertEqual(5, tuple_size(AfterQueryHeader)),
- ?assertMatch(mrheader, element(1, AfterQueryHeader)),
-
- % add 3 new documents
- create_docs(DbName),
-
- % query view for expected results
- Rows1 = query_view(DbName, "ddoc321", "view321"),
- ?assertEqual(5, length(Rows1)),
-
- % ensure old file is still there
- ?assert(filelib:is_regular(OldViewFilePath)),
-
- % ensure new header
-
-        % have to wait for a while to write to the index
- wait_mrheader_record(NewViewFilePath),
- NewHeader = read_header(NewViewFilePath),
- ?assertMatch(#mrheader{}, NewHeader),
-
- % assert that 3.2.1 header was upgraded with a view_info map
- ViewInfo = NewHeader#mrheader.view_info,
- ?assert(is_map(ViewInfo)),
- Ver = tuple_to_list(couch_ejson_compare:get_collator_version()),
- ?assertMatch(#{ucol_vs := [Ver]}, ViewInfo),
-
- NewViewStatus = hd(NewHeader#mrheader.view_states),
- ?assertEqual(5, tuple_size(NewViewStatus)),
-
- NewSig = get_signature(DbName, "ddoc321"),
- OldSig = filename:basename(OldViewFilePath, ".view"),
- ?assertEqual(OldSig, ?b2l(NewSig))
- end).
-
-can_read_views_with_old_collators({_, {DbName, Files}}) ->
- ?_test(begin
- [_NewDbFilePath, ViewFilePath, ViewFilePath] = Files,
-
- % check that there is an old (bogus) collator version
- Header1 = read_header(ViewFilePath),
- ViewInfo1 = Header1#mrheader.view_info,
- ?assert(is_map(ViewInfo1)),
- ?assertMatch(#{ucol_vs := [?OLD_COLLATOR_VERSION]}, ViewInfo1),
-
- % view query works with the old collator version
- Rows0 = query_view(DbName, "colltest1ddoc", "colltest1view"),
- ?assertEqual(2, length(Rows0))
- end).
-
-can_update_views_with_old_collators({_, {DbName, Files}}) ->
- ?_test(begin
- [_NewDbFilePath, ViewFilePath, ViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
-
- % check that there is an old (bogus) collator version
- Header1 = read_header(ViewFilePath),
- ViewInfo1 = Header1#mrheader.view_info,
- ?assert(is_map(ViewInfo1)),
- ?assertMatch(#{ucol_vs := [?OLD_COLLATOR_VERSION]}, ViewInfo1),
-
- create_docs(DbName),
- Rows1 = query_view(DbName, "colltest1ddoc", "colltest1view"),
- ?assertEqual(5, length(Rows1)),
-
- % ensure old view file is still there
- ?assert(filelib:is_regular(ViewFilePath)),
-
- % should have two collator versions
- CurVer = tuple_to_list(couch_ejson_compare:get_collator_version()),
- ExpVersions = [?OLD_COLLATOR_VERSION, CurVer],
- ok = wait_collator_versions(ExpVersions, ViewFilePath),
- Header2 = read_header(ViewFilePath),
- ViewInfo2 = Header2#mrheader.view_info,
- ?assertMatch(#{ucol_vs := ExpVersions}, ViewInfo2)
- end).
-
-view_collator_auto_upgrade_on_open({_, {DbName, Files}}) ->
- ?_test(begin
- [_NewDbFilePath, ViewFilePath, ViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
-
- % quick sanity check the test setup
- Header1 = read_header(ViewFilePath),
- ViewInfo1 = Header1#mrheader.view_info,
- ?assertMatch(#{ucol_vs := [?OLD_COLLATOR_VERSION]}, ViewInfo1),
-
- % make sure smoosh is active
- smoosh:resume(),
-
- % query the view
- Rows = query_view(DbName, "colltest1ddoc", "colltest1view"),
- ?assertEqual(2, length(Rows)),
-
- CurVer = tuple_to_list(couch_ejson_compare:get_collator_version()),
- wait_collator_versions([CurVer], ViewFilePath),
- Header2 = read_header(ViewFilePath),
- ViewInfo2 = Header2#mrheader.view_info,
- ?assertMatch(#{ucol_vs := [CurVer]}, ViewInfo2),
-
- % query the view again
- ?assertEqual(Rows, query_view(DbName, "colltest1ddoc", "colltest1view"))
- end).
-
-view_collator_auto_upgrade_on_update({_, {DbName, Files}}) ->
- ?_test(begin
- [_NewDbFilePath, ViewFilePath, ViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
-
- % quick sanity check the test setup
- Header1 = read_header(ViewFilePath),
- ViewInfo1 = Header1#mrheader.view_info,
- ?assertMatch(#{ucol_vs := [?OLD_COLLATOR_VERSION]}, ViewInfo1),
-
- % stop smoosh so the open/read trigger doesn't fire
- application:stop(smoosh),
-
-        % open the view now so that, once smoosh starts, the open event
-        % won't trigger the auto-upgrade
- Rows0 = query_view(DbName, "colltest1ddoc", "colltest1view"),
- ?assertEqual(2, length(Rows0)),
-
- % update the db
- create_docs(DbName),
-
- % start smoosh
- application:start(smoosh),
- smoosh:resume(),
-
- % query the view to trigger an index commit event
- Rows1 = query_view(DbName, "colltest1ddoc", "colltest1view"),
- ?assertEqual(5, length(Rows1)),
-
- CurVer = tuple_to_list(couch_ejson_compare:get_collator_version()),
- wait_collator_versions([CurVer], ViewFilePath),
- Header2 = read_header(ViewFilePath),
- ViewInfo2 = Header2#mrheader.view_info,
- ?assertMatch(#{ucol_vs := [CurVer]}, ViewInfo2)
- end).
-
-view_collator_auto_upgrade_can_be_disabled({_, {DbName, Files}}) ->
- ?_test(begin
- [_NewDbFilePath, ViewFilePath, ViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
- ok = config:set(
- "view_upgrade",
- "compact_on_collator_upgrade",
- "false",
- false
- ),
-
- % quick sanity check the test setup
- Header1 = read_header(ViewFilePath),
- ViewInfo1 = Header1#mrheader.view_info,
- ?assertMatch(#{ucol_vs := [?OLD_COLLATOR_VERSION]}, ViewInfo1),
-
- % activate smoosh
- smoosh:resume(),
-
- % query the view
- Rows0 = query_view(DbName, "colltest1ddoc", "colltest1view"),
- ?assertEqual(2, length(Rows0)),
-
- % update the db and query again to trigger an index commit
- create_docs(DbName),
- Rows1 = query_view(DbName, "colltest1ddoc", "colltest1view"),
- ?assertEqual(5, length(Rows1)),
-
-        % View header keeps the old collator version (no auto-compaction)
- CurVer = tuple_to_list(couch_ejson_compare:get_collator_version()),
- ExpVersions = [?OLD_COLLATOR_VERSION, CurVer],
- wait_collator_versions(ExpVersions, ViewFilePath),
- Header2 = read_header(ViewFilePath),
- ViewInfo2 = Header2#mrheader.view_info,
- ?assertMatch(#{ucol_vs := ExpVersions}, ViewInfo2)
- end).
-
-should_have_two_indexes_alive_before_deletion({DbName, _}) ->
- view_cleanup(DbName),
- ?_assertEqual(2, count_index_files(DbName)).
-
-should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) ->
- delete_design_doc(DbName, <<"_design/foo">>, FooRev),
- view_cleanup(DbName),
- ?_assertEqual(1, count_index_files(DbName)).
-
-should_cleanup_all_index_files({DbName, {FooRev, BooRev}}) ->
- delete_design_doc(DbName, <<"_design/foo">>, FooRev),
- delete_design_doc(DbName, <<"_design/boo">>, BooRev),
- view_cleanup(DbName),
- ?_assertEqual(0, count_index_files(DbName)).
-
-couchdb_1138(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>
- ),
- ?assert(is_pid(IndexerPid)),
- ?assert(is_process_alive(IndexerPid)),
- ?assertEqual(2, count_users(DbName)),
-
- wait_indexer(IndexerPid),
-
- Rows0 = query_view(DbName, "foo", "bar"),
- ?assertEqual(3, length(Rows0)),
- ?assertEqual(2, count_users(DbName)),
- ?assert(is_process_alive(IndexerPid)),
-
- create_doc(DbName, "doc1000"),
- Rows1 = query_view(DbName, "foo", "bar"),
- ?assertEqual(4, length(Rows1)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- compact_db(DbName),
- ?assert(is_process_alive(IndexerPid)),
-
- compact_view_group(DbName, "foo"),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- create_doc(DbName, "doc1001"),
- Rows2 = query_view(DbName, "foo", "bar"),
- ?assertEqual(5, length(Rows2)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid))
- end).
-
-couchdb_1309(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>
- ),
- ?assert(is_pid(IndexerPid)),
- ?assert(is_process_alive(IndexerPid)),
- ?assertEqual(2, count_users(DbName)),
-
- wait_indexer(IndexerPid),
-
- create_doc(DbName, "doc1001"),
- Rows0 = query_view(DbName, "foo", "bar"),
- check_rows_value(Rows0, null),
- ?assertEqual(4, length(Rows0)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- update_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- {ok, NewIndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>
- ),
- ?assert(is_pid(NewIndexerPid)),
- ?assert(is_process_alive(NewIndexerPid)),
- ?assertNotEqual(IndexerPid, NewIndexerPid),
- UserCnt =
- case count_users(DbName) of
- N when N > 2 ->
- timer:sleep(1000),
- count_users(DbName);
- N ->
- N
- end,
- ?assertEqual(2, UserCnt),
-
- Rows1 = query_view(DbName, "foo", "bar", ok),
- ?assertEqual(0, length(Rows1)),
- Rows2 = query_view(DbName, "foo", "bar"),
- check_rows_value(Rows2, 1),
- ?assertEqual(4, length(Rows2)),
-
- %% FIXME we need to grab monitor earlier
- ok = stop_indexer(
- fun() -> ok end,
- IndexerPid,
- ?LINE,
- "old view group is not dead after ddoc update"
- ),
-
- ok = stop_indexer(
- fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
- NewIndexerPid,
- ?LINE,
- "new view group did not die after DB deletion"
- )
- end).
-
-couchdb_1283() ->
- ?_test(begin
- ok = config:set("couchdb", "max_dbs_open", "3", false),
-
- {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>,
- {[
- {<<"foo">>,
- {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo2">>,
- {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo3">>,
- {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo4">>,
- {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo5">>,
- {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}}
- ]}}
- ]}
- ),
- {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
- ok = populate_db(MDb1, 100, 100),
- query_view(couch_db:name(MDb1), "foo", "foo"),
- ok = couch_db:close(MDb1),
-
- {ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>
- ),
-
-        % Start and pause compaction
- WaitRef = erlang:make_ref(),
- meck:expect(couch_mrview_index, compact, fun(Db, State, Opts) ->
- receive
- {WaitRef, From, init} -> ok
- end,
- From ! {WaitRef, inited},
- receive
- {WaitRef, go} -> ok
- end,
- meck:passthrough([Db, State, Opts])
- end),
-
- {ok, CPid} = gen_server:call(Pid, compact),
- CRef = erlang:monitor(process, CPid),
- ?assert(is_process_alive(CPid)),
-
- % Make sure that our compactor is waiting for us
- % before we continue our assertions
- CPid ! {WaitRef, self(), init},
- receive
- {WaitRef, inited} -> ok
- end,
-
- % Make sure that a compaction process takes a monitor
- % on the database's main_pid
- ?assertEqual(true, lists:member(CPid, couch_db:monitored_by(MDb1))),
-
-        % Finish compaction and make sure the monitor
- % disappears
- CPid ! {WaitRef, go},
- wait_for_process_shutdown(
- CRef,
- normal,
- {reason, "Failure compacting view group"}
- ),
-
- % Make sure that the monitor was removed
- ?assertEqual(false, lists:member(CPid, couch_db:monitored_by(MDb1)))
- end).
-
-wait_for_process_shutdown(Pid, ExpectedReason, Error) ->
- receive
- {'DOWN', Pid, process, _, Reason} ->
- ?assertEqual(ExpectedReason, Reason)
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [{module, ?MODULE}, {line, ?LINE}, Error]}
- )
- end.
-
-create_doc(DbName, DocId) when is_list(DocId) ->
- create_doc(DbName, ?l2b(DocId));
-create_doc(DbName, DocId) when is_binary(DocId) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc666 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DocId},
- {<<"value">>, 999}
- ]}
- ),
- {ok, _} = couch_db:update_docs(Db, [Doc666]),
- couch_db:close(Db).
-
-create_docs(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
- ]}
- ),
- Doc2 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
- ]}
- ),
- Doc3 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
- ]}
- ),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
- couch_db:close(Db).
-
-populate_db(Db, BatchSize, N) when N > 0 ->
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, couch_uuids:new()},
- {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
- ]}
- )
- end,
- lists:seq(1, BatchSize)
- ),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_db(Db, BatchSize, N - length(Docs));
-populate_db(_Db, _, _) ->
- ok.
-
-create_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>,
- {[
- {ViewName,
- {[
- {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
- ]}}
- ]}}
- ]}
- ),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-update_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_CTX]),
- {Props} = couch_doc:to_json_obj(Doc, []),
- Rev = couch_util:get_value(<<"_rev">>, Props),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDName},
- {<<"_rev">>, Rev},
- {<<"language">>, <<"javascript">>},
- {<<"views">>,
- {[
- {ViewName,
- {[
- {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
- ]}}
- ]}}
- ]}
- ),
- {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_CTX]),
- couch_db:close(Db),
- NewRev.
-
-delete_design_doc(DbName, DDName, Rev) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDName},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}
- ),
- {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
- couch_db:close(Db).
-
-db_url(DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
-
-query_view(DbName, DDoc, View) ->
- query_view(DbName, DDoc, View, false).
-
-query_view(DbName, DDoc, View, Stale) ->
- {ok, Code, _Headers, Body} = test_request:get(
- db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View ++
- case Stale of
- false -> [];
- _ -> "?stale=" ++ atom_to_list(Stale)
- end
- ),
- ?assertEqual(200, Code),
- {Props} = jiffy:decode(Body),
- couch_util:get_value(<<"rows">>, Props, []).
-
-get_signature(DbName, DDoc) ->
- Url = db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_info",
- {ok, Code, _Headers, Body} = test_request:get(Url),
- ?assertEqual(200, Code),
- MapBody = jiffy:decode(Body, [return_maps]),
- #{<<"view_index">> := #{<<"signature">> := Sig}} = MapBody,
- Sig.
-
-check_rows_value(Rows, Value) ->
- lists:foreach(
- fun({Row}) ->
- ?assertEqual(Value, couch_util:get_value(<<"value">>, Row))
- end,
- Rows
- ).
-
-view_cleanup(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- couch_mrview:cleanup(Db),
- couch_db:close(Db).
-
-count_users(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- DbPid = couch_db:get_pid(Db),
- {monitored_by, Monitors0} = process_info(DbPid, monitored_by),
- Monitors = lists:filter(fun is_pid/1, Monitors0),
- CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined],
- ok = couch_db:close(Db),
- length(lists:usort(Monitors) -- [self() | CouchFiles]).
-
-count_index_files(DbName) ->
-    % scan the view index directory for this db's index files
- RootDir = config:get("couchdb", "view_index_dir"),
- length(
- filelib:wildcard(
- RootDir ++ "/." ++
- binary_to_list(DbName) ++ "_design" ++ "/mrview/*"
- )
- ).
-
-has_doc(DocId1, Rows) ->
- DocId = iolist_to_binary(DocId1),
- lists:any(fun({R}) -> lists:member({<<"id">>, DocId}, R) end, Rows).
-
-backup_db_file(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- SrcPath = couch_db:get_filepath(Db),
- Src =
- if
- is_list(SrcPath) -> SrcPath;
- true -> binary_to_list(SrcPath)
- end,
- ok = copy_tree(Src, Src ++ ".backup")
- after
- couch_db:close(Db)
- end.
-
-restore_backup_db_file(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Src = couch_db:get_filepath(Db),
- ok = couch_db:close(Db),
- DbPid = couch_db:get_pid(Db),
- exit(DbPid, shutdown),
- ok = copy_tree(Src ++ ".backup", Src),
-
- test_util:wait(
- fun() ->
- case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
- {ok, WaitDb} ->
- case couch_db:get_pid(WaitDb) == DbPid of
- true -> wait;
- false -> ok
- end;
- Else ->
- Else
- end
- end,
- ?TIMEOUT,
- ?DELAY
- ).
-
-compact_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = couch_db:start_compact(Db),
- ok = couch_db:close(Db),
- wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
-
-wait_db_compact_done(_DbName, 0) ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}
- ]}
- );
-wait_db_compact_done(DbName, N) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- ok = couch_db:close(Db),
- CompactorPid = couch_db:get_compactor_pid(Db),
- case is_pid(CompactorPid) of
- false ->
- ok;
- true ->
- ok = timer:sleep(?DELAY),
- wait_db_compact_done(DbName, N - 1)
- end.
-
-compact_view_group(DbName, DDocId) when is_list(DDocId) ->
- compact_view_group(DbName, ?l2b("_design/" ++ DDocId));
-compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
- ok = couch_mrview:compact(DbName, DDocId),
- wait_view_compact_done(DbName, DDocId, 10).
-
-wait_view_compact_done(_DbName, _DDocId, 0) ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}
- ]}
- );
-wait_view_compact_done(DbName, DDocId, N) ->
- {ok, Code, _Headers, Body} = test_request:get(
- db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"
- ),
- ?assertEqual(200, Code),
- {Info} = jiffy:decode(Body),
- {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
- CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo),
- case CompactRunning of
- false ->
- ok;
- true ->
- ok = timer:sleep(?DELAY),
- wait_view_compact_done(DbName, DDocId, N - 1)
- end.
-
-read_header(File) ->
- {ok, Fd} = couch_file:open(File),
- {ok, {_Sig, Header}} = couch_file:read_header(Fd),
- couch_file:close(Fd),
- Header.
-
-stop_indexer(StopFun, Pid, Line, Reason) ->
- case test_util:stop_sync(Pid, StopFun) of
- timeout ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, Line},
- {reason, Reason}
- ]}
- );
- ok ->
- ok
- end.
-
-wait_indexer(IndexerPid) ->
- test_util:wait(fun() ->
- {ok, Info} = couch_index:get_info(IndexerPid),
- case couch_util:get_value(compact_running, Info) of
- true ->
- wait;
- false ->
- ok
- end
- end).
-
-copy_tree(Src, Dst) ->
- case filelib:is_dir(Src) of
- true ->
- {ok, Files} = file:list_dir(Src),
- copy_tree(Files, Src, Dst);
- false ->
- ok = filelib:ensure_dir(Dst),
- {ok, _} = file:copy(Src, Dst),
- ok
- end.
-
-copy_tree([], _Src, _Dst) ->
- ok;
-copy_tree([File | Rest], Src, Dst) ->
- FullSrc = filename:join(Src, File),
- FullDst = filename:join(Dst, File),
- ok = copy_tree(FullSrc, FullDst),
- copy_tree(Rest, Src, Dst).
-
-wait_mrheader_record(File) ->
- wait_mrheader_record(File, ?HEADER_WRITE_WAIT_TIMEOUT).
-
-wait_mrheader_record(File, TimeoutMSec) ->
- WaitFun = fun() ->
- try read_header(File) of
- #mrheader{} -> ok;
- _Other -> wait
- catch
- _:_ -> wait
- end
- end,
- test_util:wait(WaitFun, TimeoutMSec, 200).
-
-wait_collator_versions(Vers, File) ->
- wait_collator_versions(Vers, File, ?HEADER_WRITE_WAIT_TIMEOUT).
-
-wait_collator_versions(Vers, File, TimeoutMSec) ->
- WaitFun = fun() ->
- try read_header(File) of
- #mrheader{view_info = #{ucol_vs := Vers}} ->
- ok;
- _Other ->
- wait
- catch
- _:_ ->
- wait
- end
- end,
- test_util:wait(WaitFun, TimeoutMSec, 200).
diff --git a/src/couch/test/eunit/fixtures/15a5cb17365a99cd9ddc7327c82bbd0d.view b/src/couch/test/eunit/fixtures/15a5cb17365a99cd9ddc7327c82bbd0d.view
deleted file mode 100644
index 33cbd401f..000000000
--- a/src/couch/test/eunit/fixtures/15a5cb17365a99cd9ddc7327c82bbd0d.view
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/1f2c24bc334d701c2048f85e7438eef1.view b/src/couch/test/eunit/fixtures/1f2c24bc334d701c2048f85e7438eef1.view
deleted file mode 100644
index 25d19d746..000000000
--- a/src/couch/test/eunit/fixtures/1f2c24bc334d701c2048f85e7438eef1.view
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/6cf2c2f766f87b618edf6630b00f8736.view b/src/couch/test/eunit/fixtures/6cf2c2f766f87b618edf6630b00f8736.view
deleted file mode 100644
index a5668eeaa..000000000
--- a/src/couch/test/eunit/fixtures/6cf2c2f766f87b618edf6630b00f8736.view
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/colltest1.couch b/src/couch/test/eunit/fixtures/colltest1.couch
deleted file mode 100644
index d42a06630..000000000
--- a/src/couch/test/eunit/fixtures/colltest1.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg b/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg
deleted file mode 100644
index 30e475da8..000000000
--- a/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg
+++ /dev/null
@@ -1,19 +0,0 @@
-% Licensed to the Apache Software Foundation (ASF) under one
-% or more contributor license agreements. See the NOTICE file
-% distributed with this work for additional information
-% regarding copyright ownership. The ASF licenses this file
-% to you under the Apache License, Version 2.0 (the
-% "License"); you may not use this file except in compliance
-% with the License. You may obtain a copy of the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing,
-% software distributed under the License is distributed on an
-% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-% KIND, either express or implied. See the License for the
-% specific language governing permissions and limitations
-% under the License.
-
-{testing, stuff, "yay description"}.
-{number, '11', "randomosity"}.
diff --git a/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini b/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini
deleted file mode 100644
index cc5cd2187..000000000
--- a/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-; Licensed to the Apache Software Foundation (ASF) under one
-; or more contributor license agreements. See the NOTICE file
-; distributed with this work for additional information
-; regarding copyright ownership. The ASF licenses this file
-; to you under the Apache License, Version 2.0 (the
-; "License"); you may not use this file except in compliance
-; with the License. You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing,
-; software distributed under the License is distributed on an
-; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-; KIND, either express or implied. See the License for the
-; specific language governing permissions and limitations
-; under the License.
-
-[stats]
-rate = 10000000 ; We call collect_sample in testing
-samples = [0, 1]
diff --git a/src/couch/test/eunit/fixtures/db321.couch b/src/couch/test/eunit/fixtures/db321.couch
deleted file mode 100644
index 7db9cac17..000000000
--- a/src/couch/test/eunit/fixtures/db321.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_non_partitioned.couch b/src/couch/test/eunit/fixtures/db_non_partitioned.couch
deleted file mode 100644
index 327d9bb5d..000000000
--- a/src/couch/test/eunit/fixtures/db_non_partitioned.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch
deleted file mode 100644
index b0d39c9ec..000000000
--- a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch
deleted file mode 100644
index b584fce31..000000000
--- a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch
deleted file mode 100644
index ee4e11b7f..000000000
--- a/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch
deleted file mode 100644
index 814feb8e1..000000000
--- a/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch
deleted file mode 100644
index cab8331db..000000000
--- a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch
deleted file mode 100644
index b613646b1..000000000
--- a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch
deleted file mode 100644
index 126fc919e..000000000
--- a/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch
deleted file mode 100644
index 762dc8dad..000000000
--- a/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/logo.png b/src/couch/test/eunit/fixtures/logo.png
deleted file mode 100644
index d21ac025b..000000000
--- a/src/couch/test/eunit/fixtures/logo.png
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/fixtures/multipart.http b/src/couch/test/eunit/fixtures/multipart.http
deleted file mode 100644
index fe9f271cc..000000000
--- a/src/couch/test/eunit/fixtures/multipart.http
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "_id": "our document goes here"
-}
-
---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~
-Content-Type: application/json
-
-{"value":0,"_id":"doc0","_rev":"1-7e97409c987eac3a99385a17ad4cbabe","_attachments":{"plus1":{"stub":false,"follows":true,"content_type":"application/json","length":14}},".cache":{"plus1":{"timestamp":"2012-08-13T13:59:27.826Z"}}}
---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~
-
-{"value":"01"}
---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~--
-
diff --git a/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh b/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh
deleted file mode 100644
index 345c8b40b..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh -e
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-# Please do not make this file executable as that's the error being tested.
-
-sleep 5
diff --git a/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh b/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh
deleted file mode 100755
index 5bc10e83f..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh -e
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-sleep 2
diff --git a/src/couch/test/eunit/fixtures/os_daemon_configer.escript b/src/couch/test/eunit/fixtures/os_daemon_configer.escript
deleted file mode 100755
index f146b8314..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_configer.escript
+++ /dev/null
@@ -1,97 +0,0 @@
-#! /usr/bin/env escript
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include("../../include/couch_eunit.hrl").
-
-read() ->
- case io:get_line('') of
- eof ->
- stop;
- Data ->
- jiffy:decode(Data)
- end.
-
-write(Mesg) ->
- Data = iolist_to_binary(jiffy:encode(Mesg)),
- io:format(binary_to_list(Data) ++ "\n", []).
-
-get_cfg(Section) ->
- write([<<"get">>, Section]),
- read().
-
-get_cfg(Section, Name) ->
- write([<<"get">>, Section, Name]),
- read().
-
-log(Mesg) ->
- write([<<"log">>, Mesg]).
-
-log(Mesg, Level) ->
- write([<<"log">>, Mesg, {[{<<"level">>, Level}]}]).
-
-test_get_cfg1() ->
- Path = list_to_binary(?FILE),
- FileName = list_to_binary(filename:basename(?FILE)),
- {[{FileName, Path}]} = get_cfg(<<"os_daemons">>).
-
-test_get_cfg2() ->
- Path = list_to_binary(?FILE),
- FileName = list_to_binary(filename:basename(?FILE)),
- Path = get_cfg(<<"os_daemons">>, FileName),
- <<"sequential">> = get_cfg(<<"uuids">>, <<"algorithm">>).
-
-
-test_get_unknown_cfg() ->
- {[]} = get_cfg(<<"aal;3p4">>),
- null = get_cfg(<<"aal;3p4">>, <<"313234kjhsdfl">>).
-
-test_log() ->
- log(<<"foobar!">>),
- log(<<"some stuff!">>, <<"debug">>),
- log(2),
- log(true),
- write([<<"log">>, <<"stuff">>, 2]),
- write([<<"log">>, 3, null]),
- write([<<"log">>, [1, 2], {[{<<"level">>, <<"debug">>}]}]),
- write([<<"log">>, <<"true">>, {[]}]).
-
-do_tests() ->
- test_get_cfg1(),
- test_get_cfg2(),
- test_get_unknown_cfg(),
- test_log(),
- loop(io:read("")).
-
-loop({ok, _}) ->
- loop(io:read(""));
-loop(eof) ->
- init:stop();
-loop({error, _Reason}) ->
- init:stop().
-
-main([]) ->
- init_code_path(),
- do_tests().
-
-init_code_path() ->
- Paths = [
- "couchdb",
- "jiffy",
- "ibrowse",
- "mochiweb",
- "snappy"
- ],
- lists:foreach(fun(Name) ->
- code:add_patha(filename:join([?BUILDDIR(), "src", Name, "ebin"]))
- end, Paths).
diff --git a/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh b/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh
deleted file mode 100755
index 256ee7935..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh -e
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-exit 1
diff --git a/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh b/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh
deleted file mode 100755
index f5a13684e..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh -e
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-sleep 1
diff --git a/src/couch/test/eunit/fixtures/os_daemon_looper.escript b/src/couch/test/eunit/fixtures/os_daemon_looper.escript
deleted file mode 100755
index 73974e905..000000000
--- a/src/couch/test/eunit/fixtures/os_daemon_looper.escript
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /usr/bin/env escript
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-loop() ->
- loop(io:read("")).
-
-loop({ok, _}) ->
- loop(io:read(""));
-loop(eof) ->
- stop;
-loop({error, Reason}) ->
- throw({error, Reason}).
-
-main([]) ->
- loop().
diff --git a/src/couch/test/eunit/fixtures/test.couch b/src/couch/test/eunit/fixtures/test.couch
deleted file mode 100644
index 5347a222f..000000000
--- a/src/couch/test/eunit/fixtures/test.couch
+++ /dev/null
Binary files differ
diff --git a/src/couch/test/eunit/global_changes_tests.erl b/src/couch/test/eunit/global_changes_tests.erl
deleted file mode 100644
index 92964bb74..000000000
--- a/src/couch/test/eunit/global_changes_tests.erl
+++ /dev/null
@@ -1,164 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
-setup() ->
- Host = get_host(),
- ok = add_admin(?USER, ?PASS),
- DbName = "foo/" ++ ?b2l(?tempdb()),
- ok = http_create_db(DbName),
- {Host, DbName}.
-
-teardown({_, DbName}) ->
- ok = http_delete_db(DbName),
- delete_admin(?USER),
- ok.
-
-http_create_db(Name) ->
- {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""),
- true = lists:member(Status, [201, 202]),
- ok.
-
-http_delete_db(Name) ->
- {ok, Status, _, _} = test_request:delete(db_url(Name), [?AUTH]),
- true = lists:member(Status, [200, 202]),
- ok.
-
-db_url(Name) ->
- get_host() ++ "/" ++ escape(Name).
-
-start_couch() ->
- Ctx = test_util:start_couch([chttpd, global_changes]),
- ok = ensure_db_exists("_global_changes"),
- Ctx.
-
-ensure_db_exists(Name) ->
- case fabric:create_db(Name) of
- ok ->
- ok;
- {error, file_exists} ->
- ok
- end.
-
-global_changes_test_() ->
- {
- "Checking global_changes endpoint",
- {
- setup,
- fun start_couch/0,
- fun test_util:stop/1,
- [
- check_response()
- ]
- }
- }.
-
-check_response() ->
- {
- "Check response",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_return_correct_response_on_create/1,
- fun should_return_correct_response_on_update/1
- ]
- }
- }.
-
-should_return_correct_response_on_create({Host, DbName}) ->
- ?_test(begin
- Headers = [?AUTH],
- create_doc(Host, DbName, "bar/baz"),
- {Status, Events} = request_updates(Host, DbName, Headers),
- ?assertEqual(200, Status),
- ?assertEqual([<<"created">>, <<"updated">>], Events)
- end).
-
-should_return_correct_response_on_update({Host, DbName}) ->
- ?_test(begin
- Headers = [?AUTH],
- create_doc(Host, DbName, "bar/baz"),
- update_doc(Host, DbName, "bar/baz", "new_value"),
- {Status, Events} = request_updates(Host, DbName, Headers),
- ?assertEqual(200, Status),
- ?assertEqual([<<"created">>, <<"updated">>], Events)
- end).
-
-create_doc(Host, DbName, Id) ->
- Headers = [?AUTH],
- Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
- Body = jiffy:encode(
- {[
- {key, "value"}
- ]}
- ),
- {ok, Status, _Headers, _Body} = test_request:put(Url, Headers, Body),
- ?assert(Status =:= 201 orelse Status =:= 202),
- timer:sleep(1000),
- ok.
-
-update_doc(Host, DbName, Id, Value) ->
- Headers = [?AUTH],
- Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
- {ok, 200, _Headers0, BinBody} = test_request:get(Url, Headers),
- [Rev] = decode_response(BinBody, [<<"_rev">>]),
- Body = jiffy:encode(
- {[
- {key, Value},
- {'_rev', Rev}
- ]}
- ),
- {ok, Status, _Headers1, _Body} = test_request:put(Url, Headers, Body),
- ?assert(Status =:= 201 orelse Status =:= 202),
- timer:sleep(1000),
- ok.
-
-request_updates(Host, DbName, Headers) ->
- Url = Host ++ "/_db_updates",
- {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers),
- [Results] = decode_response(BinBody, [<<"results">>]),
- ToDecode = [<<"db_name">>, <<"type">>],
- Values = [decode_result(Result, ToDecode) || Result <- Results],
- Result = [Type || [DB, Type] <- Values, DB == ?l2b(DbName)],
- {Status, lists:sort(Result)}.
-
-decode_result({Props}, ToDecode) ->
- [couch_util:get_value(Key, Props) || Key <- ToDecode].
-
-decode_response(BinBody, ToDecode) ->
- {Body} = jiffy:decode(BinBody),
- [couch_util:get_value(Key, Body) || Key <- ToDecode].
-
-add_admin(User, Pass) ->
- Hashed = couch_passwords:hash_admin_password(Pass),
- config:set("admins", User, ?b2l(Hashed), _Persist = false).
-
-delete_admin(User) ->
- config:delete("admins", User, false).
-
-get_host() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
-escape(Path) ->
- re:replace(Path, "/", "%2f", [global, {return, list}]).
diff --git a/src/couch/test/eunit/json_stream_parse_tests.erl b/src/couch/test/eunit/json_stream_parse_tests.erl
deleted file mode 100644
index ab26be725..000000000
--- a/src/couch/test/eunit/json_stream_parse_tests.erl
+++ /dev/null
@@ -1,157 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(json_stream_parse_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(CASES, [
- {1, "1", "integer numeric literial"},
-    % text representation may be truncated or carry trailing zeroes
- {3.1416, "3.14160", "float numeric literal"},
- {-1, "-1", "negative integer numeric literal"},
- {-3.1416, "-3.14160", "negative float numeric literal"},
- {12.0e10, "1.20000e+11", "float literal in scientific notation"},
- {1.234E+10, "1.23400e+10", "another float literal in scientific notation"},
- {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"},
- {10.0, "1.0e+01", "yet another float literal in scientific notation"},
- {123.456, "1.23456E+2", "yet another float literal in scientific notation"},
- {10.0, "1e1", "yet another float literal in scientific notation"},
- {<<"foo">>, "\"foo\"", "string literal"},
- {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"},
- {<<"">>, "\"\"", "empty string literal"},
- {<<"\n\n\n">>, "\"\\n\\n\\n\"", "only new lines literal"},
- {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"", "only white spaces string literal"},
- {null, "null", "null literal"},
- {true, "true", "true literal"},
- {false, "false", "false literal"},
- {<<"null">>, "\"null\"", "null string literal"},
- {<<"true">>, "\"true\"", "true string literal"},
- {<<"false">>, "\"false\"", "false string literal"},
- {{[]}, "{}", "empty object literal"},
- {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}", "simple object literal"},
- {
- {[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]},
- "{\"foo\":\"bar\",\"baz\":123}",
- "another simple object literal"
- },
- {[], "[]", "empty array literal"},
- {[[]], "[[]]", "empty array literal inside a single element array literal"},
- {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"},
- {[1199344435545.0, 1], "[1199344435545.0,1]", "another simple non-empty array literal"},
- {[false, true, 321, null], "[false, true, 321, null]", "array of literals"},
- {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}", "object literal with an array valued property"},
- {{[{<<"foo">>, {[{<<"bar">>, true}]}}]}, "{\"foo\":{\"bar\":true}}", "nested object literal"},
- {
- {[
- {<<"foo">>, []},
- {<<"bar">>, {[{<<"baz">>, true}]}},
- {<<"alice">>, <<"bob">>}
- ]},
- "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}",
- "complex object literal"
- },
- {
- [-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
- "[-123,\"foo\",{\"bar\":[]},null]",
- "complex array literal"
- }
-]).
-
-raw_json_input_test_() ->
- Tests = lists:map(
- fun({EJson, JsonString, Desc}) ->
- {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
- end,
- ?CASES
- ),
- {"Tests with raw JSON string as the input", Tests}.
-
-one_byte_data_fun_test_() ->
- Tests = lists:map(
- fun({EJson, JsonString, Desc}) ->
- DataFun = fun() -> single_byte_data_fun(JsonString) end,
- {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
- end,
- ?CASES
- ),
- {"Tests with a 1 byte output data function as the input", Tests}.
-
-test_multiple_bytes_data_fun_test_() ->
- Tests = lists:map(
- fun({EJson, JsonString, Desc}) ->
- DataFun = fun() -> multiple_bytes_data_fun(JsonString) end,
- {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
- end,
- ?CASES
- ),
- {"Tests with a multiple bytes output data function as the input", Tests}.
-
-%% Test for equivalence of Erlang terms.
-%% Due to arbitrary order of construction, equivalent objects might
-%% compare unequal as erlang terms, so we need to carefully recurse
-%% through aggregates (tuples and objects).
-equiv({Props1}, {Props2}) ->
- equiv_object(Props1, Props2);
-equiv(L1, L2) when is_list(L1), is_list(L2) ->
- equiv_list(L1, L2);
-equiv(N1, N2) when is_number(N1), is_number(N2) ->
- N1 == N2;
-equiv(B1, B2) when is_binary(B1), is_binary(B2) ->
- B1 == B2;
-equiv(true, true) ->
- true;
-equiv(false, false) ->
- true;
-equiv(null, null) ->
- true.
-
-%% Object representation and traversal order is unknown.
-%% Use the sledgehammer and sort property lists.
-equiv_object(Props1, Props2) ->
- L1 = lists:keysort(1, Props1),
- L2 = lists:keysort(1, Props2),
- Pairs = lists:zip(L1, L2),
- true = lists:all(
- fun({{K1, V1}, {K2, V2}}) ->
- equiv(K1, K2) andalso equiv(V1, V2)
- end,
- Pairs
- ).
-
-%% Recursively compare tuple elements for equivalence.
-equiv_list([], []) ->
- true;
-equiv_list([V1 | L1], [V2 | L2]) ->
- equiv(V1, V2) andalso equiv_list(L1, L2).
-
-single_byte_data_fun([]) ->
- done;
-single_byte_data_fun([H | T]) ->
- {<<H>>, fun() -> single_byte_data_fun(T) end}.
-
-multiple_bytes_data_fun([]) ->
- done;
-multiple_bytes_data_fun(L) ->
- N = couch_rand:uniform(7) - 1,
- {Part, Rest} = split(L, N),
- {list_to_binary(Part), fun() -> multiple_bytes_data_fun(Rest) end}.
-
-split(L, N) when length(L) =< N ->
- {L, []};
-split(L, N) ->
- take(N, L, []).
-
-take(0, L, Acc) ->
- {lists:reverse(Acc), L};
-take(N, [H | L], Acc) ->
- take(N - 1, L, [H | Acc]).
diff --git a/src/couch/test/eunit/test_web.erl b/src/couch/test/eunit/test_web.erl
deleted file mode 100644
index 8998dad52..000000000
--- a/src/couch/test/eunit/test_web.erl
+++ /dev/null
@@ -1,114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(test_web).
--behaviour(gen_server).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--export([start_link/0, stop/0, loop/1, get_port/0, set_assert/1, check_last/0]).
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--define(SERVER, test_web_server).
--define(HANDLER, test_web_handler).
--define(DELAY, 500).
-
-start_link() ->
- gen_server:start({local, ?HANDLER}, ?MODULE, [], []),
- mochiweb_http:start([
- {name, ?SERVER},
- {loop, {?MODULE, loop}},
- {port, 0}
- ]).
-
-loop(Req) ->
- %?debugFmt("Handling request: ~p", [Req]),
- case gen_server:call(?HANDLER, {check_request, Req}) of
- {ok, RespInfo} ->
- {ok, Req:respond(RespInfo)};
- {raw, {Status, Headers, BodyChunks}} ->
- Resp = Req:start_response({Status, Headers}),
- lists:foreach(fun(C) -> Resp:send(C) end, BodyChunks),
- erlang:put(mochiweb_request_force_close, true),
- {ok, Resp};
- {chunked, {Status, Headers, BodyChunks}} ->
- Resp = Req:respond({Status, Headers, chunked}),
- timer:sleep(?DELAY),
- lists:foreach(fun(C) -> Resp:write_chunk(C) end, BodyChunks),
- Resp:write_chunk([]),
- {ok, Resp};
- {error, Reason} ->
- ?debugFmt("Error: ~p", [Reason]),
- Body = lists:flatten(io_lib:format("Error: ~p", [Reason])),
- {ok, Req:respond({200, [], Body})}
- end.
-
-get_port() ->
- mochiweb_socket_server:get(?SERVER, port).
-
-set_assert(Fun) ->
- ?assertEqual(ok, gen_server:call(?HANDLER, {set_assert, Fun})).
-
-check_last() ->
- gen_server:call(?HANDLER, last_status).
-
-init(_) ->
- {ok, nil}.
-
-terminate(_Reason, _State) ->
- ok.
-
-stop() ->
- mochiweb_http:stop(?SERVER).
-
-handle_call({check_request, Req}, _From, State) when is_function(State, 1) ->
- Resp2 =
- case (catch State(Req)) of
- {ok, Resp} ->
- {reply, {ok, Resp}, was_ok};
- {raw, Resp} ->
- {reply, {raw, Resp}, was_ok};
- {chunked, Resp} ->
- {reply, {chunked, Resp}, was_ok};
- Error ->
- {reply, {error, Error}, not_ok}
- end,
- Req:cleanup(),
- Resp2;
-handle_call({check_request, _Req}, _From, _State) ->
- {reply, {error, no_assert_function}, not_ok};
-handle_call(last_status, _From, State) when is_atom(State) ->
- {reply, State, nil};
-handle_call(last_status, _From, State) ->
- {reply, {error, not_checked}, State};
-handle_call({set_assert, Fun}, _From, nil) ->
- {reply, ok, Fun};
-handle_call({set_assert, _}, _From, State) ->
- {reply, {error, assert_function_set}, State};
-handle_call(Msg, _From, State) ->
- {reply, {ignored, Msg}, State}.
-
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(Msg, State) ->
- ?debugFmt("Ignoring cast message: ~p", [Msg]),
- {noreply, State}.
-
-handle_info(Msg, State) ->
- ?debugFmt("Ignoring info message: ~p", [Msg]),
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch/test/exunit/couch_compress_tests.exs b/src/couch/test/exunit/couch_compress_tests.exs
deleted file mode 100644
index 5550e0661..000000000
--- a/src/couch/test/exunit/couch_compress_tests.exs
+++ /dev/null
@@ -1,113 +0,0 @@
-defmodule Couch.Test.CouchCompress do
- use Couch.Test.ExUnit.Case
- alias Couch.Test.Utils
-
- import Utils
-
- @term {[{:a, 1}, {:b, 2}, {:c, 3}, {:d, 4}, {:e, 5}]}
-
- @none <<131, 104, 1, 108, 0, 0, 0, 5, 104, 2, 100, 0, 1, 97, 97, 1, 104, 2, 100, 0, 1,
- 98, 97, 2, 104, 2, 100, 0, 1, 99, 97, 3, 104, 2, 100, 0, 1, 100, 97, 4, 104, 2,
- 100, 0, 1, 101, 97, 5, 106>>
-
- @deflate <<131, 80, 0, 0, 0, 48, 120, 218, 203, 96, 204, 97, 96, 96, 96, 205, 96, 74,
- 97, 96, 76, 76, 100, 4, 211, 73, 137, 76, 96, 58, 57, 145, 25, 76, 167, 36,
- 178, 128, 233, 212, 68, 214, 44, 0, 212, 169, 9, 51>>
-
- @snappy <<1, 49, 64, 131, 104, 1, 108, 0, 0, 0, 5, 104, 2, 100, 0, 1, 97, 97, 1, 104, 1,
- 8, 8, 98, 97, 2, 5, 8, 8, 99, 97, 3, 5, 8, 44, 100, 97, 4, 104, 2, 100, 0, 1,
- 101, 97, 5, 106>>
-
- @snappy_bigendian <<1, 49, 60, 131, 104, 1, 108, 0, 0, 0, 5, 104, 2, 100, 0, 1, 97, 97,
- 1, 5, 8, 8, 98, 97, 2, 5, 8, 8, 99, 97, 3, 5, 8, 44, 100, 97, 4,
- 104, 2, 100, 0, 1, 101, 97, 5, 106>>
-
- @corrupt <<2, 12, 85, 06>>
-
- describe "couch_compress" do
- test "compress" do
- assert @none === :couch_compress.compress(@term, :none)
- assert @none !== :couch_compress.compress(@term, {:deflate, 9})
- assert @none !== :couch_compress.compress(@term, :snappy)
-
- # assert that compressed output is smaller than uncompressed input
- assert bit_size(:couch_compress.compress(@term, {:deflate, 9})) < bit_size(@none)
- assert bit_size(:couch_compress.compress(@term, :snappy)) < bit_size(@none)
- end
-
- test "decompress" do
- assert @term === :couch_compress.decompress(@none)
- assert @term === :couch_compress.decompress(@deflate)
- assert @term === :couch_compress.decompress(@snappy)
- assert @term === :couch_compress.decompress(@snappy_bigendian)
- assert catch_error(:couch_compress.decompress(@corrupt)) == :invalid_compression
- end
-
- test "recompress" do
- res = @none
-
- # none -> deflate
- res = :couch_compress.compress(res, {:deflate, 9})
- assert :couch_compress.is_compressed(res, {:deflate, 9})
-
- # deflate -> snappy
- res = :couch_compress.compress(res, :snappy)
- assert :couch_compress.is_compressed(res, :snappy)
-
- # snappy -> none
- res = :couch_compress.compress(res, :none)
- assert :couch_compress.is_compressed(res, :none)
-
- # none -> snappy
- res = :couch_compress.compress(res, :snappy)
- assert :couch_compress.is_compressed(res, :snappy)
-
- # snappy -> deflate
- res = :couch_compress.compress(res, {:deflate, 9})
- assert :couch_compress.is_compressed(res, {:deflate, 9})
-
- # deflate -> none
- res = :couch_compress.compress(res, :none)
- assert :couch_compress.is_compressed(res, :none)
- end
-
- test "is_compressed" do
- assert :couch_compress.is_compressed(@none, :none)
- assert :couch_compress.is_compressed(@deflate, {:deflate, 9})
- assert :couch_compress.is_compressed(@snappy, :snappy)
- assert :couch_compress.is_compressed(@snappy_bigendian, :snappy)
- refute :couch_compress.is_compressed(@none, {:deflate, 0})
- refute :couch_compress.is_compressed(@none, {:deflate, 9})
- refute :couch_compress.is_compressed(@none, :snappy)
- refute :couch_compress.is_compressed(@deflate, :none)
- refute :couch_compress.is_compressed(@deflate, :snappy)
- refute :couch_compress.is_compressed(@snappy, :none)
- refute :couch_compress.is_compressed(@snappy, {:deflate, 9})
- refute :couch_compress.is_compressed(@snappy_bigendian, :none)
- refute :couch_compress.is_compressed(@snappy_bigendian, {:deflate, 9})
-
- assert catch_error(:couch_compress.is_compressed(@corrupt, :none)) ==
- :invalid_compression
-
- assert catch_error(:couch_compress.is_compressed(@corrupt, {:deflate, 9})) ==
- :invalid_compression
-
- assert catch_error(:couch_compress.is_compressed(@corrupt, :snappy)) ==
- :invalid_compression
- end
-
- test "uncompressed_size" do
- assert :couch_compress.uncompressed_size(@none) === 49
- assert :couch_compress.uncompressed_size(@deflate) === 49
- assert :couch_compress.uncompressed_size(@snappy) === 49
- assert :couch_compress.uncompressed_size(@snappy_bigendian) === 49
-
- assert :couch_compress.uncompressed_size(
- :couch_compress.compress(:x, {:deflate, 9})
- ) === 5
-
- assert catch_error(:couch_compress.uncompressed_size(@corrupt)) ==
- :invalid_compression
- end
- end
-end
diff --git a/src/couch/test/exunit/fabric_test.exs b/src/couch/test/exunit/fabric_test.exs
deleted file mode 100644
index bdb84e9a2..000000000
--- a/src/couch/test/exunit/fabric_test.exs
+++ /dev/null
@@ -1,101 +0,0 @@
-defmodule Couch.Test.Fabric do
- use Couch.Test.ExUnit.Case
- alias Couch.Test.Utils
-
- alias Couch.Test.Setup
-
- alias Couch.Test.Setup.Step
-
- import Couch.DBTest
-
- import Utils
-
- @admin {:user_ctx, user_ctx(roles: ["_admin"])}
-
- def with_db(context, setup) do
- setup =
- setup
- |> Setup.Common.with_db()
- |> Setup.run()
-
- context =
- Map.merge(context, %{
- db_name: setup |> Setup.get(:db) |> Step.Create.DB.name()
- })
-
- {context, setup}
- end
-
- describe "Fabric miscellaneous API" do
- @describetag setup: &__MODULE__.with_db/2
- test "Get inactive_index_files", ctx do
- {:ok, _rev} = update_doc(ctx.db_name, %{"_id" => "doc1"})
-
- design_doc = %{
- "_id" => "_design/test",
- "language" => "javascript",
- "views" => %{
- "view" => %{
- "map" => "function(doc){emit(doc._id, doc._rev)}"
- }
- }
- }
-
- {:ok, rev1} = update_doc(ctx.db_name, design_doc)
- wait_sig_update(ctx.db_name, "test", "")
- prev_active = get_active_sig(ctx.db_name, "test")
-
- updated_design_doc =
- put_in(design_doc, ["views", "view", "map"], "function(doc){emit(doc._id, null)}")
-
- {:ok, rev2} =
- update_doc(
- ctx.db_name,
- Map.put(updated_design_doc, "_rev", rev1)
- )
-
- assert rev1 != rev2
- wait_sig_update(ctx.db_name, "test", prev_active)
-
- {:ok, info} = :fabric.get_view_group_info(ctx.db_name, "_design/test")
- active = info[:signature]
-
- files = Enum.map(:fabric.inactive_index_files(ctx.db_name), &List.to_string/1)
-
-      assert [] != files, "We should have some inactive index files"
-
- assert not Enum.any?(files, fn
- file_path -> String.contains?(file_path, active)
- end),
-             "We are not supposed to return active views"
-
- assert Enum.all?(files, fn
- file_path -> String.contains?(file_path, prev_active)
- end),
-             "We expect all files to contain the previous active signature"
- end
- end
-
- defp update_doc(db_name, body) do
- json_body = :jiffy.decode(:jiffy.encode(body))
-
- case :fabric.update_doc(db_name, json_body, [@admin]) do
- {:ok, rev} ->
- {:ok, :couch_doc.rev_to_str(rev)}
-
- error ->
- error
- end
- end
-
- defp get_active_sig(db_name, ddoc_id) do
- {:ok, info} = :fabric.get_view_group_info(db_name, "_design/#{ddoc_id}")
- info[:signature]
- end
-
- defp wait_sig_update(db_name, ddoc_id, prev_active) do
- retry_until(fn ->
- get_active_sig(db_name, ddoc_id) != prev_active
- end)
- end
-end
diff --git a/src/couch/test/exunit/same_site_cookie_tests.exs b/src/couch/test/exunit/same_site_cookie_tests.exs
deleted file mode 100644
index bad32ada4..000000000
--- a/src/couch/test/exunit/same_site_cookie_tests.exs
+++ /dev/null
@@ -1,44 +0,0 @@
-defmodule SameSiteCookieTests do
- use CouchTestCase
-
- @moduletag :authentication
-
- def get_cookie(user, pass) do
- resp = Couch.post("/_session", body: %{:username => user, :password => pass})
-
- true = resp.body["ok"]
- resp.headers[:"set-cookie"]
- end
-
- @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", "None"}]
- test "Set same_site None" do
- cookie = get_cookie("jan", "apple")
- assert cookie =~ "; SameSite=None"
- end
-
- @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", ""}]
- test "same_site not set" do
- cookie = get_cookie("jan", "apple")
- assert cookie
- refute cookie =~ "; SameSite="
- end
-
- @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", "Strict"}]
- test "Set same_site Strict" do
- cookie = get_cookie("jan", "apple")
- assert cookie =~ "; SameSite=Strict"
- end
-
- @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", "Lax"}]
- test "Set same_site Lax" do
- cookie = get_cookie("jan", "apple")
- assert cookie =~ "; SameSite=Lax"
- end
-
- @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", "Invalid"}]
- test "Set same_site invalid" do
- cookie = get_cookie("jan", "apple")
- assert cookie
- refute cookie =~ "; SameSite="
- end
-end
diff --git a/src/couch/test/exunit/test_helper.exs b/src/couch/test/exunit/test_helper.exs
deleted file mode 100644
index 314050085..000000000
--- a/src/couch/test/exunit/test_helper.exs
+++ /dev/null
@@ -1,2 +0,0 @@
-ExUnit.configure(formatters: [JUnitFormatter, ExUnit.CLIFormatter])
-ExUnit.start()
diff --git a/src/couch_dist/LICENSE b/src/couch_dist/LICENSE
deleted file mode 100644
index c6d336b1a..000000000
--- a/src/couch_dist/LICENSE
+++ /dev/null
@@ -1,177 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/src/couch_dist/rebar.config b/src/couch_dist/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/couch_dist/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/couch_dist/src/couch_dist.app.src b/src/couch_dist/src/couch_dist.app.src
deleted file mode 100644
index 4906d2d41..000000000
--- a/src/couch_dist/src/couch_dist.app.src
+++ /dev/null
@@ -1,19 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_dist,
- [{description, "A Custom Erlang network protocol for CouchDB"},
- {vsn, git},
- {modules, [couch_dist]},
- {registered, [couch_dist]},
- {applications, [kernel, stdlib, ssl]}
-]}.
diff --git a/src/couch_dist/src/couch_dist.erl b/src/couch_dist/src/couch_dist.erl
deleted file mode 100644
index a0922ebc5..000000000
--- a/src/couch_dist/src/couch_dist.erl
+++ /dev/null
@@ -1,149 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_dist).
-
--export([
- childspecs/0,
- listen/1,
- listen/2,
- accept/1,
- accept_connection/5,
- setup/5,
- close/1,
- select/1,
- is_node_name/1
-]).
-
-% Just for tests
--export([no_tls/1, get_init_args/0]).
-
-childspecs() ->
- {ok, [
- {ssl_dist_sup, {ssl_dist_sup, start_link, []}, permanent, infinity, supervisor, [
- ssl_dist_sup
- ]}
- ]}.
-
-listen(Name) ->
- NodeName =
- case is_atom(Name) of
- true -> atom_to_list(Name);
- false -> Name
- end,
- Host = get_node_host(),
- Mod = inet_dist(NodeName ++ "@" ++ Host),
- Mod:listen(NodeName).
-
-listen(Name, Host) ->
- NodeName =
- case is_atom(Name) of
- true -> atom_to_list(Name);
- false -> Name
- end,
- Mod = inet_dist(NodeName ++ "@" ++ Host),
- Mod:listen(NodeName, Host).
-
-accept(Listen) ->
- Mod = inet_dist(node()),
- Mod:accept(Listen).
-
-accept_connection(AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) ->
- Mod = inet_dist(MyNode),
- Mod:accept_connection(AcceptPid, DistCtrl, MyNode, Allowed, SetupTime).
-
-setup(Node, Type, MyNode, LongOrShortNames, SetupTime) ->
- Mod = inet_dist(Node),
- Mod:setup(Node, Type, MyNode, LongOrShortNames, SetupTime).
-
-close(Socket) ->
- inet_tls_dist:close(Socket).
-
-select(Node) ->
- inet_tls_dist:select(Node).
-
-is_node_name(Node) ->
- inet_tls_dist:is_node_name(Node).
-
-get_init_args() ->
- init:get_argument(couch_dist).
-
-get_node_host() ->
- % Cannot use `node()` since distribution hasn't started yet. Use
-    % similar logic to erl_distribution and net_kernel to parse it
- % from the arguments list
- case {init:get_argument(sname), init:get_argument(name)} of
- {{ok, [[SName]]}, _} ->
- case split_host(SName) of
- [$@ | Host] when length(Host) > 0 ->
- Host;
- _ ->
- inet_db:gethostname()
- end;
- {error, {ok, [[Name]]}} ->
- case split_host(Name) of
- [$@ | Host] when length(Host) > 0 ->
- Host;
- _ ->
- OwnHost = inet_db:gethostname(),
- case inet_db:res_option(domain) of
- Domain when is_list(Domain), length(Domain) > 0 ->
- OwnHost ++ "." ++ Domain;
- _ ->
- OwnHost
- end
- end
- end.
-
-split_host(Name) ->
-    % Copied from net_kernel. Modified to return Host only
- {_, Host} = lists:splitwith(fun(C) -> C =/= $@ end, Name),
- Host.
-
-inet_dist(Node) ->
- case no_tls(Node) of
- true -> inet_tcp_dist;
- false -> inet_tls_dist
- end.
-
-no_tls(NodeName) when is_atom(NodeName) ->
- no_tls(atom_to_list(NodeName));
-no_tls(NodeName) when is_list(NodeName) ->
- case ?MODULE:get_init_args() of
- {ok, Args} ->
- GlobPatterns = [V || [K, V] <- Args, K == "no_tls"],
- lists:any(fun(P) -> match(NodeName, P) end, GlobPatterns);
- error ->
- false
- end.
-
-match(_NodeName, "true") ->
- true;
-match(_NodeName, "false") ->
- false;
-match(NodeName, Pattern) ->
- {ok, RE} =
- case string:split(Pattern, [$"], all) of
- ["", GlobPattern, ""] -> to_re(GlobPattern);
- _ -> to_re(Pattern)
- end,
- re:run(NodeName, RE) /= nomatch.
-
-to_re(GlobPattern) ->
- re:compile([$^, lists:flatmap(fun glob_re/1, GlobPattern), $$]).
-
-glob_re($*) ->
- ".*";
-glob_re($?) ->
- ".";
-glob_re(C) ->
- [C].
diff --git a/src/couch_dist/test/eunit/couch_dist_tests.erl b/src/couch_dist/test/eunit/couch_dist_tests.erl
deleted file mode 100644
index abe5ff572..000000000
--- a/src/couch_dist/test/eunit/couch_dist_tests.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_dist_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-no_tls_test_() ->
- {
- "test couch_dist no_tls/1",
- {
- setup,
- fun() -> meck:new(couch_dist, [passthrough]) end,
- fun(_) -> meck:unload() end,
- [
- no_tls_test_with_true(),
- no_tls_test_with_false(),
- no_tls_test_with_character(),
- no_tls_test_with_wildcard(),
- no_tls_test_with_question_mark(),
- no_tls_test_with_error()
- ]
- }
- }.
-
-mock_get_init_args(Reply) ->
- meck:expect(couch_dist, get_init_args, fun() -> Reply end).
-
-no_tls_test_with_true() ->
- ?_test(
- begin
- mock_get_init_args({ok, [["no_tls", "true"]]}),
- ?assert(couch_dist:no_tls('abc123')),
- ?assert(couch_dist:no_tls("123abd"))
- end
- ).
-
-no_tls_test_with_false() ->
- ?_test(
- begin
- mock_get_init_args({ok, [["no_tls", "false"]]}),
- ?assertNot(couch_dist:no_tls('abc123')),
- ?assertNot(couch_dist:no_tls("123abc"))
- end
- ).
-
-no_tls_test_with_character() ->
- ?_test(
- begin
- mock_get_init_args({ok, [["no_tls", "node@127.0.0.1"]]}),
- ?assert(couch_dist:no_tls('node@127.0.0.1')),
- ?assert(couch_dist:no_tls("node@127.0.0.1"))
- end
- ).
-
-no_tls_test_with_wildcard() ->
- ?_test(
- begin
- mock_get_init_args({ok, [["no_tls", "\"a*2\""]]}),
- ?assert(couch_dist:no_tls('ab12')),
- ?assert(couch_dist:no_tls("a12")),
- ?assert(couch_dist:no_tls("a2")),
- ?assertNot(couch_dist:no_tls('a')),
- ?assertNot(couch_dist:no_tls("2"))
- end
- ).
-
-no_tls_test_with_question_mark() ->
- ?_test(
- begin
- mock_get_init_args({ok, [["no_tls", "\"a?2\""]]}),
- ?assert(couch_dist:no_tls('a12')),
- ?assert(couch_dist:no_tls("ab2")),
- ?assertNot(couch_dist:no_tls('a2')),
- ?assertNot(couch_dist:no_tls("a"))
- end
- ).
-
-no_tls_test_with_error() ->
- ?_test(
- begin
- mock_get_init_args(error),
- ?assertNot(couch_dist:no_tls('abc123')),
- ?assertNot(couch_dist:no_tls("123abc"))
- end
- ).
diff --git a/src/couch_epi/.gitignore b/src/couch_epi/.gitignore
deleted file mode 100644
index 2cd33974b..000000000
--- a/src/couch_epi/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.rebar
-ebin
-erl_crash.dump
-.eunit
diff --git a/src/couch_epi/LICENSE b/src/couch_epi/LICENSE
deleted file mode 100644
index 94ad231b8..000000000
--- a/src/couch_epi/LICENSE
+++ /dev/null
@@ -1,203 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/src/couch_epi/README.md b/src/couch_epi/README.md
deleted file mode 100644
index 368ad9afd..000000000
--- a/src/couch_epi/README.md
+++ /dev/null
@@ -1,166 +0,0 @@
-# What it is
-
-`couch_epi` is an extensible plugin interface (EPI) for CouchDB.
-
-## Requirements
-
- 1. Automatically discoverable
- 2. Minimize apps that need to be started for tests
- 3. Support release upgrades
-
-## Glossary
-
- * service - an abstract functionality defined by a unique name and an API
- * provider - a self-contained implementation of a `Service`'s API
- * subscriber - an application or a process which uses the functionality provided by a `Provider`
- * epi_key - a routing key; it has to be in one of the following forms:
-   - `{service_id :: atom(), key :: term()}` - for `couch_epi_data_source`
-   - `service_id :: atom()` - for `couch_epi_functions`
- * handle - an opaque data structure returned from `couch_epi:get_handle(EpiKey)`
-
-## Support release upgrade
-
-We monitor the modules involved in the configuration of the service/provider so we
-get notified when there is a code upgrade. We use this notification in order to:
-
- - regenerate dispatch module if needed
- - call notify/3 of a module implementing couch_epi_plugin behaviour
-
-The notify/3 callback is called for both providers and data_providers.
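-
-A rough sketch of such a callback, assuming the three arguments are the changed key
-and the old and new data (the module and the logging call are illustrative only):
-
-    notify(Key, OldData, NewData) ->
-        couch_log:info("epi data for ~p changed: ~p -> ~p", [Key, OldData, NewData]),
-        ok.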
-
-## Data example
-
-Any application that wants to register some configuration data for a service
-could add an entry in its implementation of the couch_epi_plugin behaviour:
-
-    data_providers() ->
-        [
-            {{couch_stats, descriptions},
-                {priv_file, "stats_descriptions.cfg"}, [{interval, 5000}]},
-            {{couch_stats, descriptions},
-                {file, "/tmp/extra_stats.cfg"}, [{interval, 5000}]},
-            {{couch_stats, descriptions}, {static_module, my_stats}},
-            {{couch_stats, descriptions}, {callback_module, my_stats}}
-        ].
-
-When a service provider wants to learn about all the installed config data,
-it would just do something like:
-
-
- couch_epi:get(Handle, Service, Key)
-
-The service provider also has to mention the data keys it is using in its
-implementation of the couch_epi_plugin behaviour:
-
- data_subscriptions() ->
- [{couch_stats, descriptions}].
-
-There are also additional functions to get the same data in various formats:
-
-- `couch_epi:all(Handle)` - returns config data for all services for a given handle
-- `couch_epi:get(Handle, Subscriber)` - returns config data for a given subscriber
-- `couch_epi:get_value(Handle, Subscriber, Key)` - returns config data for a given subscriber and key
-- `couch_epi:by_key(Handle, Key)` - returns config data for a given key
-- `couch_epi:by_key(Handle)` - returns config data grouped by key
-- `couch_epi:by_source(Handle)` - returns config data grouped by source (subscriber)
-- `couch_epi:keys(Handle)` - returns list of configured keys
-- `couch_epi:subscribers(Handle)` - return list of known subscribers
-
-The difference between `static_module` and `callback_module` providers is in how
-couch_epi detects changes. `static_module` is designed for cases when you have
-your data hardcoded in the module. For example, you might have the following:
-
-```
--export([data/0]).
-
-data() ->
- [
- {[complex, key, 2], [
- {type, counter},
- {desc, bar}
- ]},
- {[complex, key, 1], [
- {type, counter},
- {desc, updated_foo}
- ]}
- ].
-```
-
-The changes are detected by relying on the `vsn` module attribute. Therefore we
-would notice the change only when the data source module is recompiled.
-
-The `callback_module` provider uses the return value from `data/0` to detect
-changes, and it is useful for cases when the data term is constructed dynamically.
-For example, to cache values of the CouchDB config one could use the following:
-
-```
--export([data/0]).
-data() ->
- config:get("dreyfus").
-```
-
-# Function dispatch example
-
-Any application that wants to register implementation functions for a service
-could add the following into its implementation of the couch_epi_plugin behaviour:
-
- providers() ->
- [{my_service, module_which_implements_the_functions}].
-
-Adding the entry would generate dispatch methods for any exported function
-of the modules passed.
-
-Services have to be defined in one of the implementations of couch_epi_plugin
-behaviour as:
-
- services() ->
- [{my_service, module_to_monitor_for_codechange}].
-
-When an app wants to dispatch a call to all service providers, it calls:
-
- couch_epi:apply(Handle, ServiceId, Function, Args, Opts)
-
-There are multiple ways of doing the apply, controlled by Opts (a short sketch
-follows the list):
-
- - ignore_errors - the call is wrapped into try/catch
- - concurrent - spawn a new process for every service provider
- - pipe - use output of one service provider as an input for the next one
-
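-A short sketch of such a call, assuming a hypothetical `my_service` service whose
-providers export `handle_event/1`:
-
-    Handle = couch_epi:get_handle(my_service),
-    Replies = couch_epi:apply(Handle, my_service, handle_event, [Event], [ignore_errors])
-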
-Notes:
-
- - `concurrent` is incompatible with `pipe`
- - if there are multiple plugins providing the same service, they will be called in
-   the order they are listed in application:get_env(couch_epi, plugins)
- - if the same plugin provides multiple implementations of the same service, the
-   order is as defined in the providers callback
-
-## Decide functionality
-
-There are cases when we want to call configured providers until one of them
-makes a decision. We also want to be able to find out whether any decision has
-been made, so we can call a default handler. For this there is couch_epi:decide/5.
-Every service which uses this feature would get either:
-
- - no_decision
- - {decided, Decision :: term()}
-
-The provider module should return one of the above results. The current logic is
-to call all configured providers in the order of their definition until we get
-`{decided, term()}`. If none of the providers returns this term, we return
-`no_decision`.
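-
-A provider-side sketch, assuming a hypothetical service whose providers export an
-`authorize/1` function invoked through couch_epi:decide/5:
-
-    authorize(#{roles := Roles}) ->
-        case lists:member(<<"_admin">>, Roles) of
-            true -> {decided, allow};
-            false -> no_decision
-        end.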
-
-# couch_epi_plugin behaviour
-
-The module implementing this behaviour needs to export the following functions
-(a minimal sketch follows the list):
-
- - Module:app/0 - Returns atom representing the application name
- - Module:providers/0 - Returns list of {service_id(), module()} tuples
- for defined providers
- - Module:services/0 - Returns list of {service_id(), module()} tuples
- for defined services
- - Module:data_subscriptions/0 - Returns list of keys we define
- - Module:data_providers/0 - Returns list of keys we provide
- - Module:processes/0 - Supervisor specs which would be injected into the
-   application supervisor
- - Module:notify/3 - Notification callback
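-
-A minimal sketch of a module implementing the behaviour (all names below are
-hypothetical):
-
-    -module(my_app_epi).
-    -behaviour(couch_epi_plugin).
-
-    -export([app/0, providers/0, services/0]).
-    -export([data_subscriptions/0, data_providers/0]).
-    -export([processes/0, notify/3]).
-
-    app() -> my_app.
-    providers() -> [{my_service, my_service_impl}].
-    services() -> [{my_service, my_service_impl}].
-    data_subscriptions() -> [].
-    data_providers() -> [].
-    processes() -> [].
-    notify(_Key, _OldData, _NewData) -> ok.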
diff --git a/src/couch_epi/rebar.config b/src/couch_epi/rebar.config
deleted file mode 100644
index 3c7f8af73..000000000
--- a/src/couch_epi/rebar.config
+++ /dev/null
@@ -1,7 +0,0 @@
-{cover_enabled, true}.
-
-{cover_print_enabled, true}.
-
-{erl_opts, [
- {platform_define, "^R16", 'pre18'},
- {platform_define, "^17", 'pre18'}]}.
diff --git a/src/couch_epi/src/couch_epi.app.src.script b/src/couch_epi/src/couch_epi.app.src.script
deleted file mode 100644
index daa5e4d73..000000000
--- a/src/couch_epi/src/couch_epi.app.src.script
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-ConfigFile = filename:join([os:getenv("COUCHDB_APPS_CONFIG_DIR"), "couch_epi.config"]).
-{ok, AppConfig} = file:consult(ConfigFile).
-
-{application, couch_epi,
- [
- {description, "extensible plugin interface"},
- {vsn, git},
- {registered, [couch_epi_sup, couch_epi_server]},
- {applications, [
- kernel,
- stdlib,
- crypto
- ]},
- {mod, { couch_epi_app, []}},
- {env, AppConfig}
- ]}.
diff --git a/src/couch_epi/src/couch_epi.erl b/src/couch_epi/src/couch_epi.erl
deleted file mode 100644
index c708e5a0b..000000000
--- a/src/couch_epi/src/couch_epi.erl
+++ /dev/null
@@ -1,199 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi).
-
--export([get_handle/1]).
--export([register_service/2]).
-
-%% queries and introspection
--export([
- dump/1,
- get/2,
- get_value/3,
- by_key/1, by_key/2,
- by_source/1, by_source/2,
- keys/1,
- subscribers/1
-]).
-
-%% apply
--export([apply/5, decide/5]).
--export([any/5, all/5]).
-
--export([is_configured/3]).
-
-%% ------------------------------------------------------------------
-%% Types Definitions
-%% ------------------------------------------------------------------
-
--export_type([
- service_id/0,
- app/0,
- key/0,
- handle/0,
- plugin_id/0,
- data_spec/0,
- apply_opts/0
-]).
-
--type app() :: atom().
--type key() :: term().
--type service_id() :: atom().
-
--type properties() :: [{key(), term()}].
-
--type plugin_id() :: module().
-
--opaque handle() :: module().
-
--type apply_opt() ::
- ignore_errors
- | concurrent
- | pipe.
-
--type apply_opts() :: [apply_opt()].
-
--type data_spec() ::
- {static_module, module()}
- | {callback_module, module()}
- | {priv_file, FileName :: string()}
- | {file, FileName :: string()}.
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
--spec dump(Handle :: handle()) ->
- [Config :: properties()].
-
-dump(Handle) when Handle /= undefined ->
- couch_epi_data_gen:get(Handle).
-
--spec get(Handle :: handle(), Key :: key()) ->
- [Config :: properties()].
-
-get(Handle, Key) when Handle /= undefined ->
- couch_epi_data_gen:get(Handle, Key).
-
--spec get_value(Handle :: handle(), Subscriber :: app(), Key :: key()) ->
- term().
-
-get_value(Handle, Subscriber, Key) when Handle /= undefined ->
- couch_epi_data_gen:get(Handle, Subscriber, Key).
-
--spec by_key(Handle :: handle()) ->
- [{Key :: key(), [{Source :: app(), properties()}]}].
-
-by_key(Handle) when Handle /= undefined ->
- couch_epi_data_gen:by_key(Handle).
-
--spec by_key(Handle :: handle(), Key :: key()) ->
- [{Source :: app(), properties()}].
-
-by_key(Handle, Key) when Handle /= undefined ->
- couch_epi_data_gen:by_key(Handle, Key).
-
--spec by_source(Handle :: handle()) ->
- [{Source :: app(), [{Key :: key(), properties()}]}].
-
-by_source(Handle) when Handle /= undefined ->
- couch_epi_data_gen:by_source(Handle).
-
--spec by_source(Handle :: handle(), Subscriber :: app()) ->
- [{Key :: key(), properties()}].
-
-by_source(Handle, Subscriber) when Handle /= undefined ->
- couch_epi_data_gen:by_source(Handle, Subscriber).
-
--spec keys(Handle :: handle()) ->
- [Key :: key()].
-
-keys(Handle) when Handle /= undefined ->
- couch_epi_data_gen:keys(Handle).
-
--spec subscribers(Handle :: handle()) ->
- [Subscriber :: app()].
-
-subscribers(Handle) when Handle /= undefined ->
- couch_epi_data_gen:subscribers(Handle).
-
--spec apply(
- Handle :: handle(),
- ServiceId :: atom(),
- Function :: atom(),
- Args :: [term()],
- Opts :: apply_opts()
-) -> [any()].
-
-apply(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
- couch_epi_functions_gen:apply(Handle, ServiceId, Function, Args, Opts).
-
--spec get_handle
- ({ServiceId :: service_id(), Key :: key()}) -> handle();
- (ServiceId :: service_id()) -> handle().
-
-get_handle({_ServiceId, _Key} = EPIKey) ->
- couch_epi_data_gen:get_handle(EPIKey);
-get_handle(ServiceId) when is_atom(ServiceId) ->
- couch_epi_functions_gen:get_handle(ServiceId).
-
--spec any(
- Handle :: handle(),
- ServiceId :: atom(),
- Function :: atom(),
- Args :: [term()],
- Opts :: apply_opts()
-) -> boolean().
-
-any(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
- Replies = apply(Handle, ServiceId, Function, Args, Opts),
- [] /= [Reply || Reply <- Replies, Reply == true].
-
--spec all(
- Handle :: handle(),
- ServiceId :: atom(),
- Function :: atom(),
- Args :: [term()],
- Opts :: apply_opts()
-) -> boolean().
-
-all(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
- Replies = apply(Handle, ServiceId, Function, Args, Opts),
- [] == [Reply || Reply <- Replies, Reply == false].
-
--spec is_configured(
- Handle :: handle(), Function :: atom(), Arity :: pos_integer()
-) -> boolean().
-
-is_configured(Handle, Function, Arity) when Handle /= undefined ->
- [] /= couch_epi_functions_gen:modules(Handle, Function, Arity).
-
--spec register_service(
- PluginId :: plugin_id(), Children :: [supervisor:child_spec()]
-) ->
- [supervisor:child_spec()].
-
-register_service(Plugin, Children) ->
- couch_epi_sup:plugin_childspecs(Plugin, Children).
-
--spec decide(
- Handle :: handle(),
- ServiceId :: atom(),
- Function :: atom(),
- Args :: [term()],
- Opts :: apply_opts()
-) ->
- no_decision | {decided, term()}.
-
-decide(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
- couch_epi_functions_gen:decide(Handle, ServiceId, Function, Args, Opts).
diff --git a/src/couch_epi/src/couch_epi.hrl b/src/couch_epi/src/couch_epi.hrl
deleted file mode 100644
index a8bd1d542..000000000
--- a/src/couch_epi/src/couch_epi.hrl
+++ /dev/null
@@ -1,15 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(couch_epi_spec, {
- behaviour, app, kind, options, key, value, codegen, type
-}).
diff --git a/src/couch_epi/src/couch_epi_app.erl b/src/couch_epi/src/couch_epi_app.erl
deleted file mode 100644
index 0dd42c2ee..000000000
--- a/src/couch_epi/src/couch_epi_app.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_app).
-
--behaviour(application).
-
--export([start/2, stop/1]).
-
-start(_Type, _Args) ->
- couch_epi_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/couch_epi/src/couch_epi_codechange_monitor.erl b/src/couch_epi/src/couch_epi_codechange_monitor.erl
deleted file mode 100644
index 214aea14d..000000000
--- a/src/couch_epi/src/couch_epi_codechange_monitor.erl
+++ /dev/null
@@ -1,69 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_codechange_monitor).
-
--behaviour(gen_server).
-
-%% ------------------------------------------------------------------
-%% API Function Exports
-%% ------------------------------------------------------------------
-
--export([start_link/1]).
-
-%% ------------------------------------------------------------------
-%% gen_server Function Exports
-%% ------------------------------------------------------------------
-
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-start_link(Handler) ->
- gen_server:start_link(?MODULE, [Handler], []).
-
-%% ------------------------------------------------------------------
-%% gen_server Function Definitions
-%% ------------------------------------------------------------------
-
-init([Handler]) ->
- couch_epi_module_keeper:reload(Handler),
- {ok, Handler}.
-
-handle_call(_Request, _From, State) ->
- {reply, ok, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, Keeper, _Extra) ->
- couch_epi_module_keeper:reload(Keeper),
- {ok, Keeper}.
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
diff --git a/src/couch_epi/src/couch_epi_codegen.erl b/src/couch_epi/src/couch_epi_codegen.erl
deleted file mode 100644
index 212a4e31a..000000000
--- a/src/couch_epi/src/couch_epi_codegen.erl
+++ /dev/null
@@ -1,94 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_codegen).
-
--export([generate/2, scan/1, parse/1, function/1, format_term/1]).
-
-generate(ModuleName, Forms) when is_atom(ModuleName) ->
- generate(atom_to_list(ModuleName), Forms);
-generate(ModuleName, Forms0) ->
- Forms = scan("-module(" ++ ModuleName ++ ").") ++ Forms0,
- ASTForms = parse(Forms),
- {ok, Mod, Bin} = compile:forms(ASTForms, [verbose, report_errors]),
- {module, Mod} = code:load_binary(Mod, atom_to_list(Mod) ++ ".erl", Bin),
- ok.
-
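-%% scan/1 splits the source on ".\n" terminators, tokenizes each expression
-%% with erl_scan:string/1 and, via fixup_terminator/1, appends a missing
-%% terminating dot so the tokens can be fed to erl_parse:parse_form/1.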
-scan(String) ->
- Exprs = [E || E <- re:split(String, "\\.\n", [{return, list}, trim])],
- FormsTokens = lists:foldl(
- fun(Expr, Acc) ->
- case erl_scan:string(Expr) of
- {ok, [], _} ->
- Acc;
- {ok, Tokens, _} ->
- [{Expr, fixup_terminator(Tokens)} | Acc]
- end
- end,
- [],
- Exprs
- ),
- lists:reverse(FormsTokens).
-
-parse(FormsTokens) ->
- ASTForms = lists:foldl(
- fun(Tokens, Forms) ->
- {ok, AST} = parse_form(Tokens),
- [AST | Forms]
- end,
- [],
- FormsTokens
- ),
- lists:reverse(ASTForms).
-
-format_term(Data) ->
- lists:flatten(io_lib:format("~w", [Data])).
-
-parse_form(Tokens) ->
- {Expr, Forms} = split_expression(Tokens),
- case erl_parse:parse_form(Forms) of
- {ok, AST} -> {ok, AST};
- {error, {_, _, Reason}} -> {error, Expr, Reason}
- end.
-
-split_expression({Expr, Forms}) ->
- {Expr, Forms};
-split_expression(Tokens) ->
- {Exprs, Forms} = lists:unzip(Tokens),
- {string:join(Exprs, "\n"), lists:append(Forms)}.
-
-function(Clauses) ->
- [lists:flatten(Clauses)].
-
-fixup_terminator(Tokens) ->
- case lists:last(Tokens) of
- {dot, _} ->
- Tokens;
- {';', _} ->
- Tokens;
- Token ->
- Line = line(Token),
- Tokens ++ [{dot, Line}]
- end.
-
--ifdef(pre18).
-
-line(Token) ->
- {line, Line} = erl_scan:token_info(Token, line),
- Line.
-
--else.
-
-line(Token) ->
- erl_scan:line(Token).
-
--endif.
diff --git a/src/couch_epi/src/couch_epi_data.erl b/src/couch_epi/src/couch_epi_data.erl
deleted file mode 100644
index ec554a40e..000000000
--- a/src/couch_epi/src/couch_epi_data.erl
+++ /dev/null
@@ -1,120 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_data).
-
--include("couch_epi.hrl").
-
-%% ------------------------------------------------------------------
-%% API Function Exports
-%% ------------------------------------------------------------------
-
--export([interval/1, data/1]).
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-interval(Specs) ->
- extract_minimal_interval(Specs).
-
-data(Specs) ->
- Locators = locate_sources(Specs),
- case lists:foldl(fun collect_data/2, {ok, [], []}, Locators) of
- {ok, Hashes, Data} ->
- {ok, couch_epi_util:hash(Hashes), Data};
- Error ->
- Error
- end.
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-collect_data({App, Locator}, {ok, HashAcc, DataAcc}) ->
- case definitions(Locator) of
- {ok, Hash, Data} ->
- {ok, [Hash | HashAcc], [{App, Data} | DataAcc]};
- Error ->
- Error
- end;
-collect_data({_App, _Locator}, Error) ->
- Error.
-
-extract_minimal_interval(Specs) ->
- lists:foldl(fun minimal_interval/2, undefined, Specs).
-
-minimal_interval({_App, #couch_epi_spec{options = Options}}, Min) ->
- case lists:keyfind(interval, 1, Options) of
- {interval, Interval} -> min(Interval, Min);
- false -> Min
- end.
-
-locate_sources(Specs) ->
- lists:map(
- fun({ProviderApp, #couch_epi_spec{value = Src}}) ->
- {ok, Locator} = locate(ProviderApp, Src),
- {ProviderApp, Locator}
- end,
- Specs
- ).
-
-locate(App, {priv_file, FileName}) ->
- case priv_path(App, FileName) of
- {ok, FilePath} ->
- ok = check_exists(FilePath),
- {ok, {file, FilePath}};
- Else ->
- Else
- end;
-locate(_App, {file, FilePath}) ->
- ok = check_exists(FilePath),
- {ok, {file, FilePath}};
-locate(_App, Locator) ->
- {ok, Locator}.
-
-priv_path(AppName, FileName) ->
- case code:priv_dir(AppName) of
- {error, _Error} = Error ->
- Error;
- Dir ->
- {ok, filename:join(Dir, FileName)}
- end.
-
-check_exists(FilePath) ->
- case filelib:is_regular(FilePath) of
- true ->
- ok;
- false ->
- {error, {notfound, FilePath}}
- end.
-
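-%% Turn a resolved locator into {ok, Hash, Data}:
-%%   {file, Path}           - file:consult/1 of the file, hash of its content
-%%   {static_module, Mods}  - appended M:data() results, hash of module versions
-%%   {callback_module, Mod} - Mod:data(), hashed with erlang:phash2/1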
-definitions({file, FilePath}) ->
- case file:consult(FilePath) of
- {ok, Data} ->
- {ok, hash_of_file(FilePath), Data};
- {error, Reason} ->
- {error, {FilePath, Reason}}
- end;
-definitions({static_module, Module}) when is_atom(Module) ->
- definitions({static_module, [Module]});
-definitions({static_module, Modules}) ->
- Data = lists:append([M:data() || M <- Modules]),
- Hash = couch_epi_functions_gen:hash(Modules),
- {ok, Hash, Data};
-definitions({callback_module, Module}) ->
- Data = Module:data(),
- {ok, erlang:phash2(Data), Data}.
-
-hash_of_file(FilePath) ->
- {ok, Data} = file:read_file(FilePath),
- couch_hash:md5_hash(Data).
diff --git a/src/couch_epi/src/couch_epi_data_gen.erl b/src/couch_epi/src/couch_epi_data_gen.erl
deleted file mode 100644
index 65d689fbf..000000000
--- a/src/couch_epi/src/couch_epi_data_gen.erl
+++ /dev/null
@@ -1,307 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_data_gen).
-
-%% @doc
-%% We generate and compile a module whose name is constructed as:
-%%   "couch_epi_data_gen_" ++ Service ++ "_" ++ Key
-%% To get an idea of the code in the generated module, see preamble().
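-%% For example, get_handle({my_service, my_key}) returns the module name
-%% couch_epi_data_gen_my_service_my_key (illustrative service and key names).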
-
--export([get_handle/1]).
--export([get/1, get/2, get/3]).
--export([generate/2]).
--export([by_key/1, by_key/2]).
--export([by_source/1, by_source/2]).
--export([keys/1, subscribers/1]).
-
--export([get_current_definitions/1]).
-
-get(Handle) ->
- Handle:all().
-
-get(Handle, Key) ->
- Handle:all(Key).
-
-get(Handle, Source, Key) ->
- Handle:get(Source, Key).
-
-by_key(Handle) ->
- Handle:by_key().
-
-by_key(Handle, Key) ->
- Handle:by_key(Key).
-
-by_source(Handle) ->
- Handle:by_source().
-
-by_source(Handle, Source) ->
- Handle:by_source(Source).
-
-keys(Handle) ->
- Handle:keys().
-
-subscribers(Handle) ->
- Handle:subscribers().
-
-get_handle({Service, Key}) ->
- module_name({atom_to_list(Service), atom_to_list(Key)}).
-
-%% ------------------------------------------------------------------
-%% Code generation routines
-%% ------------------------------------------------------------------
-
-preamble() ->
- "\n"
- " -export([by_key/0, by_key/1]).\n"
- " -export([by_source/0, by_source/1]).\n"
- " -export([all/0, all/1, get/2]).\n"
- " -export([version/0, version/1]).\n"
- " -export([keys/0, subscribers/0]).\n"
- " -compile({no_auto_import,[get/0, get/1]}).\n"
- " all() ->\n"
- " lists:foldl(fun({Key, Defs}, Acc) ->\n"
- " [D || {_Subscriber, D} <- Defs ] ++ Acc\n"
- " end, [], by_key()).\n"
- "\n"
- " all(Key) ->\n"
- " lists:foldl(fun({Subscriber, Data}, Acc) ->\n"
- " [Data | Acc]\n"
- " end, [], by_key(Key)).\n"
- "\n"
- " by_key() ->\n"
- " [{Key, by_key(Key)} || Key <- keys()].\n"
- "\n"
- " by_key(Key) ->\n"
- " lists:foldl(\n"
- " fun(Source, Acc) -> append_if_defined(Source, get(Source, Key), Acc)\n"
- " end, [], subscribers()).\n"
- "\n"
- "\n"
- " by_source() ->\n"
- " [{Source, by_source(Source)} || Source <- subscribers()].\n"
- "\n"
- " by_source(Source) ->\n"
- " lists:foldl(\n"
- " fun(Key, Acc) -> append_if_defined(Key, get(Source, Key), Acc)\n"
- " end, [], keys()).\n"
- "\n"
- " version() ->\n"
- " [{Subscriber, version(Subscriber)} || Subscriber <- subscribers()].\n"
- "\n"
- " %% Helper functions\n"
- " append_if_defined(Type, undefined, Acc) -> Acc;\n"
- " append_if_defined(Type, Value, Acc) -> [{Type, Value} | Acc].\n"
- " "
-%% In addition to the preamble we also generate the following functions:
-%% get(Source1, Key1) -> Data;
-%% get(Source, Key) -> undefined.
-
-%% version(Source1) -> "HASH";
-%% version(Source) -> {error, {unknown, Source}}.
-
-%% keys() -> [].
-%% subscribers() -> [].
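-%% For example, a definition {app1, [{foo, [bar]}]} produces the clause
-%%   get(app1, foo) -> [bar];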
-.
-
-generate(Handle, Defs) ->
- GetFunForms = couch_epi_codegen:function(getters(Defs)),
- VersionFunForms = couch_epi_codegen:function(version_method(Defs)),
- KeysForms = keys_method(Defs),
- SubscribersForms = subscribers_method(Defs),
-
- Forms =
- couch_epi_codegen:scan(preamble()) ++
- GetFunForms ++ VersionFunForms ++
- KeysForms ++ SubscribersForms,
-
- couch_epi_codegen:generate(Handle, Forms).
-
-keys_method(Defs) ->
- Keys = couch_epi_codegen:format_term(defined_keys(Defs)),
- couch_epi_codegen:scan("keys() -> " ++ Keys ++ ".").
-
-subscribers_method(Defs) ->
- Subscribers = couch_epi_codegen:format_term(defined_subscribers(Defs)),
- couch_epi_codegen:scan("subscribers() -> " ++ Subscribers ++ ".").
-
-getters(Defs) ->
- DefaultClause = "get(_S, _K) -> undefined.",
- fold_defs(
- Defs,
- [couch_epi_codegen:scan(DefaultClause)],
- fun({Source, Key, Data}, Acc) ->
- getter(Source, Key, Data) ++ Acc
- end
- ).
-
-version_method(Defs) ->
- DefaultClause = "version(S) -> {error, {unknown, S}}.",
- lists:foldl(
- fun({Source, Data}, Clauses) ->
- version(Source, Data) ++ Clauses
- end,
- [couch_epi_codegen:scan(DefaultClause)],
- Defs
- ).
-
-getter(Source, Key, Data) ->
- D = couch_epi_codegen:format_term(Data),
- Src = atom_to_list(Source),
- couch_epi_codegen:scan(
- "get(" ++ Src ++ ", " ++ format_key(Key) ++ ") ->" ++ D ++ ";"
- ).
-
-version(Source, Data) ->
- Src = atom_to_list(Source),
- VSN = couch_epi_util:hash(Data),
- couch_epi_codegen:scan("version(" ++ Src ++ ") ->" ++ VSN ++ ";").
-
-format_key(Key) when is_tuple(Key) ->
- Parts = lists:map(fun format_key/1, tuple_to_list(Key)),
- "{" ++ string:join(Parts, ",") ++ "}";
-format_key(Key) when is_list(Key) ->
- case lists:reverse(Key) of
- "*" ++ K -> "\"" ++ lists:reverse(K) ++ "\" ++ _";
- _ -> couch_epi_codegen:format_term(Key)
- end;
-format_key(Key) when is_binary(Key) andalso size(Key) > 0 ->
- case binary:last(Key) of
- $* ->
- KeyList = binary_to_list(binary:part(Key, {0, size(Key) - 1})),
- "<<\"" ++ KeyList ++ "\", _/binary>>";
- _ ->
- "<<\"" ++ binary_to_list(Key) ++ "\">>"
- end;
-format_key(Key) ->
- couch_epi_codegen:format_term(Key).
-
-%% ------------------------------------------------------------------
-%% Helper functions
-%% ------------------------------------------------------------------
-
-module_name({Service, Key}) when is_list(Service) andalso is_list(Key) ->
- list_to_atom(string:join([atom_to_list(?MODULE), Service, Key], "_")).
-
-get_current_definitions(Handle) ->
- if_exists(Handle, by_source, 0, [], fun() ->
- Handle:by_source()
- end).
-
-if_exists(Handle, Func, Arity, Default, Fun) ->
- case erlang:function_exported(Handle, Func, Arity) of
- true -> Fun();
- false -> Default
- end.
-
-defined_keys(Defs) ->
- Keys = fold_defs(Defs, [], fun({_Source, Key, _Data}, Acc) ->
- [Key | Acc]
- end),
- lists:usort(Keys).
-
-defined_subscribers(Defs) ->
- [Source || {Source, _} <- Defs].
-
-fold_defs(Defs, Acc, Fun) ->
- lists:foldr(
- fun({Source, SourceData}, Clauses) ->
- lists:foldr(
- fun({Key, Data}, InAcc) ->
- Fun({Source, Key, Data}, InAcc)
- end,
- [],
- SourceData
- ) ++ Clauses
- end,
- Acc,
- Defs
- ).
-
-%% ------------------------------------------------------------------
-%% Tests
-%% ------------------------------------------------------------------
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-basic_test() ->
- Module = foo_bar_baz_bugz,
-
- Data1 = [some_nice_data],
- Data2 = "other data",
- Data3 = {"even more data"},
- Defs1 = [{foo, Data1}],
- Defs2 = lists:usort([{foo, Data2}, {bar, Data3}]),
-
- Defs = [{app1, Defs1}, {app2, Defs2}],
- generate(Module, Defs),
-
- ?assertEqual([bar, foo], lists:usort(Module:keys())),
- ?assertEqual([app1, app2], lists:usort(Module:subscribers())),
-
- ?assertEqual(Data1, Module:get(app1, foo)),
- ?assertEqual(Data2, Module:get(app2, foo)),
- ?assertEqual(Data3, Module:get(app2, bar)),
-
- ?assertEqual(undefined, Module:get(bad, key)),
- ?assertEqual(undefined, Module:get(source, bad)),
-
- ?assertEqual("3KZ4EG4WBF4J683W8GSDDPYR3", Module:version(app1)),
- ?assertEqual("4EFUU47W9XDNMV9RMZSSJQU3Y", Module:version(app2)),
-
- ?assertEqual({error, {unknown, bad}}, Module:version(bad)),
-
- ?assertEqual(
- [
- {app1, "3KZ4EG4WBF4J683W8GSDDPYR3"},
- {app2, "4EFUU47W9XDNMV9RMZSSJQU3Y"}
- ],
- lists:usort(Module:version())
- ),
-
- ?assertEqual(
- [{app1, [some_nice_data]}, {app2, "other data"}],
- lists:usort(Module:by_key(foo))
- ),
-
- ?assertEqual([], lists:usort(Module:by_key(bad))),
-
- ?assertEqual(
- [
- {bar, [{app2, {"even more data"}}]},
- {foo, [{app2, "other data"}, {app1, [some_nice_data]}]}
- ],
- lists:usort(Module:by_key())
- ),
-
- ?assertEqual(Defs1, lists:usort(Module:by_source(app1))),
- ?assertEqual(Defs2, lists:usort(Module:by_source(app2))),
-
- ?assertEqual([], lists:usort(Module:by_source(bad))),
-
- ?assertEqual(
- [
- {app1, [{foo, [some_nice_data]}]},
- {app2, [{foo, "other data"}, {bar, {"even more data"}}]}
- ],
- lists:usort(Module:by_source())
- ),
-
- ?assertEqual(
- lists:usort([Data1, Data2, Data3]), lists:usort(Module:all())
- ),
- ?assertEqual(lists:usort([Data1, Data2]), lists:usort(Module:all(foo))),
- ?assertEqual([], lists:usort(Module:all(bad))),
- ok.
-
--endif.
diff --git a/src/couch_epi/src/couch_epi_functions.erl b/src/couch_epi/src/couch_epi_functions.erl
deleted file mode 100644
index 1c5fd3403..000000000
--- a/src/couch_epi/src/couch_epi_functions.erl
+++ /dev/null
@@ -1,53 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_functions).
-
--include("couch_epi.hrl").
-
-%% ------------------------------------------------------------------
-%% API Function Exports
-%% ------------------------------------------------------------------
-
--export([interval/1, data/1]).
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-interval(_) ->
- undefined.
-
-data(Specs) ->
- Defs = [{A, definitions(M)} || {A, #couch_epi_spec{value = M}} <- Specs],
- Modules = lists:flatten([M || {_App, #couch_epi_spec{value = M}} <- Specs]),
- {ok, couch_epi_functions_gen:hash(Modules), group(Defs)}.
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-definitions(Module) when is_atom(Module) ->
- definitions([Module]);
-definitions(Modules) ->
- Blacklist = [{module_info, 0}, {module_info, 1}],
- [{M, M:module_info(exports) -- Blacklist} || M <- Modules].
-
-group(KV) ->
- Dict = lists:foldr(
- fun({K, V}, D) ->
- dict:append_list(K, V, D)
- end,
- dict:new(),
- KV
- ),
- [{K, lists:reverse(V)} || {K, V} <- dict:to_list(Dict)].
diff --git a/src/couch_epi/src/couch_epi_functions_gen.erl b/src/couch_epi/src/couch_epi_functions_gen.erl
deleted file mode 100644
index d7364c044..000000000
--- a/src/couch_epi/src/couch_epi_functions_gen.erl
+++ /dev/null
@@ -1,492 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_functions_gen).
-
--export([
- generate/2,
- get_current_definitions/1,
- get_handle/1,
- hash/1
-]).
-
--export([
- apply/4,
- apply/5,
- modules/3,
- decide/5
-]).
-
--ifdef(TEST).
-
--export([foo/2, bar/0]).
-
--endif.
-
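-%% Dispatch options, parsed from the caller's option list by parse_opts/1:
-%%   ignore_errors - wrap each provider call in a catch and keep going
-%%   pipe          - fold the argument list through the providers, feeding
-%%                   each one the previous result (incompatible with concurrent)
-%%   concurrent    - spawn a separate process per provider call
-%%   interruptible - stop at the first {decided, _} reply (used by decide/5)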
--record(opts, {
- ignore_errors = false,
- pipe = false,
- concurrent = false,
- interruptible = false
-}).
-
-get_handle(ServiceId) ->
- module_name(atom_to_list(ServiceId)).
-
-apply(ServiceId, Function, Args, Opts) when is_atom(ServiceId) ->
- apply(get_handle(ServiceId), ServiceId, Function, Args, Opts).
-
--spec apply(
- Handle :: atom(),
- ServiceId :: atom(),
- Function :: atom(),
- Args :: [term()],
- Opts :: couch_epi:apply_opts()
-) -> [any()].
-
-apply(Handle, _ServiceId, Function, Args, Opts) ->
- DispatchOpts = parse_opts(Opts),
- Modules = providers(Handle, Function, length(Args), DispatchOpts),
- dispatch(Handle, Modules, Function, Args, DispatchOpts).
-
--spec decide(
- Handle :: atom(),
- ServiceId :: atom(),
- Function :: atom(),
- Args :: [term()],
- Opts :: couch_epi:apply_opts()
-) ->
- no_decision | {decided, term()}.
-
-decide(Handle, _ServiceId, Function, Args, Opts) ->
- DispatchOpts = parse_opts([interruptible | Opts]),
- Modules = providers(Handle, Function, length(Args), DispatchOpts),
- dispatch(Handle, Modules, Function, Args, DispatchOpts).
-
-%% ------------------------------------------------------------------
-%% Code generation routines
-%% ------------------------------------------------------------------
-
-preamble() ->
- "\n"
- " -export([version/0, version/1]).\n"
- " -export([providers/0, providers/2]).\n"
- " -export([definitions/0, definitions/1]).\n"
- " -export([dispatch/3]).\n"
- " -export([callbacks/2]).\n"
- "\n"
- " version() ->\n"
- " [{Provider, version(Provider)} || Provider <- providers()].\n"
- "\n"
- " definitions() ->\n"
- " [{Provider, definitions(Provider)} || Provider <- providers()].\n"
- "\n"
- " callbacks(Provider, Function) ->\n"
- " [].\n"
- "\n"
- " "
-%% In addition to the preamble we also generate the following functions:
-%% dispatch(Module, Function, [A1, A2]) -> Module:Function(A1, A2);
-
-%% version(Source1) -> "HASH";
-%% version(Source) -> {error, {unknown, Source}}.
-
-%% providers() -> [].
-%% providers(Function, Arity) -> [].
-%% definitions(Provider) -> [{Module, [{Fun, Arity}]}].
-.
-
-generate(Handle, Defs) ->
- DispatchFunForms = couch_epi_codegen:function(dispatchers(Defs)),
- VersionFunForms = couch_epi_codegen:function(version_method(Defs)),
-
- AllProvidersForms = all_providers_method(Defs),
- ProvidersForms = couch_epi_codegen:function(providers_method(Defs)),
- DefinitionsForms = couch_epi_codegen:function(definitions_method(Defs)),
-
- Forms =
- couch_epi_codegen:scan(preamble()) ++
- DispatchFunForms ++ VersionFunForms ++
- ProvidersForms ++ AllProvidersForms ++
- DefinitionsForms,
-
- couch_epi_codegen:generate(Handle, Forms).
-
-all_providers_method(Defs) ->
- Providers = couch_epi_codegen:format_term(defined_providers(Defs)),
- couch_epi_codegen:scan("providers() -> " ++ Providers ++ ".").
-
-providers_method(Defs) ->
- Providers = providers_by_function(Defs),
- DefaultClause = "providers(_, _) -> [].",
- lists:foldl(
- fun({{Fun, Arity}, Modules}, Clauses) ->
- providers(Fun, Arity, Modules) ++ Clauses
- end,
- [couch_epi_codegen:scan(DefaultClause)],
- Providers
- ).
-
-providers(Function, Arity, Modules) ->
- ArityStr = integer_to_list(Arity),
- Mods = couch_epi_codegen:format_term(Modules),
- Fun = atom_to_list(Function),
- %% providers(Function, Arity) -> [Module];
- couch_epi_codegen:scan(
- "providers(" ++ Fun ++ "," ++ ArityStr ++ ") ->" ++ Mods ++ ";"
- ).
-
-dispatchers(Defs) ->
- DefaultClause = "dispatch(_Module, _Fun, _Args) -> ok.",
- fold_defs(
- Defs,
- [couch_epi_codegen:scan(DefaultClause)],
- fun({_Source, Module, Function, Arity}, Acc) ->
- dispatcher(Module, Function, Arity) ++ Acc
- end
- ).
-
-version_method(Defs) ->
- DefaultClause = "version(S) -> {error, {unknown, S}}.",
- lists:foldl(
- fun({Source, SrcDefs}, Clauses) ->
- version(Source, SrcDefs) ++ Clauses
- end,
- [couch_epi_codegen:scan(DefaultClause)],
- Defs
- ).
-
-definitions_method(Defs) ->
- DefaultClause = "definitions(S) -> {error, {unknown, S}}.",
- lists:foldl(
- fun({Source, SrcDefs}, Clauses) ->
- definition(Source, SrcDefs) ++ Clauses
- end,
- [couch_epi_codegen:scan(DefaultClause)],
- Defs
- ).
-
-definition(Source, Defs) ->
- Src = atom_to_list(Source),
- DefsStr = couch_epi_codegen:format_term(Defs),
- couch_epi_codegen:scan("definitions(" ++ Src ++ ") -> " ++ DefsStr ++ ";").
-
-dispatcher(Module, Function, 0) ->
- M = atom_to_list(Module),
- Fun = atom_to_list(Function),
-
- %% dispatch(Module, Function, []) -> Module:Function();
- couch_epi_codegen:scan(
- "dispatch(" ++ M ++ "," ++ Fun ++ ", []) ->" ++
- M ++ ":" ++ Fun ++ "();"
- );
-dispatcher(Module, Function, Arity) ->
- Args = args_string(Arity),
- M = atom_to_list(Module),
- Fun = atom_to_list(Function),
- %% dispatch(Module, Function, [A1, A2]) -> Module:Function(A1, A2);
- couch_epi_codegen:scan(
- "dispatch(" ++ M ++ "," ++ Fun ++ ", [" ++ Args ++ "]) ->" ++
- M ++ ":" ++ Fun ++ "(" ++ Args ++ ");"
- ).
-
-args_string(Arity) ->
- Vars = ["A" ++ integer_to_list(Seq) || Seq <- lists:seq(1, Arity)],
- string:join(Vars, ", ").
-
-version(Source, SrcDefs) ->
- Modules = [Module || {Module, _Exports} <- SrcDefs],
- couch_epi_codegen:scan(
- "version(" ++ atom_to_list(Source) ++ ") ->" ++ hash(Modules) ++ ";"
- ).
-
-%% ------------------------------------------------------------------
-%% Helper functions
-%% ------------------------------------------------------------------
-
-module_name(ServiceId) when is_list(ServiceId) ->
- list_to_atom(string:join([atom_to_list(?MODULE), ServiceId], "_")).
-
-get_current_definitions(Handle) ->
- if_exists(Handle, definitions, 0, [], fun() ->
- Handle:definitions()
- end).
-
-if_exists(Handle, Func, Arity, Default, Fun) ->
- case erlang:function_exported(Handle, Func, Arity) of
- true -> Fun();
- false -> Default
- end.
-
-defined_providers(Defs) ->
- [Source || {Source, _} <- Defs].
-
-%% Defs = [{Source, [{Module, [{Fun, Arity}]}]}]
-fold_defs(Defs, Acc, Fun) ->
- lists:foldl(
- fun({Source, SourceData}, Clauses) ->
- lists:foldl(
- fun({Module, Exports}, ExportsAcc) ->
- lists:foldl(
- fun({Function, Arity}, InAcc) ->
- Fun({Source, Module, Function, Arity}, InAcc)
- end,
- [],
- Exports
- ) ++ ExportsAcc
- end,
- [],
- SourceData
- ) ++ Clauses
- end,
- Acc,
- Defs
- ).
-
-providers_by_function(Defs) ->
- Providers = fold_defs(
- Defs,
- [],
- fun({_Source, Module, Function, Arity}, Acc) ->
- [{{Function, Arity}, Module} | Acc]
- end
- ),
- Dict = lists:foldl(
- fun({K, V}, Acc) ->
- dict:update(
- K,
- fun(Modules) ->
- append_if_missing(Modules, V)
- end,
- [V],
- Acc
- )
- end,
- dict:new(),
- Providers
- ),
- dict:to_list(Dict).
-
-append_if_missing(List, Value) ->
- case lists:member(Value, List) of
- true -> List;
- false -> [Value | List]
- end.
-
-hash(Modules) ->
- VSNs = [couch_epi_util:module_version(M) || M <- lists:usort(Modules)],
- couch_epi_util:hash(VSNs).
-
-dispatch(_Handle, _Modules, _Func, _Args, #opts{concurrent = true, pipe = true}) ->
- throw({error, {incompatible_options, [concurrent, pipe]}});
-dispatch(
- Handle,
- Modules,
- Function,
- Args,
- #opts{pipe = true, ignore_errors = true}
-) ->
- lists:foldl(
- fun(Module, Acc) ->
- try
- Handle:dispatch(Module, Function, Acc)
- catch
- _:_ ->
- Acc
- end
- end,
- Args,
- Modules
- );
-dispatch(
- Handle,
- Modules,
- Function,
- Args,
- #opts{pipe = true}
-) ->
- lists:foldl(
- fun(Module, Acc) ->
- Handle:dispatch(Module, Function, Acc)
- end,
- Args,
- Modules
- );
-dispatch(
- Handle,
- Modules,
- Function,
- Args,
- #opts{interruptible = true}
-) ->
- apply_while(Modules, Handle, Function, Args);
-dispatch(Handle, Modules, Function, Args, #opts{} = Opts) ->
- [do_dispatch(Handle, Module, Function, Args, Opts) || Module <- Modules].
-
-do_dispatch(
- Handle,
- Module,
- Function,
- Args,
- #opts{concurrent = true, ignore_errors = true}
-) ->
- spawn(fun() ->
- (catch Handle:dispatch(Module, Function, Args))
- end);
-do_dispatch(
- Handle,
- Module,
- Function,
- Args,
- #opts{ignore_errors = true}
-) ->
- (catch Handle:dispatch(Module, Function, Args));
-do_dispatch(
- Handle,
- Module,
- Function,
- Args,
- #opts{concurrent = true}
-) ->
- spawn(fun() -> Handle:dispatch(Module, Function, Args) end);
-do_dispatch(Handle, Module, Function, Args, #opts{}) ->
- Handle:dispatch(Module, Function, Args).
-
-apply_while([], _Handle, _Function, _Args) ->
- no_decision;
-apply_while([Module | Modules], Handle, Function, Args) ->
- case Handle:dispatch(Module, Function, Args) of
- no_decision ->
- apply_while(Modules, Handle, Function, Args);
-        {decided, _Decision} = Result ->
- Result
- end.
-
-parse_opts(Opts) ->
- parse_opts(Opts, #opts{}).
-
-parse_opts([ignore_errors | Rest], #opts{} = Acc) ->
- parse_opts(Rest, Acc#opts{ignore_errors = true});
-parse_opts([pipe | Rest], #opts{} = Acc) ->
- parse_opts(Rest, Acc#opts{pipe = true});
-parse_opts([concurrent | Rest], #opts{} = Acc) ->
- parse_opts(Rest, Acc#opts{concurrent = true});
-parse_opts([interruptible | Rest], #opts{} = Acc) ->
- parse_opts(Rest, Acc#opts{interruptible = true});
-parse_opts([], Acc) ->
- Acc.
-
-providers(Handle, Function, Arity, #opts{}) ->
- Handle:providers(Function, Arity).
-
--spec modules(Handle :: atom(), Function :: atom(), Arity :: pos_integer()) ->
- list().
-modules(Handle, Function, Arity) ->
- providers(Handle, Function, Arity, #opts{}).
-
-%% ------------------------------------------------------------------
-%% Tests
-%% ------------------------------------------------------------------
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-foo(A1, A2) ->
- {A1, A2}.
-
-bar() ->
- [].
-
-basic_test() ->
- Module = foo_bar_dispatcher,
- Defs = [{?MODULE, [{foo, 2}, {bar, 0}]}],
-
- generate(Module, [{app1, Defs}, {app2, Defs}]),
-
- Exports = lists:sort([
- {callbacks, 2},
- {version, 1},
- {providers, 2},
- {definitions, 1},
- {module_info, 0},
- {version, 0},
- {dispatch, 3},
- {providers, 0},
- {module_info, 1},
- {definitions, 0}
- ]),
-
- ?assertEqual(Exports, lists:sort(Module:module_info(exports))),
- ?assertEqual([app1, app2], lists:sort(Module:providers())),
-
- ?assertEqual([?MODULE], lists:sort(Module:providers(foo, 2))),
- ?assertEqual([?MODULE], lists:sort(Module:providers(bar, 0))),
-
- Defs2 = lists:usort(Module:definitions()),
- ?assertMatch([{app1, [{?MODULE, _}]}, {app2, [{?MODULE, _}]}], Defs2),
-
- ?assertMatch([{app1, Hash}, {app2, Hash}], Module:version()),
-
- ?assertMatch([], Module:dispatch(?MODULE, bar, [])),
- ?assertMatch({1, 2}, Module:dispatch(?MODULE, foo, [1, 2])),
-
- ok.
-
-generate_module(Name, Body) ->
- Tokens = couch_epi_codegen:scan(Body),
- couch_epi_codegen:generate(Name, Tokens).
-
-decide_module(decide) ->
- "\n"
- " -export([inc/1]).\n"
- "\n"
- " inc(A) ->\n"
- " {decided, A + 1}.\n"
- " ";
-decide_module(no_decision) ->
- "\n"
- " -export([inc/1]).\n"
- "\n"
- " inc(_A) ->\n"
- " no_decision.\n"
- " ".
-
-decide_test() ->
- ok = generate_module(decide, decide_module(decide)),
- ok = generate_module(no_decision, decide_module(no_decision)),
-
- DecideDef = {foo_app, [{decide, [{inc, 1}]}]},
-    NoDecisionDef = {bar_app, [{no_decision, [{inc, 1}]}]},
-
-    DecideFirstHandle = decide_first_handle,
-    ok = generate(DecideFirstHandle, [DecideDef, NoDecisionDef]),
-    ?assertMatch([decide, no_decision], DecideFirstHandle:providers(inc, 1)),
-    ?assertMatch({decided, 4}, decide(DecideFirstHandle, anything, inc, [3], [])),
-
-    DecideSecondHandle = decide_second_handle,
-    ok = generate(DecideSecondHandle, [NoDecisionDef, DecideDef]),
-    ?assertMatch([no_decision, decide], DecideSecondHandle:providers(inc, 1)),
-    ?assertMatch({decided, 4}, decide(DecideSecondHandle, anything, inc, [3], [])),
-
-    NoDecisionHandle = no_decision_handle,
-    ok = generate(NoDecisionHandle, [NoDecisionDef]),
-    ?assertMatch([no_decision], NoDecisionHandle:providers(inc, 1)),
-    ?assertMatch(no_decision, decide(NoDecisionHandle, anything, inc, [3], [])),
-
- NoHandle = no_handle,
- ok = generate(NoHandle, []),
- ?assertMatch([], NoHandle:providers(inc, 1)),
- ?assertMatch(no_decision, decide(NoHandle, anything, inc, [3], [])),
-
- ok.
-
--endif.
diff --git a/src/couch_epi/src/couch_epi_module_keeper.erl b/src/couch_epi/src/couch_epi_module_keeper.erl
deleted file mode 100644
index 97420ea7b..000000000
--- a/src/couch_epi/src/couch_epi_module_keeper.erl
+++ /dev/null
@@ -1,174 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_module_keeper).
-
--behaviour(gen_server).
-
-%% ------------------------------------------------------------------
-%% API Function Exports
-%% ------------------------------------------------------------------
-
--export([start_link/3, stop/1]).
--export([reload/1]).
-
-%% ------------------------------------------------------------------
-%% gen_server Function Exports
-%% ------------------------------------------------------------------
-
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
--record(state, {
- codegen,
- module,
- key,
- type,
- handle,
- hash,
- kind,
- timer = {undefined, undefined}
-}).
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-start_link(Type, Key, Codegen) ->
- Handle = Codegen:get_handle(Key),
- gen_server:start_link(
- {local, Handle}, ?MODULE, [Type, Codegen, Key, Handle], []
- ).
-
-stop(Server) ->
- catch gen_server:call(Server, stop).
-
-reload(Server) ->
- gen_server:call(Server, reload).
-
-%% ------------------------------------------------------------------
-%% gen_server Function Definitions
-%% ------------------------------------------------------------------
-
-init([Kind, Codegen, Key, Handle]) ->
- Type = type(Kind),
- State = #state{
- codegen = Codegen,
- key = Key,
- type = Type,
- handle = Handle,
- kind = Kind
- },
- compile_module(State).
-
-handle_call(reload, _From, State0) ->
- {Reply, State1} = reload_if_updated(State0),
- {reply, Reply, State1};
-handle_call(_Request, _From, State) ->
- {reply, ok, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(tick, State0) ->
- {_Res, State1} = reload_if_updated(State0),
- {noreply, State1};
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State0, _Extra) ->
- {_Res, State1} = reload_if_updated(State0),
- {ok, State1}.
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-type(data_providers) -> couch_epi_data;
-type(providers) -> couch_epi_functions;
-type(services) -> couch_epi_functions.
-
-reload_if_updated(#state{handle = Module} = State) ->
- case couch_epi_util:module_exists(Module) of
- true ->
- do_reload_if_updated(State);
- false ->
- {ok, State}
- end.
-
-compile_module(State) ->
- do_reload_if_updated(State).
-
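-%% Regenerate the handle module only when the hash of the provider data has
-%% changed; on a change, recompile the module, notify subscribing plugins and
-%% update the refresh timer from Type:interval(Defs).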
-do_reload_if_updated(#state{} = State0) ->
- #state{
- hash = OldHash,
- type = Type,
- key = Key,
- kind = Kind
- } = State0,
- Defs = couch_epi_plugin:definitions(Kind, Key),
- case Type:data(Defs) of
- {ok, OldHash, _Data} ->
- {ok, State0};
- {ok, Hash, Data} ->
- {ok, OldData, State1} = safe_set(Hash, Data, State0),
- notify(Key, OldData, Data, Defs),
- State2 = update_interval(Type:interval(Defs), State1),
- {ok, State2};
- Else ->
- {Else, State0}
- end.
-
-update_interval(undefined, #state{timer = Timer} = State) ->
- State#state{timer = cancel_timer(Timer)};
-update_interval(Interval, #state{timer = Timer} = State) ->
- State#state{timer = start_timer(Interval, Timer)}.
-
-start_timer(Interval, {undefined, undefined}) ->
- {ok, Timer} = timer:send_interval(Interval, self(), tick),
- {Timer, Interval};
-start_timer(Interval, {Timer, _Interval}) ->
- start_timer(Interval, cancel_timer(Timer)).
-
-cancel_timer({undefined, undefined}) ->
- {undefined, undefined};
-cancel_timer({Timer, _Interval}) ->
- timer:cancel(Timer),
- {undefined, undefined}.
-
-safe_set(Hash, Data, #state{} = State) ->
- #state{
- handle = Handle,
- codegen = CodeGen
- } = State,
- try
- OldData = CodeGen:get_current_definitions(Handle),
- ok = CodeGen:generate(Handle, Data),
- {ok, OldData, State#state{hash = Hash}}
- catch
- Class:Reason ->
- {{Class, Reason}, State}
- end.
-
-notify(Key, OldData, NewData, Defs) ->
- Specs = [Spec || {_App, Spec} <- Defs],
- couch_epi_plugin:notify(Key, OldData, NewData, Specs),
- ok.
diff --git a/src/couch_epi/src/couch_epi_plugin.erl b/src/couch_epi/src/couch_epi_plugin.erl
deleted file mode 100644
index 1ec09d8dc..000000000
--- a/src/couch_epi/src/couch_epi_plugin.erl
+++ /dev/null
@@ -1,395 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_plugin).
-
--include("couch_epi.hrl").
-
--export([
- definitions/1,
- definitions/2,
- grouped_definitions/1,
- plugin_processes/2,
- codegen/1
-]).
-
--export([notify/4]).
-
-%% ------------------------------------------------------------------
-%% Type Definitions
-%% ------------------------------------------------------------------
-
--type kind() ::
- providers
- | data_providers
- | services
- | data_subscriptions.
-
--type key() ::
- {ServiceId :: couch_epi:service_id(), Key :: couch_epi:key()}
- | couch_epi:service_id().
-
--callback app() -> couch_epi:app().
--callback providers() -> [{couch_epi:service_id(), module()}].
--callback services() -> [{couch_epi:service_id(), module()}].
--callback data_subscriptions() -> [{couch_epi:service_id(), couch_epi:key()}].
--callback data_providers() ->
- [
- {couch_epi:key(), couch_epi:data_spec()}
- | {couch_epi:key(), couch_epi:data_spec(), [couch_epi:data_spec_opt()]}
- ].
--callback processes() -> [{couch_epi:plugin_id(), [supervisor:child_spec()]}].
--callback notify(Key :: term(), Old :: term(), New :: term()) -> ok.
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-definitions(Plugins) ->
- lists:append([extract_definitions(Plugin) || Plugin <- Plugins]).
-
-plugin_processes(Plugin, Plugins) ->
- lists:append([Specs || P0 <- Plugins, {P1, Specs} <- P0:processes(), P1 =:= Plugin]).
-
-grouped_definitions(Plugins) ->
- Defs = lists:append([extract_definitions(Plugin) || Plugin <- Plugins]),
- group_specs(Defs).
-
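-%% Collect the specs that all configured plugins (couch_epi 'plugins' app env)
-%% registered for the given Kind and Key.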
-definitions(Kind, Key) ->
- Plugins = application:get_env(couch_epi, plugins, []),
- Definitions = definitions(Plugins),
- Filtered = filter_by_key(Definitions, Kind, Key),
- case group_specs(Filtered) of
- [] -> [];
- [{_, Defs}] -> Defs
- end.
-
-notify(Key, OldData, NewData, Specs) ->
- Plugins = lists:usort([Plugin || #couch_epi_spec{behaviour = Plugin} <- Specs]),
- [notify_plugin(Plugin, Key, OldData, NewData) || Plugin <- Plugins],
- ok.
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-notify_plugin(Plugin, Key, OldData, NewData) ->
- App = Plugin:app(),
- Plugin:notify(Key, app_data(App, OldData), app_data(App, NewData)).
-
-app_data(App, Data) ->
- case lists:keyfind(App, 1, Data) of
- {App, AppData} -> AppData;
- false -> []
- end.
-
-filter_by_key(Definitions, Kind, Key) ->
- lists:filter(fun(Spec) -> by_key(Spec, Kind, Key) end, Definitions).
-
-by_key(#couch_epi_spec{kind = Kind, key = Key}, Kind, Key) -> true;
-by_key(_, _, _) -> false.
-
-extract_definitions(Plugin) ->
- specs(Plugin, providers) ++
- specs(Plugin, data_providers) ++
- specs(Plugin, services) ++
- specs(Plugin, data_subscriptions).
-
--spec group_specs(Specs :: [#couch_epi_spec{}]) -> GroupedSpecs when
- GroupedSpecs ::
- [{{kind(), key()}, [{couch_epi:app(), #couch_epi_spec{}}]}].
-
-group_specs(Specs) ->
- Grouped = group(
- [
- {{Kind, Key}, group([{App, Spec}])}
- || #couch_epi_spec{kind = Kind, key = Key, app = App} = Spec <- Specs
- ]
- ),
- [{K, lists:reverse(V)} || {K, V} <- Grouped].
-
-group(KV) ->
- dict:to_list(
- lists:foldr(
- fun({K, V}, D) ->
- dict:append_list(K, V, D)
- end,
- dict:new(),
- KV
- )
- ).
-
-specs(Plugin, Kind) ->
- [spec(parse(Spec, Kind), Plugin, Kind) || Spec <- Plugin:Kind()].
-
-spec({Key, Value, Options}, Plugin, Kind) ->
- App = Plugin:app(),
- #couch_epi_spec{
- app = App,
- behaviour = Plugin,
- kind = Kind,
- options = Options,
- key = Key,
- value = Value,
- codegen = codegen(Kind),
- type = type(Kind, Value)
- }.
-
-parse({Key, Value}, Kind) ->
- parse({Key, Value, []}, Kind);
-parse({Key, Value, Options}, data_subscriptions) ->
- {{Key, Value}, undefined, Options};
-parse({_, _, _} = Tuple, _Kind) ->
- Tuple.
-
-codegen(providers) -> couch_epi_functions_gen;
-codegen(services) -> couch_epi_functions_gen;
-codegen(data_providers) -> couch_epi_data_gen;
-codegen(data_subscriptions) -> couch_epi_data_gen.
-
-type(providers, _) -> couch_epi_functions;
-type(services, _) -> couch_epi_functions;
-type(data_providers, _) -> couch_epi_data;
-type(data_subscriptions, _) -> undefined.
-
-%% ------------------------------------------------------------------
-%% Tests
-%% ------------------------------------------------------------------
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-plugin_module(foo_epi) ->
- "\n"
- " -compile([export_all]).\n"
- "\n"
- " app() -> foo.\n"
- " providers() ->\n"
- " [\n"
- " {chttpd_handlers, foo_provider},\n"
- " {bar_handlers, bar_provider1},\n"
- " {bar_handlers, bar_provider2}\n"
- " ].\n"
- "\n"
- " services() ->\n"
- " [\n"
- " {foo_handlers, foo_service}\n"
- " ].\n"
- "\n"
- " data_providers() ->\n"
- " [\n"
- " {{foo_service, data1}, {file, \"abs_file\"}, [{interval, 5000}]},\n"
- " {{foo_service, data2}, {priv_file, \"priv_file\"}},\n"
- " {{foo_service, data3}, {module, foo_data}}\n"
- " ].\n"
- "\n"
- " data_subscriptions() ->\n"
- " [\n"
- " {stats, foo_definitions}\n"
- " ].\n"
- "\n"
- " processes() -> [].\n"
- "\n"
- " notify(_, _, _) -> ok.\n"
- " ";
-plugin_module(bar_epi) ->
- "\n"
- " -compile([export_all]).\n"
- "\n"
- " app() -> bar.\n"
- " providers() ->\n"
- " [\n"
- " {chttpd_handlers, bar_provider},\n"
- " {bar_handlers, bar_provider}\n"
- " ].\n"
- "\n"
- " services() ->\n"
- " [\n"
- " {bar_handlers, bar_service}\n"
- " ].\n"
- "\n"
- " data_providers() ->\n"
- " [].\n"
- "\n"
- " data_subscriptions() ->\n"
- " [\n"
- " {foo_service, data1}\n"
- " ].\n"
- "\n"
- " processes() -> [].\n"
- "\n"
- " notify(_, _, _) -> ok.\n"
- " ".
-
-generate_module(Name, Body) ->
- Tokens = couch_epi_codegen:scan(Body),
- couch_epi_codegen:generate(Name, Tokens).
-
-generate_modules(Kind, Providers) ->
- [generate_module(P, Kind(P)) || P <- Providers].
-
-provider_modules_order_test() ->
- [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
- ok = application:set_env(couch_epi, plugins, [foo_epi, bar_epi]),
- Expected = [
- {foo, bar_provider1},
- {foo, bar_provider2},
- {bar, bar_provider}
- ],
-
- Defs = definitions(providers, bar_handlers),
- Results = [{App, V} || {App, #couch_epi_spec{value = V}} <- Defs],
- Tests = lists:zip(Expected, Results),
- [?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
- ok.
-
-providers_order_test() ->
- [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
- Expected = [
- {foo, bar_provider1},
- {foo, bar_provider2},
- {bar, bar_provider}
- ],
- AllDefs = grouped_definitions([foo_epi, bar_epi]),
- {_, Defs} = lists:keyfind({providers, bar_handlers}, 1, AllDefs),
- Results = [{App, V} || {App, #couch_epi_spec{value = V}} <- Defs],
- Tests = lists:zip(Expected, Results),
- [?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
- ok.
-
-definitions_test() ->
- Expected = lists:sort([
- #couch_epi_spec{
- behaviour = bar_epi,
- app = bar,
- kind = providers,
- options = [],
- key = bar_handlers,
- value = bar_provider,
- codegen = couch_epi_functions_gen,
- type = couch_epi_functions
- },
- #couch_epi_spec{
- behaviour = bar_epi,
- app = bar,
- kind = services,
- options = [],
- key = bar_handlers,
- value = bar_service,
- codegen = couch_epi_functions_gen,
- type = couch_epi_functions
- },
- #couch_epi_spec{
- behaviour = bar_epi,
- app = bar,
- kind = providers,
- options = [],
- key = chttpd_handlers,
- value = bar_provider,
- codegen = couch_epi_functions_gen,
- type = couch_epi_functions
- },
- #couch_epi_spec{
- behaviour = bar_epi,
- app = bar,
- kind = data_subscriptions,
- options = [],
- key = {foo_service, data1},
- value = undefined,
- codegen = couch_epi_data_gen
- },
- #couch_epi_spec{
- behaviour = foo_epi,
- app = foo,
- kind = providers,
- options = [],
- key = bar_handlers,
- value = bar_provider1,
- codegen = couch_epi_functions_gen,
- type = couch_epi_functions
- },
- #couch_epi_spec{
- behaviour = foo_epi,
- app = foo,
- kind = providers,
- options = [],
- key = bar_handlers,
- value = bar_provider2,
- codegen = couch_epi_functions_gen,
- type = couch_epi_functions
- },
- #couch_epi_spec{
- behaviour = foo_epi,
- app = foo,
- kind = providers,
- options = [],
- key = chttpd_handlers,
- value = foo_provider,
- codegen = couch_epi_functions_gen,
- type = couch_epi_functions
- },
- #couch_epi_spec{
- behaviour = foo_epi,
- app = foo,
- kind = services,
- options = [],
- key = foo_handlers,
- value = foo_service,
- codegen = couch_epi_functions_gen,
- type = couch_epi_functions
- },
- #couch_epi_spec{
- behaviour = foo_epi,
- app = foo,
- kind = data_providers,
- options = [{interval, 5000}],
- key = {foo_service, data1},
- value = {file, "abs_file"},
- codegen = couch_epi_data_gen,
- type = couch_epi_data
- },
- #couch_epi_spec{
- behaviour = foo_epi,
- app = foo,
- kind = data_providers,
- options = [],
- key = {foo_service, data2},
- value = {priv_file, "priv_file"},
- codegen = couch_epi_data_gen,
- type = couch_epi_data
- },
- #couch_epi_spec{
- behaviour = foo_epi,
- app = foo,
- kind = data_providers,
- options = [],
- key = {foo_service, data3},
- value = {module, foo_data},
- codegen = couch_epi_data_gen,
- type = couch_epi_data
- },
- #couch_epi_spec{
- behaviour = foo_epi,
- app = foo,
- kind = data_subscriptions,
- options = [],
- key = {stats, foo_definitions},
- value = undefined,
- codegen = couch_epi_data_gen
- }
- ]),
-
- [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
- Tests = lists:zip(Expected, lists:sort(definitions([foo_epi, bar_epi]))),
- [?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
- ok.
--endif.
diff --git a/src/couch_epi/src/couch_epi_sup.erl b/src/couch_epi/src/couch_epi_sup.erl
deleted file mode 100644
index aca423a7d..000000000
--- a/src/couch_epi/src/couch_epi_sup.erl
+++ /dev/null
@@ -1,163 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_sup).
-
-%% --------------------
-%% Important assumption
-%% ====================
-%% Keeper and codechange_monitor childspecs rely on undocumented behaviour.
-%% According to supervisor docs:
-%% ...if the child process is a supervisor, gen_server, or gen_fsm, this
-%% should be a list with one element [Module].
-%% However, it is perfectly fine to have more than one module in the list.
-%% The modules property is used to determine whether a process is suspendable.
-%% Only suspendable processes are hot-code upgraded; the others are killed.
-%% The check looks like `lists:member(Module, Modules)`.
-%% The assumption is that this is indeed an underdocumented fact and not
-%% an implementation detail.
-
--behaviour(supervisor).
-
--include("couch_epi.hrl").
-
-%% API
--export([start_link/0]).
--export([plugin_childspecs/2]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% For testing
--export([
- plugin_childspecs/3
-]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-%% ===================================================================
-%% API functions
-%% ===================================================================
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-plugin_childspecs(Plugin, Children) ->
- Plugins = application:get_env(couch_epi, plugins, []),
- plugin_childspecs(Plugin, Plugins, Children).
-
-%% ===================================================================
-%% Supervisor callbacks
-%% ===================================================================
-
-init([]) ->
- {ok, {{one_for_one, 5, 10}, keepers()}}.
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-keepers() ->
- Plugins = application:get_env(couch_epi, plugins, []),
- Definitions = couch_epi_plugin:grouped_definitions(Plugins),
- Children = keeper_childspecs(Definitions),
- remove_duplicates(Children).
-
-plugin_childspecs(Plugin, Plugins, Children) ->
- Definitions = couch_epi_plugin:grouped_definitions([Plugin]),
- ExtraChildren = couch_epi_plugin:plugin_processes(Plugin, Plugins),
- merge(ExtraChildren, Children) ++ childspecs(Definitions).
-
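-%% One codechange monitor child per {Kind, Key} handle; its modules list
-%% includes the generated handle and the provider modules so hot code
-%% upgrades reach them (see the assumption note above).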
-childspecs(Definitions) ->
- lists:map(
- fun({{Kind, Key}, Defs}) ->
- CodeGen = couch_epi_plugin:codegen(Kind),
- Handle = CodeGen:get_handle(Key),
- Modules = lists:append([modules(Spec) || {_App, Spec} <- Defs]),
- Name = service_name(Key) ++ "|" ++ atom_to_list(Kind),
- code_monitor(Name, [Handle], [Handle | Modules])
- end,
- Definitions
- ).
-
-%% ------------------------------------------------------------------
-%% Helper Function Definitions
-%% ------------------------------------------------------------------
-
-remove_duplicates(Definitions) ->
- lists:ukeysort(1, Definitions).
-
-keeper_childspecs(Definitions) ->
- lists:map(
- fun({{Kind, Key}, _Specs}) ->
- Name = service_name(Key) ++ "|keeper",
- CodeGen = couch_epi_plugin:codegen(Kind),
- Handle = CodeGen:get_handle(Key),
- keeper(Name, [provider_kind(Kind), Key, CodeGen], [Handle])
- end,
- Definitions
- ).
-
-keeper(Name, Args, Modules) ->
- {
- "couch_epi|" ++ Name,
- {couch_epi_module_keeper, start_link, Args},
- permanent,
- 5000,
- worker,
- Modules
- }.
-
-code_monitor(Name, Args, Modules0) ->
- Modules = [couch_epi_codechange_monitor | Modules0],
- {
- "couch_epi_codechange_monitor|" ++ Name,
- {couch_epi_codechange_monitor, start_link, Args},
- permanent,
- 5000,
- worker,
- Modules
- }.
-
-provider_kind(services) -> providers;
-provider_kind(data_subscriptions) -> data_providers;
-provider_kind(Kind) -> Kind.
-
-service_name({ServiceId, Key}) ->
- atom_to_list(ServiceId) ++ ":" ++ atom_to_list(Key);
-service_name(ServiceId) ->
- atom_to_list(ServiceId).
-
-modules(#couch_epi_spec{kind = providers, value = Module}) ->
- [Module];
-modules(#couch_epi_spec{kind = services, value = Module}) ->
- [Module];
-modules(#couch_epi_spec{kind = data_providers, value = Value}) ->
- case Value of
- {static_module, Module} -> [Module];
- {callback_module, Module} -> [Module];
- _ -> []
- end;
-modules(#couch_epi_spec{kind = data_subscriptions, behaviour = Module}) ->
- [Module].
-
-merge([], Children) ->
- Children;
-merge([{Id, _, _, _, _, _} = Spec | Rest], Children) ->
- merge(Rest, lists:keystore(Id, 1, Children, Spec));
-merge([#{id := Id} = Spec | Rest], Children) ->
- Replace = fun
- (#{id := I}) when I == Id -> Spec;
- (E) -> E
- end,
- merge(Rest, lists:map(Replace, Children)).
diff --git a/src/couch_epi/src/couch_epi_util.erl b/src/couch_epi/src/couch_epi_util.erl
deleted file mode 100644
index 2c86a96e2..000000000
--- a/src/couch_epi/src/couch_epi_util.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_util).
-
--export([module_version/1, hash/1, module_exists/1]).
-
--compile([nowarn_deprecated_function]).
-
-module_version(Module) ->
- Attributes = Module:module_info(attributes),
- {vsn, VSNs} = lists:keyfind(vsn, 1, Attributes),
- VSNs.
-
-hash(Term) ->
- <<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
- lists:flatten(io_lib:format("\"~.36B\"", [SigInt])).
-
-module_exists(Module) ->
- erlang:function_exported(Module, module_info, 0).
diff --git a/src/couch_epi/test/eunit/couch_epi_basic_test.erl b/src/couch_epi/test/eunit/couch_epi_basic_test.erl
deleted file mode 100644
index a99e9f900..000000000
--- a/src/couch_epi/test/eunit/couch_epi_basic_test.erl
+++ /dev/null
@@ -1,167 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_basic_test).
-
--export([
- start_link/0
-]).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_providers/0,
- data_subscriptions/0,
- processes/0,
- notify/3
-]).
-
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() -> ok.
-
-%% BEGIN couch_epi_plugin behaviour callbacks
-
-app() -> test_app.
-
-providers() ->
- [
- {my_service, provider1},
- {my_service, provider2}
- ].
-
-services() ->
- [
- {my_service, ?MODULE}
- ].
-
-data_providers() ->
- [
- {{test_app, descriptions}, {static_module, ?MODULE}, [{interval, 100}]}
- ].
-
-data_subscriptions() ->
- [
- {test_app, descriptions}
- ].
-
-processes() ->
- [
- {?MODULE, [?CHILD(extra_process, worker)]},
- {?MODULE, [{to_replace, {new, start_link, [bar]}, permanent, 5000, worker, [bar]}]},
- {?MODULE, [
- #{
- id => to_replace_map,
- start => {new, start_link, [bar]},
- modules => [bar]
- }
- ]}
- ].
-
-notify(_Key, _OldData, _NewData) ->
- ok.
-
-%% END couch_epi_plugin behaviour callbacks
-
-parse_child_id(Id) when is_atom(Id) ->
- Id;
-parse_child_id(Id) ->
- ["couch_epi_codechange_monitor", ServiceName, KindStr] =
- string:tokens(Id, "|"),
- Kind = list_to_atom(KindStr),
- case string:tokens(ServiceName, ":") of
- [ServiceId, Key] ->
- {{list_to_atom(ServiceId), list_to_atom(Key)}, Kind};
- [Key] ->
- {list_to_atom(Key), Kind}
- end.
-
--include_lib("eunit/include/eunit.hrl").
-
-basic_test() ->
- Expected = [
- {extra_process, [], [extra_process]},
- {to_replace, [bar], [bar]},
- {to_replace_map, [bar], [bar]},
- {{my_service, providers}, [couch_epi_functions_gen_my_service], [
- couch_epi_codechange_monitor,
- couch_epi_functions_gen_my_service,
- provider1,
- provider2
- ]},
- {
- {my_service, services},
- [couch_epi_functions_gen_my_service],
- lists:sort([
- couch_epi_codechange_monitor,
- couch_epi_functions_gen_my_service,
- ?MODULE
- ])
- },
- {
- {{test_app, descriptions}, data_subscriptions},
- [couch_epi_data_gen_test_app_descriptions],
- lists:sort([
- couch_epi_codechange_monitor,
- couch_epi_data_gen_test_app_descriptions,
- ?MODULE
- ])
- },
- {
- {{test_app, descriptions}, data_providers},
- [couch_epi_data_gen_test_app_descriptions],
- lists:sort([
- couch_epi_codechange_monitor,
- couch_epi_data_gen_test_app_descriptions,
- ?MODULE
- ])
- }
- ],
-
- ToReplace = [
- {to_replace, {old, start_link, [foo]}, permanent, 5000, worker, [foo]},
- #{id => to_replace_map, start => {old, start_link, [foo]}}
- ],
- Children = lists:sort(
- couch_epi_sup:plugin_childspecs(
- ?MODULE, [?MODULE], ToReplace
- )
- ),
-
- Results = lists:map(
- fun
- ({Id, {_M, _F, Args}, _, _, _, Modules}) ->
- {parse_child_id(Id), Args, lists:sort(Modules)};
- (#{id := Id, start := {_M, _F, Args}, modules := Modules}) ->
- {parse_child_id(Id), Args, lists:sort(Modules)}
- end,
- Children
- ),
-
- Tests = lists:zip(lists:sort(Expected), lists:sort(Results)),
- [?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
-
- ExpectedChild = {to_replace, {new, start_link, [bar]}, permanent, 5000, worker, [bar]},
- ?assertEqual(
- ExpectedChild,
- lists:keyfind(to_replace, 1, Children)
- ),
-
- ExpectedMapChildSpec = #{
- id => to_replace_map,
- start => {new, start_link, [bar]},
- modules => [bar]
- },
- [MapChildSpec] = [E || #{id := to_replace_map} = E <- Children],
- ?assertEqual(ExpectedMapChildSpec, MapChildSpec),
- ok.
diff --git a/src/couch_epi/test/eunit/couch_epi_tests.erl b/src/couch_epi/test/eunit/couch_epi_tests.erl
deleted file mode 100644
index 08307fe30..000000000
--- a/src/couch_epi/test/eunit/couch_epi_tests.erl
+++ /dev/null
@@ -1,724 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(DATA_FILE1, ?ABS_PATH("test/eunit/fixtures/app_data1.cfg")).
--define(DATA_FILE2, ?ABS_PATH("test/eunit/fixtures/app_data2.cfg")).
-
--export([notify_cb/4, save/3, get/2]).
-
--record(ctx, {file, handle, pid, kv, key, modules = []}).
-
--define(TIMEOUT, 5000).
--define(RELOAD_WAIT, 1000).
-
--define(temp_atom, fun() ->
- {A, B, C} = os:timestamp(),
- list_to_atom(lists:flatten(io_lib:format("~p~p~p", [A, B, C])))
-end).
-
--define(MODULE1(Name),
- "\n"
- " -export([inc/2, fail/2]).\n"
- "\n"
- " inc(KV, A) ->\n"
- " Reply = A + 1,\n"
- " couch_epi_tests:save(KV, inc1, Reply),\n"
- " [KV, Reply].\n"
- "\n"
- " fail(KV, A) ->\n"
- " inc(KV, A).\n"
-).
-
--define(MODULE2(Name),
- "\n"
- " -export([inc/2, fail/2]).\n"
- "\n"
- " inc(KV, A) ->\n"
- " Reply = A + 1,\n"
- " couch_epi_tests:save(KV, inc2, Reply),\n"
- " [KV, Reply].\n"
- "\n"
- " fail(KV, _A) ->\n"
- " couch_epi_tests:save(KV, inc2, check_error),\n"
- " throw(check_error).\n"
-).
-
--define(DATA_MODULE1(Name),
- "\n"
- " -export([data/0]).\n"
- "\n"
- " data() ->\n"
- " [\n"
- " {[complex, key, 1], [\n"
- " {type, counter},\n"
- " {desc, foo}\n"
- " ]}\n"
- " ].\n"
-).
-
--define(DATA_MODULE2(Name),
- "\n"
- " -export([data/0]).\n"
- "\n"
- " data() ->\n"
- " [\n"
- " {[complex, key, 2], [\n"
- " {type, counter},\n"
- " {desc, bar}\n"
- " ]},\n"
- " {[complex, key, 1], [\n"
- " {type, counter},\n"
- " {desc, updated_foo}\n"
- " ]}\n"
- " ].\n"
-).
-
--define(DATA_MODULE3(Name, Kv),
- "\n"
- " -export([data/0]).\n"
- "\n"
- "data() ->\n"
- " {ok, Data} = couch_epi_tests:get('" ++ atom_to_list(Kv) ++
- "', data),\n"
- " Data.\n"
-).
-
-%% ------------------------------------------------------------------
-%% couch_epi_plugin behaviour
-%% ------------------------------------------------------------------
-
-plugin_module([KV, Spec]) when is_tuple(Spec) ->
- SpecStr = io_lib:format("~w", [Spec]),
- KVStr = "'" ++ atom_to_list(KV) ++ "'",
- "\n"
- " -compile([export_all]).\n"
- "\n"
- " app() -> test_app.\n"
- " providers() ->\n"
- " [].\n"
- "\n"
- " services() ->\n"
- " [].\n"
- "\n"
- " data_providers() ->\n"
- " [\n"
- " {{test_app, descriptions}, " ++ SpecStr ++
- ", [{interval, 100}]}\n"
- " ].\n"
- "\n"
- " data_subscriptions() ->\n"
- " [\n"
- " {test_app, descriptions}\n"
- " ].\n"
- "\n"
- " processes() -> [].\n"
- "\n"
- " notify(Key, OldData, Data) ->\n"
- " couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++
- ").\n"
- " ";
-plugin_module([KV, Provider]) when is_atom(Provider) ->
- KVStr = "'" ++ atom_to_list(KV) ++ "'",
- "\n"
- " -compile([export_all]).\n"
- "\n"
- " app() -> test_app.\n"
- " providers() ->\n"
- " [\n"
- " {my_service, " ++ atom_to_list(Provider) ++
- "}\n"
- " ].\n"
- "\n"
- " services() ->\n"
- " [\n"
- " {my_service, " ++ atom_to_list(Provider) ++
- "}\n"
- " ].\n"
- "\n"
- " data_providers() ->\n"
- " [].\n"
- "\n"
- " data_subscriptions() ->\n"
- " [].\n"
- "\n"
- " processes() -> [].\n"
- "\n"
- " notify(Key, OldData, Data) ->\n"
- " couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++
- ").\n"
- " ".
-
-notify_cb(Key, OldData, Data, KV) ->
- save(KV, is_called, {Key, OldData, Data}).
-
-start_epi(Plugins) ->
- application:load(couch_epi),
- PluginsModules = lists:map(
- fun({Module, Body}) ->
- ok = generate_module(Module, Body),
- Module
- end,
- Plugins
- ),
- application:set_env(couch_epi, plugins, PluginsModules),
- application:start(couch_epi).
-
-setup(data_file) ->
- error_logger:tty(false),
-
- Key = {test_app, descriptions},
- File = ?tempfile(),
- {ok, _} = file:copy(?DATA_FILE1, File),
- KV = start_state_storage(),
-
- ok = start_epi([{provider_epi, plugin_module([KV, {file, File}])}]),
-
- Pid = whereis(couch_epi:get_handle(Key)),
-
- #ctx{
- file = File,
- key = Key,
- handle = couch_epi:get_handle(Key),
- kv = KV,
- pid = Pid
- };
-setup(static_data_module) ->
- error_logger:tty(false),
-
- Key = {test_app, descriptions},
-
- ok = generate_module(provider, ?DATA_MODULE1(provider)),
- KV = start_state_storage(),
-
- ok = start_epi([{provider_epi, plugin_module([KV, {static_module, provider}])}]),
-
- Pid = whereis(couch_epi:get_handle(Key)),
- Handle = couch_epi:get_handle(Key),
-
- #ctx{
- key = Key,
- handle = Handle,
- modules = [Handle, provider],
- kv = KV,
- pid = Pid
- };
-setup(callback_data_module) ->
- error_logger:tty(false),
-
- Key = {test_app, descriptions},
-
- KV = start_state_storage(),
- Value = [
- {[complex, key, 1], [
- {type, counter},
- {desc, foo}
- ]}
- ],
- save(KV, data, Value),
-
- ok = generate_module(provider, ?DATA_MODULE3(provider, KV)),
-
- ok = start_epi([{provider_epi, plugin_module([KV, {callback_module, provider}])}]),
-
- Pid = whereis(couch_epi:get_handle(Key)),
- Handle = couch_epi:get_handle(Key),
-
- #ctx{
- key = Key,
- handle = Handle,
- modules = [Handle, provider],
- kv = KV,
- pid = Pid
- };
-setup(functions) ->
- Key = my_service,
- error_logger:tty(false),
-
- ok = generate_module(provider1, ?MODULE1(provider1)),
- ok = generate_module(provider2, ?MODULE2(provider2)),
-
- KV = start_state_storage(),
-
- ok = start_epi([
- {provider_epi1, plugin_module([KV, provider1])},
- {provider_epi2, plugin_module([KV, provider2])}
- ]),
-
- Pid = whereis(couch_epi:get_handle(Key)),
- Handle = couch_epi:get_handle(Key),
-
- #ctx{
- key = Key,
- handle = Handle,
- modules = [Handle, provider1, provider2],
- kv = KV,
- pid = Pid
- };
-setup({options, _Opts}) ->
- setup(functions).
-
-teardown(_Case, #ctx{} = Ctx) ->
- teardown(Ctx).
-
-teardown(#ctx{file = File} = Ctx) when File /= undefined ->
- file:delete(File),
- teardown(Ctx#ctx{file = undefined});
-teardown(#ctx{kv = KV}) ->
- call(KV, stop),
- application:stop(couch_epi),
- ok.
-
-upgrade_release(Pid, Modules) ->
- sys:suspend(Pid),
- [ok = sys:change_code(Pid, M, undefined, []) || M <- Modules],
- sys:resume(Pid),
- ok.
-
-epi_config_update_test_() ->
- Funs = [
- fun ensure_notified_when_changed/2,
- fun ensure_not_notified_when_no_change/2
- ],
- Cases = [
- data_file,
- static_data_module,
- callback_data_module,
- functions
- ],
- {
- "config update tests",
- [make_case("Check notifications for: ", Cases, Funs)]
- }.
-
-epi_data_source_test_() ->
- Funs = [
- fun check_dump/2,
- fun check_get/2,
- fun check_get_value/2,
- fun check_by_key/2,
- fun check_by_source/2,
- fun check_keys/2,
- fun check_subscribers/2
- ],
- Cases = [
- data_file,
- static_data_module,
- callback_data_module
- ],
- {
- "epi data API tests",
- [make_case("Check query API for: ", Cases, Funs)]
- }.
-
-epi_apply_test_() ->
- {
- "epi dispatch tests",
- {
- foreach,
- fun() -> setup(functions) end,
- fun teardown/1,
- [
- fun check_pipe/1,
- fun check_broken_pipe/1,
- fun ensure_fail/1,
- fun ensure_fail_pipe/1
- ]
- }
- }.
-
-epi_providers_order_test_() ->
- {
- "epi providers' order test",
- {
- foreach,
- fun() -> setup(functions) end,
- fun teardown/1,
- [
- fun check_providers_order/1
- ]
- }
- }.
-
-epi_reload_test_() ->
- Cases = [
- data_file,
- static_data_module,
- callback_data_module,
- functions
- ],
- Funs = [
- fun ensure_reload_if_manually_triggered/2,
- fun ensure_reload_if_changed/2,
- fun ensure_no_reload_when_no_change/2
- ],
- {
- "epi reload tests",
- [make_case("Check reload for: ", Cases, Funs)]
- }.
-
-apply_options_test_() ->
- Funs = [fun ensure_apply_is_called/2],
- Setups = {options, valid_options_permutations()},
- {
- "apply options tests",
- [make_case("Apply with options: ", Setups, Funs)]
- }.
-
-make_case(Msg, {Tag, P}, Funs) ->
- Cases = [{Tag, Case} || Case <- P],
- make_case(Msg, Cases, Funs);
-make_case(Msg, P, Funs) ->
- [
- {format_case_name(Msg, Case), [
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Case, make_fun(Fun, 2)} || Fun <- Funs]
- }
- ]}
- || Case <- P
- ].
-
-make_fun(Fun, Arity) ->
- {arity, A} = lists:keyfind(arity, 1, erlang:fun_info(Fun)),
- make_fun(Fun, Arity, A).
-
-make_fun(Fun, A, A) -> Fun;
-make_fun(Fun, 2, 1) -> fun(_, A) -> Fun(A) end;
-make_fun(Fun, 1, 2) -> fun(A) -> Fun(undefined, A) end.
-
-format_case_name(Msg, Case) ->
- lists:flatten(Msg ++ io_lib:format("~p", [Case])).
-
-valid_options_permutations() ->
- [
- [],
- [ignore_errors],
- [pipe],
- [pipe, ignore_errors],
- [concurrent],
- [concurrent, ignore_errors]
- ].
-
-ensure_notified_when_changed(functions, #ctx{key = Key} = Ctx) ->
- ?_test(begin
- subscribe(Ctx, test_app, Key),
- update(functions, Ctx),
- Result = get(Ctx, is_called),
- ExpectedDefs = [
- {provider1, [{inc, 2}, {fail, 2}]},
- {provider2, [{inc, 2}, {fail, 2}]}
- ],
- ?assertEqual({ok, {Key, ExpectedDefs, ExpectedDefs}}, Result),
- ok
- end);
-ensure_notified_when_changed(Case, #ctx{key = Key} = Ctx) ->
- ?_test(begin
- subscribe(Ctx, test_app, Key),
- update(Case, Ctx),
- ExpectedData = lists:usort([
- {[complex, key, 1], [{type, counter}, {desc, updated_foo}]},
- {[complex, key, 2], [{type, counter}, {desc, bar}]}
- ]),
- Result = get(Ctx, is_called),
- ?assertMatch({ok, {Key, _OldData, _Data}}, Result),
- {ok, {Key, OldData, Data}} = Result,
- ?assertMatch(ExpectedData, lists:usort(Data)),
- ?assertMatch(
- [{[complex, key, 1], [{type, counter}, {desc, foo}]}],
- lists:usort(OldData)
- )
- end).
-
-ensure_not_notified_when_no_change(_Case, #ctx{key = Key} = Ctx) ->
- ?_test(begin
- subscribe(Ctx, test_app, Key),
- timer:sleep(?RELOAD_WAIT),
- ?assertMatch(error, get(Ctx, is_called))
- end).
-
-ensure_apply_is_called({options, Opts}, #ctx{handle = Handle, kv = KV, key = Key} = Ctx) ->
- ?_test(begin
- couch_epi:apply(Handle, Key, inc, [KV, 2], Opts),
- maybe_wait(Opts),
- ?assertMatch({ok, _}, get(Ctx, inc1)),
- ?assertMatch({ok, _}, get(Ctx, inc2)),
- ok
- end);
-ensure_apply_is_called(undefined, #ctx{} = Ctx) ->
- ensure_apply_is_called({options, []}, Ctx).
-
-check_pipe(#ctx{handle = Handle, kv = KV, key = Key}) ->
- ?_test(begin
- Result = couch_epi:apply(Handle, Key, inc, [KV, 2], [pipe]),
- ?assertMatch([KV, 4], Result),
- ok
- end).
-
-check_broken_pipe(#ctx{handle = Handle, kv = KV, key = Key} = Ctx) ->
- ?_test(begin
- Result = couch_epi:apply(Handle, Key, fail, [KV, 2], [pipe, ignore_errors]),
- ?assertMatch([KV, 3], Result),
- ?assertMatch([3, check_error], pipe_state(Ctx)),
- ok
- end).
-
-ensure_fail_pipe(#ctx{handle = Handle, kv = KV, key = Key}) ->
- ?_test(begin
- ?assertThrow(
- check_error,
- couch_epi:apply(Handle, Key, fail, [KV, 2], [pipe])
- ),
- ok
- end).
-
-ensure_fail(#ctx{handle = Handle, kv = KV, key = Key}) ->
- ?_test(begin
- ?assertThrow(
- check_error,
- couch_epi:apply(Handle, Key, fail, [KV, 2], [])
- ),
- ok
- end).
-
-pipe_state(Ctx) ->
- Trace = [get(Ctx, inc1), get(Ctx, inc2)],
- lists:usort([State || {ok, State} <- Trace]).
-
-check_dump(_Case, #ctx{handle = Handle}) ->
- ?_test(begin
- ?assertMatch(
- [[{type, counter}, {desc, foo}]],
- couch_epi:dump(Handle)
- )
- end).
-
-check_get(_Case, #ctx{handle = Handle}) ->
- ?_test(begin
- ?assertMatch(
- [[{type, counter}, {desc, foo}]],
- couch_epi:get(Handle, [complex, key, 1])
- )
- end).
-
-check_get_value(_Case, #ctx{handle = Handle}) ->
- ?_test(begin
- ?assertMatch(
- [{type, counter}, {desc, foo}],
- couch_epi:get_value(Handle, test_app, [complex, key, 1])
- )
- end).
-
-check_by_key(_Case, #ctx{handle = Handle}) ->
- ?_test(begin
- ?assertMatch(
- [{[complex, key, 1], [{test_app, [{type, counter}, {desc, foo}]}]}],
- couch_epi:by_key(Handle)
- ),
- ?assertMatch(
- [{test_app, [{type, counter}, {desc, foo}]}],
- couch_epi:by_key(Handle, [complex, key, 1])
- )
- end).
-
-check_by_source(_Case, #ctx{handle = Handle}) ->
- ?_test(begin
- ?assertMatch(
- [{test_app, [{[complex, key, 1], [{type, counter}, {desc, foo}]}]}],
- couch_epi:by_source(Handle)
- ),
- ?assertMatch(
- [{[complex, key, 1], [{type, counter}, {desc, foo}]}],
- couch_epi:by_source(Handle, test_app)
- )
- end).
-
-check_keys(_Case, #ctx{handle = Handle}) ->
- ?_assertMatch([[complex, key, 1]], couch_epi:keys(Handle)).
-
-check_subscribers(_Case, #ctx{handle = Handle}) ->
- ?_assertMatch([test_app], couch_epi:subscribers(Handle)).
-
-ensure_reload_if_manually_triggered(Case, #ctx{pid = Pid, key = Key} = Ctx) ->
- ?_test(begin
- subscribe(Ctx, test_app, Key),
- update_definitions(Case, Ctx),
- couch_epi_module_keeper:reload(Pid),
- timer:sleep(?RELOAD_WAIT),
- ?assertNotEqual(error, get(Ctx, is_called))
- end).
-
-ensure_reload_if_changed(
- data_file = Case,
- #ctx{key = Key, handle = Handle} = Ctx
-) ->
- ?_test(begin
- Version = Handle:version(),
- subscribe(Ctx, test_app, Key),
- update_definitions(Case, Ctx),
- timer:sleep(?RELOAD_WAIT),
- ?assertNotEqual(Version, Handle:version()),
- ?assertNotEqual(error, get(Ctx, is_called))
- end);
-ensure_reload_if_changed(
- Case,
- #ctx{key = Key, handle = Handle} = Ctx
-) ->
- ?_test(begin
- Version = Handle:version(),
- subscribe(Ctx, test_app, Key),
- update(Case, Ctx),
- ?assertNotEqual(Version, Handle:version()),
- %% Allow some time for notify to be called
- timer:sleep(?RELOAD_WAIT),
- ?assertNotEqual(error, get(Ctx, is_called))
- end).
-
-ensure_no_reload_when_no_change(
- functions,
- #ctx{pid = Pid, key = Key, handle = Handle, modules = Modules} = Ctx
-) ->
- ?_test(begin
- Version = Handle:version(),
- subscribe(Ctx, test_app, Key),
- upgrade_release(Pid, Modules),
- ?assertEqual(Version, Handle:version()),
- ?assertEqual(error, get(Ctx, is_called))
- end);
-ensure_no_reload_when_no_change(
- _Case,
- #ctx{key = Key, handle = Handle} = Ctx
-) ->
- ?_test(begin
- Version = Handle:version(),
- subscribe(Ctx, test_app, Key),
- timer:sleep(?RELOAD_WAIT),
- ?assertEqual(Version, Handle:version()),
- ?assertEqual(error, get(Ctx, is_called))
- end).
-
-check_providers_order(#ctx{handle = Handle, kv = KV, key = Key} = Ctx) ->
- ?_test(begin
- Result = couch_epi:apply(Handle, Key, inc, [KV, 2], [pipe]),
- ?assertMatch([KV, 4], Result),
- Order = [element(2, get(Ctx, K)) || K <- [inc1, inc2]],
- ?assertEqual(Order, [3, 4]),
- ok
- end).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-generate_module(Name, Body) ->
- Tokens = couch_epi_codegen:scan(Body),
- couch_epi_codegen:generate(Name, Tokens).
-
-update(Case, #ctx{pid = Pid, modules = Modules} = Ctx) ->
- update_definitions(Case, Ctx),
- upgrade_release(Pid, Modules),
- wait_update(Ctx).
-
-update_definitions(data_file, #ctx{file = File}) ->
- {ok, _} = file:copy(?DATA_FILE2, File),
- ok;
-update_definitions(static_data_module, #ctx{}) ->
- ok = generate_module(provider, ?DATA_MODULE2(provider));
-update_definitions(callback_data_module, #ctx{kv = Kv}) ->
- Value = [
- {[complex, key, 2], [
- {type, counter},
- {desc, bar}
- ]},
- {[complex, key, 1], [
- {type, counter},
- {desc, updated_foo}
- ]}
- ],
- save(Kv, data, Value),
- ok;
-update_definitions(functions, #ctx{}) ->
- ok = generate_module(provider1, ?MODULE2(provider1)).
-
-subscribe(#ctx{kv = Kv}, _App, _Key) ->
- call(Kv, empty),
- ok.
-
-maybe_wait(Opts) ->
- case lists:member(concurrent, Opts) of
- true ->
- timer:sleep(?RELOAD_WAIT);
- false ->
- ok
- end.
-
-wait_update(Ctx) ->
- case get(Ctx, is_called) of
- error ->
- timer:sleep(?RELOAD_WAIT),
- wait_update(Ctx);
- _ ->
- ok
- end.
-
-%% ------------
-%% State tracer
-
-save(Kv, Key, Value) ->
- call(Kv, {set, Key, Value}).
-
-get(#ctx{kv = Kv}, Key) ->
- call(Kv, {get, Key});
-get(Kv, Key) ->
- call(Kv, {get, Key}).
-
-call(Server, Msg) ->
- Ref = make_ref(),
- Server ! {{Ref, self()}, Msg},
- receive
- {reply, Ref, Reply} ->
- Reply
- after ?TIMEOUT ->
- {error, {timeout, Msg}}
- end.
-
-reply({Ref, From}, Msg) ->
- From ! {reply, Ref, Msg}.
-
-start_state_storage() ->
- Pid = state_storage(),
- Name = ?temp_atom(),
- register(Name, Pid),
- Name.
-
-state_storage() ->
- spawn_link(fun() -> state_storage(dict:new()) end).
-
-state_storage(Dict) ->
- receive
- {From, {set, Key, Value}} ->
- reply(From, ok),
- state_storage(dict:store(Key, Value, Dict));
- {From, {get, Key}} ->
- reply(From, dict:find(Key, Dict)),
- state_storage(Dict);
- {From, empty} ->
- reply(From, ok),
- state_storage(dict:new());
- {From, stop} ->
- reply(From, ok)
- end.
diff --git a/src/couch_epi/test/eunit/fixtures/app_data1.cfg b/src/couch_epi/test/eunit/fixtures/app_data1.cfg
deleted file mode 100644
index 4c9f3fe2d..000000000
--- a/src/couch_epi/test/eunit/fixtures/app_data1.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-{[complex, key, 1], [
- {type, counter},
- {desc, foo}
-]}.
diff --git a/src/couch_epi/test/eunit/fixtures/app_data2.cfg b/src/couch_epi/test/eunit/fixtures/app_data2.cfg
deleted file mode 100644
index e5a5ffb8c..000000000
--- a/src/couch_epi/test/eunit/fixtures/app_data2.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-{[complex, key, 2], [
- {type, counter},
- {desc, bar}
-]}.
-{[complex, key, 1], [
- {type, counter},
- {desc, updated_foo}
-]}.
diff --git a/src/couch_event/.gitignore b/src/couch_event/.gitignore
deleted file mode 100644
index 1204ed70e..000000000
--- a/src/couch_event/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-deps/
-ebin/
diff --git a/src/couch_event/LICENSE b/src/couch_event/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_event/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_event/README.md b/src/couch_event/README.md
deleted file mode 100644
index ab2e56877..000000000
--- a/src/couch_event/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Couch Event Notifications
-
-The replacement for couch\_db\_update and related code.
diff --git a/src/couch_event/rebar.config b/src/couch_event/rebar.config
deleted file mode 100644
index f68b4b5ed..000000000
--- a/src/couch_event/rebar.config
+++ /dev/null
@@ -1 +0,0 @@
-{erl_first_files, ["src/couch_event_listener.erl"]}.
diff --git a/src/couch_event/src/couch_event.app.src b/src/couch_event/src/couch_event.app.src
deleted file mode 100644
index b2ac917b9..000000000
--- a/src/couch_event/src/couch_event.app.src
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_event, [
- {description, "Event notification system for Apache CouchDB"},
- {vsn, git},
- {registered, [
- couch_event_sup,
- couch_event_server
- ]},
- {applications, [kernel, stdlib, khash, couch_log, config]},
- {mod, {couch_event_app, []}}
-]}.
diff --git a/src/couch_event/src/couch_event.erl b/src/couch_event/src/couch_event.erl
deleted file mode 100644
index 2579349d7..000000000
--- a/src/couch_event/src/couch_event.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event).
-
--export([
- notify/2
-]).
-
--export([
- listen/4,
- link_listener/4,
- stop_listener/1
-]).
-
--export([
- register/2,
- register_many/2,
- register_all/1,
- unregister/1
-]).
-
--define(SERVER, couch_event_server).
-
-notify(DbName, Event) ->
- gen_server:cast(?SERVER, {notify, DbName, Event}).
-
-listen(Module, Function, State, Options) ->
- couch_event_listener_mfa:enter_loop(Module, Function, State, Options).
-
-link_listener(Module, Function, State, Options) ->
- couch_event_listener_mfa:start_link(Module, Function, State, Options).
-
-stop_listener(Pid) ->
- couch_event_listener_mfa:stop(Pid).
-
-register(Pid, DbName) ->
- gen_server:call(?SERVER, {register, Pid, [DbName]}).
-
-register_many(Pid, DbNames) when is_list(DbNames) ->
- gen_server:call(?SERVER, {register, Pid, DbNames}).
-
-register_all(Pid) ->
- gen_server:call(?SERVER, {register, Pid, [all_dbs]}).
-
-unregister(Pid) ->
- gen_server:call(?SERVER, {unregister, Pid}).
diff --git a/src/couch_event/src/couch_event_app.erl b/src/couch_event/src/couch_event_app.erl
deleted file mode 100644
index 19621f0bf..000000000
--- a/src/couch_event/src/couch_event_app.erl
+++ /dev/null
@@ -1,25 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_app).
--behavior(application).
-
--export([
- start/2,
- stop/1
-]).
-
-start(_StartType, _StartArgs) ->
- couch_event_sup2:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/couch_event/src/couch_event_int.hrl b/src/couch_event/src/couch_event_int.hrl
deleted file mode 100644
index f837e1dec..000000000
--- a/src/couch_event/src/couch_event_int.hrl
+++ /dev/null
@@ -1,19 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(REGISTRY_TABLE, couch_event_registry).
--define(MONITOR_TABLE, couch_event_registry_monitors).
-
--record(client, {
- dbname,
- pid
-}).
diff --git a/src/couch_event/src/couch_event_listener.erl b/src/couch_event/src/couch_event_listener.erl
deleted file mode 100644
index 40f1a5c65..000000000
--- a/src/couch_event/src/couch_event_listener.erl
+++ /dev/null
@@ -1,218 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_listener).
-
--export([
- start/3,
- start/4,
- start_link/3,
- start_link/4,
- enter_loop/3,
- cast/2
-]).
-
--export([
- do_init/3,
- loop/2
-]).
-
--record(st, {
- module,
- state
-}).
-
--callback init(Arg :: term()) ->
- term().
-
--callback terminate(Reason :: term(), State :: term()) ->
- term().
-
--callback handle_cast(Message :: term(), State :: term()) ->
- term().
-
--callback handle_event(DbName :: term(), Event :: term(), State :: term()) ->
- term().
-
--callback handle_info(Message :: term(), State :: term()) ->
- term().
-
-start(Mod, Arg, Options) ->
- Pid = erlang:spawn(?MODULE, do_init, [Mod, Arg, Options]),
- {ok, Pid}.
-
-start(Name, Mod, Arg, Options) ->
- case where(Name) of
- undefined ->
- start(Mod, Arg, [{name, Name} | Options]);
- Pid ->
- {error, {already_started, Pid}}
- end.
-
-start_link(Mod, Arg, Options) ->
- Pid = erlang:spawn_link(?MODULE, do_init, [Mod, Arg, Options]),
- {ok, Pid}.
-
-start_link(Name, Mod, Arg, Options) ->
- case where(Name) of
- undefined ->
- start_link(Mod, Arg, [{name, Name} | Options]);
- Pid ->
- {error, {already_started, Pid}}
- end.
-
-enter_loop(Module, State, Options) ->
- ok = register_listeners(Options),
- ?MODULE:loop(#st{module = Module, state = State}, infinity).
-
-cast(Pid, Message) ->
- Pid ! {'$couch_event_cast', Message},
- ok.
-
-do_init(Module, Arg, Options) ->
- ok = maybe_name_process(Options),
- ok = register_listeners(Options),
- case (catch Module:init(Arg)) of
- {ok, State} ->
- ?MODULE:loop(#st{module = Module, state = State}, infinity);
- {ok, State, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(#st{module = Module, state = State}, Timeout);
- Else ->
- erlang:exit(Else)
- end.
-
-loop(St, Timeout) ->
- receive
- {'$couch_event', DbName, Event} ->
- do_event(St, DbName, Event);
- {'$couch_event_cast', Message} ->
- do_cast(St, Message);
- Else ->
- do_info(St, Else)
- after Timeout ->
- do_info(St, timeout)
- end.
-
-maybe_name_process(Options) ->
- case proplists:lookup(name, Options) of
- {name, Name} ->
- case name_register(Name) of
- true ->
- ok;
- {false, Pid} ->
- erlang:error({already_started, Pid})
- end;
- none ->
- ok
- end.
-
-register_listeners(Options) ->
- case get_all_dbnames(Options) of
- all_dbs ->
- couch_event:register_all(self());
- DbNames ->
- couch_event:register_many(self(), DbNames)
- end,
- ok.
-
-do_event(#st{module = Module, state = State} = St, DbName, Event) ->
- case (catch Module:handle_event(DbName, Event, State)) of
- {ok, NewState} ->
- ?MODULE:loop(St#st{state = NewState}, infinity);
- {ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state = NewState}, Timeout);
- {stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state = NewState});
- Else ->
- erlang:error(Else)
- end.
-
-do_cast(#st{module = Module, state = State} = St, Message) ->
- case (catch Module:handle_cast(Message, State)) of
- {ok, NewState} ->
- ?MODULE:loop(St#st{state = NewState}, infinity);
- {ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state = NewState}, Timeout);
- {stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state = NewState});
- Else ->
- erlang:error(Else)
- end.
-
-do_info(#st{module = Module, state = State} = St, Message) ->
- case (catch Module:handle_info(Message, State)) of
- {ok, NewState} ->
- ?MODULE:loop(St#st{state = NewState}, infinity);
- {ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state = NewState}, Timeout);
- {stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state = NewState});
- Else ->
- erlang:error(Else)
- end.
-
-do_terminate(Reason, #st{module = Module, state = State}) ->
- % Order matters. We want to make sure Module:terminate/2
- % is called even if couch_event:unregister/1 hangs
- % indefinitely.
- catch Module:terminate(Reason, State),
- catch couch_event:unregister(self()),
- Status =
- case Reason of
- normal -> normal;
- shutdown -> normal;
- ignore -> normal;
- Else -> Else
- end,
- erlang:exit(Status).
-
-where({global, Name}) -> global:whereis_name(Name);
-where({local, Name}) -> whereis(Name).
-
-name_register({global, Name} = GN) ->
- case global:register_name(Name, self()) of
- yes -> true;
- no -> {false, where(GN)}
- end;
-name_register({local, Name} = LN) ->
- try register(Name, self()) of
- true -> true
- catch
- error:_ ->
- {false, where(LN)}
- end.
-
-get_all_dbnames(Options) ->
- case proplists:get_value(all_dbs, Options) of
- true -> all_dbs;
- _ -> get_all_dbnames(Options, [])
- end.
-
-get_all_dbnames([], []) ->
- erlang:error(no_dbnames_provided);
-get_all_dbnames([], Acc) ->
- lists:usort(convert_dbname_list(Acc));
-get_all_dbnames([{dbname, DbName} | Rest], Acc) ->
- get_all_dbnames(Rest, [DbName | Acc]);
-get_all_dbnames([{dbnames, DbNames} | Rest], Acc) when is_list(DbNames) ->
- get_all_dbnames(Rest, DbNames ++ Acc);
-get_all_dbnames([_Ignored | Rest], Acc) ->
- get_all_dbnames(Rest, Acc).
-
-convert_dbname_list([]) ->
- [];
-convert_dbname_list([DbName | Rest]) when is_binary(DbName) ->
- [DbName | convert_dbname_list(Rest)];
-convert_dbname_list([DbName | Rest]) when is_list(DbName) ->
- [list_to_binary(DbName) | convert_dbname_list(Rest)];
-convert_dbname_list([DbName | _]) ->
- erlang:error({invalid_dbname, DbName}).
diff --git a/src/couch_event/src/couch_event_listener_mfa.erl b/src/couch_event/src/couch_event_listener_mfa.erl
deleted file mode 100644
index b4cd9148a..000000000
--- a/src/couch_event/src/couch_event_listener_mfa.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_listener_mfa).
--behavior(couch_event_listener).
-
--export([
- start_link/4,
- enter_loop/4,
- stop/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_event/3,
- handle_cast/2,
- handle_info/2
-]).
-
--record(st, {
- mod,
- func,
- state,
- parent
-}).
-
-start_link(Mod, Func, State, Options) ->
- Parent =
- case proplists:get_value(parent, Options) of
- P when is_pid(P) -> P;
- _ -> self()
- end,
- Arg = {Parent, Mod, Func, State},
- couch_event_listener:start_link(?MODULE, Arg, Options).
-
-enter_loop(Mod, Func, State, Options) ->
- Parent =
- case proplists:get_value(parent, Options) of
- P when is_pid(P) ->
- erlang:monitor(process, P),
- P;
- _ ->
- undefined
- end,
- St = #st{
- mod = Mod,
- func = Func,
- state = State,
- parent = Parent
- },
- couch_event_listener:enter_loop(?MODULE, St, Options).
-
-stop(Pid) ->
- couch_event_listener:cast(Pid, shutdown).
-
-init({Parent, Mod, Func, State}) ->
- erlang:monitor(process, Parent),
- {ok, #st{
- mod = Mod,
- func = Func,
- state = State,
- parent = Parent
- }}.
-
-terminate(_Reason, _MFA) ->
- ok.
-
-handle_event(DbName, Event, #st{mod = Mod, func = Func, state = State} = St) ->
- case (catch Mod:Func(DbName, Event, State)) of
- {ok, NewState} ->
- {ok, St#st{state = NewState}};
- stop ->
- {stop, normal, St};
- Else ->
- erlang:error(Else)
- end.
-
-handle_cast(shutdown, St) ->
- {stop, normal, St};
-handle_cast(_Msg, St) ->
- {ok, St}.
-
-handle_info({'DOWN', _Ref, process, Parent, _Reason}, #st{parent = Parent} = St) ->
- {stop, normal, St};
-handle_info(_Msg, St) ->
- {ok, St}.
diff --git a/src/couch_event/src/couch_event_os_listener.erl b/src/couch_event/src/couch_event_os_listener.erl
deleted file mode 100644
index ef379402a..000000000
--- a/src/couch_event/src/couch_event_os_listener.erl
+++ /dev/null
@@ -1,67 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_os_listener).
--behaviour(gen_server).
--vsn(1).
-
--export([
- start_link/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
-start_link(Exe) when is_list(Exe) ->
- gen_server:start_link(?MODULE, Exe, []).
-
-init(Exe) ->
- process_flag(trap_exit, true),
- ok = couch_event:register_all(self()),
- couch_os_process:start_link(Exe, []).
-
-terminate(_Reason, Pid) when is_pid(Pid) ->
- couch_os_process:stop(Pid);
-terminate(_Reason, _Pid) ->
- ok.
-
-handle_call(Msg, From, Pid) ->
- couch_log:notice("~s ignoring call ~w from ~w", [?MODULE, Msg, From]),
- {reply, ignored, Pid, 0}.
-
-handle_cast(Msg, Pid) ->
- couch_log:notice("~s ignoring cast ~w", [?MODULE, Msg]),
- {noreply, Pid, 0}.
-
-handle_info({'$couch_event', DbName, Event}, Pid) ->
- Obj =
- {[
- {db, DbName},
- {type, list_to_binary(atom_to_list(Event))}
- ]},
- ok = couch_os_process:send(Pid, Obj),
- {noreply, Pid};
-handle_info({'EXIT', Pid, Reason}, Pid) ->
- couch_log:error("Update notificatio process ~w died: ~w", [Pid, Reason]),
- {stop, normal, nil};
-handle_info(Msg, Pid) ->
- couch_log:notice("~s ignoring info ~w", [?MODULE, Msg]),
- {noreply, Pid, 0}.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
diff --git a/src/couch_event/src/couch_event_server.erl b/src/couch_event/src/couch_event_server.erl
deleted file mode 100644
index f633a8409..000000000
--- a/src/couch_event/src/couch_event_server.erl
+++ /dev/null
@@ -1,150 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_server).
--behaviour(gen_server).
--vsn(1).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--include("couch_event_int.hrl").
-
--record(st, {
- by_pid,
- by_dbname
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, nil, []).
-
-init(_) ->
- {ok, ByPid} = khash:new(),
- {ok, ByDbName} = khash:new(),
- {ok, #st{
- by_pid = ByPid,
- by_dbname = ByDbName
- }}.
-
-terminate(_Reason, _St) ->
- ok.
-
-handle_call({register, Pid, NewDbNames}, _From, St) ->
- case khash:get(St#st.by_pid, Pid) of
- undefined ->
- NewRef = erlang:monitor(process, Pid),
- register(St, NewRef, Pid, NewDbNames);
- {ReuseRef, OldDbNames} ->
- unregister(St, Pid, OldDbNames),
- register(St, ReuseRef, Pid, NewDbNames)
- end,
- {reply, ok, St};
-handle_call({unregister, Pid}, _From, St) ->
- Reply =
- case khash:get(St#st.by_pid, Pid) of
- undefined ->
- not_registered;
- {Ref, OldDbNames} ->
- unregister(St, Pid, OldDbNames),
- erlang:demonitor(Ref, [flush]),
- ok
- end,
- {reply, Reply, St};
-handle_call(Msg, From, St) ->
- couch_log:notice("~s ignoring call ~w from ~w", [?MODULE, Msg, From]),
- {reply, ignored, St}.
-
-handle_cast({notify, DbName, Event}, St) ->
- notify_listeners(St#st.by_dbname, DbName, Event),
- {noreply, St};
-handle_cast(Msg, St) ->
- couch_log:notice("~s ignoring cast ~w", [?MODULE, Msg]),
- {noreply, St}.
-
-handle_info({'DOWN', Ref, process, Pid, _Reason}, St) ->
- case khash:get(St#st.by_pid, Pid) of
- {Ref, OldDbNames} ->
- unregister(St, Pid, OldDbNames);
- undefined ->
- ok
- end,
- {noreply, St};
-handle_info(Msg, St) ->
- couch_log:notice("~s ignoring info ~w", [?MODULE, Msg]),
- {noreply, St}.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-notify_listeners(ByDbName, DbName, Event) ->
- Msg = {'$couch_event', DbName, Event},
- notify_listeners(khash:get(ByDbName, all_dbs), Msg),
- notify_listeners(khash:get(ByDbName, DbName), Msg).
-
-notify_listeners(undefined, _) ->
- ok;
-notify_listeners(Listeners, Msg) ->
- khash:fold(
- Listeners,
- fun(Pid, _, _) ->
- Pid ! Msg,
- nil
- end,
- nil
- ).
-
-register(St, Ref, Pid, DbNames) ->
- khash:put(St#st.by_pid, Pid, {Ref, DbNames}),
- lists:foreach(
- fun(DbName) ->
- add_listener(St#st.by_dbname, DbName, Pid)
- end,
- DbNames
- ).
-
-add_listener(ByDbName, DbName, Pid) ->
- case khash:lookup(ByDbName, DbName) of
- {value, Listeners} ->
- khash:put(Listeners, Pid, nil);
- not_found ->
- {ok, NewListeners} = khash:new(),
- khash:put(NewListeners, Pid, nil),
- khash:put(ByDbName, DbName, NewListeners)
- end.
-
-unregister(St, Pid, OldDbNames) ->
- ok = khash:del(St#st.by_pid, Pid),
- lists:foreach(
- fun(DbName) ->
- rem_listener(St#st.by_dbname, DbName, Pid)
- end,
- OldDbNames
- ).
-
-rem_listener(ByDbName, DbName, Pid) ->
- {value, Listeners} = khash:lookup(ByDbName, DbName),
- khash:del(Listeners, Pid),
- Size = khash:size(Listeners),
- if
- Size > 0 -> ok;
- true -> khash:del(ByDbName, DbName)
- end.
diff --git a/src/couch_event/src/couch_event_sup2.erl b/src/couch_event/src/couch_event_sup2.erl
deleted file mode 100644
index a815c440b..000000000
--- a/src/couch_event/src/couch_event_sup2.erl
+++ /dev/null
@@ -1,36 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% This is named couch_event_sup2 to avoid
-% naming collisions with the couch_event_sup
-% module contained in the couch app. When
-% that supervisor is removed we'll be free
-% to rename this one.
-
--module(couch_event_sup2).
--behavior(supervisor).
-
--export([
- start_link/0,
- init/1
-]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, nil).
-
-init(_) ->
- Children = [
- {couch_event_server, {couch_event_server, start_link, []}, permanent, 5000, worker, [
- couch_event_server
- ]}
- ],
- {ok, {{one_for_one, 5, 10}, Children}}.
diff --git a/src/couch_index/.gitignore b/src/couch_index/.gitignore
deleted file mode 100644
index e24db8ab4..000000000
--- a/src/couch_index/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/ebin
-.eunit
-.rebar
diff --git a/src/couch_index/LICENSE b/src/couch_index/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_index/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_index/rebar.config b/src/couch_index/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/couch_index/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/couch_index/src/couch_index.app.src b/src/couch_index/src/couch_index.app.src
deleted file mode 100644
index 834be3f3c..000000000
--- a/src/couch_index/src/couch_index.app.src
+++ /dev/null
@@ -1,19 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_index, [
- {description, "CouchDB Secondary Index Manager"},
- {vsn, git},
- {registered, []},
- {applications, [kernel, stdlib, couch_epi]},
- {mod, {couch_index_app, []}}
-]}.
diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl
deleted file mode 100644
index a6b62be7c..000000000
--- a/src/couch_index/src/couch_index.erl
+++ /dev/null
@@ -1,618 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index).
--behaviour(gen_server).
-
--compile(tuple_calls).
-
--vsn(3).
-
-%% API
--export([start_link/1, stop/1, get_state/2, get_info/1]).
--export([trigger_update/2]).
--export([compact/1, compact/2, get_compactor_pid/1]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% 10 minutes
--define(CHECK_INTERVAL, 600000).
-
--record(st, {
- mod,
- idx_state,
- updater,
- compactor,
- waiters = [],
- committed = true,
- shutdown = false
-}).
-
-start_link({Module0, IdxState0}) ->
- [Module, IdxState] = couch_index_plugin:before_open(Module0, IdxState0),
- proc_lib:start_link(?MODULE, init, [{Module, IdxState}]).
-
-stop(Pid) ->
- gen_server:cast(Pid, stop).
-
-get_state(Pid, RequestSeq) ->
- gen_server:call(Pid, {get_state, RequestSeq}, infinity).
-
-get_info(Pid) ->
- gen_server:call(Pid, get_info, group_info_timeout_msec()).
-
-trigger_update(Pid, UpdateSeq) ->
- gen_server:cast(Pid, {trigger_update, UpdateSeq}).
-
-compact(Pid) ->
- compact(Pid, []).
-
-compact(Pid, Options) ->
- {ok, CPid} = gen_server:call(Pid, compact),
- case lists:member(monitor, Options) of
- true -> {ok, erlang:monitor(process, CPid)};
- false -> ok
- end.
-
-get_compactor_pid(Pid) ->
- gen_server:call(Pid, get_compactor_pid).
-
-init({Mod, IdxState}) ->
- DbName = Mod:get(db_name, IdxState),
- erlang:send_after(?CHECK_INTERVAL, self(), maybe_close),
- Resp = couch_util:with_db(DbName, fun(Db) ->
- case Mod:open(Db, IdxState) of
- {ok, IdxSt} ->
- couch_db:monitor(Db),
- {ok, IdxSt};
- Error ->
- Error
- end
- end),
- case Resp of
- {ok, NewIdxState} ->
- {ok, UPid} = couch_index_updater:start_link(self(), Mod),
- {ok, CPid} = couch_index_compactor:start_link(self(), Mod),
- State = #st{
- mod = Mod,
- idx_state = NewIdxState,
- updater = UPid,
- compactor = CPid
- },
- Args = [
- Mod:get(db_name, IdxState),
- Mod:get(idx_name, IdxState),
- couch_index_util:hexsig(Mod:get(signature, IdxState))
- ],
- couch_log:debug("Opening index for db: ~s idx: ~s sig: ~p", Args),
- proc_lib:init_ack({ok, self()}),
- gen_server:enter_loop(?MODULE, [], State);
- Other ->
- proc_lib:init_ack(Other)
- end.
-
-terminate(Reason0, State) ->
- #st{mod = Mod, idx_state = IdxState} = State,
- case Reason0 of
- {shutdown, ddoc_updated} ->
- Mod:shutdown(IdxState),
- Reason = ddoc_updated;
- _ ->
- Mod:close(IdxState),
- Reason = Reason0
- end,
- send_all(State#st.waiters, Reason),
- couch_util:shutdown_sync(State#st.updater),
- couch_util:shutdown_sync(State#st.compactor),
- Args = [
- Mod:get(db_name, IdxState),
- Mod:get(idx_name, IdxState),
- couch_index_util:hexsig(Mod:get(signature, IdxState)),
- Reason
- ],
-    couch_log:debug("Closing index for db: ~s idx: ~s sig: ~p because ~p", Args),
- ok.
-
-handle_call({get_state, ReqSeq}, From, State) ->
- #st{
- mod = Mod,
- idx_state = IdxState,
- waiters = Waiters
- } = State,
- IdxSeq = Mod:get(update_seq, IdxState),
- case ReqSeq =< IdxSeq of
- true ->
- {reply, {ok, IdxState}, State};
- % View update required
- _ ->
- couch_index_updater:run(State#st.updater, IdxState),
- Waiters2 = [{From, ReqSeq} | Waiters],
- {noreply, State#st{waiters = Waiters2}, infinity}
- end;
-handle_call(get_info, _From, State) ->
- #st{mod = Mod} = State,
- IdxState = State#st.idx_state,
- {ok, Info0} = Mod:get(info, IdxState),
- IsUpdating = couch_index_updater:is_running(State#st.updater),
- IsCompacting = couch_index_compactor:is_running(State#st.compactor),
- IdxSeq = Mod:get(update_seq, IdxState),
- GetCommSeq = fun(Db) -> couch_db:get_committed_update_seq(Db) end,
- DbName = Mod:get(db_name, IdxState),
- CommittedSeq = couch_util:with_db(DbName, GetCommSeq),
- Info =
- Info0 ++
- [
- {updater_running, IsUpdating},
- {compact_running, IsCompacting},
- {waiting_commit, State#st.committed == false},
- {waiting_clients, length(State#st.waiters)},
- {pending_updates, max(CommittedSeq - IdxSeq, 0)}
- ],
- {reply, {ok, Info}, State};
-handle_call(reset, _From, State) ->
- #st{
- mod = Mod,
- idx_state = IdxState
- } = State,
- {ok, NewIdxState} = Mod:reset(IdxState),
- {reply, {ok, NewIdxState}, State#st{idx_state = NewIdxState}};
-handle_call(compact, _From, State) ->
- Resp = couch_index_compactor:run(State#st.compactor, State#st.idx_state),
- {reply, Resp, State};
-handle_call(get_compactor_pid, _From, State) ->
- {reply, {ok, State#st.compactor}, State};
-handle_call({compacted, NewIdxState}, _From, State) ->
- #st{
- mod = Mod,
- idx_state = OldIdxState
- } = State,
- assert_signature_match(Mod, OldIdxState, NewIdxState),
- NewSeq = Mod:get(update_seq, NewIdxState),
- OldSeq = Mod:get(update_seq, OldIdxState),
- % For indices that require swapping files, we have to make sure we're
- % up to date with the current index. Otherwise indexes could roll back
- % (perhaps considerably) to previous points in history.
- case is_recompaction_enabled(NewIdxState, State) of
- true ->
- case NewSeq >= OldSeq of
- true -> {reply, ok, commit_compacted(NewIdxState, State)};
- false -> {reply, recompact, State}
- end;
- false ->
- {reply, ok, commit_compacted(NewIdxState, State)}
- end;
-handle_call({compaction_failed, Reason}, _From, State) ->
- #st{
- mod = Mod,
- idx_state = OldIdxState,
- waiters = Waiters
- } = State,
- send_all(Waiters, Reason),
- {ok, NewIdxState} = Mod:remove_compacted(OldIdxState),
- NewState = State#st{idx_state = NewIdxState, waiters = []},
- {reply, {ok, NewIdxState}, NewState}.
-
-handle_cast({trigger_update, UpdateSeq}, State) ->
- #st{
- mod = Mod,
- idx_state = IdxState
- } = State,
- case UpdateSeq =< Mod:get(update_seq, IdxState) of
- true ->
- {noreply, State};
- false ->
- couch_index_updater:run(State#st.updater, IdxState),
- {noreply, State}
- end;
-handle_cast({updated, NewIdxState}, State) ->
- {noreply, NewState} = handle_cast({new_state, NewIdxState}, State),
- case NewState#st.shutdown andalso (NewState#st.waiters =:= []) of
- true ->
- {stop, normal, NewState};
- false ->
- maybe_restart_updater(NewState),
- {noreply, NewState}
- end;
-handle_cast({new_state, NewIdxState}, State) ->
- #st{
- mod = Mod,
- idx_state = OldIdxState
- } = State,
- OldFd = Mod:get(fd, OldIdxState),
- NewFd = Mod:get(fd, NewIdxState),
- case NewFd == OldFd of
- true ->
- assert_signature_match(Mod, OldIdxState, NewIdxState),
- CurrSeq = Mod:get(update_seq, NewIdxState),
- Args = [
- Mod:get(db_name, NewIdxState),
- Mod:get(idx_name, NewIdxState),
- CurrSeq
- ],
- couch_log:debug("Updated index for db: ~s idx: ~s seq: ~B", Args),
- Rest = send_replies(State#st.waiters, CurrSeq, NewIdxState),
- case State#st.committed of
- true -> erlang:send_after(commit_delay(), self(), commit);
- false -> ok
- end,
- {noreply, State#st{
- idx_state = NewIdxState,
- waiters = Rest,
- committed = false
- }};
- false ->
- Fmt = "Ignoring update from old indexer for db: ~s idx: ~s",
- Args = [
- Mod:get(db_name, NewIdxState),
- Mod:get(idx_name, NewIdxState)
- ],
- couch_log:warning(Fmt, Args),
- {noreply, State}
- end;
-handle_cast({update_error, Error}, State) ->
- send_all(State#st.waiters, Error),
- {noreply, State#st{waiters = []}};
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(delete, State) ->
- #st{mod = Mod, idx_state = IdxState} = State,
- ok = Mod:delete(IdxState),
- {stop, normal, State};
-handle_cast({ddoc_updated, DDocResult}, State) ->
- #st{mod = Mod, idx_state = IdxState} = State,
- Shutdown =
- case DDocResult of
- {not_found, deleted} ->
- true;
- {ok, DDoc} ->
- DbName = Mod:get(db_name, IdxState),
- couch_util:with_db(DbName, fun(Db) ->
- {ok, NewIdxState} = Mod:init(Db, DDoc),
- Mod:get(signature, NewIdxState) =/= Mod:get(signature, IdxState)
- end)
- end,
- case Shutdown of
- true ->
- {stop, {shutdown, ddoc_updated}, State#st{shutdown = true}};
- false ->
- {noreply, State#st{shutdown = false}}
- end;
-handle_cast(ddoc_updated, State) ->
- #st{mod = Mod, idx_state = IdxState} = State,
- DbName = Mod:get(db_name, IdxState),
- DDocId = Mod:get(idx_name, IdxState),
- Shutdown = couch_util:with_db(DbName, fun(Db) ->
- case couch_db:open_doc(Db, DDocId, [ejson_body, ?ADMIN_CTX]) of
- {not_found, deleted} ->
- true;
- {ok, DDoc} ->
- {ok, NewIdxState} = Mod:init(Db, DDoc),
- Mod:get(signature, NewIdxState) =/= Mod:get(signature, IdxState)
- end
- end),
- case Shutdown of
- true ->
- {stop, {shutdown, ddoc_updated}, State#st{shutdown = true}};
- false ->
- {noreply, State#st{shutdown = false}}
- end;
-handle_cast(_Mesg, State) ->
- {stop, unhandled_cast, State}.
-
-handle_info(commit, #st{committed = true} = State) ->
- {noreply, State};
-handle_info(commit, State) ->
- #st{mod = Mod, idx_state = IdxState} = State,
- DbName = Mod:get(db_name, IdxState),
- IdxName = Mod:get(idx_name, IdxState),
- GetCommSeq = fun(Db) -> couch_db:get_committed_update_seq(Db) end,
- CommittedSeq = couch_util:with_db(DbName, GetCommSeq),
- case CommittedSeq >= Mod:get(update_seq, IdxState) of
- true ->
- % Commit the updates
- ok = Mod:commit(IdxState),
- couch_event:notify(DbName, {index_commit, IdxName}),
- {noreply, State#st{committed = true}};
- _ ->
- % We can't commit the header because the database seq that's
- % fully committed to disk is still behind us. If we committed
- % now and the database lost those changes our view could be
-            % forever out of sync with the database. A crash before we
-            % commit these changes is no big deal; we only lose the
-            % incremental changes since the last commit.
- erlang:send_after(commit_delay(), self(), commit),
- {noreply, State}
- end;
-handle_info(maybe_close, State) ->
- % We need to periodically check if our index file still
- % exists on disk because index cleanups don't notify
- % the couch_index process when a file has been deleted. If
-    % we don't check for this condition, the index can
-    % remain open indefinitely, wasting disk space.
- %
- % We make sure that we're idle before closing by looking
- % to see if we have any clients waiting for an update.
- Mod = State#st.mod,
- case State#st.waiters of
- [] ->
- case Mod:index_file_exists(State#st.idx_state) of
- true ->
- erlang:send_after(?CHECK_INTERVAL, self(), maybe_close),
- {noreply, State};
- false ->
- {stop, normal, State}
- end;
- _ ->
- erlang:send_after(?CHECK_INTERVAL, self(), maybe_close),
- {noreply, State}
- end;
-handle_info({'DOWN', _, _, _Pid, _}, #st{mod = Mod, idx_state = IdxState} = State) ->
- Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:debug("Index shutdown by monitor notice for db: ~s idx: ~s", Args),
- catch send_all(State#st.waiters, shutdown),
- {stop, normal, State#st{waiters = []}}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-maybe_restart_updater(#st{waiters = []}) ->
- ok;
-maybe_restart_updater(#st{idx_state = IdxState} = State) ->
- couch_index_updater:run(State#st.updater, IdxState).
-
-send_all(Waiters, Reply) ->
- [gen_server:reply(From, Reply) || {From, _} <- Waiters].
-
-send_replies(Waiters, UpdateSeq, IdxState) ->
- Pred = fun({_, S}) -> S =< UpdateSeq end,
- {ToSend, Remaining} = lists:partition(Pred, Waiters),
- [gen_server:reply(From, {ok, IdxState}) || {From, _} <- ToSend],
- Remaining.
-
-assert_signature_match(Mod, OldIdxState, NewIdxState) ->
- case {Mod:get(signature, OldIdxState), Mod:get(signature, NewIdxState)} of
- {Sig, Sig} -> ok;
- _ -> erlang:error(signature_mismatch)
- end.
-
-commit_compacted(NewIdxState, State) ->
- #st{
- mod = Mod,
- idx_state = OldIdxState,
- updater = Updater
- } = State,
- {ok, NewIdxState1} = Mod:swap_compacted(OldIdxState, NewIdxState),
- % Restart the indexer if it's running.
- case couch_index_updater:is_running(Updater) of
- true -> ok = couch_index_updater:restart(Updater, NewIdxState1);
- false -> ok
- end,
- case State#st.committed of
- true -> erlang:send_after(commit_delay(), self(), commit);
- false -> ok
- end,
- State#st{
- idx_state = NewIdxState1,
- committed = false
- }.
-
-is_recompaction_enabled(IdxState, #st{mod = Mod}) ->
- DbName = binary_to_list(Mod:get(db_name, IdxState)),
- IdxName = binary_to_list(Mod:get(idx_name, IdxState)),
- IdxKey = DbName ++ ":" ++ IdxName,
-
- IdxSignature = couch_index_util:hexsig((Mod:get(signature, IdxState))),
-
- Global = get_value("view_compaction", "enabled_recompaction"),
- PerSignature = get_value("view_compaction.recompaction", IdxSignature),
- PerIdx = get_value("view_compaction.recompaction", IdxKey),
- PerDb = get_value("view_compaction.recompaction", DbName),
-
- find_most_specific([Global, PerDb, PerIdx, PerSignature], true).
-
-find_most_specific(Settings, Default) ->
- Reversed = lists:reverse([Default | Settings]),
- [Value | _] = lists:dropwhile(fun(A) -> A =:= undefined end, Reversed),
- Value.
-
-get_value(Section, Key) ->
- case config:get(Section, Key) of
- "enabled" -> true;
- "disabled" -> false;
- "true" -> true;
- "false" -> false;
- undefined -> undefined
- end.
-
-commit_delay() ->
- config:get_integer("query_server_config", "commit_freq", 5) * 1000.
-
-group_info_timeout_msec() ->
- Timeout = config:get("query_server_config", "group_info_timeout", "5000"),
- case Timeout of
- "infinity" ->
- infinity;
- Milliseconds ->
- list_to_integer(Milliseconds)
- end.
-
--ifdef(TEST).
--include_lib("couch/include/couch_eunit.hrl").
-
-get(db_name, _, _) ->
- <<"db_name">>;
-get(idx_name, _, _) ->
- <<"idx_name">>;
-get(signature, _, _) ->
- <<61, 237, 157, 230, 136, 93, 96, 201, 204, 17, 137, 186, 50, 249, 44, 135>>.
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- ok = meck:new([config], [passthrough]),
- ok = meck:new([test_index], [non_strict]),
- ok = meck:expect(test_index, get, fun get/3),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-setup(Settings) ->
- meck:reset([config, test_index]),
- ok = meck:expect(config, get, fun(Section, Key) ->
- configure(Section, Key, Settings)
- end),
- {undefined, #st{mod = {test_index}}}.
-
-teardown(_, _) ->
- ok.
-
-configure("view_compaction", "enabled_recompaction", [Global, _Db, _Index]) ->
- Global;
-configure("view_compaction.recompaction", "db_name", [_Global, Db, _Index]) ->
- Db;
-configure("view_compaction.recompaction", "db_name:" ++ _, [_, _, Index]) ->
- Index;
-configure(Section, Key, _) ->
- meck:passthrough([Section, Key]).
-
-recompaction_configuration_test_() ->
- {
- "Compaction tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- recompaction_configuration_tests()
- }
- }
- }.
-
-recompaction_configuration_tests() ->
- AllCases = couch_tests_combinatorics:product([
- [undefined, "true", "false"],
- [undefined, "enabled", "disabled"],
- [undefined, "enabled", "disabled"]
- ]),
-
- EnabledCases = [
- [undefined, undefined, undefined],
-
- [undefined, undefined, "enabled"],
- [undefined, "enabled", undefined],
- [undefined, "disabled", "enabled"],
- [undefined, "enabled", "enabled"],
-
- ["true", undefined, undefined],
- ["true", undefined, "enabled"],
- ["true", "disabled", "enabled"],
- ["true", "enabled", undefined],
- ["true", "enabled", "enabled"],
-
- ["false", undefined, "enabled"],
- ["false", "enabled", undefined],
- ["false", "disabled", "enabled"],
- ["false", "enabled", "enabled"]
- ],
-
- DisabledCases = [
- [undefined, undefined, "disabled"],
- [undefined, "disabled", undefined],
- [undefined, "disabled", "disabled"],
- [undefined, "enabled", "disabled"],
-
- ["true", undefined, "disabled"],
- ["true", "disabled", undefined],
- ["true", "disabled", "disabled"],
- ["true", "enabled", "disabled"],
-
- ["false", undefined, undefined],
- ["false", undefined, "disabled"],
- ["false", "disabled", undefined],
- ["false", "disabled", "disabled"],
- ["false", "enabled", "disabled"]
- ],
-
- ?assertEqual([], AllCases -- (EnabledCases ++ DisabledCases)),
-
- [{Settings, fun should_not_call_recompact/2} || Settings <- DisabledCases] ++
- [{Settings, fun should_call_recompact/2} || Settings <- EnabledCases].
-
-should_call_recompact(Settings, {IdxState, State}) ->
- {
- test_id(Settings),
- ?_test(begin
- ?assert(is_recompaction_enabled(IdxState, State)),
- ok
- end)
- }.
-
-should_not_call_recompact(Settings, {IdxState, State}) ->
- {
- test_id(Settings),
- ?_test(begin
- ?assertNot(is_recompaction_enabled(IdxState, State)),
- ok
- end)
- }.
-
-to_string(undefined) -> "undefined";
-to_string(Value) -> Value.
-
-test_id(Settings0) ->
- Settings1 = [to_string(Value) || Value <- Settings0],
- "[ " ++ lists:flatten(string:join(Settings1, " , ")) ++ " ]".
-
-get_group_timeout_info_test_() ->
- {
- foreach,
- fun() -> ok end,
- fun(_) -> meck:unload() end,
- [
- t_group_timeout_info_integer(),
- t_group_timeout_info_infinity()
- ]
- }.
-
-t_group_timeout_info_integer() ->
- ?_test(begin
- meck:expect(
- config,
- get,
- fun("query_server_config", "group_info_timeout", _) ->
- "5001"
- end
- ),
- ?assertEqual(5001, group_info_timeout_msec())
- end).
-
-t_group_timeout_info_infinity() ->
- ?_test(begin
- meck:expect(
- config,
- get,
- fun("query_server_config", "group_info_timeout", _) ->
- "infinity"
- end
- ),
- ?assertEqual(infinity, group_info_timeout_msec())
- end).
-
--endif.
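is_recompaction_enabled/2 above resolves four settings from least to most specific, defaulting to enabled: the global [view_compaction] enabled_recompaction flag, then per-database, per-index (db:ddoc), and per-signature keys in the [view_compaction.recompaction] section. A minimal ini-style sketch of how those keys could look; the database name, design document id, and signature below are hypothetical:

    [view_compaction]
    enabled_recompaction = false

    [view_compaction.recompaction]
    ; least to most specific; the most specific defined value wins
    mydb = enabled
    mydb:_design/myview = disabled
    3ded9de6885d60c9cc1189ba32f92c87 = enabled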
diff --git a/src/couch_index/src/couch_index_app.erl b/src/couch_index/src/couch_index_app.erl
deleted file mode 100644
index bdf770cb2..000000000
--- a/src/couch_index/src/couch_index_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, StartArgs) ->
- couch_index_sup:start_link(StartArgs).
-
-stop(_State) ->
- ok.
diff --git a/src/couch_index/src/couch_index_compactor.erl b/src/couch_index/src/couch_index_compactor.erl
deleted file mode 100644
index 8b592d140..000000000
--- a/src/couch_index/src/couch_index_compactor.erl
+++ /dev/null
@@ -1,122 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_compactor).
--behaviour(gen_server).
-
-%% API
--export([start_link/2, run/2, cancel/1, is_running/1, get_compacting_pid/1]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(st, {
- idx,
- mod,
- pid
-}).
-
-start_link(Index, Module) ->
- gen_server:start_link(?MODULE, {Index, Module}, []).
-
-run(Pid, IdxState) ->
- gen_server:call(Pid, {compact, IdxState}).
-
-cancel(Pid) ->
- gen_server:call(Pid, cancel).
-
-is_running(Pid) ->
- gen_server:call(Pid, is_running).
-
-get_compacting_pid(Pid) ->
- gen_server:call(Pid, get_compacting_pid).
-
-init({Index, Module}) ->
- process_flag(trap_exit, true),
- {ok, #st{idx = Index, mod = Module}}.
-
-terminate(_Reason, State) ->
- couch_util:shutdown_sync(State#st.pid),
- ok.
-
-handle_call({compact, _}, _From, #st{pid = Pid} = State) when is_pid(Pid) ->
- {reply, {ok, Pid}, State};
-handle_call({compact, IdxState}, _From, #st{idx = Idx} = State) ->
- Pid = spawn_link(fun() -> compact(Idx, State#st.mod, IdxState) end),
- {reply, {ok, Pid}, State#st{pid = Pid}};
-handle_call(cancel, _From, #st{pid = undefined} = State) ->
- {reply, ok, State};
-handle_call(cancel, _From, #st{pid = Pid} = State) ->
- unlink(Pid),
- exit(Pid, kill),
- {reply, ok, State#st{pid = undefined}};
-handle_call(get_compacting_pid, _From, #st{pid = Pid} = State) ->
- {reply, {ok, Pid}, State};
-handle_call(is_running, _From, #st{pid = Pid} = State) when is_pid(Pid) ->
- {reply, true, State};
-handle_call(is_running, _From, State) ->
- {reply, false, State}.
-
-handle_cast(_Mesg, State) ->
- {stop, unknown_cast, State}.
-
-handle_info({'EXIT', Pid, normal}, #st{pid = Pid} = State) ->
- {noreply, State#st{pid = undefined}};
-handle_info({'EXIT', Pid, Reason}, #st{pid = Pid} = State) ->
- #st{idx = Idx, mod = Mod} = State,
- {ok, IdxState} = gen_server:call(Idx, {compaction_failed, Reason}),
- DbName = Mod:get(db_name, IdxState),
- IdxName = Mod:get(idx_name, IdxState),
- Args = [DbName, IdxName, Reason],
- couch_log:error("Compaction failed for db: ~s idx: ~s reason: ~p", Args),
- {noreply, State#st{pid = undefined}};
-handle_info({'EXIT', _Pid, normal}, State) ->
- {noreply, State};
-handle_info({'EXIT', Pid, _Reason}, #st{idx = Pid} = State) ->
- {stop, normal, State};
-handle_info(_Mesg, State) ->
- {stop, unknown_info, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-compact(Parent, Mod, IdxState) ->
- DbName = Mod:get(db_name, IdxState),
- %% We use with_db here to make sure we hold db open
- %% during both phases of compaction
- %% * compact
- %% * recompact
- couch_util:with_db(DbName, fun(_) ->
- compact(Parent, Mod, IdxState, [])
- end).
-
-compact(Idx, Mod, IdxState, Opts) ->
- DbName = Mod:get(db_name, IdxState),
- IndexName = Mod:get(idx_name, IdxState),
- erlang:put(io_priority, {view_compact, DbName, IndexName}),
- Args = [DbName, Mod:get(idx_name, IdxState)],
- couch_log:info("Compaction started for db: ~s idx: ~s", Args),
- {ok, NewIdxState} = couch_util:with_db(DbName, fun(Db) ->
- Mod:compact(Db, IdxState, Opts)
- end),
- ok = Mod:commit(NewIdxState),
- case gen_server:call(Idx, {compacted, NewIdxState}) of
- recompact ->
- couch_log:info("Compaction restarting for db: ~s idx: ~s", Args),
- compact(Idx, Mod, NewIdxState, [recompact]);
- _ ->
- couch_log:info("Compaction finished for db: ~s idx: ~s", Args),
- ok
- end.
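Compaction is normally requested through couch_index:compact/2 shown earlier; with the monitor option the caller receives a monitor reference on the compaction worker and can wait for it to finish. A minimal usage sketch, assuming IndexPid is a running couch_index process; wait_for_compaction and the timeout are illustrative:

    wait_for_compaction(IndexPid) ->
        {ok, MonRef} = couch_index:compact(IndexPid, [monitor]),
        receive
            %% The compaction worker exits normal once compact/recompact is done.
            {'DOWN', MonRef, process, _CompactorPid, Reason} ->
                Reason
        after 600000 ->
            erlang:demonitor(MonRef, [flush]),
            timeout
        end.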
diff --git a/src/couch_index/src/couch_index_debug.erl b/src/couch_index/src/couch_index_debug.erl
deleted file mode 100644
index 3de7fad79..000000000
--- a/src/couch_index/src/couch_index_debug.erl
+++ /dev/null
@@ -1,171 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_debug).
-
--export([
- help/0,
- help/1
-]).
-
--export([
- names/0,
- print_linked_processes/0,
- busy/1,
- busy/2,
- restart_busy/1,
- restart_busy/2,
- restart_busy/3
-]).
-
--type throw(_Reason) :: no_return().
-
--type process_name() :: atom().
--type function_name() :: atom().
-
-help() ->
- [
- %% list of provided commands
- names,
- print_linked_processes,
- busy,
- restart_busy
- ].
-
--spec help(Function :: function_name()) -> ok.
-%% erlfmt-ignore
-help(names) ->
- io:format("
- names()
- --------------
-
-    Returns the list of named processes which constitute
-    the sharded couch_index_server
- ---
- ", []);
-help(print_linked_processes) ->
- io:format("
- print_linked_processes()
- --------------
-
-    Prints the cluster of linked processes. The output would look similar to:
-
- |name | reductions | message_queue_len | memory |id
- |--------------------------------------------------|------------|-------------------|--------------|--
- |index_server_1[<0.320.0>] | 1115 | 0 | 17000 |
- | couch_secondary_services[<0.312.0>] | 93258 | 0 | 68600 |
- | couch_event_listener:do_init/3[<0.323.0>] | 195 | 0 | 2856 |
- | index_server_1[<0.320.0>] | 1115 | 0 | 17000 |
- | | | | |
- |index_server_2[<0.324.0>] | 278 | 0 | 6088 |
- | couch_secondary_services[<0.312.0>] | 93260 | 0 | 68600 |
- | couch_event_listener:do_init/3[<0.326.0>] | 161 | 0 | 2856 |
- | index_server_2[<0.324.0>] | 278 | 0 | 6088 |
- ---
- ", []);
-help(busy) ->
- io:format("
-    busy(Threshold)
-    busy(Threshold, Property)
- --------------
-
-    Finds the couch_index_server processes and returns the ones whose
-    Property value is greater than the provided Threshold.
-
-    If Property is not specified, the message queue length is used.
-
- Properties which can be used are listed below
-
- - heap_size
- - memory
- - message_queue_len (default)
- - reductions
- - total_heap_size
- ---
- ", []);
-help(restart_busy) ->
- io:format("
-    restart_busy(Threshold)
-    restart_busy(Threshold, DelayInMsec)
-    restart_busy(Threshold, DelayInMsec, Property)
- --------------
-
-    Finds the couch_index_server processes and returns the ones whose
-    Property value is greater than the provided Threshold.
-
-    Then it restarts the identified processes.
-
-    If Property is not specified, the message queue length is used.
-
- Properties which can be used are listed below
-
- - heap_size
- - memory
- - message_queue_len (default)
- - reductions
- - total_heap_size
-
-    The restarts happen sequentially with the given DelayInMsec between them.
-    If DelayInMsec is not provided, the default value is one second.
-    The function doesn't proceed to the next server until the replacement
-    server process starts.
- ---
- ", []);
-help(Unknown) ->
- io:format("Unknown function: `~p`. Please try one of the following:~n", [Unknown]),
- [io:format(" - ~s~n", [Function]) || Function <- help()],
- io:format(" ---~n", []),
- ok.
-
--spec names() -> [process_name()].
-
-names() ->
- couch_index_server:names().
-
--spec print_linked_processes() -> ok.
-
-print_linked_processes() ->
- couch_debug:print_linked_processes(couch_index_server).
-
--spec busy(Threshold :: pos_integer()) ->
- [Name :: process_name()].
-
-busy(Threshold) when Threshold > 0 ->
- couch_debug:busy(names(), Threshold).
-
--spec busy(Threshold :: pos_integer(), Property :: couch_debug:busy_properties()) ->
- [Name :: process_name()].
-
-busy(Threshold, Property) when Threshold > 0 ->
- couch_debug:busy(names(), Threshold, Property).
-
--spec restart_busy(Threshold :: pos_integer()) ->
- throw({timeout, Name :: process_name()}).
-
-restart_busy(Threshold) ->
- couch_debug:restart_busy(names(), Threshold, 1000).
-
--spec restart_busy(Threshold :: pos_integer(), DelayInMsec :: pos_integer()) ->
- throw({timeout, Name :: process_name()}).
-
-restart_busy(Threshold, DelayInMsec) ->
- couch_debug:restart_busy(names(), Threshold, DelayInMsec).
-
--spec restart_busy(
-    Threshold :: pos_integer(),
- DelayInMsec :: pos_integer(),
- Property :: couch_debug:busy_properties()
-) ->
- throw({timeout, Name :: process_name()}).
-
-restart_busy(Threshold, DelayInMsec, Property) ->
- couch_debug:restart_busy(names(), Threshold, DelayInMsec, Property).
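A short remsh usage sketch of the helpers above; the threshold of 1000, the 500 ms delay, and the choice of message_queue_len are illustrative:

    %% List the sharded index servers, find the busy ones by message queue
    %% length, then restart those over the threshold with a pause between.
    couch_index_debug:names(),
    couch_index_debug:busy(1000, message_queue_len),
    couch_index_debug:restart_busy(1000, 500, message_queue_len).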
diff --git a/src/couch_index/src/couch_index_epi.erl b/src/couch_index/src/couch_index_epi.erl
deleted file mode 100644
index 1c4eb9596..000000000
--- a/src/couch_index/src/couch_index_epi.erl
+++ /dev/null
@@ -1,50 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- couch_index.
-
-providers() ->
- [
- {couch_db, couch_index_plugin_couch_db}
- ].
-
-services() ->
- [
- {couch_index, couch_index_plugin}
- ].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/couch_index/src/couch_index_plugin.erl b/src/couch_index/src/couch_index_plugin.erl
deleted file mode 100644
index 4c2f7e68a..000000000
--- a/src/couch_index/src/couch_index_plugin.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_plugin).
-
--export([index_update/4]).
-
--export([before_open/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SERVICE_ID, couch_index).
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-index_update(State, View, Updated, Removed) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- case couch_epi:is_configured(Handle, index_update, 4) of
- true ->
- update(Handle, State, View, Updated, Removed);
- false ->
- ok
- end.
-
-before_open(Mod, State) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, before_open, [Mod, State], [pipe]).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-maybe_transform(Fun) when is_function(Fun) ->
- Fun();
-maybe_transform(Items) ->
- Items.
-
-update(Handle, State, View, Updated, Removed) ->
- Args = [State, View, maybe_transform(Updated), maybe_transform(Removed)],
- couch_epi:apply(Handle, ?SERVICE_ID, index_update, Args, []).
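Because before_open/2 is applied as a pipe, a provider's callback receives the module and index state and must hand the [Module, IdxState] pair back, which is what couch_index:start_link/1 destructures. A minimal provider sketch; the module name is hypothetical and its registration through a couch_epi_plugin providers/0 list is assumed but not shown:

    -module(my_index_plugin).
    -export([before_open/2]).

    %% May inspect or rewrite the index state before the index is opened,
    %% but must return the pair so the pipe can continue.
    before_open(Module, IdxState) ->
        [Module, IdxState].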
diff --git a/src/couch_index/src/couch_index_plugin_couch_db.erl b/src/couch_index/src/couch_index_plugin_couch_db.erl
deleted file mode 100644
index b90fa6cf2..000000000
--- a/src/couch_index/src/couch_index_plugin_couch_db.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_plugin_couch_db).
-
--export([
- is_valid_purge_client/2,
- on_compact/2
-]).
-
-is_valid_purge_client(DbName, Props) ->
- couch_mrview_index:verify_index_exists(DbName, Props).
-
-on_compact(DbName, DDocs) ->
- couch_mrview_index:ensure_local_purge_docs(DbName, DDocs).
diff --git a/src/couch_index/src/couch_index_server.erl b/src/couch_index/src/couch_index_server.erl
deleted file mode 100644
index 2e368bfc2..000000000
--- a/src/couch_index/src/couch_index_server.erl
+++ /dev/null
@@ -1,396 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_server).
--behaviour(gen_server).
--behaviour(config_listener).
-
--vsn(2).
-
--export([start_link/1, validate/2, get_index/4, get_index/3, get_index/2]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% Sharding functions
--export([num_servers/0, server_name/1, by_sig/1, by_pid/1, by_db/1]).
--export([aggregate_queue_len/0, names/0]).
-
-% Exported for callbacks
--export([
- handle_config_change/5,
- handle_config_terminate/3,
- handle_db_event/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(RELISTEN_DELAY, 5000).
-
--record(st, {
- root_dir,
- server_name,
- by_sig,
- by_pid,
- by_db
-}).
-
-start_link(N) ->
- gen_server:start_link({local, server_name(N)}, ?MODULE, [N], []).
-
-validate(Db, DDoc) ->
- LoadModFun = fun
- ({ModNameList, "true"}) ->
- try
- [list_to_existing_atom(ModNameList)]
- catch
- error:badarg ->
- []
- end;
- ({_ModNameList, _Enabled}) ->
- []
- end,
- ValidateFun = fun(ModName) ->
- ModName:validate(Db, DDoc)
- end,
- EnabledIndexers = lists:flatmap(LoadModFun, config:get("indexers")),
- lists:foreach(ValidateFun, EnabledIndexers).
-
-get_index(Module, <<"shards/", _/binary>> = DbName, DDoc) when
- is_record(DDoc, doc)
-->
- get_index(Module, DbName, DDoc, nil);
-get_index(Module, <<"shards/", _/binary>> = DbName, DDoc) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- exit(fabric:open_doc(mem3:dbname(DbName), DDoc, [ejson_body, ?ADMIN_CTX]))
- end),
- receive
- {'DOWN', Ref, process, Pid, {ok, Doc}} ->
- get_index(Module, DbName, Doc, nil);
- {'DOWN', Ref, process, Pid, Error} ->
- Error
- after 61000 ->
- erlang:demonitor(Ref, [flush]),
- {error, timeout}
- end;
-get_index(Module, DbName, DDoc) when is_binary(DbName) ->
- get_index(Module, DbName, DDoc, nil);
-get_index(Module, Db, DDoc) ->
- get_index(Module, couch_db:name(Db), DDoc).
-
-get_index(Module, DbName, DDoc, Fun) when is_binary(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- get_index(Module, Db, DDoc, Fun)
- end);
-get_index(Module, Db, DDoc, Fun) when is_binary(DDoc) ->
- case couch_db:open_doc(Db, DDoc, [ejson_body, ?ADMIN_CTX]) of
- {ok, Doc} -> get_index(Module, Db, Doc, Fun);
- Error -> Error
- end;
-get_index(Module, Db, DDoc, Fun) when is_function(Fun, 1) ->
- {ok, InitState} = Module:init(Db, DDoc),
- {ok, FunResp} = Fun(InitState),
- {ok, Pid} = get_index(Module, InitState),
- {ok, Pid, FunResp};
-get_index(Module, Db, DDoc, _Fun) ->
- {ok, InitState} = Module:init(Db, DDoc),
- get_index(Module, InitState).
-
-get_index(Module, IdxState) ->
- DbName = Module:get(db_name, IdxState),
- Sig = Module:get(signature, IdxState),
- case ets:lookup(by_sig(DbName), {DbName, Sig}) of
- [{_, Pid}] when is_pid(Pid) ->
- DDocId = Module:get(idx_name, IdxState),
- case ets:match_object(by_db(DbName), {DbName, {DDocId, Sig}}) of
- [] ->
- Args = [Pid, DbName, DDocId, Sig],
- gen_server:cast(server_name(DbName), {add_to_ets, Args});
- _ ->
- ok
- end,
- {ok, Pid};
- _ ->
- Args = {Module, IdxState, DbName, Sig},
- gen_server:call(server_name(DbName), {get_index, Args}, infinity)
- end.
-
-init([N]) ->
- process_flag(trap_exit, true),
- ets:new(by_sig(N), [protected, set, named_table]),
- ets:new(by_pid(N), [private, set, named_table]),
- ets:new(by_db(N), [protected, bag, named_table]),
- RootDir = couch_index_util:root_dir(),
- % We only need one of the index servers to nuke this on startup.
- case N of
- 1 -> couch_file:init_delete_dir(RootDir);
- _ -> ok
- end,
- St = #st{
- root_dir = RootDir,
- server_name = server_name(N),
- by_sig = by_sig(N),
- by_pid = by_pid(N),
- by_db = by_db(N)
- },
- ok = config:listen_for_changes(?MODULE, St),
- couch_event:link_listener(?MODULE, handle_db_event, St, [all_dbs]),
- {ok, St}.
-
-terminate(_Reason, State) ->
- Pids = [Pid || {Pid, _} <- ets:tab2list(State#st.by_pid)],
- lists:map(fun couch_util:shutdown_sync/1, Pids),
- ok.
-
-handle_call({get_index, {_Mod, _IdxState, DbName, Sig} = Args}, From, State) ->
- case ets:lookup(State#st.by_sig, {DbName, Sig}) of
- [] ->
- spawn_link(fun() -> new_index(Args) end),
- ets:insert(State#st.by_sig, {{DbName, Sig}, [From]}),
- {noreply, State};
- [{_, Waiters}] when is_list(Waiters) ->
- ets:insert(State#st.by_sig, {{DbName, Sig}, [From | Waiters]}),
- {noreply, State};
- [{_, Pid}] when is_pid(Pid) ->
- {reply, {ok, Pid}, State}
- end;
-handle_call({async_open, {DbName, DDocId, Sig}, {ok, Pid}}, _From, State) ->
- [{_, Waiters}] = ets:lookup(State#st.by_sig, {DbName, Sig}),
- [gen_server:reply(From, {ok, Pid}) || From <- Waiters],
- link(Pid),
- add_to_ets(DbName, Sig, DDocId, Pid, State),
- {reply, ok, State};
-handle_call({async_error, {DbName, _DDocId, Sig}, Error}, _From, State) ->
- [{_, Waiters}] = ets:lookup(State#st.by_sig, {DbName, Sig}),
- [gen_server:reply(From, Error) || From <- Waiters],
- ets:delete(State#st.by_sig, {DbName, Sig}),
- {reply, ok, State};
-handle_call({reset_indexes, DbName}, _From, State) ->
- reset_indexes(DbName, State),
- {reply, ok, State}.
-
-handle_cast({reset_indexes, DbName}, State) ->
- reset_indexes(DbName, State),
- {noreply, State};
-handle_cast({add_to_ets, [Pid, DbName, DDocId, Sig]}, State) ->
- % check if Pid still exists
- case ets:lookup(State#st.by_pid, Pid) of
- [{Pid, {DbName, Sig}}] when is_pid(Pid) ->
- ets:insert(State#st.by_db, {DbName, {DDocId, Sig}});
- _ ->
- ok
- end,
- {noreply, State};
-handle_cast({rem_from_ets, [DbName, DDocId, Sig]}, State) ->
- ets:delete_object(State#st.by_db, {DbName, {DDocId, Sig}}),
- {noreply, State}.
-
-handle_info({'EXIT', Pid, Reason}, Server) ->
- case ets:lookup(Server#st.by_pid, Pid) of
- [{Pid, {DbName, Sig}}] ->
- DDocIds = [
- DDocId
- || {_, {DDocId, _}} <-
- ets:match_object(Server#st.by_db, {DbName, {'$1', Sig}})
- ],
- rem_from_ets(DbName, Sig, DDocIds, Pid, Server);
- [] when Reason /= normal ->
- exit(Reason);
- _Else ->
- ok
- end,
- {noreply, Server};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, couch_index_util:root_dir()),
- {noreply, State};
-handle_info(Msg, State) ->
- couch_log:warning("~p did not expect ~p", [?MODULE, Msg]),
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_config_change("couchdb", "index_dir", RootDir, _, #st{root_dir = RootDir} = St) ->
- {ok, St};
-handle_config_change("couchdb", "view_index_dir", RootDir, _, #st{root_dir = RootDir} = St) ->
- {ok, St};
-handle_config_change("couchdb", "index_dir", _, _, St) ->
- exit(whereis(St#st.server_name), config_change),
- remove_handler;
-handle_config_change("couchdb", "view_index_dir", _, _, St) ->
- exit(whereis(St#st.server_name), config_change),
- remove_handler;
-handle_config_change(_, _, _, _, St) ->
- {ok, St}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(State#st.server_name), restart_config_listener),
- {ok, State}.
-
-new_index({Mod, IdxState, DbName, Sig}) ->
- DDocId = Mod:get(idx_name, IdxState),
- case couch_index:start_link({Mod, IdxState}) of
- {ok, Pid} ->
- ok = gen_server:call(
- server_name(DbName), {async_open, {DbName, DDocId, Sig}, {ok, Pid}}
- ),
- unlink(Pid);
- Error ->
- ok = gen_server:call(
- server_name(DbName), {async_error, {DbName, DDocId, Sig}, Error}
- )
- end.
-
-reset_indexes(DbName, #st{} = State) ->
-    % shut down all the updaters and clear the files; the db got changed
- SigDDocIds = lists:foldl(
- fun({_, {DDocId, Sig}}, DDict) ->
- dict:append(Sig, DDocId, DDict)
- end,
- dict:new(),
- ets:lookup(State#st.by_db, DbName)
- ),
- Fun = fun({Sig, DDocIds}) ->
- [{_, Pid}] = ets:lookup(State#st.by_sig, {DbName, Sig}),
- unlink(Pid),
- gen_server:cast(Pid, delete),
- receive
- {'EXIT', Pid, _} ->
- ok
- after 0 ->
- ok
- end,
- rem_from_ets(DbName, Sig, DDocIds, Pid, State)
- end,
- lists:foreach(Fun, dict:to_list(SigDDocIds)),
- % We only need one of the index servers to do this.
- case State#st.server_name == server_name(1) of
- true ->
- Path = couch_index_util:index_dir("", DbName),
- couch_file:nuke_dir(State#st.root_dir, Path);
- false ->
- ok
- end.
-
-add_to_ets(DbName, Sig, DDocId, Pid, #st{} = St) ->
- ets:insert(St#st.by_sig, {{DbName, Sig}, Pid}),
- ets:insert(St#st.by_pid, {Pid, {DbName, Sig}}),
- ets:insert(St#st.by_db, {DbName, {DDocId, Sig}}).
-
-rem_from_ets(DbName, Sig, DDocIds, Pid, #st{} = St) ->
- ets:delete(St#st.by_sig, {DbName, Sig}),
- ets:delete(St#st.by_pid, Pid),
- lists:foreach(
- fun(DDocId) ->
- ets:delete_object(St#st.by_db, {DbName, {DDocId, Sig}})
- end,
- DDocIds
- ).
-
-handle_db_event(DbName, created, St) ->
- gen_server:cast(St#st.server_name, {reset_indexes, DbName}),
- {ok, St};
-handle_db_event(DbName, deleted, St) ->
- gen_server:cast(St#st.server_name, {reset_indexes, DbName}),
- {ok, St};
-handle_db_event(<<"shards/", _/binary>> = DbName, {ddoc_updated, DDocId}, St) ->
- DDocResult = couch_util:with_db(DbName, fun(Db) ->
- couch_db:open_doc(Db, DDocId, [ejson_body, ?ADMIN_CTX])
- end),
- LocalShards =
- try
- mem3:local_shards(mem3:dbname(DbName))
- catch
- error:database_does_not_exist ->
- []
- end,
- DbShards = [mem3:name(Sh) || Sh <- LocalShards],
- lists:foreach(
- fun(DbShard) ->
- lists:foreach(
- fun({_DbShard, {_DDocId, Sig}}) ->
- % check if there are other ddocs with the same Sig for the same db
- SigDDocs = ets:match_object(St#st.by_db, {DbShard, {'$1', Sig}}),
- if
- length(SigDDocs) > 1 ->
- % remove records from by_db for this DDoc
- Args = [DbShard, DDocId, Sig],
- gen_server:cast(St#st.server_name, {rem_from_ets, Args});
- true ->
- % single DDoc with this Sig - close couch_index processes
- case ets:lookup(St#st.by_sig, {DbShard, Sig}) of
- [{_, IndexPid}] ->
- (catch gen_server:cast(IndexPid, {ddoc_updated, DDocResult}));
- [] ->
- []
- end
- end
- end,
- ets:match_object(St#st.by_db, {DbShard, {DDocId, '$1'}})
- )
- end,
- DbShards
- ),
- {ok, St};
-handle_db_event(DbName, {ddoc_updated, DDocId}, St) ->
- lists:foreach(
- fun({_DbName, {_DDocId, Sig}}) ->
- case ets:lookup(St#st.by_sig, {DbName, Sig}) of
- [{_, IndexPid}] ->
- (catch gen_server:cast(IndexPid, ddoc_updated));
- [] ->
- ok
- end
- end,
- ets:match_object(St#st.by_db, {DbName, {DDocId, '$1'}})
- ),
- {ok, St};
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
-
-num_servers() ->
- erlang:system_info(schedulers).
-
-server_name(Arg) ->
- name("index_server", Arg).
-
-by_sig(Arg) ->
- name("couchdb_indexes_by_sig", Arg).
-
-by_pid(Arg) ->
- name("couchdb_indexes_by_pid", Arg).
-
-by_db(Arg) ->
- name("couchdb_indexes_by_db", Arg).
-
-name(BaseName, Arg) when is_list(Arg) ->
- name(BaseName, ?l2b(Arg));
-name(BaseName, Arg) when is_binary(Arg) ->
- N = 1 + erlang:phash2(Arg, num_servers()),
- name(BaseName, N);
-name(BaseName, N) when is_integer(N), N > 0 ->
- list_to_atom(BaseName ++ "_" ++ integer_to_list(N)).
-
-names() ->
- N = num_servers(),
- [server_name(I) || I <- lists:seq(1, N)].
-
-aggregate_queue_len() ->
- N = num_servers(),
- Names = [server_name(I) || I <- lists:seq(1, N)],
- MQs = [
- process_info(whereis(Name), message_queue_len)
- || Name <- Names
- ],
- lists:sum([X || {_, X} <- MQs]).
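The server is sharded: there is one index_server_<N> process and one set of by_sig/by_pid/by_db tables per scheduler, and every name is derived by hashing the database name, so all state for a given database lands on the same shard. A small resolution sketch; the demo module name and its use are illustrative:

    -module(index_shard_demo).
    -export([where/1]).

    %% Returns the shard process a database maps to, the matching by_sig
    %% table, and the total message queue backlog across all shards.
    where(DbName) ->
        Server = couch_index_server:server_name(DbName),
        BySig = couch_index_server:by_sig(DbName),
        Backlog = couch_index_server:aggregate_queue_len(),
        {Server, BySig, Backlog}.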
diff --git a/src/couch_index/src/couch_index_sup.erl b/src/couch_index/src/couch_index_sup.erl
deleted file mode 100644
index eea4cc3ab..000000000
--- a/src/couch_index/src/couch_index_sup.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_sup).
--behaviour(supervisor).
--export([init/1]).
-
--export([start_link/1]).
-
-start_link(Args) ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
-
-init([]) ->
- {ok, {{one_for_one, 3, 10}, couch_epi:register_service(couch_index_epi, [])}}.
diff --git a/src/couch_index/src/couch_index_updater.erl b/src/couch_index/src/couch_index_updater.erl
deleted file mode 100644
index fe2150505..000000000
--- a/src/couch_index/src/couch_index_updater.erl
+++ /dev/null
@@ -1,231 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_updater).
--behaviour(gen_server).
-
-%% API
--export([start_link/2, run/2, is_running/1, update/2, restart/2]).
-
-%% for upgrades
--export([update/3]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(st, {
- idx,
- mod,
- pid = nil
-}).
-
-start_link(Index, Module) ->
- gen_server:start_link(?MODULE, {Index, Module}, []).
-
-run(Pid, IdxState) ->
- gen_server:call(Pid, {update, IdxState}).
-
-is_running(Pid) ->
- gen_server:call(Pid, is_running).
-
-update(Mod, State) ->
- update(nil, Mod, State).
-
-restart(Pid, IdxState) ->
- gen_server:call(Pid, {restart, IdxState}).
-
-init({Index, Module}) ->
- process_flag(trap_exit, true),
- {ok, #st{idx = Index, mod = Module}}.
-
-terminate(_Reason, State) ->
- couch_util:shutdown_sync(State#st.pid),
- ok.
-
-handle_call({update, _IdxState}, _From, #st{pid = Pid} = State) when is_pid(Pid) ->
- {reply, ok, State};
-handle_call({update, IdxState}, _From, #st{idx = Idx, mod = Mod} = State) ->
- Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:info("Starting index update for db: ~s idx: ~s", Args),
- Pid = spawn_link(?MODULE, update, [Idx, Mod, IdxState]),
- {reply, ok, State#st{pid = Pid}};
-handle_call({restart, IdxState}, _From, #st{idx = Idx, mod = Mod} = State) ->
- Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:info("Restarting index update for db: ~s idx: ~s", Args),
- Pid = State#st.pid,
- case is_pid(Pid) of
- true -> couch_util:shutdown_sync(State#st.pid);
- _ -> ok
- end,
- % Make sure to flush a possible 'EXIT' message

- % that's already in our mailbox
- receive
- {'EXIT', Pid, _} -> ok
- after 0 ->
- ok
- end,
- NewPid = spawn_link(?MODULE, update, [Idx, State#st.mod, IdxState]),
- {reply, ok, State#st{pid = NewPid}};
-handle_call(is_running, _From, #st{pid = Pid} = State) when is_pid(Pid) ->
- {reply, true, State};
-handle_call(is_running, _From, State) ->
- {reply, false, State}.
-
-handle_cast(_Mesg, State) ->
- {stop, unknown_cast, State}.
-
-handle_info({'EXIT', _, {updated, Pid, IdxState}}, #st{pid = Pid} = State) ->
- Mod = State#st.mod,
- Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:info("Index update finished for db: ~s idx: ~s", Args),
- ok = gen_server:cast(State#st.idx, {updated, IdxState}),
- {noreply, State#st{pid = undefined}};
-handle_info({'EXIT', _, {reset, Pid}}, #st{idx = Idx, pid = Pid} = State) ->
- {ok, NewIdxState} = gen_server:call(State#st.idx, reset),
- Pid2 = spawn_link(?MODULE, update, [Idx, State#st.mod, NewIdxState]),
- {noreply, State#st{pid = Pid2}};
-handle_info({'EXIT', Pid, normal}, #st{pid = Pid} = State) ->
- {noreply, State#st{pid = undefined}};
-handle_info({'EXIT', Pid, {{nocatch, Error}, _Trace}}, State) ->
- handle_info({'EXIT', Pid, Error}, State);
-handle_info({'EXIT', Pid, Error}, #st{pid = Pid} = State) ->
- ok = gen_server:cast(State#st.idx, {update_error, Error}),
- {noreply, State#st{pid = undefined}};
-handle_info({'EXIT', Pid, _Reason}, #st{idx = Pid} = State) ->
- {stop, normal, State};
-handle_info({'EXIT', _Pid, normal}, State) ->
- {noreply, State};
-handle_info(_Mesg, State) ->
- {stop, unknown_info, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-update(Idx, Mod, IdxState) ->
- DbName = Mod:get(db_name, IdxState),
- IndexName = Mod:get(idx_name, IdxState),
- erlang:put(io_priority, {view_update, DbName, IndexName}),
- CurrSeq = Mod:get(update_seq, IdxState),
- UpdateOpts = Mod:get(update_options, IdxState),
- CommittedOnly = lists:member(committed_only, UpdateOpts),
- IncludeDesign = lists:member(include_design, UpdateOpts),
- DocOpts =
- case lists:member(local_seq, UpdateOpts) of
- true -> [conflicts, deleted_conflicts, local_seq];
- _ -> [conflicts, deleted_conflicts]
- end,
-
- couch_util:with_db(DbName, fun(Db) ->
- DbUpdateSeq = couch_db:get_update_seq(Db),
- DbCommittedSeq = couch_db:get_committed_update_seq(Db),
-
- NumUpdateChanges = couch_db:count_changes_since(Db, CurrSeq),
- NumPurgeChanges = count_pending_purged_docs_since(Db, Mod, IdxState),
- TotalChanges = NumUpdateChanges + NumPurgeChanges,
- {ok, PurgedIdxState} = purge_index(Db, Mod, IdxState),
-
- GetSeq = fun
- (#full_doc_info{update_seq = Seq}) -> Seq;
- (#doc_info{high_seq = Seq}) -> Seq
- end,
-
- GetInfo = fun
- (#full_doc_info{id = Id, update_seq = Seq, deleted = Del} = FDI) ->
- {Id, Seq, Del, couch_doc:to_doc_info(FDI)};
- (#doc_info{id = Id, high_seq = Seq, revs = [RI | _]} = DI) ->
- {Id, Seq, RI#rev_info.deleted, DI}
- end,
-
- LoadDoc = fun(DI) ->
- {DocId, Seq, Deleted, DocInfo} = GetInfo(DI),
-
- case {IncludeDesign, DocId} of
- {false, <<"_design/", _/binary>>} ->
- {nil, Seq};
- _ when Deleted ->
- {#doc{id = DocId, deleted = true}, Seq};
- _ ->
- {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
- {Doc, Seq}
- end
- end,
-
- Proc = fun(DocInfo, {IdxStateAcc, _}) ->
- case CommittedOnly and (GetSeq(DocInfo) > DbCommittedSeq) of
- true ->
- {stop, {IdxStateAcc, false}};
- false ->
- {Doc, Seq} = LoadDoc(DocInfo),
- {ok, NewSt} = Mod:process_doc(Doc, Seq, IdxStateAcc),
- garbage_collect(),
- {ok, {NewSt, true}}
- end
- end,
- {ok, InitIdxState} = Mod:start_update(
- Idx,
- PurgedIdxState,
- TotalChanges,
- NumPurgeChanges
- ),
-
- Acc0 = {InitIdxState, true},
- {ok, Acc} = couch_db:fold_changes(Db, CurrSeq, Proc, Acc0, []),
- {ProcIdxSt, SendLast} = Acc,
-
- % If we didn't bail due to hitting the last committed seq we need
- % to send our last update_seq through.
- {ok, LastIdxSt} =
- case SendLast of
- true ->
- Mod:process_doc(nil, DbUpdateSeq, ProcIdxSt);
- _ ->
- {ok, ProcIdxSt}
- end,
-
- {ok, FinalIdxState} = Mod:finish_update(LastIdxSt),
- exit({updated, self(), FinalIdxState})
- end).
-
-purge_index(Db, Mod, IdxState) ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- IdxPurgeSeq = Mod:get(purge_seq, IdxState),
- if
- IdxPurgeSeq == DbPurgeSeq ->
- {ok, IdxState};
- true ->
- FoldFun = fun({PurgeSeq, _UUId, Id, Revs}, Acc) ->
- Mod:purge(Db, PurgeSeq, [{Id, Revs}], Acc)
- end,
- {ok, NewStateAcc} =
- try
- couch_db:fold_purge_infos(
- Db,
- IdxPurgeSeq,
- FoldFun,
- IdxState,
- []
- )
- catch
- error:{invalid_start_purge_seq, _} ->
- exit({reset, self()})
- end,
- Mod:update_local_purge_doc(Db, NewStateAcc),
- {ok, NewStateAcc}
- end.
-
-count_pending_purged_docs_since(Db, Mod, IdxState) ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- IdxPurgeSeq = Mod:get(purge_seq, IdxState),
- DbPurgeSeq - IdxPurgeSeq.
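The updater above hands the actual indexing work to a spawned, linked worker and learns the outcome from the worker's exit reason ({updated, Pid, FinalState}, {reset, Pid}, or an error term), which is why init/1 traps exits. A reduced sketch of that protocol, with do_update/0 standing in as a hypothetical work function:

    process_flag(trap_exit, true),
    Worker = spawn_link(fun() ->
        FinalState = do_update(),            % hypothetical helper
        exit({updated, self(), FinalState})  % success is reported via exit/1
    end),
    receive
        {'EXIT', Worker, {updated, Worker, St}} -> {ok, St};
        {'EXIT', Worker, Reason}                -> {error, Reason}
    end.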
diff --git a/src/couch_index/src/couch_index_util.erl b/src/couch_index/src/couch_index_util.erl
deleted file mode 100644
index 3a7d283bf..000000000
--- a/src/couch_index/src/couch_index_util.erl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_util).
-
--export([root_dir/0, index_dir/2, index_file/3]).
--export([load_doc/3, sort_lib/1, hexsig/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-root_dir() ->
- config:get("couchdb", "view_index_dir").
-
-index_dir(Module, DbName) when is_binary(DbName) ->
- DbDir = "." ++ binary_to_list(DbName) ++ "_design",
- filename:join([root_dir(), DbDir, Module]);
-index_dir(Module, Db) ->
- index_dir(Module, couch_db:name(Db)).
-
-index_file(Module, DbName, FileName) ->
- filename:join(index_dir(Module, DbName), FileName).
-
-load_doc(Db, #doc_info{} = DI, Opts) ->
- Deleted = lists:member(deleted, Opts),
- case (catch couch_db:open_doc(Db, DI, Opts)) of
- {ok, #doc{deleted = false} = Doc} -> Doc;
- {ok, #doc{deleted = true} = Doc} when Deleted -> Doc;
- _Else -> null
- end;
-load_doc(Db, {DocId, Rev}, Opts) ->
- case (catch load_doc(Db, DocId, Rev, Opts)) of
- #doc{deleted = false} = Doc -> Doc;
- _ -> null
- end.
-
-load_doc(Db, DocId, Rev, Options) ->
- case Rev of
- % open most recent rev
- nil ->
- case (catch couch_db:open_doc(Db, DocId, Options)) of
- {ok, Doc} -> Doc;
- _Error -> null
- end;
- % open a specific rev (deletions come back as stubs)
- _ ->
- case (catch couch_db:open_doc_revs(Db, DocId, [Rev], Options)) of
- {ok, [{ok, Doc}]} -> Doc;
- {ok, [{{not_found, missing}, Rev}]} -> null;
- {ok, [_Else]} -> null
- end
- end.
-
-sort_lib({Lib}) ->
- sort_lib(Lib, []).
-sort_lib([], LAcc) ->
- lists:keysort(1, LAcc);
-sort_lib([{LName, {LObj}} | Rest], LAcc) ->
- % descend into nested object
- LSorted = sort_lib(LObj, []),
- sort_lib(Rest, [{LName, LSorted} | LAcc]);
-sort_lib([{LName, LCode} | Rest], LAcc) ->
- sort_lib(Rest, [{LName, LCode} | LAcc]).
-
-hexsig(Sig) ->
- couch_util:to_hex(binary_to_list(Sig)).
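index_dir/2 and index_file/3 above derive the on-disk location from the configured view_index_dir plus a hidden ".<dbname>_design" directory per database. With [couchdb] view_index_dir = /srv/couchdb (an assumed value) and an illustrative file name, the layout looks roughly like:

    %% index_file(mrview, <<"mydb">>, "abc123.view")
    %% => "/srv/couchdb/.mydb_design/mrview/abc123.view"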
diff --git a/src/couch_index/test/eunit/couch_index_compaction_tests.erl b/src/couch_index/test/eunit/couch_index_compaction_tests.erl
deleted file mode 100644
index fee04fd9c..000000000
--- a/src/couch_index/test/eunit/couch_index_compaction_tests.erl
+++ /dev/null
@@ -1,123 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_compaction_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(WAIT_TIMEOUT, 1000).
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- meck:new([test_index], [non_strict]),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
- fake_index(DbName),
- {ok, IndexerPid} = couch_index_server:get_index(test_index, Db, undefined),
- ?assertNot(is_opened(Db)),
- {Db, IndexerPid}.
-
-fake_index(DbName) ->
- ok = meck:expect(test_index, init, ['_', '_'], {ok, 10}),
- ok = meck:expect(test_index, open, fun(_Db, State) ->
- {ok, State}
- end),
- ok = meck:expect(
- test_index,
- compact,
- ['_', '_', '_'],
- %% to trigger recompaction
- meck:seq([{ok, 9}, {ok, 10}])
- ),
- ok = meck:expect(test_index, commit, ['_'], ok),
- ok = meck:expect(test_index, get, fun
- (db_name, _) ->
- DbName;
- (idx_name, _) ->
- <<"idx_name">>;
- (signature, _) ->
- <<61, 237, 157, 230, 136, 93, 96, 201, 204, 17, 137, 186, 50, 249, 44, 135>>;
- (update_seq, Seq) ->
- Seq
- end),
- ok = meck:expect(test_index, close, ['_'], ok),
- ok = meck:expect(test_index, swap_compacted, fun(_, NewState) ->
- {ok, NewState}
- end).
-
-teardown(_) ->
- ok.
-
-compaction_test_() ->
- {
- "Check compaction",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun hold_db_for_recompaction/1
- ]
- }
- }
- }.
-
-hold_db_for_recompaction({Db, Idx}) ->
- ?_test(begin
- ?assertNot(is_opened(Db)),
- ok = meck:reset(test_index),
- {ok, Monitor} = couch_index:compact(Idx, [monitor]),
-
- %% we expect Mod:commit/1 to be called twice
- %% once for compact and once for recompact
- meck:wait(2, test_index, commit, ['_'], 5000),
- ?assertEqual(1, meck:num_calls(test_index, compact, ['_', '_', []])),
- ?assertEqual(1, meck:num_calls(test_index, compact, ['_', '_', [recompact]])),
-
- %% wait for compaction to finish
- receive
- {'DOWN', Monitor, _, _, _} -> ok
- after 5000 ->
- throw(timeout)
- end,
-
- ?assertEqual(ok, wait_db_close(Db)),
- ok
- end).
-
-wait_db_close(Db) ->
- test_util:wait(
- fun() ->
- case is_opened(Db) of
- false -> ok;
- true -> wait
- end
- end,
- ?WAIT_TIMEOUT
- ).
-
-is_opened(Db) ->
- Monitors = [M || M <- couch_db:monitored_by(Db), M =/= self()],
- Monitors /= [].
diff --git a/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
deleted file mode 100644
index 3af58d2fc..000000000
--- a/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_ddoc_updated_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-start() ->
- fake_index(),
- Ctx = test_util:start_couch([mem3, fabric]),
- DbName = ?tempdb(),
- ok = fabric:create_db(DbName, [?ADMIN_CTX]),
- {Ctx, DbName}.
-
-stop({Ctx, DbName}) ->
- meck:unload(test_index),
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]),
- DbDir = config:get("couchdb", "database_dir", "."),
- WaitFun = fun() ->
- filelib:fold_files(
- DbDir,
- <<".*", DbName/binary, "\.[0-9]+.*">>,
- true,
- fun(_F, _A) -> wait end,
- ok
- )
- end,
- ok = test_util:wait(WaitFun),
- test_util:stop_couch(Ctx),
- ok.
-
-ddoc_update_test_() ->
- {
- "Check ddoc update actions",
- {
- setup,
- fun start/0,
- fun stop/1,
- fun check_all_indexers_exit_on_ddoc_change/1
- }
- }.
-
-check_all_indexers_exit_on_ddoc_change({_Ctx, DbName}) ->
- ?_test(begin
- [DbShard1 | RestDbShards] = lists:map(
- fun(Sh) ->
- {ok, ShardDb} = couch_db:open(mem3:name(Sh), []),
- ShardDb
- end,
- mem3:local_shards(mem3:dbname(DbName))
- ),
-
- % create a DDoc on Db1
- DDocID = <<"idx_name">>,
- DDocJson = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDocID},
- {<<"value">>, 1}
- ]}
- ),
- {ok, _Rev} = couch_db:update_doc(DbShard1, DDocJson, []),
- {ok, DbShard} = couch_db:reopen(DbShard1),
- {ok, DDoc} = couch_db:open_doc(
- DbShard, DDocID, [ejson_body, ?ADMIN_CTX]
- ),
- DbShards = [DbShard | RestDbShards],
- N = length(DbShards),
-
- % run couch_index process for each shard database
- ok = meck:reset(test_index),
- lists:foreach(
- fun(ShardDb) ->
- couch_index_server:get_index(test_index, ShardDb, DDoc)
- end,
- DbShards
- ),
-
- IndexesBefore = get_indexes_by_ddoc(DDocID, N),
- ?assertEqual(N, length(IndexesBefore)),
-
- AliveBefore = lists:filter(fun erlang:is_process_alive/1, IndexesBefore),
- ?assertEqual(N, length(AliveBefore)),
-
- % update ddoc
- DDocJson2 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDocID},
- {<<"value">>, 2},
- {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)}
- ]}
- ),
- {ok, _} = couch_db:update_doc(DbShard, DDocJson2, []),
-
- % assert that all index processes exit after the ddoc is updated
- ok = meck:reset(test_index),
- lists:foreach(
- fun(I) ->
- couch_index_server:handle_db_event(
- couch_db:name(DbShard),
- {ddoc_updated, DDocID},
- {st, "", couch_index_server:server_name(I), couch_index_server:by_sig(I),
- couch_index_server:by_pid(I), couch_index_server:by_db(I)}
- )
- end,
- seq()
- ),
-
- ok = meck:wait(N, test_index, init, ['_', '_'], 5000),
- IndexesAfter = get_indexes_by_ddoc(DDocID, 0),
- ?assertEqual(0, length(IndexesAfter)),
-
- %% assert that previously running indexes are gone
- AliveAfter = lists:filter(fun erlang:is_process_alive/1, IndexesBefore),
- ?assertEqual(0, length(AliveAfter)),
- ok
- end).
-
-fake_index() ->
- ok = meck:new([test_index], [non_strict]),
- ok = meck:expect(test_index, init, fun(Db, DDoc) ->
- {ok, {couch_db:name(Db), DDoc}}
- end),
- ok = meck:expect(test_index, open, fun(_Db, State) ->
- {ok, State}
- end),
- ok = meck:expect(test_index, get, fun
- (db_name, {DbName, _DDoc}) ->
- DbName;
- (idx_name, {_DbName, DDoc}) ->
- DDoc#doc.id;
- (signature, {_DbName, DDoc}) ->
- couch_hash:md5_hash(term_to_binary(DDoc));
- (update_seq, Seq) ->
- Seq
- end),
- ok = meck:expect(test_index, shutdown, ['_'], ok).
-
-get_indexes_by_ddoc(DDocID, N) ->
- Indexes = test_util:wait(fun() ->
- Indxs = lists:flatmap(
- fun(I) ->
- ets:match_object(
- couch_index_server:by_db(I), {'$1', {DDocID, '$2'}}
- )
- end,
- seq()
- ),
- case length(Indxs) == N of
- true ->
- Indxs;
- false ->
- wait
- end
- end),
- lists:foldl(
- fun({DbName, {_DDocID, Sig}}, Acc) ->
- case ets:lookup(couch_index_server:by_sig(DbName), {DbName, Sig}) of
- [{_, Pid}] -> [Pid | Acc];
- _ -> Acc
- end
- end,
- [],
- Indexes
- ).
-
-seq() ->
- lists:seq(1, couch_index_server:num_servers()).
diff --git a/src/couch_log/.gitignore b/src/couch_log/.gitignore
deleted file mode 100644
index e24db8ab4..000000000
--- a/src/couch_log/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/ebin
-.eunit
-.rebar
diff --git a/src/couch_log/LICENSE b/src/couch_log/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_log/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_log/include/couch_log.hrl b/src/couch_log/include/couch_log.hrl
deleted file mode 100644
index fa544a88b..000000000
--- a/src/couch_log/include/couch_log.hrl
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(log_entry, {
- level,
- pid,
- msg,
- msg_id,
- time_stamp
-}).
-
-
--define(COUCH_LOG_TEST_TABLE, couch_log_test_table).
diff --git a/src/couch_log/priv/stats_descriptions.cfg b/src/couch_log/priv/stats_descriptions.cfg
deleted file mode 100644
index 31e41614b..000000000
--- a/src/couch_log/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,48 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-% Style guide for descriptions: Start with a lowercase letter & do not add
-% a trailing full-stop / period
-% Please keep this in alphabetical order
-
-{[couch_log, level, alert], [
- {type, counter},
- {desc, <<"number of logged alert messages">>}
-]}.
-{[couch_log, level, critical], [
- {type, counter},
- {desc, <<"number of logged critical messages">>}
-]}.
-{[couch_log, level, debug], [
- {type, counter},
- {desc, <<"number of logged debug messages">>}
-]}.
-{[couch_log, level, emergency], [
- {type, counter},
- {desc, <<"number of logged emergency messages">>}
-]}.
-{[couch_log, level, error], [
- {type, counter},
- {desc, <<"number of logged error messages">>}
-]}.
-{[couch_log, level, info], [
- {type, counter},
- {desc, <<"number of logged info messages">>}
-]}.
-{[couch_log, level, notice], [
- {type, counter},
- {desc, <<"number of logged notice messages">>}
-]}.
-{[couch_log, level, warning], [
- {type, counter},
- {desc, <<"number of logged warning messages">>}
-]}.
diff --git a/src/couch_log/rebar.config b/src/couch_log/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/couch_log/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/couch_log/src/couch_log.app.src b/src/couch_log/src/couch_log.app.src
deleted file mode 100644
index 50adfe646..000000000
--- a/src/couch_log/src/couch_log.app.src
+++ /dev/null
@@ -1,19 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_log, [
- {description, "CouchDB Log API"},
- {vsn, git},
- {registered, [couch_log_sup]},
- {applications, [kernel, stdlib, config]},
- {mod, {couch_log_app, []}}
-]}.
diff --git a/src/couch_log/src/couch_log.erl b/src/couch_log/src/couch_log.erl
deleted file mode 100644
index b8a1ca4bd..000000000
--- a/src/couch_log/src/couch_log.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log).
-
--export([
- debug/2,
- info/2,
- notice/2,
- warning/2,
- error/2,
- critical/2,
- alert/2,
- emergency/2,
-
- set_level/1
-]).
-
--spec debug(string(), list()) -> ok.
-debug(Fmt, Args) -> log(debug, Fmt, Args).
-
--spec info(string(), list()) -> ok.
-info(Fmt, Args) -> log(info, Fmt, Args).
-
--spec notice(string(), list()) -> ok.
-notice(Fmt, Args) -> log(notice, Fmt, Args).
-
--spec warning(string(), list()) -> ok.
-warning(Fmt, Args) -> log(warning, Fmt, Args).
-
--spec error(string(), list()) -> ok.
-error(Fmt, Args) -> log(error, Fmt, Args).
-
--spec critical(string(), list()) -> ok.
-critical(Fmt, Args) -> log(critical, Fmt, Args).
-
--spec alert(string(), list()) -> ok.
-alert(Fmt, Args) -> log(alert, Fmt, Args).
-
--spec emergency(string(), list()) -> ok.
-emergency(Fmt, Args) -> log(emergency, Fmt, Args).
-
--spec set_level(atom() | string() | integer()) -> true.
-set_level(Level) ->
- config:set("log", "level", couch_log_util:level_to_string(Level)).
-
--spec log(atom(), string(), list()) -> ok.
-log(Level, Fmt, Args) ->
- case couch_log_util:should_log(Level) of
- true ->
- couch_stats:increment_counter([couch_log, level, Level]),
- Entry = couch_log_formatter:format(Level, self(), Fmt, Args),
- ok = couch_log_server:log(Entry);
- false ->
- ok
- end.
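Callers use this API like io:format/2, and log/3 drops anything below the configured [log] level before it is formatted or handed to couch_log_server. A couple of illustrative call sites (message text and values are made up):

    couch_log:info("compaction of ~s finished in ~b ms", [<<"mydb">>, 1523]),
    couch_log:error("failed to open ~s: ~p", ["/tmp/x", enoent]).
    %% debug/notice/warning/... follow the same pattern; set_level("debug")
    %% adjusts the threshold at runtime through the config application.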
diff --git a/src/couch_log/src/couch_log_app.erl b/src/couch_log/src/couch_log_app.erl
deleted file mode 100644
index 28c8bb193..000000000
--- a/src/couch_log/src/couch_log_app.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_app).
-
--behaviour(application).
-
--export([start/2, stop/1]).
-
-start(_Type, _StartArgs) ->
- couch_log_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/couch_log/src/couch_log_config.erl b/src/couch_log/src/couch_log_config.erl
deleted file mode 100644
index 925973178..000000000
--- a/src/couch_log/src/couch_log_config.erl
+++ /dev/null
@@ -1,120 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% Based on Bob Ippolito's mochiglobal.erl
-
--module(couch_log_config).
-
--export([
- init/0,
- reconfigure/0,
- get/1
-]).
-
--define(MOD_NAME, couch_log_config_dyn).
--define(ERL_FILE, "couch_log_config_dyn.erl").
-
--spec init() -> ok.
-init() ->
- reconfigure().
-
--spec reconfigure() -> ok.
-reconfigure() ->
- {ok, ?MOD_NAME, Bin} = compile:forms(forms(), [verbose, report_errors]),
- code:purge(?MOD_NAME),
- {module, ?MOD_NAME} = code:load_binary(?MOD_NAME, ?ERL_FILE, Bin),
- ok.
-
--spec get(atom()) -> term().
-get(Key) ->
- ?MOD_NAME:get(Key).
-
--spec entries() -> [string()].
-entries() ->
- [
- {level, "level", "info"},
- {level_int, "level", "info"},
- {max_message_size, "max_message_size", "16000"},
- {strip_last_msg, "strip_last_msg", "true"},
- {filter_fields, "filter_fields", "[pid, registered_name, error_info, messages]"}
- ].
-
--spec forms() -> [erl_syntax:syntaxTree()].
-forms() ->
- GetFunClauses = lists:map(
- fun({FunKey, CfgKey, Default}) ->
- FunVal = transform(FunKey, config:get("log", CfgKey, Default)),
- Patterns = [erl_syntax:abstract(FunKey)],
- Bodies = [erl_syntax:abstract(FunVal)],
- erl_syntax:clause(Patterns, none, Bodies)
- end,
- entries()
- ),
-
- Statements = [
- % -module(?MOD_NAME)
- erl_syntax:attribute(
- erl_syntax:atom(module),
- [erl_syntax:atom(?MOD_NAME)]
- ),
-
- % -export([get/1]).
- erl_syntax:attribute(
- erl_syntax:atom(export),
- [
- erl_syntax:list([
- erl_syntax:arity_qualifier(
- erl_syntax:atom(get),
- erl_syntax:integer(1)
- )
- ])
- ]
- ),
-
- % get(Key) -> Value.
- erl_syntax:function(erl_syntax:atom(get), GetFunClauses)
- ],
- [erl_syntax:revert(X) || X <- Statements].
-
-transform(level, LevelStr) ->
- couch_log_util:level_to_atom(LevelStr);
-transform(level_int, LevelStr) ->
- Level = couch_log_util:level_to_atom(LevelStr),
- couch_log_util:level_to_integer(Level);
-transform(max_message_size, SizeStr) ->
- try list_to_integer(SizeStr) of
- Size -> Size
- catch
- _:_ ->
- 16000
- end;
-transform(strip_last_msg, "false") ->
- false;
-transform(strip_last_msg, _) ->
- true;
-transform(filter_fields, FieldsStr) ->
- Default = [pid, registered_name, error_info, messages],
- case parse_term(FieldsStr) of
- {ok, List} when is_list(List) ->
- case lists:all(fun erlang:is_atom/1, List) of
- true ->
- List;
- false ->
- Default
- end;
- _ ->
- Default
- end.
-
-parse_term(List) ->
- {ok, Tokens, _} = erl_scan:string(List ++ "."),
- erl_parse:parse_term(Tokens).
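couch_log_config caches settings by compiling a small constants module at runtime (the mochiglobal technique), so every read is a plain local function call with no ETS lookup or message passing. A stripped-down sketch of the same trick using raw abstract forms instead of erl_syntax; the module name my_config_dyn and the single 'level' key are made up for illustration:

    Forms = [
        {attribute, 1, module, my_config_dyn},
        {attribute, 2, export, [{get, 1}]},
        {function, 3, get, 1,
            [{clause, 3, [{atom, 3, level}], [], [{atom, 3, info}]}]}
    ],
    {ok, my_config_dyn, Bin} = compile:forms(Forms, [verbose, report_errors]),
    code:purge(my_config_dyn),
    {module, my_config_dyn} = code:load_binary(my_config_dyn, "my_config_dyn.erl", Bin),
    info = my_config_dyn:get(level).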
diff --git a/src/couch_log/src/couch_log_config_dyn.erl b/src/couch_log/src/couch_log_config_dyn.erl
deleted file mode 100644
index ff781d3a0..000000000
--- a/src/couch_log/src/couch_log_config_dyn.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% This module gets replaced at runtime with a dynamically
-% compiled version so don't rely on these default's making
-% sense. They only mirror what's in the default.ini checked
-% into the root Apache CouchDB Git repository.
-
--module(couch_log_config_dyn).
-
--export([
- get/1
-]).
-
-get(level) -> info;
-get(level_int) -> 2;
-get(max_message_size) -> 16000;
-get(strip_last_msg) -> true;
-get(filter_fields) -> [pid, registered_name, error_info, messages].
diff --git a/src/couch_log/src/couch_log_error_logger_h.erl b/src/couch_log/src/couch_log_error_logger_h.erl
deleted file mode 100644
index ff7ae045f..000000000
--- a/src/couch_log/src/couch_log_error_logger_h.erl
+++ /dev/null
@@ -1,48 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% This file is primarily based on error_logger_lager_h.erl from
-% https://github.com/basho/lager which is available under the
-% above marked ASFL v2 license.
-
--module(couch_log_error_logger_h).
-
--behaviour(gen_event).
-
--export([
- init/1,
- terminate/2,
- handle_call/2,
- handle_event/2,
- handle_info/2,
- code_change/3
-]).
-
-init(_) ->
- {ok, undefined}.
-
-terminate(_Reason, _St) ->
- ok.
-
-handle_call(_, St) ->
- {ok, ignored, St}.
-
-handle_event(Event, St) ->
- Entry = couch_log_formatter:format(Event),
- ok = couch_log_server:log(Entry),
- {ok, St}.
-
-handle_info(_, St) ->
- {ok, St}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch_log/src/couch_log_formatter.erl b/src/couch_log/src/couch_log_formatter.erl
deleted file mode 100644
index 2ce0fba6d..000000000
--- a/src/couch_log/src/couch_log_formatter.erl
+++ /dev/null
@@ -1,438 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% @doc The formatting functions in this module are pulled
-% from lager's error_logger_lager_h.erl which is available
-% under the ASFv2 license.
-
--module(couch_log_formatter).
-
--export([
- format/4,
- format/3,
- format/1,
-
- format_reason/1,
- format_mfa/1,
- format_trace/1,
- format_args/3
-]).
-
--include("couch_log.hrl").
-
--define(DEFAULT_TRUNCATION, 1024).
-
-format(Level, Pid, Fmt, Args) ->
- #log_entry{
- level = couch_log_util:level_to_atom(Level),
- pid = Pid,
- msg = maybe_truncate(Fmt, Args),
- msg_id = couch_log_util:get_msg_id(),
- time_stamp = couch_log_util:iso8601_timestamp()
- }.
-
-format(Level, Pid, Msg) ->
- #log_entry{
- level = couch_log_util:level_to_atom(Level),
- pid = Pid,
- msg = maybe_truncate(Msg),
- msg_id = couch_log_util:get_msg_id(),
- time_stamp = couch_log_util:iso8601_timestamp()
- }.
-
-format(Event) ->
- try
- do_format(Event)
- catch
- Tag:Err ->
- Msg = "Encountered error ~w when formatting ~w",
- format(error, self(), Msg, [{Tag, Err}, Event])
- end.
-
-do_format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
- %% gen_server terminate
- [Name, LastMsg0, State, Reason | Extra] = Args,
- LastMsg =
- case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt =
- "gen_server ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p~n extra: ~p",
- MsgArgs = [Name, format_reason(Reason), LastMsg, State, Extra],
- format(error, Pid, MsgFmt, MsgArgs);
-do_format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
- %% gen_fsm terminate
- [Name, LastMsg0, StateName, State, Reason | Extra] = Args,
- LastMsg =
- case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt =
- "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p~n extra: ~p",
- MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State, Extra],
- format(error, Pid, MsgFmt, MsgArgs);
-do_format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
- %% gen_event handler terminate
- [ID, Name, LastMsg0, State, Reason] = Args,
- LastMsg =
- case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt =
- "gen_event ~w installed in ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p",
- MsgArgs = [ID, Name, format_reason(Reason), LastMsg, State],
- format(error, Pid, MsgFmt, MsgArgs);
-do_format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) ->
- % These messages are for whenever any process exits due
- % to a throw or error. We intercept here to remove the
- % extra newlines.
- NewMsg = lists:sublist(Msg, length(Msg) - 1),
- format(error, emulator, NewMsg);
-do_format({error, _GL, {Pid, Fmt, Args}}) ->
- format(error, Pid, Fmt, Args);
-do_format({error_report, _GL, {Pid, std_error, D}}) ->
- format(error, Pid, print_silly_list(D));
-do_format({error_report, _GL, {Pid, supervisor_report, D}}) ->
- case lists:sort(D) of
- [
- {errorContext, Ctx},
- {offender, Off},
- {reason, Reason},
- {supervisor, Name}
- ] ->
- Offender = format_offender(Off),
- MsgFmt =
- "Supervisor ~w had child ~s exit " ++
- "with reason ~s in context ~w",
- Args = [
- supervisor_name(Name),
- Offender,
- format_reason(Reason),
- Ctx
- ],
- format(error, Pid, MsgFmt, Args);
- _ ->
- format(error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D))
- end;
-do_format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) ->
- Msg = "CRASH REPORT " ++ format_crash_report(Report, Neighbors),
- format(error, Pid, Msg);
-do_format({warning_msg, _GL, {Pid, Fmt, Args}}) ->
- format(warning, Pid, Fmt, Args);
-do_format({warning_report, _GL, {Pid, std_warning, Report}}) ->
- format(warning, Pid, print_silly_list(Report));
-do_format({info_msg, _GL, {Pid, Fmt, Args}}) ->
- format(info, Pid, Fmt, Args);
-do_format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) ->
- case lists:sort(D) of
- [{application, App}, {exited, Reason}, {type, _Type}] ->
- MsgFmt = "Application ~w exited with reason: ~s",
- format(info, Pid, MsgFmt, [App, format_reason(Reason)]);
- _ ->
- format(info, Pid, print_silly_list(D))
- end;
-do_format({info_report, _GL, {Pid, std_info, D}}) ->
- format(info, Pid, "~w", [D]);
-do_format({info_report, _GL, {Pid, progress, D}}) ->
- case lists:sort(D) of
- [{application, App}, {started_at, Node}] ->
- MsgFmt = "Application ~w started on node ~w",
- format(info, Pid, MsgFmt, [App, Node]);
- [{started, Started}, {supervisor, Name}] ->
- MFA = format_mfa(get_value(mfargs, Started)),
- ChildPid = get_value(pid, Started),
- MsgFmt = "Supervisor ~w started ~s at pid ~w",
- format(debug, Pid, MsgFmt, [supervisor_name(Name), MFA, ChildPid]);
- _ ->
- format(info, Pid, "PROGRESS REPORT " ++ print_silly_list(D))
- end;
-do_format(Event) ->
- format(warning, self(), "Unexpected error_logger event ~w", [Event]).
-
-format_crash_report(Report, Neighbours) ->
- Pid = get_value(pid, Report),
- Name =
- case get_value(registered_name, Report) of
- undefined ->
- pid_to_list(Pid);
- Atom ->
- io_lib:format("~s (~w)", [Atom, Pid])
- end,
- {Class, Reason, Trace} = get_value(error_info, Report),
- ReasonStr = format_reason({Reason, Trace}),
- Type =
- case Class of
- exit -> "exited";
- _ -> "crashed"
- end,
- MsgFmt = "Process ~s with ~w neighbors ~s with reason: ~s",
- Args = [Name, length(Neighbours), Type, ReasonStr],
- Msg = io_lib:format(MsgFmt, Args),
- case filter_silly_list(Report) of
- [] ->
- Msg;
- Rest ->
- Msg ++ "; " ++ print_silly_list(Rest)
- end.
-
-format_offender(Off) ->
- case get_value(mfargs, Off) of
- undefined ->
- %% supervisor_bridge
- Args = [get_value(mod, Off), get_value(pid, Off)],
- io_lib:format("at module ~w at ~w", Args);
- MFArgs ->
- %% regular supervisor
- MFA = format_mfa(MFArgs),
-
- %% In 2014 the error report changed from `name' to
- %% `id', so try that first.
- Name =
- case get_value(id, Off) of
- undefined ->
- get_value(name, Off);
- Id ->
- Id
- end,
- Args = [Name, MFA, get_value(pid, Off)],
- io_lib:format("~p started with ~s at ~w", Args)
- end.
-
-format_reason({'function not exported', [{M, F, A} | Trace]}) ->
- [
- "call to unexported function ",
- format_mfa({M, F, A}),
- " at ",
- format_trace(Trace)
- ];
-format_reason({'function not exported' = C, [{M, F, A, _Props} | Rest]}) ->
- %% Drop line number from undefined function
- format_reason({C, [{M, F, A} | Rest]});
-format_reason({undef, [MFA | Trace]}) ->
- [
- "call to undefined function ",
- format_mfa(MFA),
- " at ",
- format_trace(Trace)
- ];
-format_reason({bad_return, {MFA, Val}}) ->
- ["bad return value ", print_val(Val), " from ", format_mfa(MFA)];
-format_reason({bad_return_value, Val}) ->
- ["bad return value ", print_val(Val)];
-format_reason({{bad_return_value, Val}, MFA}) ->
- ["bad return value ", print_val(Val), " at ", format_mfa(MFA)];
-format_reason({{badrecord, Record}, Trace}) ->
- ["bad record ", print_val(Record), " at ", format_trace(Trace)];
-format_reason({{case_clause, Val}, Trace}) ->
- ["no case clause matching ", print_val(Val), " at ", format_trace(Trace)];
-format_reason({function_clause, [MFA | Trace]}) ->
- [
- "no function clause matching ",
- format_mfa(MFA),
- " at ",
- format_trace(Trace)
- ];
-format_reason({if_clause, Trace}) ->
- [
- "no true branch found while evaluating if expression at ",
- format_trace(Trace)
- ];
-format_reason({{try_clause, Val}, Trace}) ->
- ["no try clause matching ", print_val(Val), " at ", format_trace(Trace)];
-format_reason({badarith, Trace}) ->
- ["bad arithmetic expression at ", format_trace(Trace)];
-format_reason({{badmatch, Val}, Trace}) ->
- [
- "no match of right hand value ",
- print_val(Val),
- " at ",
- format_trace(Trace)
- ];
-format_reason({emfile, Trace}) ->
- [
- "maximum number of file descriptors exhausted, check ulimit -n; ",
- format_trace(Trace)
- ];
-format_reason({system_limit, [{M, F, A} | Trace]}) ->
- Limit =
- case {M, F} of
- {erlang, open_port} ->
- "maximum number of ports exceeded";
- {erlang, spawn} ->
- "maximum number of processes exceeded";
- {erlang, spawn_opt} ->
- "maximum number of processes exceeded";
- {erlang, list_to_atom} ->
- "tried to create an atom larger than 255, or maximum atom count exceeded";
- {ets, new} ->
- "maximum number of ETS tables exceeded";
- _ ->
- format_mfa({M, F, A})
- end,
- ["system limit: ", Limit, " at ", format_trace(Trace)];
-format_reason({badarg, [MFA | Trace]}) ->
- [
- "bad argument in call to ",
- format_mfa(MFA),
- " at ",
- format_trace(Trace)
- ];
-format_reason({{badarg, Stack}, _}) ->
- format_reason({badarg, Stack});
-format_reason({{badarity, {Fun, Args}}, Trace}) ->
- {arity, Arity} = lists:keyfind(arity, 1, erlang:fun_info(Fun)),
- MsgFmt = "function called with wrong arity of ~w instead of ~w at ",
- [io_lib:format(MsgFmt, [length(Args), Arity]), format_trace(Trace)];
-format_reason({noproc, MFA}) ->
- ["no such process or port in call to ", format_mfa(MFA)];
-format_reason({{badfun, Term}, Trace}) ->
- ["bad function ", print_val(Term), " called at ", format_trace(Trace)];
-format_reason({Reason, [{M, F, A} | _] = Trace}) when
- is_atom(M), is_atom(F), is_integer(A)
-->
- [format_reason(Reason), " at ", format_trace(Trace)];
-format_reason({Reason, [{M, F, A} | _] = Trace}) when
- is_atom(M), is_atom(F), is_list(A)
-->
- [format_reason(Reason), " at ", format_trace(Trace)];
-format_reason({Reason, [{M, F, A, Props} | _] = Trace}) when
- is_atom(M), is_atom(F), is_integer(A), is_list(Props)
-->
- [format_reason(Reason), " at ", format_trace(Trace)];
-format_reason({Reason, [{M, F, A, Props} | _] = Trace}) when
- is_atom(M), is_atom(F), is_list(A), is_list(Props)
-->
- [format_reason(Reason), " at ", format_trace(Trace)];
-format_reason(Reason) ->
- {Str, _} = couch_log_trunc_io:print(Reason, 500),
- Str.
-
-format_mfa({M, F, A}) when is_list(A) ->
- {FmtStr, Args} = format_args(A, [], []),
- io_lib:format("~w:~w(" ++ FmtStr ++ ")", [M, F | Args]);
-format_mfa({M, F, A}) when is_integer(A) ->
- io_lib:format("~w:~w/~w", [M, F, A]);
-format_mfa({M, F, A, Props}) when is_list(Props) ->
- case get_value(line, Props) of
- undefined ->
- format_mfa({M, F, A});
- Line ->
- [format_mfa({M, F, A}), io_lib:format("(line:~w)", [Line])]
- end;
-format_mfa(Trace) when is_list(Trace) ->
- format_trace(Trace);
-format_mfa(Other) ->
- io_lib:format("~w", [Other]).
-
-format_trace([MFA]) ->
- [trace_mfa(MFA)];
-format_trace([MFA | Rest]) ->
- [trace_mfa(MFA), " <= ", format_trace(Rest)];
-format_trace(Other) ->
- io_lib:format("~w", [Other]).
-
-trace_mfa({M, F, A}) when is_list(A) ->
- format_mfa({M, F, length(A)});
-trace_mfa({M, F, A, Props}) when is_list(A) ->
- format_mfa({M, F, length(A), Props});
-trace_mfa(Other) ->
- format_mfa(Other).
-
-format_args([], FmtAcc, ArgsAcc) ->
- {string:join(lists:reverse(FmtAcc), ", "), lists:reverse(ArgsAcc)};
-format_args([H | T], FmtAcc, ArgsAcc) ->
- {Str, _} = couch_log_trunc_io:print(H, 100),
- format_args(T, ["~s" | FmtAcc], [Str | ArgsAcc]).
-
-maybe_truncate(Fmt, Args) ->
- MaxMsgSize = couch_log_config:get(max_message_size),
- couch_log_trunc_io:format(Fmt, Args, MaxMsgSize).
-
-maybe_truncate(Msg) ->
- MaxMsgSize = couch_log_config:get(max_message_size),
- case iolist_size(Msg) > MaxMsgSize of
- true ->
- MsgBin = iolist_to_binary(Msg),
- PrefixSize = MaxMsgSize - 3,
- <<Prefix:PrefixSize/binary, _/binary>> = MsgBin,
- [Prefix, "..."];
- false ->
- Msg
- end.
-
-print_silly_list(L) when is_list(L) ->
- case couch_log_util:string_p(L) of
- true ->
- couch_log_trunc_io:format("~s", [L], ?DEFAULT_TRUNCATION);
- _ ->
- print_silly_list(L, [], [])
- end;
-print_silly_list(L) ->
- {Str, _} = couch_log_trunc_io:print(L, ?DEFAULT_TRUNCATION),
- Str.
-
-print_silly_list([], Fmt, Acc) ->
- couch_log_trunc_io:format(
- string:join(lists:reverse(Fmt), ", "),
- lists:reverse(Acc),
- ?DEFAULT_TRUNCATION
- );
-print_silly_list([{K, V} | T], Fmt, Acc) ->
- print_silly_list(T, ["~p: ~p" | Fmt], [V, K | Acc]);
-print_silly_list([H | T], Fmt, Acc) ->
- print_silly_list(T, ["~p" | Fmt], [H | Acc]).
-
-print_val(Val) ->
- {Str, _} = couch_log_trunc_io:print(Val, 500),
- Str.
-
-filter_silly_list(KV) ->
- %% The complete list of fields is from here
- %% https://github.com/erlang/otp/blob/7ca7a6c59543db8a6d26b95ae434e61a044b0800/lib/stdlib/src/proc_lib.erl#L539:L553
- FilterFields = couch_log_config:get(filter_fields),
- filter_silly_list(KV, FilterFields).
-
-filter_silly_list([], _) ->
- [];
-filter_silly_list([{K, V} | T], Filter) ->
- case lists:member(K, Filter) of
- true ->
- filter_silly_list(T, Filter);
- false ->
- [{K, V} | filter_silly_list(T, Filter)]
- end;
-filter_silly_list([H | T], Filter) ->
- [H | filter_silly_list(T, Filter)].
-
-get_value(Key, Value) ->
- get_value(Key, Value, undefined).
-
-get_value(Key, List, Default) ->
- case lists:keyfind(Key, 1, List) of
- false -> Default;
- {Key, Value} -> Value
- end.
-
-supervisor_name({local, Name}) -> Name;
-supervisor_name(Name) -> Name.
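The reason/trace helpers above render crash data into readable strings, routing every term through couch_log_trunc_io so an oversized value cannot blow up the log line. Roughly what they produce (approximate; exact output depends on couch_log_trunc_io):

    %% format_mfa({lists, keyfind, 3})            -> "lists:keyfind/3"
    %% format_mfa({lists, keyfind, [key, 1, []]}) -> "lists:keyfind(key, 1, [])"
    %% format_reason({{badmatch, ok}, [{foo, bar, 1}]})
    %%   -> "no match of right hand value ok at foo:bar/1"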
diff --git a/src/couch_log/src/couch_log_monitor.erl b/src/couch_log/src/couch_log_monitor.erl
deleted file mode 100644
index b5ac0a844..000000000
--- a/src/couch_log/src/couch_log_monitor.erl
+++ /dev/null
@@ -1,69 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_monitor).
-
--behaviour(gen_server).
--vsn(1).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--define(HANDLER_MOD, couch_log_error_logger_h).
-
-start_link() ->
- gen_server:start_link(?MODULE, [], []).
-
-% OTP_RELEASE defined in OTP >= 21 only
--ifdef(OTP_RELEASE).
-
-init(_) ->
- % see https://erlang.org/doc/man/error_logger.html#add_report_handler-1
- ok = error_logger:add_report_handler(?HANDLER_MOD),
- ok = gen_event:add_sup_handler(error_logger, ?HANDLER_MOD, []),
- {ok, nil}.
-
--else.
-
-init(_) ->
- error_logger:start(),
- ok = gen_event:add_sup_handler(error_logger, ?HANDLER_MOD, []),
- {ok, nil}.
-
--endif.
-
-terminate(_, _) ->
- ok.
-
-handle_call(_Msg, _From, St) ->
- {reply, ignored, St}.
-
-handle_cast(_Msg, St) ->
- {noreply, St}.
-
-handle_info({gen_event_EXIT, ?HANDLER_MOD, Reason}, St) ->
- {stop, Reason, St};
-handle_info(_Msg, St) ->
- {noreply, St}.
-
-code_change(_, State, _) ->
- {ok, State}.
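This monitor exists because gen_event handlers are not supervised on their own: gen_event:add_sup_handler/3 makes the event manager send {gen_event_EXIT, Handler, Reason} to the installing process when the handler crashes or is removed, so stopping here lets the supervisor restart the monitor, which re-installs the handler. A minimal sketch of that contract, with my_handler standing in for the real handler module:

    ok = gen_event:add_sup_handler(error_logger, my_handler, []),
    receive
        {gen_event_EXIT, my_handler, Reason} ->
            exit(Reason)   % crash so a supervisor can re-install the handler
    end.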
diff --git a/src/couch_log/src/couch_log_server.erl b/src/couch_log/src/couch_log_server.erl
deleted file mode 100644
index 05cf92a75..000000000
--- a/src/couch_log/src/couch_log_server.erl
+++ /dev/null
@@ -1,92 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_server).
--behavior(gen_server).
-
--export([
- start_link/0,
- reconfigure/0,
- log/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--include("couch_log.hrl").
-
--record(st, {
- writer
-}).
-
--ifdef(TEST).
--define(SEND(Entry), gen_server:call(?MODULE, {log, Entry})).
--else.
--define(SEND(Entry), gen_server:cast(?MODULE, {log, Entry})).
--endif.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-reconfigure() ->
- gen_server:call(?MODULE, reconfigure).
-
-log(Entry) ->
- ?SEND(Entry).
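A hand-expanded sketch of ?SEND(Entry) shows the intent of the TEST/non-TEST split above: in TEST builds log/1 is a blocking gen_server:call, so a test knows the entry has been handled before it asserts on the result, while in production builds it is a cast, so callers never wait on the logger.

    %% hand-expanded equivalents of ?SEND(Entry), for illustration only
    log(Entry) -> gen_server:call(?MODULE, {log, Entry}).  % -ifdef(TEST)
    log(Entry) -> gen_server:cast(?MODULE, {log, Entry}).  % -else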
-
-init(_) ->
- couch_util:set_mqd_off_heap(?MODULE),
- process_flag(trap_exit, true),
- {ok, #st{
- writer = couch_log_writer:init()
- }}.
-
-terminate(Reason, St) ->
- ok = couch_log_writer:terminate(Reason, St#st.writer).
-
-handle_call(reconfigure, _From, St) ->
- ok = couch_log_writer:terminate(reconfiguring, St#st.writer),
- {reply, ok, St#st{
- writer = couch_log_writer:init()
- }};
-handle_call({log, Entry}, _From, St) ->
- % We re-check if we should log here in case an operator
- % adjusted the log level and then realized it was a bad
- % idea because it filled our message queue.
- case couch_log_util:should_log(Entry) of
- true ->
- NewWriter = couch_log_writer:write(Entry, St#st.writer),
- {reply, ok, St#st{writer = NewWriter}};
- false ->
- {reply, ok, St}
- end;
-handle_call(Ignore, From, St) ->
- Args = [?MODULE, Ignore],
- Entry = couch_log_formatter:format(error, ?MODULE, "~s ignored ~p", Args),
- handle_call({log, Entry}, From, St).
-
-handle_cast(Msg, St) ->
- {reply, ok, NewSt} = handle_call(Msg, nil, St),
- {noreply, NewSt}.
-
-handle_info(Msg, St) ->
- {reply, ok, NewSt} = handle_call(Msg, nil, St),
- {noreply, NewSt}.
-
-code_change(_Vsn, St, _Extra) ->
- {ok, St}.
diff --git a/src/couch_log/src/couch_log_sup.erl b/src/couch_log/src/couch_log_sup.erl
deleted file mode 100644
index 0167192d8..000000000
--- a/src/couch_log/src/couch_log_sup.erl
+++ /dev/null
@@ -1,93 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_sup).
-
--behaviour(supervisor).
--vsn(1).
--behaviour(config_listener).
-
--export([init/1]).
--export([start_link/0]).
--export([handle_config_change/5, handle_config_terminate/3]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
- ok = couch_log_config:init(),
- {ok, {{one_for_one, 10, 10}, children()}}.
-
-children() ->
- [
- {
- couch_log_server,
- {couch_log_server, start_link, []},
- permanent,
- 5000,
- worker,
- [couch_log_server]
- },
- {
- couch_log_monitor,
- {couch_log_monitor, start_link, []},
- permanent,
- 5000,
- worker,
- [couch_log_monitor]
- },
- {
- config_listener_mon,
- {config_listener_mon, start_link, [?MODULE, nil]},
- permanent,
- 5000,
- worker,
- [config_listener_mon]
- }
- ].
-
-handle_config_change("log", Key, _, _, S) ->
- case Key of
- "level" ->
- couch_log_config:reconfigure();
- "max_message_size" ->
- couch_log_config:reconfigure();
- "strip_last_msg" ->
- couch_log_config:reconfigure();
- "filter_fields" ->
- couch_log_config:reconfigure();
- _ ->
- % Someone may have changed the config for
- % the writer so we need to re-initialize.
- couch_log_server:reconfigure()
- end,
- notify_listeners(),
- {ok, S};
-handle_config_change(_, _, _, _, S) ->
- {ok, S}.
-
-handle_config_terminate(_Server, _Reason, _State) ->
- ok.
-
--ifdef(TEST).
-notify_listeners() ->
- Listeners = application:get_env(couch_log, config_listeners, []),
- lists:foreach(
- fun(L) ->
- L ! couch_log_config_change_finished
- end,
- Listeners
- ).
--else.
-notify_listeners() ->
- ok.
--endif.
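The TEST-only notify_listeners/0 above lets tests block until a log configuration change has been fully applied: every pid listed under the couch_log application's config_listeners environment key receives couch_log_config_change_finished after handle_config_change/5 runs. A minimal sketch of how a test could use it; the helper name and the TriggerFun indirection are illustrative, not taken from this module:

    wait_for_log_config_change(TriggerFun) ->
        Listeners = application:get_env(couch_log, config_listeners, []),
        ok = application:set_env(couch_log, config_listeners, [self() | Listeners]),
        TriggerFun(),  % e.g. change the "log"/"level" setting through the config app
        receive
            couch_log_config_change_finished -> ok
        after 5000 ->
            erlang:error(config_change_timeout)
        end.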
diff --git a/src/couch_log/src/couch_log_trunc_io.erl b/src/couch_log/src/couch_log_trunc_io.erl
deleted file mode 100644
index 9736e87e1..000000000
--- a/src/couch_log/src/couch_log_trunc_io.erl
+++ /dev/null
@@ -1,1105 +0,0 @@
-%% ``The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with your Erlang distribution. If not, it can be
-%% retrieved via the world wide web at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Initial Developer of the Original Code is Corelatus AB.
-%% Portions created by Corelatus are Copyright 2003, Corelatus
-%% AB. All Rights Reserved.''
-%%
-%% @doc Module to print out terms for logging. Limits by length rather than depth.
-%%
-%% The resulting string may be slightly larger than the limit; the intention
-%% is to provide predictable CPU and memory consumption for formatting
-%% terms, not produce precise string lengths.
-%%
-%% Typical use:
-%%
-%% trunc_io:print(Term, 500).
-%%
-%% Source license: Erlang Public License.
-%% Original author: Matthias Lang, <tt>matthias@corelatus.se</tt>
-%%
-%% Various changes to this module, most notably the format/3 implementation,
-%% were added by Andrew Thompson `<andrew@basho.com>'. The module has been renamed
-%% to avoid conflicts with the vanilla module.
-%%
-%% Module renamed to couch_log_trunc_io to avoid naming collisions with
-%% the lager version.
-
--module(couch_log_trunc_io).
--author('matthias@corelatus.se').
-%% And thanks to Chris Newcombe for a bug fix
-
-% interface functions
--export([format/3, format/4, print/2, print/3, fprint/2, fprint/3, safe/2]).
--version("$Id: trunc_io.erl,v 1.11 2009-02-23 12:01:06 matthias Exp $").
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
--endif.
-
--type option() ::
- {'depth', integer()}
- | {'lists_as_strings', boolean()}
- | {'force_strings', boolean()}.
--type options() :: [option()].
-
--record(print_options, {
- %% negative depth means no depth limiting
- depth = -1 :: integer(),
- %% whether to print lists as strings, if possible
- lists_as_strings = true :: boolean(),
- %% force strings, or binaries to be printed as a string,
- %% even if they're not printable
- force_strings = false :: boolean()
-}).
-
-format(Fmt, Args, Max) ->
- format(Fmt, Args, Max, []).
-
-format(Fmt, Args, Max, Options) ->
- try
- couch_log_trunc_io_fmt:format(Fmt, Args, Max, Options)
- catch
- _What:_Why ->
- erlang:error(badarg, [Fmt, Args])
- end.
-
-%% @doc Returns a flattened list containing the ASCII representation of the given
-%% term.
--spec fprint(term(), pos_integer()) -> string().
-fprint(Term, Max) ->
- fprint(Term, Max, []).
-
-%% @doc Returns a flattened list containing the ASCII representation of the given
-%% term.
--spec fprint(term(), pos_integer(), options()) -> string().
-fprint(T, Max, Options) ->
- {L, _} = print(T, Max, prepare_options(Options, #print_options{})),
- lists:flatten(L).
-
-%% @doc Same as print, but never crashes.
-%%
-%% This is a tradeoff. Print might conceivably crash if it's asked to
-%% print something it doesn't understand, for example some new data
-%% type in a future version of Erlang. If print crashes, we fall back
-%% to io_lib to format the term, but then the formatting is
-%% depth-limited instead of length-limited, so you might run out of
-%% memory printing it. Out of the frying pan and into the fire.
-%%
--spec safe(term(), pos_integer()) -> {string(), pos_integer()} | {string()}.
-safe(What, Len) ->
- case catch print(What, Len) of
- {L, Used} when is_list(L) -> {L, Used};
- _ -> {"unable to print" ++ io_lib:write(What, 99)}
- end.
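Taken together these exports give a length-bounded substitute for io_lib: print/2,3 returns {IoList, Length}, fprint/2,3 flattens that to a string, safe/2 falls back to io_lib:write/2 rather than crash, and format/3,4 is the ~-directive front end exercised by the unit tests at the bottom of this module. A short usage sketch; the first result is the one asserted in format_test/0 below, the rest only illustrate the return shapes:

    -module(trunc_io_example).
    -export([demo/0]).

    demo() ->
        %% 50-character budget; output as asserted in format_test/0
        "[\"foo\",98,97,114]" =
            lists:flatten(couch_log_trunc_io:format("~p", [["foo", $b, $a, $r]], 50)),
        %% {IoList, Length} and a flattened string for the same term
        {_IoList, _Len} = couch_log_trunc_io:print({some, [nested, term]}, 500),
        _Str = couch_log_trunc_io:fprint({some, [nested, term]}, 500),
        %% never crashes, even on terms print/2 cannot handle
        couch_log_trunc_io:safe(self(), 100).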
-
-%% @doc Returns {List, Length}
--spec print(term(), pos_integer()) -> {iolist(), pos_integer()}.
-print(Term, Max) ->
- print(Term, Max, []).
-
-%% @doc Returns {List, Length}
--spec print(term(), pos_integer(), options() | #print_options{}) -> {iolist(), pos_integer()}.
-print(Term, Max, Options) when is_list(Options) ->
- %% need to convert the proplist to a record
- print(Term, Max, prepare_options(Options, #print_options{}));
-print(Term, _Max, #print_options{force_strings = true}) when
- not is_list(Term), not is_binary(Term), not is_atom(Term)
-->
- erlang:error(badarg);
-print(_, Max, _Options) when Max < 0 -> {"...", 3};
-print(_, _, #print_options{depth = 0}) ->
- {"...", 3};
-%% @doc We assume atoms, floats, funs, integers, PIDs, ports and refs never need
-%% to be truncated. This isn't strictly true; someone could make an
-%% arbitrarily long bignum. Let's assume that won't happen unless someone
-%% is being malicious.
-%%
-print(Atom, _Max, #print_options{force_strings = NoQuote}) when is_atom(Atom) ->
- L = atom_to_list(Atom),
- R =
- case atom_needs_quoting_start(L) andalso not NoQuote of
- true -> lists:flatten([$', L, $']);
- false -> L
- end,
- {R, length(R)};
-print(<<>>, _Max, #print_options{depth = 1}) ->
- {"<<>>", 4};
-print(Bin, _Max, #print_options{depth = 1}) when is_binary(Bin) ->
- {"<<...>>", 7};
-print(<<>>, _Max, Options) ->
- case Options#print_options.force_strings of
- true ->
- {"", 0};
- false ->
- {"<<>>", 4}
- end;
-print(Binary, 0, _Options) when is_bitstring(Binary) ->
- {"<<..>>", 6};
-print(Bin, Max, _Options) when is_binary(Bin), Max < 2 ->
- {"<<...>>", 7};
-print(Binary, Max, Options) when is_binary(Binary) ->
- B = binary_to_list(Binary, 1, lists:min([Max, byte_size(Binary)])),
- {Res, Length} =
- case
- Options#print_options.lists_as_strings orelse
- Options#print_options.force_strings
- of
- true ->
- Depth = Options#print_options.depth,
- MaxSize = (Depth - 1) * 4,
- %% check if we need to truncate based on depth
- In =
- case
- Depth > -1 andalso MaxSize < length(B) andalso
- not Options#print_options.force_strings
- of
- true ->
- string:substr(B, 1, MaxSize);
- false ->
- B
- end,
- MaxLen =
- case Options#print_options.force_strings of
- true ->
- Max;
- false ->
- %% make room for the leading doublequote
- Max - 1
- end,
- try alist(In, MaxLen, Options) of
- {L0, Len0} ->
- case Options#print_options.force_strings of
- false ->
- case B /= In of
- true ->
- {[$", L0, "..."], Len0 + 4};
- false ->
- {[$" | L0], Len0 + 1}
- end;
- true ->
- {L0, Len0}
- end
- catch
- throw:{unprintable, C} ->
- Index = string:chr(In, C),
- case
- Index > 1 andalso Options#print_options.depth =< Index andalso
- Options#print_options.depth > -1 andalso
- not Options#print_options.force_strings
- of
- true ->
- %% print first Index-1 characters followed by ...
- {L0, Len0} = alist_start(
- string:substr(In, 1, Index - 1), Max - 1, Options
- ),
- {L0 ++ "...", Len0 + 3};
- false ->
- list_body(In, Max - 4, dec_depth(Options), true)
- end
- end;
- _ ->
- list_body(B, Max - 4, dec_depth(Options), true)
- end,
- case Options#print_options.force_strings of
- true ->
- {Res, Length};
- _ ->
- {["<<", Res, ">>"], Length + 4}
- end;
-%% bitstrings are binary's evil brother who doesn't end on an 8 bit boundary.
-%% This makes printing them extremely annoying, so list_body/list_bodyc has
-%% some magic for dealing with the output of bitstring_to_list, which returns
-%% a list of integers (as expected) but with a trailing binary that represents
-%% the remaining bits.
-print({inline_bitstring, B}, _Max, _Options) when is_bitstring(B) ->
- Size = bit_size(B),
- <<Value:Size>> = B,
- ValueStr = integer_to_list(Value),
- SizeStr = integer_to_list(Size),
- {[ValueStr, $:, SizeStr], length(ValueStr) + length(SizeStr) + 1};
-print(BitString, Max, Options) when is_bitstring(BitString) ->
- BL =
- case byte_size(BitString) > Max of
- true ->
- binary_to_list(BitString, 1, Max);
- _ ->
- R = erlang:bitstring_to_list(BitString),
- {Bytes, [Bits]} = lists:splitwith(fun erlang:is_integer/1, R),
- %% tag the trailing bits with a special tuple we catch when
- %% list_body calls print again
- Bytes ++ [{inline_bitstring, Bits}]
- end,
- {X, Len0} = list_body(BL, Max - 4, dec_depth(Options), true),
- {["<<", X, ">>"], Len0 + 4};
-print(Float, _Max, _Options) when is_float(Float) ->
- %% use the same function io_lib:format uses to print floats
- %% float_to_list is way too verbose.
- L = io_lib_format:fwrite_g(Float),
- {L, length(L)};
-print(Fun, Max, _Options) when is_function(Fun) ->
- L = erlang:fun_to_list(Fun),
- case length(L) > Max of
- true ->
- S = erlang:max(5, Max),
- Res = string:substr(L, 1, S) ++ "..>",
- {Res, length(Res)};
- _ ->
- {L, length(L)}
- end;
-print(Integer, _Max, _Options) when is_integer(Integer) ->
- L = integer_to_list(Integer),
- {L, length(L)};
-print(Pid, _Max, _Options) when is_pid(Pid) ->
- L = pid_to_list(Pid),
- {L, length(L)};
-print(Ref, _Max, _Options) when is_reference(Ref) ->
- L = erlang:ref_to_list(Ref),
- {L, length(L)};
-print(Port, _Max, _Options) when is_port(Port) ->
- L = erlang:port_to_list(Port),
- {L, length(L)};
-print({'$lager_record', Name, Fields}, Max, Options) ->
- Leader = "#" ++ atom_to_list(Name) ++ "{",
- {RC, Len} = record_fields(Fields, Max - length(Leader) + 1, dec_depth(Options)),
- {[Leader, RC, "}"], Len + length(Leader) + 1};
-print(Tuple, Max, Options) when is_tuple(Tuple) ->
- {TC, Len} = tuple_contents(Tuple, Max - 2, Options),
- {[${, TC, $}], Len + 2};
-print(List, Max, Options) when is_list(List) ->
- case
- Options#print_options.lists_as_strings orelse
- Options#print_options.force_strings
- of
- true ->
- alist_start(List, Max, dec_depth(Options));
- _ ->
- {R, Len} = list_body(List, Max - 2, dec_depth(Options), false),
- {[$[, R, $]], Len + 2}
- end;
-print(Map, Max, Options) ->
- case erlang:is_builtin(erlang, is_map, 1) andalso erlang:is_map(Map) of
- true ->
- {MapBody, Len} = map_body(Map, Max - 3, dec_depth(Options)),
- {[$#, ${, MapBody, $}], Len + 3};
- false ->
- error(badarg, [Map, Max, Options])
- end.
-
-%% Returns {List, Length}
-tuple_contents(Tuple, Max, Options) ->
- L = tuple_to_list(Tuple),
- list_body(L, Max, dec_depth(Options), true).
-
-%% Format the inside of a list, i.e. do not add a leading [ or trailing ].
-%% Returns {List, Length}
-list_body([], _Max, _Options, _Tuple) ->
- {[], 0};
-list_body(_, Max, _Options, _Tuple) when Max < 4 -> {"...", 3};
-list_body(_, _Max, #print_options{depth = 0}, _Tuple) ->
- {"...", 3};
-list_body([H], Max, Options = #print_options{depth = 1}, _Tuple) ->
- print(H, Max, Options);
-list_body([H | _], Max, Options = #print_options{depth = 1}, Tuple) ->
- {List, Len} = print(H, Max - 4, Options),
- Sep =
- case Tuple of
- true -> $,;
- false -> $|
- end,
- {[List ++ [Sep | "..."]], Len + 4};
-list_body([H | T], Max, Options, Tuple) ->
- {List, Len} = print(H, Max, Options),
- {Final, FLen} = list_bodyc(T, Max - Len, Options, Tuple),
- {[List | Final], FLen + Len};
-%% improper list
-list_body(X, Max, Options, _Tuple) ->
- {List, Len} = print(X, Max - 1, Options),
- {[$|, List], Len + 1}.
-
-list_bodyc([], _Max, _Options, _Tuple) ->
- {[], 0};
-list_bodyc(_, Max, _Options, _Tuple) when Max < 5 -> {",...", 4};
-list_bodyc(_, _Max, #print_options{depth = 1}, true) ->
- {",...", 4};
-list_bodyc(_, _Max, #print_options{depth = 1}, false) ->
- {"|...", 4};
-list_bodyc([H | T], Max, #print_options{depth = Depth} = Options, Tuple) ->
- {List, Len} = print(H, Max, dec_depth(Options)),
- {Final, FLen} = list_bodyc(T, Max - Len - 1, dec_depth(Options), Tuple),
- Sep =
- case Depth == 1 andalso not Tuple of
- true -> $|;
- _ -> $,
- end,
- {[Sep, List | Final], FLen + Len + 1};
-%% improper list
-list_bodyc(X, Max, Options, _Tuple) ->
- {List, Len} = print(X, Max - 1, Options),
- {[$|, List], Len + 1}.
-
-map_body(Map, Max, #print_options{depth = Depth}) when Max < 4; Depth =:= 0 ->
- case erlang:map_size(Map) of
- 0 -> {[], 0};
- _ -> {"...", 3}
- end;
-map_body(Map, Max, Options) ->
- case maps:to_list(Map) of
- [] ->
- {[], 0};
- [{Key, Value} | Rest] ->
- {KeyStr, KeyLen} = print(Key, Max - 4, Options),
- DiffLen = KeyLen + 4,
- {ValueStr, ValueLen} = print(Value, Max - DiffLen, Options),
- DiffLen2 = DiffLen + ValueLen,
- {Final, FLen} = map_bodyc(Rest, Max - DiffLen2, dec_depth(Options)),
- {[KeyStr, " => ", ValueStr | Final], DiffLen2 + FLen}
- end.
-
-map_bodyc([], _Max, _Options) ->
- {[], 0};
-map_bodyc(_Rest, Max, #print_options{depth = Depth}) when Max < 5; Depth =:= 0 ->
- {",...", 4};
-map_bodyc([{Key, Value} | Rest], Max, Options) ->
- {KeyStr, KeyLen} = print(Key, Max - 5, Options),
- DiffLen = KeyLen + 5,
- {ValueStr, ValueLen} = print(Value, Max - DiffLen, Options),
- DiffLen2 = DiffLen + ValueLen,
- {Final, FLen} = map_bodyc(Rest, Max - DiffLen2, dec_depth(Options)),
- {[$,, KeyStr, " => ", ValueStr | Final], DiffLen2 + FLen}.
-
-%% The head of a list we hope is ascii. Examples:
-%%
-%% [65,66,67] -> "ABC"
-%% [65,0,67] -> "A"[0,67]
-%% [0,65,66] -> [0,65,66]
-%% [65,b,66] -> "A"[b,66]
-%%
-alist_start([], _Max, #print_options{force_strings = true}) ->
- {"", 0};
-alist_start([], _Max, _Options) ->
- {"[]", 2};
-alist_start(_, Max, _Options) when Max < 4 -> {"...", 3};
-alist_start(_, _Max, #print_options{depth = 0}) ->
- {"[...]", 5};
-alist_start(L, Max, #print_options{force_strings = true} = Options) ->
- alist(L, Max, Options);
-%alist_start([H|_T], _Max, #print_options{depth=1}) when is_integer(H) -> {[$[, H, $|, $., $., $., $]], 7};
-
-% definitely printable
-alist_start([H | T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e ->
- try alist([H | T], Max - 1, Options) of
- {L, Len} ->
- {[$" | L], Len + 1}
- catch
- throw:{unprintable, _} ->
- {R, Len} = list_body([H | T], Max - 2, Options, false),
- {[$[, R, $]], Len + 2}
- end;
-% definitely printable
-alist_start([H | T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff ->
- try alist([H | T], Max - 1, Options) of
- {L, Len} ->
- {[$" | L], Len + 1}
- catch
- throw:{unprintable, _} ->
- {R, Len} = list_body([H | T], Max - 2, Options, false),
- {[$[, R, $]], Len + 2}
- end;
-alist_start([H | T], Max, Options) when
- H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H =:= $\f; H =:= $\b
-->
- try alist([H | T], Max - 1, Options) of
- {L, Len} ->
- {[$" | L], Len + 1}
- catch
- throw:{unprintable, _} ->
- {R, Len} = list_body([H | T], Max - 2, Options, false),
- {[$[, R, $]], Len + 2}
- end;
-alist_start(L, Max, Options) ->
- {R, Len} = list_body(L, Max - 2, Options, false),
- {[$[, R, $]], Len + 2}.
-
-alist([], _Max, #print_options{force_strings = true}) ->
- {"", 0};
-alist([], _Max, _Options) ->
- {"\"", 1};
-alist(_, Max, #print_options{force_strings = true}) when Max < 4 -> {"...", 3};
-alist(_, Max, #print_options{force_strings = false}) when Max < 5 -> {"...\"", 4};
-alist([H | T], Max, Options = #print_options{force_strings = false, lists_as_strings = true}) when
- H =:= $"; H =:= $\\
-->
- %% preserve escaping around quotes
- {L, Len} = alist(T, Max - 1, Options),
- {[$\\, H | L], Len + 2};
-% definitely printable
-alist([H | T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e ->
- {L, Len} = alist(T, Max - 1, Options),
- {[H | L], Len + 1};
-% definitely printable
-alist([H | T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff ->
- {L, Len} = alist(T, Max - 1, Options),
- {[H | L], Len + 1};
-alist([H | T], Max, Options) when
- H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H =:= $\f; H =:= $\b
-->
- {L, Len} = alist(T, Max - 1, Options),
- case Options#print_options.force_strings of
- true ->
- {[H | L], Len + 1};
- _ ->
- {[escape(H) | L], Len + 1}
- end;
-alist([H | T], Max, #print_options{force_strings = true} = Options) when is_integer(H) ->
- {L, Len} = alist(T, Max - 1, Options),
- {[H | L], Len + 1};
-alist([H | T], Max, Options = #print_options{force_strings = true}) when is_binary(H); is_list(H) ->
- {List, Len} = print(H, Max, Options),
- case (Max - Len) =< 0 of
- true ->
- %% no more room to print anything
- {List, Len};
- false ->
- %% no need to decrement depth, as we're in printable string mode
- {Final, FLen} = alist(T, Max - Len, Options),
- {[List | Final], FLen + Len}
- end;
-alist(_, _, #print_options{force_strings = true}) ->
- erlang:error(badarg);
-alist([H | _L], _Max, _Options) ->
- throw({unprintable, H});
-alist(H, _Max, _Options) ->
- %% improper list
- throw({unprintable, H}).
-
-%% is the first character in the atom alphabetic & lowercase?
-atom_needs_quoting_start([H | T]) when H >= $a, H =< $z ->
- atom_needs_quoting(T);
-atom_needs_quoting_start(_) ->
- true.
-
-atom_needs_quoting([]) ->
- false;
-atom_needs_quoting([H | T]) when
- (H >= $a andalso H =< $z);
- (H >= $A andalso H =< $Z);
- (H >= $0 andalso H =< $9);
- H == $@;
- H == $_
-->
- atom_needs_quoting(T);
-atom_needs_quoting(_) ->
- true.
-
--spec prepare_options(options(), #print_options{}) -> #print_options{}.
-prepare_options([], Options) ->
- Options;
-prepare_options([{depth, Depth} | T], Options) when is_integer(Depth) ->
- prepare_options(T, Options#print_options{depth = Depth});
-prepare_options([{lists_as_strings, Bool} | T], Options) when is_boolean(Bool) ->
- prepare_options(T, Options#print_options{lists_as_strings = Bool});
-prepare_options([{force_strings, Bool} | T], Options) when is_boolean(Bool) ->
- prepare_options(T, Options#print_options{force_strings = Bool}).
-
-dec_depth(#print_options{depth = Depth} = Options) when Depth > 0 ->
- Options#print_options{depth = Depth - 1};
-dec_depth(Options) ->
- Options.
-
-escape($\t) -> "\\t";
-escape($\n) -> "\\n";
-escape($\r) -> "\\r";
-escape($\e) -> "\\e";
-escape($\f) -> "\\f";
-escape($\b) -> "\\b";
-escape($\v) -> "\\v".
-
-record_fields([], _, _) ->
- {"", 0};
-record_fields(_, Max, #print_options{depth = D}) when Max < 4; D == 0 ->
- {"...", 3};
-record_fields([{Field, Value} | T], Max, Options) ->
- {ExtraChars, Terminator} =
- case T of
- [] ->
- {1, []};
- _ ->
- {2, ","}
- end,
- {FieldStr, FieldLen} = print(Field, Max - ExtraChars, Options),
- {ValueStr, ValueLen} = print(Value, Max - (FieldLen + ExtraChars), Options),
- {Final, FLen} = record_fields(T, Max - (FieldLen + ValueLen + ExtraChars), dec_depth(Options)),
- {[FieldStr ++ "=" ++ ValueStr ++ Terminator | Final], FLen + FieldLen + ValueLen + ExtraChars}.
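The {'$lager_record', Name, Fields} clause of print/3 above, together with record_fields/3, is a convention carried over from lager: a caller that knows a record's field names can tag it as {'$lager_record', RecordName, [{Field, Value}, ...]} and have it rendered in record syntax instead of as a bare tuple. A sketch worked out by reading those clauses, not taken from a test in this file:

    %% should render as "#user{name=\"bob\",age=42}" given a large enough budget
    couch_log_trunc_io:fprint({'$lager_record', user, [{name, "bob"}, {age, 42}]}, 80).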
-
--ifdef(TEST).
-%%--------------------
-%% The start of a test suite. So far, it only checks for not crashing.
-format_test() ->
- %% simple format strings
- ?assertEqual("foobar", lists:flatten(format("~s", [["foo", $b, $a, $r]], 50))),
- ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~p", [["foo", $b, $a, $r]], 50))),
- ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~P", [["foo", $b, $a, $r], 10], 50))),
- ?assertEqual(
- "[[102,111,111],98,97,114]", lists:flatten(format("~w", [["foo", $b, $a, $r]], 50))
- ),
-
- %% complex ones
- ?assertEqual(" foobar", lists:flatten(format("~10s", [["foo", $b, $a, $r]], 50))),
- ?assertEqual("f", lists:flatten(format("~1s", [["foo", $b, $a, $r]], 50))),
- ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~22p", [["foo", $b, $a, $r]], 50))),
- ?assertEqual(
- "[\"foo\",98,97,114]", lists:flatten(format("~22P", [["foo", $b, $a, $r], 10], 50))
- ),
- ?assertEqual("**********", lists:flatten(format("~10W", [["foo", $b, $a, $r], 10], 50))),
- ?assertEqual(
- "[[102,111,111],98,97,114]", lists:flatten(format("~25W", [["foo", $b, $a, $r], 10], 50))
- ),
- % Note these next two diverge from io_lib:format; the field width is
- % ignored when it should be used as the max line length.
- ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~10p", [["foo", $b, $a, $r]], 50))),
- ?assertEqual(
- "[\"foo\",98,97,114]", lists:flatten(format("~10P", [["foo", $b, $a, $r], 10], 50))
- ),
- ok.
-
-atom_quoting_test() ->
- ?assertEqual("hello", lists:flatten(format("~p", [hello], 50))),
- ?assertEqual("'hello world'", lists:flatten(format("~p", ['hello world'], 50))),
- ?assertEqual("'Hello world'", lists:flatten(format("~p", ['Hello world'], 50))),
- ?assertEqual("hello_world", lists:flatten(format("~p", ['hello_world'], 50))),
- ?assertEqual("'node@127.0.0.1'", lists:flatten(format("~p", ['node@127.0.0.1'], 50))),
- ?assertEqual("node@nohost", lists:flatten(format("~p", [node@nohost], 50))),
- ?assertEqual("abc123", lists:flatten(format("~p", [abc123], 50))),
- ok.
-
-sane_float_printing_test() ->
- ?assertEqual("1.0", lists:flatten(format("~p", [1.0], 50))),
- ?assertEqual("1.23456789", lists:flatten(format("~p", [1.23456789], 50))),
- ?assertEqual("1.23456789", lists:flatten(format("~p", [1.234567890], 50))),
- ?assertEqual("0.3333333333333333", lists:flatten(format("~p", [1 / 3], 50))),
- ?assertEqual("0.1234567", lists:flatten(format("~p", [0.1234567], 50))),
- ok.
-
-float_inside_list_test() ->
- ?assertEqual(
- "[97,38.233913133184835,99]",
- lists:flatten(format("~p", [[$a, 38.233913133184835, $c]], 50))
- ),
- ?assertError(badarg, lists:flatten(format("~s", [[$a, 38.233913133184835, $c]], 50))),
- ok.
-
-quote_strip_test() ->
- ?assertEqual("\"hello\"", lists:flatten(format("~p", ["hello"], 50))),
- ?assertEqual("hello", lists:flatten(format("~s", ["hello"], 50))),
- ?assertEqual("hello", lists:flatten(format("~s", [hello], 50))),
- ?assertEqual("hello", lists:flatten(format("~p", [hello], 50))),
- ?assertEqual("'hello world'", lists:flatten(format("~p", ['hello world'], 50))),
- ?assertEqual("hello world", lists:flatten(format("~s", ['hello world'], 50))),
- ok.
-
-binary_printing_test() ->
- ?assertEqual("<<>>", lists:flatten(format("~p", [<<>>], 50))),
- ?assertEqual("", lists:flatten(format("~s", [<<>>], 50))),
- ?assertEqual("<<..>>", lists:flatten(format("~p", [<<"hi">>], 0))),
- ?assertEqual("<<...>>", lists:flatten(format("~p", [<<"hi">>], 1))),
- ?assertEqual("<<\"hello\">>", lists:flatten(format("~p", [<<$h, $e, $l, $l, $o>>], 50))),
- ?assertEqual("<<\"hello\">>", lists:flatten(format("~p", [<<"hello">>], 50))),
- ?assertEqual("<<104,101,108,108,111>>", lists:flatten(format("~w", [<<"hello">>], 50))),
- ?assertEqual("<<1,2,3,4>>", lists:flatten(format("~p", [<<1, 2, 3, 4>>], 50))),
- ?assertEqual([1, 2, 3, 4], lists:flatten(format("~s", [<<1, 2, 3, 4>>], 50))),
- ?assertEqual("hello", lists:flatten(format("~s", [<<"hello">>], 50))),
- ?assertEqual("hello\nworld", lists:flatten(format("~s", [<<"hello\nworld">>], 50))),
- ?assertEqual("<<\"hello\\nworld\">>", lists:flatten(format("~p", [<<"hello\nworld">>], 50))),
- ?assertEqual(
- "<<\"\\\"hello world\\\"\">>", lists:flatten(format("~p", [<<"\"hello world\"">>], 50))
- ),
- ?assertEqual("<<\"hello\\\\world\">>", lists:flatten(format("~p", [<<"hello\\world">>], 50))),
- ?assertEqual("<<\"hello\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\world">>], 50))),
- ?assertEqual(
- "<<\"hello\\\\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\\world">>], 50))
- ),
- ?assertEqual("<<\"hello\\bworld\">>", lists:flatten(format("~p", [<<"hello\bworld">>], 50))),
- ?assertEqual("<<\"hello\\tworld\">>", lists:flatten(format("~p", [<<"hello\tworld">>], 50))),
- ?assertEqual("<<\"hello\\nworld\">>", lists:flatten(format("~p", [<<"hello\nworld">>], 50))),
- ?assertEqual("<<\"hello\\rworld\">>", lists:flatten(format("~p", [<<"hello\rworld">>], 50))),
- ?assertEqual("<<\"hello\\eworld\">>", lists:flatten(format("~p", [<<"hello\eworld">>], 50))),
- ?assertEqual("<<\"hello\\fworld\">>", lists:flatten(format("~p", [<<"hello\fworld">>], 50))),
- ?assertEqual("<<\"hello\\vworld\">>", lists:flatten(format("~p", [<<"hello\vworld">>], 50))),
- ?assertEqual(" hello", lists:flatten(format("~10s", [<<"hello">>], 50))),
- ?assertEqual("[a]", lists:flatten(format("~s", [<<"[a]">>], 50))),
- ?assertEqual("[a]", lists:flatten(format("~s", [[<<"[a]">>]], 50))),
-
- ok.
-
-bitstring_printing_test() ->
- ?assertEqual(
- "<<1,2,3,1:7>>",
- lists:flatten(
- format(
- "~p",
- [<<1, 2, 3, 1:7>>],
- 100
- )
- )
- ),
- ?assertEqual(
- "<<1:7>>",
- lists:flatten(
- format(
- "~p",
- [<<1:7>>],
- 100
- )
- )
- ),
- ?assertEqual(
- "<<1,2,3,...>>",
- lists:flatten(
- format(
- "~p",
- [<<1, 2, 3, 1:7>>],
- 12
- )
- )
- ),
- ?assertEqual(
- "<<1,2,3,...>>",
- lists:flatten(
- format(
- "~p",
- [<<1, 2, 3, 1:7>>],
- 13
- )
- )
- ),
- ?assertEqual(
- "<<1,2,3,1:7>>",
- lists:flatten(
- format(
- "~p",
- [<<1, 2, 3, 1:7>>],
- 14
- )
- )
- ),
- ?assertEqual("<<..>>", lists:flatten(format("~p", [<<1:7>>], 0))),
- ?assertEqual("<<...>>", lists:flatten(format("~p", [<<1:7>>], 1))),
- ?assertEqual(
- "[<<1>>,<<2>>]",
- lists:flatten(
- format(
- "~p",
- [[<<1>>, <<2>>]],
- 100
- )
- )
- ),
- ?assertEqual("{<<1:7>>}", lists:flatten(format("~p", [{<<1:7>>}], 50))),
- ok.
-
-list_printing_test() ->
- ?assertEqual("[]", lists:flatten(format("~p", [[]], 50))),
- ?assertEqual("[]", lists:flatten(format("~w", [[]], 50))),
- ?assertEqual("", lists:flatten(format("~s", [[]], 50))),
- ?assertEqual("...", lists:flatten(format("~s", [[]], -1))),
- ?assertEqual("[[]]", lists:flatten(format("~p", [[[]]], 50))),
- ?assertEqual("[13,11,10,8,5,4]", lists:flatten(format("~p", [[13, 11, 10, 8, 5, 4]], 50))),
- ?assertEqual("\"\\rabc\"", lists:flatten(format("~p", [[13, $a, $b, $c]], 50))),
- ?assertEqual("[1,2,3|4]", lists:flatten(format("~p", [[1, 2, 3 | 4]], 50))),
- ?assertEqual("[...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 4))),
- ?assertEqual("[1,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 6))),
- ?assertEqual("[1,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 7))),
- ?assertEqual("[1,2,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 8))),
- ?assertEqual("[1|4]", lists:flatten(format("~p", [[1 | 4]], 50))),
- ?assertEqual("[1]", lists:flatten(format("~p", [[1]], 50))),
- ?assertError(badarg, lists:flatten(format("~s", [[1 | 4]], 50))),
- ?assertEqual("\"hello...\"", lists:flatten(format("~p", ["hello world"], 10))),
- ?assertEqual("hello w...", lists:flatten(format("~s", ["hello world"], 10))),
- ?assertEqual("hello world\r\n", lists:flatten(format("~s", ["hello world\r\n"], 50))),
- ?assertEqual("\rhello world\r\n", lists:flatten(format("~s", ["\rhello world\r\n"], 50))),
- ?assertEqual(
- "\"\\rhello world\\r\\n\"", lists:flatten(format("~p", ["\rhello world\r\n"], 50))
- ),
- ?assertEqual(
- "[13,104,101,108,108,111,32,119,111,114,108,100,13,10]",
- lists:flatten(format("~w", ["\rhello world\r\n"], 60))
- ),
- ?assertEqual("...", lists:flatten(format("~s", ["\rhello world\r\n"], 3))),
- ?assertEqual(
- "[22835963083295358096932575511191922182123945984,...]",
- lists:flatten(
- format(
- "~p",
- [
- [
- 22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984
- ]
- ],
- 9
- )
- )
- ),
- ?assertEqual(
- "[22835963083295358096932575511191922182123945984,...]",
- lists:flatten(
- format(
- "~p",
- [
- [
- 22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984
- ]
- ],
- 53
- )
- )
- ),
- %%improper list
- ?assertEqual("[1,2,3|4]", lists:flatten(format("~P", [[1 | [2 | [3 | 4]]], 5], 50))),
- ?assertEqual("[1|1]", lists:flatten(format("~P", [[1 | 1], 5], 50))),
- ?assertEqual("[9|9]", lists:flatten(format("~p", [[9 | 9]], 50))),
- ok.
-
-iolist_printing_test() ->
- ?assertEqual(
- "iolist: HelloIamaniolist",
- lists:flatten(
- format(
- "iolist: ~s",
- [[$H, $e, $l, $l, $o, "I", ["am", [<<"an">>], [$i, $o, $l, $i, $s, $t]]]],
- 1000
- )
- )
- ),
- ?assertEqual(
- "123...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 6))
- ),
- ?assertEqual(
- "123456...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 9))
- ),
- ?assertEqual(
- "123456789H...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 13))
- ),
- ?assertEqual(
- "123456789HellIamaniolist",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 30))
- ),
-
- ok.
-
-tuple_printing_test() ->
- ?assertEqual("{}", lists:flatten(format("~p", [{}], 50))),
- ?assertEqual("{}", lists:flatten(format("~w", [{}], 50))),
- ?assertError(badarg, lists:flatten(format("~s", [{}], 50))),
- ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 1))),
- ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 2))),
- ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 3))),
- ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 4))),
- ?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 5))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 6))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 7))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 9))),
- ?assertEqual("{foo,bar}", lists:flatten(format("~p", [{foo, bar}], 10))),
- ?assertEqual(
- "{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(
- format(
- "~w",
- [
- {22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}
- ],
- 10
- )
- )
- ),
- ?assertEqual(
- "{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(
- format(
- "~w",
- [
- {22835963083295358096932575511191922182123945984, bar}
- ],
- 10
- )
- )
- ),
- ?assertEqual(
- "{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(
- format(
- "~w",
- [
- {22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}
- ],
- 53
- )
- )
- ),
- ok.
-
-map_printing_test() ->
- case erlang:is_builtin(erlang, is_map, 1) of
- true ->
- ?assertEqual("#{}", lists:flatten(format("~p", [maps:new()], 50))),
- ?assertEqual("#{}", lists:flatten(format("~p", [maps:new()], 3))),
- ?assertEqual("#{}", lists:flatten(format("~w", [maps:new()], 50))),
- ?assertError(badarg, lists:flatten(format("~s", [maps:new()], 50))),
- ?assertEqual("#{...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 1))),
- ?assertEqual("#{...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 6))),
- ?assertEqual(
- "#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 7))
- ),
- ?assertEqual(
- "#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 9))
- ),
- ?assertEqual(
- "#{bar => foo}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 10))
- ),
- ?assertEqual(
- "#{bar => ...,...}",
- lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 9))
- ),
- ?assertEqual(
- "#{bar => foo,...}",
- lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 10))
- ),
- ?assertEqual(
- "#{bar => foo,...}",
- lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 17))
- ),
- ?assertEqual(
- "#{bar => foo,foo => ...}",
- lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 18))
- ),
- ?assertEqual(
- "#{bar => foo,foo => ...}",
- lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 19))
- ),
- ?assertEqual(
- "#{bar => foo,foo => ...}",
- lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 20))
- ),
- ?assertEqual(
- "#{bar => foo,foo => bar}",
- lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 21))
- ),
- ?assertEqual(
- "#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(
- format(
- "~w",
- [
- maps:from_list([
- {22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}
- ])
- ],
- 10
- )
- )
- ),
- ?assertEqual(
- "#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(
- format(
- "~w",
- [
- maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
- ],
- 10
- )
- )
- ),
- ?assertEqual(
- "#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(
- format(
- "~w",
- [
- maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
- ],
- 53
- )
- )
- ),
- ?assertEqual(
- "#{22835963083295358096932575511191922182123945984 => bar}",
- lists:flatten(
- format(
- "~w",
- [
- maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
- ],
- 54
- )
- )
- ),
- ok;
- false ->
- ok
- end.
-
-unicode_test() ->
- ?assertEqual([231, 167, 129], lists:flatten(format("~s", [<<231, 167, 129>>], 50))),
- ?assertEqual([31169], lists:flatten(format("~ts", [<<231, 167, 129>>], 50))),
- ok.
-
-depth_limit_test() ->
- ?assertEqual("{...}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 1], 50))),
- ?assertEqual("{a,...}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 2], 50))),
- ?assertEqual("{a,[...]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 3], 50))),
- ?assertEqual("{a,[b|...]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 4], 50))),
- ?assertEqual("{a,[b,[...]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 5], 50))),
- ?assertEqual("{a,[b,[c|...]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 6], 50))),
- ?assertEqual("{a,[b,[c,[...]]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 7], 50))),
- ?assertEqual("{a,[b,[c,[d]]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 8], 50))),
- ?assertEqual("{a,[b,[c,[d]]]}", lists:flatten(format("~P", [{a, [b, [c, [d]]]}, 9], 50))),
-
- ?assertEqual("{a,{...}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 3], 50))),
- ?assertEqual("{a,{b,...}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 4], 50))),
- ?assertEqual("{a,{b,{...}}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 5], 50))),
- ?assertEqual("{a,{b,{c,...}}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 6], 50))),
- ?assertEqual("{a,{b,{c,{...}}}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 7], 50))),
- ?assertEqual("{a,{b,{c,{d}}}}", lists:flatten(format("~P", [{a, {b, {c, {d}}}}, 8], 50))),
-
- case erlang:is_builtin(erlang, is_map, 1) of
- true ->
- ?assertEqual(
- "#{a => #{...}}",
- lists:flatten(
- format(
- "~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 2],
- 50
- )
- )
- ),
- ?assertEqual(
- "#{a => #{b => #{...}}}",
- lists:flatten(
- format(
- "~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 3],
- 50
- )
- )
- ),
- ?assertEqual(
- "#{a => #{b => #{c => d}}}",
- lists:flatten(
- format(
- "~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 4],
- 50
- )
- )
- ),
-
- ?assertEqual("#{}", lists:flatten(format("~P", [maps:new(), 1], 50))),
- ?assertEqual(
- "#{...}",
- lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 1], 50))
- ),
- ?assertEqual(
- "#{1 => 1,...}",
- lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 2], 50))
- ),
- ?assertEqual(
- "#{1 => 1,2 => 2,...}",
- lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 3], 50))
- ),
- ?assertEqual(
- "#{1 => 1,2 => 2,3 => 3}",
- lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 4], 50))
- ),
-
- ok;
- false ->
- ok
- end,
-
- ?assertEqual("{\"a\",[...]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 3], 50))),
- ?assertEqual(
- "{\"a\",[\"b\",[[...]|...]]}",
- lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 6], 50))
- ),
- ?assertEqual(
- "{\"a\",[\"b\",[\"c\",[\"d\"]]]}",
- lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 9], 50))
- ),
-
- ?assertEqual("[...]", lists:flatten(format("~P", [[1, 2, 3], 1], 50))),
- ?assertEqual("[1|...]", lists:flatten(format("~P", [[1, 2, 3], 2], 50))),
- ?assertEqual("[1,2|...]", lists:flatten(format("~P", [[1, 2, 3], 3], 50))),
- ?assertEqual("[1,2,3]", lists:flatten(format("~P", [[1, 2, 3], 4], 50))),
-
- ?assertEqual("{1,...}", lists:flatten(format("~P", [{1, 2, 3}, 2], 50))),
- ?assertEqual("{1,2,...}", lists:flatten(format("~P", [{1, 2, 3}, 3], 50))),
- ?assertEqual("{1,2,3}", lists:flatten(format("~P", [{1, 2, 3}, 4], 50))),
-
- ?assertEqual("{1,...}", lists:flatten(format("~P", [{1, 2, 3}, 2], 50))),
- ?assertEqual("[1,2|...]", lists:flatten(format("~P", [[1, 2, <<3>>], 3], 50))),
- ?assertEqual("[1,2,<<...>>]", lists:flatten(format("~P", [[1, 2, <<3>>], 4], 50))),
- ?assertEqual("[1,2,<<3>>]", lists:flatten(format("~P", [[1, 2, <<3>>], 5], 50))),
-
- ?assertEqual("<<...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 1], 50))),
- ?assertEqual("<<0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 2], 50))),
- ?assertEqual("<<0,0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 3], 50))),
- ?assertEqual("<<0,0,0,...>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 4], 50))),
- ?assertEqual("<<0,0,0,0>>", lists:flatten(format("~P", [<<0, 0, 0, 0>>, 5], 50))),
-
- %% this is a seriously weird edge case
- ?assertEqual("<<\" \"...>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 2], 50))),
- ?assertEqual("<<\" \"...>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 3], 50))),
- ?assertEqual("<<\" \"...>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 4], 50))),
- ?assertEqual("<<32,32,32,0>>", lists:flatten(format("~P", [<<32, 32, 32, 0>>, 5], 50))),
- ?assertEqual("<<32,32,32,0>>", lists:flatten(format("~p", [<<32, 32, 32, 0>>], 50))),
-
- %% depth limiting for some reason works in 4 byte chunks on printable binaries?
- ?assertEqual("<<\"hell\"...>>", lists:flatten(format("~P", [<<"hello world">>, 2], 50))),
- ?assertEqual(
- "<<\"abcd\"...>>", lists:flatten(format("~P", [<<$a, $b, $c, $d, $e, 0>>, 2], 50))
- ),
-
- %% I don't even know...
- ?assertEqual("<<>>", lists:flatten(format("~P", [<<>>, 1], 50))),
- ?assertEqual("<<>>", lists:flatten(format("~W", [<<>>, 1], 50))),
-
- ?assertEqual("{abc,<<\"abc\\\"\">>}", lists:flatten(format("~P", [{abc, <<"abc\"">>}, 4], 50))),
-
- ok.
-
-print_terms_without_format_string_test() ->
- ?assertError(badarg, format({hello, world}, [], 50)),
- ?assertError(badarg, format([{google, bomb}], [], 50)),
- ?assertError(badarg, format([$h, $e, $l, $l, $o, 3594], [], 50)),
- ?assertEqual("helloworld", lists:flatten(format([$h, $e, $l, $l, $o, "world"], [], 50))),
- ?assertEqual("hello", lists:flatten(format(<<"hello">>, [], 50))),
- ?assertEqual("hello", lists:flatten(format('hello', [], 50))),
- ?assertError(badarg, format(<<1, 2, 3, 1:7>>, [], 100)),
- ?assertError(badarg, format(65535, [], 50)),
- ok.
-
-improper_io_list_test() ->
- ?assertEqual(">hello", lists:flatten(format('~s', [[$> | <<"hello">>]], 50))),
- ?assertEqual(">hello", lists:flatten(format('~ts', [[$> | <<"hello">>]], 50))),
- ?assertEqual("helloworld", lists:flatten(format('~ts', [[<<"hello">> | <<"world">>]], 50))),
- ok.
-
--endif.
diff --git a/src/couch_log/src/couch_log_trunc_io_fmt.erl b/src/couch_log/src/couch_log_trunc_io_fmt.erl
deleted file mode 100644
index cf18019ad..000000000
--- a/src/couch_log/src/couch_log_trunc_io_fmt.erl
+++ /dev/null
@@ -1,593 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2011-2012. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%% fork of io_lib_format that uses trunc_io to protect against large terms
-%%
-%% Renamed to couch_log_trunc_io_fmt to avoid a naming collision with
-%% lager_format.
--module(couch_log_trunc_io_fmt).
-
--export([format/3, format/4]).
-
--record(options, {
- chomp = false :: boolean()
-}).
-
-format(FmtStr, Args, MaxLen) ->
- format(FmtStr, Args, MaxLen, []).
-
-format([], [], _, _) ->
- "";
-format(FmtStr, Args, MaxLen, Opts) when is_atom(FmtStr) ->
- format(atom_to_list(FmtStr), Args, MaxLen, Opts);
-format(FmtStr, Args, MaxLen, Opts) when is_binary(FmtStr) ->
- format(binary_to_list(FmtStr), Args, MaxLen, Opts);
-format(FmtStr, Args, MaxLen, Opts) when is_list(FmtStr) ->
- case couch_log_util:string_p(FmtStr) of
- true ->
- Options = make_options(Opts, #options{}),
- Cs = collect(FmtStr, Args),
- {Cs2, MaxLen2} = build(Cs, [], MaxLen, Options),
- %% count how many terms remain
- {Count, StrLen} = lists:foldl(
- fun
- ({_C, _As, _F, _Adj, _P, _Pad, _Enc}, {Terms, Chars}) ->
- {Terms + 1, Chars};
- (_, {Terms, Chars}) ->
- {Terms, Chars + 1}
- end,
- {0, 0},
- Cs2
- ),
- build2(Cs2, Count, MaxLen2 - StrLen);
- false ->
- erlang:error(badarg)
- end;
-format(_FmtStr, _Args, _MaxLen, _Opts) ->
- erlang:error(badarg).
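format/3,4 mirrors io_lib:format/2 but threads a character budget through every directive and raises badarg for anything that is not a printable format string; couch_log_trunc_io:format/3,4 above is a thin wrapper around it. A small sketch, with the first result taken from couch_log_trunc_io's format_test/0 and the chomp example derived from the build/4 clause below:

    %% 50-character budget
    "[\"foo\",98,97,114]" =
        lists:flatten(couch_log_trunc_io_fmt:format("~p", [["foo", $b, $a, $r]], 50)),
    %% chomp is the only option make_options/2 accepts; it drops a trailing ~n
    "hello" =
        lists:flatten(couch_log_trunc_io_fmt:format("hello~n", [], 50, [{chomp, true}])).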
-
-collect([$~ | Fmt0], Args0) ->
- {C, Fmt1, Args1} = collect_cseq(Fmt0, Args0),
- [C | collect(Fmt1, Args1)];
-collect([C | Fmt], Args) ->
- [C | collect(Fmt, Args)];
-collect([], []) ->
- [].
-
-collect_cseq(Fmt0, Args0) ->
- {F, Ad, Fmt1, Args1} = field_width(Fmt0, Args0),
- {P, Fmt2, Args2} = precision(Fmt1, Args1),
- {Pad, Fmt3, Args3} = pad_char(Fmt2, Args2),
- {Encoding, Fmt4, Args4} = encoding(Fmt3, Args3),
- {C, As, Fmt5, Args5} = collect_cc(Fmt4, Args4),
- {{C, As, F, Ad, P, Pad, Encoding}, Fmt5, Args5}.
-
-encoding([$t | Fmt], Args) ->
- {unicode, Fmt, Args};
-encoding(Fmt, Args) ->
- {latin1, Fmt, Args}.
-
-field_width([$- | Fmt0], Args0) ->
- {F, Fmt, Args} = field_value(Fmt0, Args0),
- field_width(-F, Fmt, Args);
-field_width(Fmt0, Args0) ->
- {F, Fmt, Args} = field_value(Fmt0, Args0),
- field_width(F, Fmt, Args).
-
-field_width(F, Fmt, Args) when F < 0 ->
- {-F, left, Fmt, Args};
-field_width(F, Fmt, Args) when F >= 0 ->
- {F, right, Fmt, Args}.
-
-precision([$. | Fmt], Args) ->
- field_value(Fmt, Args);
-precision(Fmt, Args) ->
- {none, Fmt, Args}.
-
-field_value([$* | Fmt], [A | Args]) when is_integer(A) ->
- {A, Fmt, Args};
-field_value([C | Fmt], Args) when is_integer(C), C >= $0, C =< $9 ->
- field_value([C | Fmt], Args, 0);
-field_value(Fmt, Args) ->
- {none, Fmt, Args}.
-
-field_value([C | Fmt], Args, F) when is_integer(C), C >= $0, C =< $9 ->
- field_value(Fmt, Args, 10 * F + (C - $0));
-%Default case
-field_value(Fmt, Args, F) ->
- {F, Fmt, Args}.
-
-pad_char([$., $* | Fmt], [Pad | Args]) -> {Pad, Fmt, Args};
-pad_char([$., Pad | Fmt], Args) -> {Pad, Fmt, Args};
-pad_char(Fmt, Args) -> {$\s, Fmt, Args}.
-
-%% collect_cc([FormatChar], [Argument]) ->
-%% {Control,[ControlArg],[FormatChar],[Arg]}.
-%% Here we collect the arguments for each control character.
-%% Be explicit to cause failure early.
-
-collect_cc([$w | Fmt], [A | Args]) -> {$w, [A], Fmt, Args};
-collect_cc([$p | Fmt], [A | Args]) -> {$p, [A], Fmt, Args};
-collect_cc([$W | Fmt], [A, Depth | Args]) -> {$W, [A, Depth], Fmt, Args};
-collect_cc([$P | Fmt], [A, Depth | Args]) -> {$P, [A, Depth], Fmt, Args};
-collect_cc([$s | Fmt], [A | Args]) -> {$s, [A], Fmt, Args};
-collect_cc([$r | Fmt], [A | Args]) -> {$r, [A], Fmt, Args};
-collect_cc([$e | Fmt], [A | Args]) -> {$e, [A], Fmt, Args};
-collect_cc([$f | Fmt], [A | Args]) -> {$f, [A], Fmt, Args};
-collect_cc([$g | Fmt], [A | Args]) -> {$g, [A], Fmt, Args};
-collect_cc([$b | Fmt], [A | Args]) -> {$b, [A], Fmt, Args};
-collect_cc([$B | Fmt], [A | Args]) -> {$B, [A], Fmt, Args};
-collect_cc([$x | Fmt], [A, Prefix | Args]) -> {$x, [A, Prefix], Fmt, Args};
-collect_cc([$X | Fmt], [A, Prefix | Args]) -> {$X, [A, Prefix], Fmt, Args};
-collect_cc([$+ | Fmt], [A | Args]) -> {$+, [A], Fmt, Args};
-collect_cc([$# | Fmt], [A | Args]) -> {$#, [A], Fmt, Args};
-collect_cc([$c | Fmt], [A | Args]) -> {$c, [A], Fmt, Args};
-collect_cc([$~ | Fmt], Args) when is_list(Args) -> {$~, [], Fmt, Args};
-collect_cc([$n | Fmt], Args) when is_list(Args) -> {$n, [], Fmt, Args};
-collect_cc([$i | Fmt], [A | Args]) -> {$i, [A], Fmt, Args}.
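collect/2 therefore turns a format string into a mixed list of literal characters and one {Control, Args, FieldWidth, Adjust, Precision, PadChar, Encoding} tuple per ~ directive, which build/4 and build2/3 below pattern-match on. An illustration traced by hand from the clauses above (collect/2 is not exported, so this is only meaningful inside the module):

    %% collect("x: ~p!", [foo]) ->
    %%     [$x, $:, $\s, {$p, [foo], none, right, none, $\s, latin1}, $!]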
-
-%% build([Control], Pc, Indentation) -> [Char].
-%% Interpret the control structures. Count the number of prints
-%% remaining and only calculate indentation when necessary. Must also
-%% be smart when calculating indentation for characters in format.
-
-build([{$n, _, _, _, _, _, _}], Acc, MaxLen, #options{chomp = true}) ->
- %% trailing ~n, ignore
- {lists:reverse(Acc), MaxLen};
-build([{C, As, F, Ad, P, Pad, Enc} | Cs], Acc, MaxLen, O) ->
- {S, MaxLen2} = control(C, As, F, Ad, P, Pad, Enc, MaxLen),
- build(Cs, [S | Acc], MaxLen2, O);
-build([$\n], Acc, MaxLen, #options{chomp = true}) ->
- %% trailing \n, ignore
- {lists:reverse(Acc), MaxLen};
-build([$\n | Cs], Acc, MaxLen, O) ->
- build(Cs, [$\n | Acc], MaxLen - 1, O);
-build([$\t | Cs], Acc, MaxLen, O) ->
- build(Cs, [$\t | Acc], MaxLen - 1, O);
-build([C | Cs], Acc, MaxLen, O) ->
- build(Cs, [C | Acc], MaxLen - 1, O);
-build([], Acc, MaxLen, _O) ->
- {lists:reverse(Acc), MaxLen}.
-
-build2([{C, As, F, Ad, P, Pad, Enc} | Cs], Count, MaxLen) ->
- {S, Len} = control2(C, As, F, Ad, P, Pad, Enc, MaxLen div Count),
- [S | build2(Cs, Count - 1, MaxLen - Len)];
-build2([C | Cs], Count, MaxLen) ->
- [C | build2(Cs, Count, MaxLen)];
-build2([], _, _) ->
- [].
-
-%% control(FormatChar, [Argument], FieldWidth, Adjust, Precision, PadChar,
-%% Indentation) -> [Char]
-%% This is the main dispatch function for the various formatting commands.
-%% Field widths and precisions have already been calculated.
-
-control($e, [A], F, Adj, P, Pad, _Enc, L) when is_float(A) ->
- Res = fwrite_e(A, F, Adj, P, Pad),
- {Res, L - lists:flatlength(Res)};
-control($f, [A], F, Adj, P, Pad, _Enc, L) when is_float(A) ->
- Res = fwrite_f(A, F, Adj, P, Pad),
- {Res, L - lists:flatlength(Res)};
-control($g, [A], F, Adj, P, Pad, _Enc, L) when is_float(A) ->
- Res = fwrite_g(A, F, Adj, P, Pad),
- {Res, L - lists:flatlength(Res)};
-control($b, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- Res = unprefixed_integer(A, F, Adj, base(P), Pad, true),
- {Res, L - lists:flatlength(Res)};
-control($B, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- Res = unprefixed_integer(A, F, Adj, base(P), Pad, false),
- {Res, L - lists:flatlength(Res)};
-control($x, [A, Prefix], F, Adj, P, Pad, _Enc, L) when
- is_integer(A),
- is_atom(Prefix)
-->
- Res = prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), true),
- {Res, L - lists:flatlength(Res)};
-control($x, [A, Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- %Check if Prefix is a character list
- true = io_lib:deep_char_list(Prefix),
- Res = prefixed_integer(A, F, Adj, base(P), Pad, Prefix, true),
- {Res, L - lists:flatlength(Res)};
-control($X, [A, Prefix], F, Adj, P, Pad, _Enc, L) when
- is_integer(A),
- is_atom(Prefix)
-->
- Res = prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), false),
- {Res, L - lists:flatlength(Res)};
-control($X, [A, Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- %Check if Prefix is a character list
- true = io_lib:deep_char_list(Prefix),
- Res = prefixed_integer(A, F, Adj, base(P), Pad, Prefix, false),
- {Res, L - lists:flatlength(Res)};
-control($+, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- Base = base(P),
- Prefix = [integer_to_list(Base), $#],
- Res = prefixed_integer(A, F, Adj, Base, Pad, Prefix, true),
- {Res, L - lists:flatlength(Res)};
-control($#, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- Base = base(P),
- Prefix = [integer_to_list(Base), $#],
- Res = prefixed_integer(A, F, Adj, Base, Pad, Prefix, false),
- {Res, L - lists:flatlength(Res)};
-control($c, [A], F, Adj, P, Pad, unicode, L) when is_integer(A) ->
- Res = char(A, F, Adj, P, Pad),
- {Res, L - lists:flatlength(Res)};
-control($c, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- Res = char(A band 255, F, Adj, P, Pad),
- {Res, L - lists:flatlength(Res)};
-control($~, [], F, Adj, P, Pad, _Enc, L) ->
- Res = char($~, F, Adj, P, Pad),
- {Res, L - lists:flatlength(Res)};
-control($n, [], F, Adj, P, Pad, _Enc, L) ->
- Res = newline(F, Adj, P, Pad),
- {Res, L - lists:flatlength(Res)};
-control($i, [_A], _F, _Adj, _P, _Pad, _Enc, L) ->
- {[], L};
-control($s, [A], F, Adj, P, Pad, _Enc, L) when is_atom(A) ->
- Res = string(atom_to_list(A), F, Adj, P, Pad),
- {Res, L - lists:flatlength(Res)};
-control(C, A, F, Adj, P, Pad, Enc, L) ->
- %% save this for later - these are all the 'large' terms
- {{C, A, F, Adj, P, Pad, Enc}, L}.
-
-control2($w, [A], F, Adj, P, Pad, _Enc, L) ->
- Term = couch_log_trunc_io:fprint(A, L, [{lists_as_strings, false}]),
- Res = term(Term, F, Adj, P, Pad),
- {Res, lists:flatlength(Res)};
-control2($p, [A], _F, _Adj, _P, _Pad, _Enc, L) ->
- Term = couch_log_trunc_io:fprint(A, L, [{lists_as_strings, true}]),
- {Term, lists:flatlength(Term)};
-control2($W, [A, Depth], F, Adj, P, Pad, _Enc, L) when is_integer(Depth) ->
- Term = couch_log_trunc_io:fprint(A, L, [{depth, Depth}, {lists_as_strings, false}]),
- Res = term(Term, F, Adj, P, Pad),
- {Res, lists:flatlength(Res)};
-control2($P, [A, Depth], _F, _Adj, _P, _Pad, _Enc, L) when is_integer(Depth) ->
- Term = couch_log_trunc_io:fprint(A, L, [{depth, Depth}, {lists_as_strings, true}]),
- {Term, lists:flatlength(Term)};
-control2($s, [L0], F, Adj, P, Pad, latin1, L) ->
- List = couch_log_trunc_io:fprint(iolist_to_chars(L0), L, [{force_strings, true}]),
- Res = string(List, F, Adj, P, Pad),
- {Res, lists:flatlength(Res)};
-control2($s, [L0], F, Adj, P, Pad, unicode, L) ->
- List = couch_log_trunc_io:fprint(cdata_to_chars(L0), L, [{force_strings, true}]),
- Res = uniconv(string(List, F, Adj, P, Pad)),
- {Res, lists:flatlength(Res)};
-control2($r, [R], F, Adj, P, Pad, _Enc, _L) ->
- List = couch_log_formatter:format_reason(R),
- Res = string(List, F, Adj, P, Pad),
- {Res, lists:flatlength(Res)}.
-
-iolist_to_chars([C | Cs]) when is_integer(C), C >= $\000, C =< $\377 ->
- [C | iolist_to_chars(Cs)];
-iolist_to_chars([I | Cs]) ->
- [iolist_to_chars(I) | iolist_to_chars(Cs)];
-iolist_to_chars([]) ->
- [];
-iolist_to_chars(B) when is_binary(B) ->
- binary_to_list(B).
-
-cdata_to_chars([C | Cs]) when is_integer(C), C >= $\000 ->
- [C | cdata_to_chars(Cs)];
-cdata_to_chars([I | Cs]) ->
- [cdata_to_chars(I) | cdata_to_chars(Cs)];
-cdata_to_chars([]) ->
- [];
-cdata_to_chars(B) when is_binary(B) ->
- case catch unicode:characters_to_list(B) of
- L when is_list(L) -> L;
- _ -> binary_to_list(B)
- end.
-
-make_options([], Options) ->
- Options;
-make_options([{chomp, Bool} | T], Options) when is_boolean(Bool) ->
- make_options(T, Options#options{chomp = Bool}).
-
--ifdef(UNICODE_AS_BINARIES).
-uniconv(C) ->
- unicode:characters_to_binary(C, unicode).
--else.
-uniconv(C) ->
- C.
--endif.
-%% Default integer base
-base(none) ->
- 10;
-base(B) when is_integer(B) ->
- B.
-
-%% term(TermList, Field, Adjust, Precision, PadChar)
-%% Output the characters in a term.
-%% Adjust the characters within the field if the length is less than Max,
-%% padding with PadChar.
-
-term(T, none, _Adj, none, _Pad) ->
- T;
-term(T, none, Adj, P, Pad) ->
- term(T, P, Adj, P, Pad);
-term(T, F, Adj, P0, Pad) ->
- L = lists:flatlength(T),
- P =
- case P0 of
- none -> erlang:min(L, F);
- _ -> P0
- end,
- if
- L > P ->
- adjust(chars($*, P), chars(Pad, F - P), Adj);
- F >= P ->
- adjust(T, chars(Pad, F - L), Adj)
- end.
-
-%% fwrite_e(Float, Field, Adjust, Precision, PadChar)
-
-%Default values
-fwrite_e(Fl, none, Adj, none, Pad) ->
- fwrite_e(Fl, none, Adj, 6, Pad);
-fwrite_e(Fl, none, _Adj, P, _Pad) when P >= 2 ->
- float_e(Fl, float_data(Fl), P);
-fwrite_e(Fl, F, Adj, none, Pad) ->
- fwrite_e(Fl, F, Adj, 6, Pad);
-fwrite_e(Fl, F, Adj, P, Pad) when P >= 2 ->
- term(float_e(Fl, float_data(Fl), P), F, Adj, F, Pad).
-
-%Negative numbers
-float_e(Fl, Fd, P) when Fl < 0.0 ->
- [$- | float_e(-Fl, Fd, P)];
-float_e(_Fl, {Ds, E}, P) ->
- case float_man(Ds, 1, P - 1) of
- {[$0 | Fs], true} -> [[$1 | Fs] | float_exp(E)];
- {Fs, false} -> [Fs | float_exp(E - 1)]
- end.
-
-%% float_man([Digit], Icount, Dcount) -> {[Chars],CarryFlag}.
-%% Generate the characters in the mantissa from the digits with Icount
-%% characters before the '.' and Dcount decimals. Handle carry and let
-%% caller decide what to do at top.
-
-float_man(Ds, 0, Dc) ->
- {Cs, C} = float_man(Ds, Dc),
- {[$. | Cs], C};
-float_man([D | Ds], I, Dc) ->
- case float_man(Ds, I - 1, Dc) of
- {Cs, true} when D =:= $9 -> {[$0 | Cs], true};
- {Cs, true} -> {[D + 1 | Cs], false};
- {Cs, false} -> {[D | Cs], false}
- end;
-%Pad with 0's
-float_man([], I, Dc) ->
- {string:chars($0, I, [$. | string:chars($0, Dc)]), false}.
-
-float_man([D | _], 0) when D >= $5 -> {[], true};
-float_man([_ | _], 0) ->
- {[], false};
-float_man([D | Ds], Dc) ->
- case float_man(Ds, Dc - 1) of
- {Cs, true} when D =:= $9 -> {[$0 | Cs], true};
- {Cs, true} -> {[D + 1 | Cs], false};
- {Cs, false} -> {[D | Cs], false}
- end;
-%Pad with 0's
-float_man([], Dc) ->
- {string:chars($0, Dc), false}.
-
-%% float_exp(Exponent) -> [Char].
-%% Generate the exponent of a floating point number. Always include sign.
-
-float_exp(E) when E >= 0 ->
- [$e, $+ | integer_to_list(E)];
-float_exp(E) ->
- [$e | integer_to_list(E)].
-
-%% fwrite_f(FloatData, Field, Adjust, Precision, PadChar)
-
-%Default values
-fwrite_f(Fl, none, Adj, none, Pad) ->
- fwrite_f(Fl, none, Adj, 6, Pad);
-fwrite_f(Fl, none, _Adj, P, _Pad) when P >= 1 ->
- float_f(Fl, float_data(Fl), P);
-fwrite_f(Fl, F, Adj, none, Pad) ->
- fwrite_f(Fl, F, Adj, 6, Pad);
-fwrite_f(Fl, F, Adj, P, Pad) when P >= 1 ->
- term(float_f(Fl, float_data(Fl), P), F, Adj, F, Pad).
-
-float_f(Fl, Fd, P) when Fl < 0.0 ->
- [$- | float_f(-Fl, Fd, P)];
-float_f(Fl, {Ds, E}, P) when E =< 0 ->
- %Prepend enough 0's
- float_f(Fl, {string:chars($0, -E + 1, Ds), 1}, P);
-float_f(_Fl, {Ds, E}, P) ->
- case float_man(Ds, E, P) of
- %Handle carry
- {Fs, true} -> "1" ++ Fs;
- {Fs, false} -> Fs
- end.
-
-%% float_data([FloatChar]) -> {[Digit],Exponent}
-
-float_data(Fl) ->
- float_data(float_to_list(Fl), []).
-
-float_data([$e | E], Ds) ->
- {lists:reverse(Ds), list_to_integer(E) + 1};
-float_data([D | Cs], Ds) when D >= $0, D =< $9 ->
- float_data(Cs, [D | Ds]);
-float_data([_ | Cs], Ds) ->
- float_data(Cs, Ds).
-
-%% fwrite_g(Float, Field, Adjust, Precision, PadChar)
-%% Use the f form if Float is >= 0.1 and < 1.0e4,
-%% and it prints correctly in the f form, else the e form.
-%% Precision always means the # of significant digits.
-
-fwrite_g(Fl, F, Adj, none, Pad) ->
- fwrite_g(Fl, F, Adj, 6, Pad);
-fwrite_g(Fl, F, Adj, P, Pad) when P >= 1 ->
- A = abs(Fl),
- E =
- if
- A < 1.0e-1 -> -2;
- A < 1.0e0 -> -1;
- A < 1.0e1 -> 0;
- A < 1.0e2 -> 1;
- A < 1.0e3 -> 2;
- A < 1.0e4 -> 3;
- true -> fwrite_f
- end,
- if
- P =< 1, E =:= -1;
- P - 1 > E, E >= -1 ->
- fwrite_f(Fl, F, Adj, P - 1 - E, Pad);
- P =< 1 ->
- fwrite_e(Fl, F, Adj, 2, Pad);
- true ->
- fwrite_e(Fl, F, Adj, P, Pad)
- end.
-
-%% string(String, Field, Adjust, Precision, PadChar)
-
-string(S, none, _Adj, none, _Pad) ->
- S;
-string(S, F, Adj, none, Pad) ->
- string_field(S, F, Adj, lists:flatlength(S), Pad);
-string(S, none, _Adj, P, Pad) ->
- string_field(S, P, left, lists:flatlength(S), Pad);
-string(S, F, Adj, P, Pad) when F >= P ->
- N = lists:flatlength(S),
- if
- F > P ->
- if
- N > P ->
- adjust(flat_trunc(S, P), chars(Pad, F - P), Adj);
- N < P ->
- adjust([S | chars(Pad, P - N)], chars(Pad, F - P), Adj);
- % N == P
- true ->
- adjust(S, chars(Pad, F - P), Adj)
- end;
- % F == P
- true ->
- string_field(S, F, Adj, N, Pad)
- end.
-
-string_field(S, F, _Adj, N, _Pad) when N > F ->
- flat_trunc(S, F);
-string_field(S, F, Adj, N, Pad) when N < F ->
- adjust(S, chars(Pad, F - N), Adj);
-% N == F
-string_field(S, _, _, _, _) ->
- S.
-
-%% unprefixed_integer(Int, Field, Adjust, Base, PadChar, Lowercase)
-%% -> [Char].
-
-unprefixed_integer(Int, F, Adj, Base, Pad, Lowercase) when
- Base >= 2, Base =< 1 + $Z - $A + 10
-->
- if
- Int < 0 ->
- S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase),
- term([$- | S], F, Adj, none, Pad);
- true ->
- S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
- term(S, F, Adj, none, Pad)
- end.
-
-%% prefixed_integer(Int, Field, Adjust, Base, PadChar, Prefix, Lowercase)
-%% -> [Char].
-
-prefixed_integer(Int, F, Adj, Base, Pad, Prefix, Lowercase) when
- Base >= 2, Base =< 1 + $Z - $A + 10
-->
- if
- Int < 0 ->
- S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase),
- term([$-, Prefix | S], F, Adj, none, Pad);
- true ->
- S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
- term([Prefix | S], F, Adj, none, Pad)
- end.
-
-%% char(Char, Field, Adjust, Precision, PadChar) -> [Char].
-
-char(C, none, _Adj, none, _Pad) ->
- [C];
-char(C, F, _Adj, none, _Pad) ->
- chars(C, F);
-char(C, none, _Adj, P, _Pad) ->
- chars(C, P);
-char(C, F, Adj, P, Pad) when F >= P ->
- adjust(chars(C, P), chars(Pad, F - P), Adj).
-
-%% newline(Field, Adjust, Precision, PadChar) -> [Char].
-
-newline(none, _Adj, _P, _Pad) -> "\n";
-newline(F, right, _P, _Pad) -> chars($\n, F).
-
-%%
-%% Utilities
-%%
-
-adjust(Data, [], _) -> Data;
-adjust(Data, Pad, left) -> [Data | Pad];
-adjust(Data, Pad, right) -> [Pad | Data].
-
-%% Flatten and truncate a deep list to at most N elements.
-flat_trunc(List, N) when is_integer(N), N >= 0 ->
- flat_trunc(List, N, []).
-
-flat_trunc(L, 0, R) when is_list(L) ->
- lists:reverse(R);
-flat_trunc([H | T], N, R) ->
- flat_trunc(T, N - 1, [H | R]);
-flat_trunc([], _, R) ->
- lists:reverse(R).
-
-%% A deep version of string:chars/2,3
-
-chars(_C, 0) ->
- [];
-chars(C, 1) ->
- [C];
-chars(C, 2) ->
- [C, C];
-chars(C, 3) ->
- [C, C, C];
-chars(C, N) when is_integer(N), (N band 1) =:= 0 ->
- S = chars(C, N bsr 1),
- [S | S];
-chars(C, N) when is_integer(N) ->
- S = chars(C, N bsr 1),
- [C, S | S].
-
-%chars(C, N, Tail) ->
-% [chars(C, N)|Tail].
-
-%% Lowercase conversion
-
-cond_lowercase(String, true) ->
- lowercase(String);
-cond_lowercase(String, false) ->
- String.
-
-lowercase([H | T]) when is_integer(H), H >= $A, H =< $Z ->
- [(H - $A + $a) | lowercase(T)];
-lowercase([H | T]) ->
- [H | lowercase(T)];
-lowercase([]) ->
- [].
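Reviewer note: the chars/2 clauses above build a run of N pad characters as a deep iolist in O(log N) conses by halving N and sharing sublists. A minimal standalone sketch of the same doubling trick (module name hypothetical, not part of this tree):

-module(chars_sketch).
-export([chars/2]).

%% Build a deep list containing N copies of C by binary doubling.
chars(_C, 0) ->
    [];
chars(C, 1) ->
    [C];
chars(C, N) when is_integer(N), N band 1 =:= 0 ->
    S = chars(C, N bsr 1),
    [S | S];
chars(C, N) when is_integer(N) ->
    S = chars(C, N bsr 1),
    [C, S | S].

%% 1> lists:flatten(chars_sketch:chars($x, 5)).
%% "xxxxx"

Returning a deep list instead of a flat string keeps padding construction cheap; the result is flattened, or written out as an iolist, only at the end.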
diff --git a/src/couch_log/src/couch_log_util.erl b/src/couch_log/src/couch_log_util.erl
deleted file mode 100644
index 8be11e12d..000000000
--- a/src/couch_log/src/couch_log_util.erl
+++ /dev/null
@@ -1,147 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_util).
-
--export([
- should_log/1,
- iso8601_timestamp/0,
- get_msg_id/0,
-
- level_to_integer/1,
- level_to_atom/1,
- level_to_string/1,
-
- string_p/1
-]).
-
--include("couch_log.hrl").
-
--spec should_log(#log_entry{} | atom()) -> boolean().
-should_log(#log_entry{level = Level}) ->
- should_log(Level);
-should_log(Level) ->
- level_to_integer(Level) >= couch_log_config:get(level_int).
-
--spec iso8601_timestamp() -> string().
-iso8601_timestamp() ->
- {_, _, Micro} = Now = os:timestamp(),
- {{Year, Month, Date}, {Hour, Minute, Second}} = calendar:now_to_datetime(Now),
- Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
- io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
-
--spec get_msg_id() -> string().
-get_msg_id() ->
- case erlang:get(nonce) of
- undefined -> "--------";
- MsgId -> MsgId
- end.
-
--spec level_to_integer(atom() | string() | integer()) -> integer().
-level_to_integer(L) when L >= 0, L =< 9 -> L;
-level_to_integer(debug) -> 1;
-level_to_integer(info) -> 2;
-level_to_integer(notice) -> 3;
-level_to_integer(warning) -> 4;
-level_to_integer(warn) -> 4;
-level_to_integer(error) -> 5;
-level_to_integer(err) -> 5;
-level_to_integer(critical) -> 6;
-level_to_integer(crit) -> 6;
-level_to_integer(alert) -> 7;
-level_to_integer(emergency) -> 8;
-level_to_integer(emerg) -> 8;
-level_to_integer(none) -> 9;
-level_to_integer("debug") -> 1;
-level_to_integer("info") -> 2;
-level_to_integer("notice") -> 3;
-level_to_integer("warning") -> 4;
-level_to_integer("warn") -> 4;
-level_to_integer("error") -> 5;
-level_to_integer("err") -> 5;
-level_to_integer("critical") -> 6;
-level_to_integer("crit") -> 6;
-level_to_integer("alert") -> 7;
-level_to_integer("emergency") -> 8;
-level_to_integer("emerg") -> 8;
-level_to_integer("none") -> 9;
-level_to_integer("1") -> 1;
-level_to_integer("2") -> 2;
-level_to_integer("3") -> 3;
-level_to_integer("4") -> 4;
-level_to_integer("5") -> 5;
-level_to_integer("6") -> 6;
-level_to_integer("7") -> 7;
-level_to_integer("8") -> 8;
-level_to_integer("9") -> 9.
-
--spec level_to_atom(atom() | string() | integer()) -> atom().
-level_to_atom(L) when is_atom(L) -> L;
-level_to_atom("1") -> debug;
-level_to_atom("debug") -> debug;
-level_to_atom("2") -> info;
-level_to_atom("info") -> info;
-level_to_atom("3") -> notice;
-level_to_atom("notice") -> notice;
-level_to_atom("4") -> warning;
-level_to_atom("warning") -> warning;
-level_to_atom("warn") -> warning;
-level_to_atom("5") -> error;
-level_to_atom("error") -> error;
-level_to_atom("err") -> error;
-level_to_atom("6") -> critical;
-level_to_atom("critical") -> critical;
-level_to_atom("crit") -> critical;
-level_to_atom("7") -> alert;
-level_to_atom("alert") -> alert;
-level_to_atom("8") -> emergency;
-level_to_atom("emergency") -> emergency;
-level_to_atom("emerg") -> emergency;
-level_to_atom("9") -> none;
-level_to_atom("none") -> none;
-level_to_atom(V) when is_integer(V) -> level_to_atom(integer_to_list(V));
-level_to_atom(V) when is_list(V) -> info.
-
-level_to_string(L) when is_atom(L) -> atom_to_list(L);
-level_to_string(L) -> atom_to_list(level_to_atom(L)).
-
-% From error_logger_file_h via lager_stdlib.erl
-string_p([]) ->
- false;
-string_p(Term) ->
- string_p1(Term).
-
-string_p1([H | T]) when is_integer(H), H >= $\s, H < 256 ->
- string_p1(T);
-string_p1([$\n | T]) ->
- string_p1(T);
-string_p1([$\r | T]) ->
- string_p1(T);
-string_p1([$\t | T]) ->
- string_p1(T);
-string_p1([$\v | T]) ->
- string_p1(T);
-string_p1([$\b | T]) ->
- string_p1(T);
-string_p1([$\f | T]) ->
- string_p1(T);
-string_p1([$\e | T]) ->
- string_p1(T);
-string_p1([H | T]) when is_list(H) ->
- case string_p1(H) of
- true -> string_p1(T);
- _ -> false
- end;
-string_p1([]) ->
- true;
-string_p1(_) ->
- false.
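Reviewer note: the level filter above boils down to a single integer comparison; a message is written when its level maps to a value at least as large as the configured level_int, so with the default of info, debug is dropped while warning and above pass. A standalone sketch of the same check, using the atom mapping shown in level_to_integer/1 (module name hypothetical):

-module(level_sketch).
-export([should_log/2]).

%% Same numeric ordering as level_to_integer/1 above (atom forms only).
to_int(debug) -> 1;
to_int(info) -> 2;
to_int(notice) -> 3;
to_int(warning) -> 4;
to_int(error) -> 5;
to_int(critical) -> 6;
to_int(alert) -> 7;
to_int(emergency) -> 8;
to_int(none) -> 9.

%% A message is emitted when its level is at or above the configured one.
should_log(MsgLevel, ConfiguredLevel) ->
    to_int(MsgLevel) >= to_int(ConfiguredLevel).

%% 1> level_sketch:should_log(warning, info).
%% true
%% 2> level_sketch:should_log(debug, error).
%% false

In iso8601_timestamp/0, each ~4.10.0B / ~2.10.0B control is a base-10 integer zero-padded to the given width, which is what keeps the timestamp fields fixed-width.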
diff --git a/src/couch_log/src/couch_log_writer.erl b/src/couch_log/src/couch_log_writer.erl
deleted file mode 100644
index 18bb557ae..000000000
--- a/src/couch_log/src/couch_log_writer.erl
+++ /dev/null
@@ -1,73 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% @doc Modules wishing to handle writing log
-% messages should implement this behavior.
-
--module(couch_log_writer).
-
--export([
- init/0,
- terminate/2,
- write/2
-]).
-
--include("couch_log.hrl").
-
--define(DEFAULT_WRITER, couch_log_writer_stderr).
-
--callback init() -> {ok, State :: term()}.
--callback terminate(Reason :: term(), State :: term()) -> ok.
--callback write(LogEntry :: #log_entry{}, State :: term()) ->
- {ok, NewState :: term()}.
-
--spec init() -> {atom(), term()}.
-init() ->
- Writer = get_writer_mod(),
- {ok, St} = Writer:init(),
- {Writer, St}.
-
--spec terminate(term(), {atom(), term()}) -> ok.
-terminate(Reason, {Writer, St}) ->
- ok = Writer:terminate(Reason, St).
-
--spec write(#log_entry{}, {atom(), term()}) -> {atom(), term()}.
-write(Entry, {Writer, St}) ->
- {ok, NewSt} = Writer:write(Entry, St),
- {Writer, NewSt}.
-
-get_writer_mod() ->
- WriterStr = config:get("log", "writer", "stderr"),
- ModName1 = to_atom("couch_log_writer_" ++ WriterStr),
- case mod_exists(ModName1) of
- true ->
- ModName1;
- false ->
- ModName2 = to_atom(WriterStr),
- case mod_exists(ModName2) of
- true ->
- ModName2;
- false ->
- ?DEFAULT_WRITER
- end
- end.
-
-to_atom(Str) ->
- try list_to_existing_atom(Str) of
- Atom -> Atom
- catch
- _:_ ->
- undefined
- end.
-
-mod_exists(ModName) ->
- code:which(ModName) /= non_existing.
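Reviewer note: the three -callback specs above are the whole writer contract, and get_writer_mod/0 resolves the configured name by prefixing couch_log_writer_, falling back to the literal name and finally to the stderr writer. A minimal sketch of a custom writer (module name and output format illustrative, assuming couch_log.hrl is on the include path):

-module(couch_log_writer_mywriter).
-behaviour(couch_log_writer).

-export([
    init/0,
    terminate/2,
    write/2
]).

-include("couch_log.hrl").

init() ->
    %% No writer state is needed for this sketch.
    {ok, nil}.

terminate(_Reason, _St) ->
    ok.

write(#log_entry{level = Level, msg = Msg}, St) ->
    %% A real writer would also emit the pid, msg_id and timestamp.
    io:format(standard_error, "[~p] ~s~n", [Level, Msg]),
    {ok, St}.

With such a module compiled, setting writer = mywriter in the [log] section would select it through the prefixing rule above.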
diff --git a/src/couch_log/src/couch_log_writer_file.erl b/src/couch_log/src/couch_log_writer_file.erl
deleted file mode 100644
index 9b7255050..000000000
--- a/src/couch_log/src/couch_log_writer_file.erl
+++ /dev/null
@@ -1,131 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_file).
--behaviour(couch_log_writer).
-
--export([
- init/0,
- terminate/2,
- write/2
-]).
-
--include_lib("kernel/include/file.hrl").
--include("couch_log.hrl").
-
--record(st, {
- file_path,
- fd,
- inode,
- last_check
-}).
-
--define(CHECK_INTERVAL, 30000000).
-
--ifdef(TEST).
--export([
- maybe_reopen/1
-]).
--endif.
-
-init() ->
- FilePath = config:get("log", "file", "./couch.log"),
- Opts = [append, raw] ++ buffer_opt(),
- case filelib:ensure_dir(FilePath) of
- ok ->
- case file:open(FilePath, Opts) of
- {ok, Fd} ->
- case file:read_file_info(FilePath) of
- {ok, FInfo} ->
- {ok, #st{
- file_path = FilePath,
- fd = Fd,
- inode = FInfo#file_info.inode,
- last_check = os:timestamp()
- }};
- FInfoError ->
- ok = file:close(Fd),
- FInfoError
- end;
- OpenError ->
- OpenError
- end;
- EnsureDirError ->
- EnsureDirError
- end.
-
-terminate(_, St) ->
- % Apparently delayed_write can require two closes
- file:close(St#st.fd),
- file:close(St#st.fd),
- ok.
-
-write(Entry, St) ->
- {ok, NewSt} = maybe_reopen(St),
- #log_entry{
- level = Level,
- pid = Pid,
- msg = Msg,
- msg_id = MsgId,
- time_stamp = TimeStamp
- } = Entry,
- Fmt = "[~s] ~s ~s ~p ~s ",
- Args = [
- couch_log_util:level_to_string(Level),
- TimeStamp,
- node(),
- Pid,
- MsgId
- ],
- MsgSize = couch_log_config:get(max_message_size),
- Data = couch_log_trunc_io:format(Fmt, Args, MsgSize),
- ok = file:write(NewSt#st.fd, [Data, Msg, "\n"]),
- {ok, NewSt}.
-
-buffer_opt() ->
- WriteBuffer = config:get_integer("log", "write_buffer", 0),
- WriteDelay = config:get_integer("log", "write_delay", 0),
- case {WriteBuffer, WriteDelay} of
- {B, D} when is_integer(B), is_integer(D), B > 0, D > 0 ->
- [{delayed_write, B, D}];
- _ ->
- []
- end.
-
-maybe_reopen(St) ->
- #st{
- last_check = LastCheck
- } = St,
- Now = os:timestamp(),
- case timer:now_diff(Now, LastCheck) > ?CHECK_INTERVAL of
- true -> reopen(St);
- false -> {ok, St}
- end.
-
-reopen(St) ->
- case file:read_file_info(St#st.file_path) of
- {ok, FInfo} ->
- NewINode = FInfo#file_info.inode,
- case NewINode == St#st.inode of
- true ->
- % No rotation necessary
- {ok, St};
- false ->
- % File was moved and re-created
- terminate(rotating, St),
- init()
- end;
- _ ->
- % File was moved or deleted
- terminate(rotating, St),
- init()
- end.
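Reviewer note: timer:now_diff/2 returns microseconds, so the ?CHECK_INTERVAL of 30000000 above means the log file is re-stat'ed at most once every 30 seconds, and rotation is detected by comparing inodes rather than watching the path. A small standalone sketch of the same throttle (names hypothetical):

-module(reopen_throttle).
-export([maybe_check/2]).

%% timer:now_diff/2 is in microseconds; allow one stat check per 30 s.
-define(CHECK_INTERVAL_USEC, 30000000).

%% Returns {check, Now} when enough time has passed, else {skip, LastCheck}.
maybe_check(LastCheck, Now) ->
    case timer:now_diff(Now, LastCheck) > ?CHECK_INTERVAL_USEC of
        true -> {check, Now};
        false -> {skip, LastCheck}
    end.

%% 1> T0 = os:timestamp().
%% 2> reopen_throttle:maybe_check(T0, os:timestamp()).
%% typically {skip, T0}, since far less than 30 s has passed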
diff --git a/src/couch_log/src/couch_log_writer_journald.erl b/src/couch_log/src/couch_log_writer_journald.erl
deleted file mode 100644
index c2bdd940c..000000000
--- a/src/couch_log/src/couch_log_writer_journald.erl
+++ /dev/null
@@ -1,63 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_journald).
--behaviour(couch_log_writer).
-
--export([
- init/0,
- terminate/2,
- write/2
-]).
-
--include("couch_log.hrl").
-
-init() ->
- {ok, nil}.
-
-terminate(_, _St) ->
- ok.
-
-write(Entry, St) ->
- #log_entry{
- level = Level,
- pid = Pid,
- msg = Msg,
- msg_id = MsgId
- } = Entry,
- Fmt = "<~B>~s ~p ~s ",
- Args = [
- level_for_journald(Level),
- node(),
- Pid,
- MsgId
- ],
- MsgSize = couch_log_config:get(max_message_size),
- Data = couch_log_trunc_io:format(Fmt, Args, MsgSize),
- io:format(standard_error, [Data, Msg, "\n"], []),
- {ok, St}.
-
-% log level mapping from sd-daemon(3)
-% https://www.freedesktop.org/software/systemd/man/sd-daemon.html
--spec level_for_journald(atom()) -> integer().
-level_for_journald(Level) when is_atom(Level) ->
- case Level of
- debug -> 7;
- info -> 6;
- notice -> 5;
- warning -> 4;
- error -> 3;
- critical -> 2;
- alert -> 1;
- emergency -> 0;
- _ -> 3
- end.
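Reviewer note: this writer leans on the sd-daemon(3) convention that a line written to stderr starting with "<N>" is logged by journald at priority N, so level_for_journald/1 only has to map Erlang levels onto the 0..7 syslog severities. A rough shell check of the prefix built by the Fmt/Args pair above (pid and msg_id values illustrative):

%% error maps to priority 3 in level_for_journald/1.
1> lists:flatten(io_lib:format("<~B>~s ~p ~s ", [3, node(), self(), "--------"])).
"<3>nonode@nohost <0.88.0> -------- "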
diff --git a/src/couch_log/src/couch_log_writer_stderr.erl b/src/couch_log/src/couch_log_writer_stderr.erl
deleted file mode 100644
index 01e350971..000000000
--- a/src/couch_log/src/couch_log_writer_stderr.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_stderr).
--behaviour(couch_log_writer).
-
--export([
- init/0,
- terminate/2,
- write/2
-]).
-
--include("couch_log.hrl").
-
-init() ->
- {ok, nil}.
-
-terminate(_, _St) ->
- ok.
-
-write(Entry, St) ->
- #log_entry{
- level = Level,
- pid = Pid,
- msg = Msg,
- msg_id = MsgId,
- time_stamp = TimeStamp
- } = Entry,
- Fmt = "[~s] ~s ~s ~p ~s ",
- Args = [
- couch_log_util:level_to_string(Level),
- TimeStamp,
- node(),
- Pid,
- MsgId
- ],
- MsgSize = couch_log_config:get(max_message_size),
- Data = couch_log_trunc_io:format(Fmt, Args, MsgSize),
- io:format(standard_error, [Data, Msg, "\n"], []),
- {ok, St}.
diff --git a/src/couch_log/src/couch_log_writer_syslog.erl b/src/couch_log/src/couch_log_writer_syslog.erl
deleted file mode 100644
index b95cf018c..000000000
--- a/src/couch_log/src/couch_log_writer_syslog.erl
+++ /dev/null
@@ -1,201 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_syslog).
--behavior(couch_log_writer).
-
--export([
- init/0,
- terminate/2,
- write/2
-]).
-
--include("couch_log.hrl").
-
--record(st, {
- socket,
- host,
- port,
- hostname,
- os_pid,
- appid,
- facility
-}).
-
--define(SYSLOG_VERSION, 1).
-
--ifdef(TEST).
--export([
- get_facility/1,
- get_level/1
-]).
--endif.
-
-init() ->
- {ok, Socket} = gen_udp:open(0),
-
- Host =
- case config:get("log", "syslog_host") of
- undefined ->
- undefined;
- SysLogHost ->
- case inet:getaddr(SysLogHost, inet) of
- {ok, Address} ->
- Address;
- _ ->
- undefined
- end
- end,
-
- {ok, #st{
- socket = Socket,
- host = Host,
- port = config:get_integer("log", "syslog_port", 514),
- hostname = net_adm:localhost(),
- os_pid = os:getpid(),
- appid = config:get("log", "syslog_appid", "couchdb"),
- facility = get_facility(config:get("log", "syslog_facility", "local2"))
- }}.
-
-terminate(_Reason, St) ->
- gen_udp:close(St#st.socket).
-
-write(Entry, St) ->
- #log_entry{
- level = Level,
- pid = Pid,
- msg = Msg,
- msg_id = MsgId,
- time_stamp = TimeStamp
- } = Entry,
- Fmt = "<~B>~B ~s ~s ~s ~p ~s - ",
- Args = [
- St#st.facility bor get_level(Level),
- ?SYSLOG_VERSION,
- TimeStamp,
- St#st.hostname,
- St#st.appid,
- Pid,
- MsgId
- ],
- Pre = io_lib:format(Fmt, Args),
- ok = send(St, [Pre, Msg, $\n]),
- {ok, St}.
-
-send(#st{host = undefined}, Packet) ->
- io:format(standard_error, "~s", [Packet]);
-send(St, Packet) ->
- #st{
- socket = Socket,
- host = Host,
- port = Port
- } = St,
- gen_udp:send(Socket, Host, Port, Packet).
-
-get_facility(Name) ->
- FacId =
- case Name of
- % Kernel messages
- "kern" ->
- 0;
- % Random user-level messages
- "user" ->
- 1;
- % Mail system
- "mail" ->
- 2;
- % System daemons
- "daemon" ->
- 3;
- % Security/Authorization messages
- "auth" ->
- 4;
- % Internal Syslog messages
- "syslog" ->
- 5;
- % Line printer subsystem
- "lpr" ->
- 6;
- % Network news subsystems
- "news" ->
- 7;
- % UUCP subsystem
- "uucp" ->
- 8;
- % Clock daemon
- "clock" ->
- 9;
- % Security/Authorization messages
- "authpriv" ->
- 10;
- % FTP daemon
- "ftp" ->
- 11;
- % NTP subsystem
- "ntp" ->
- 12;
- % Log audit
- "audit" ->
- 13;
- % Log alert
- "alert" ->
- 14;
- % Scheduling daemon
- "cron" ->
- 15;
- % Local use 0
- "local0" ->
- 16;
- % Local use 1
- "local1" ->
- 17;
- % Local use 2
- "local2" ->
- 18;
- % Local use 3
- "local3" ->
- 19;
- % Local use 4
- "local4" ->
- 20;
- % Local use 5
- "local5" ->
- 21;
- % Local use 6
- "local6" ->
- 22;
- % Local use 7
- "local7" ->
- 23;
- _ ->
- try list_to_integer(Name) of
- N when N >= 0, N =< 23 -> N;
- _ -> 23
- catch
- _:_ ->
- 23
- end
- end,
- FacId bsl 3.
-
-get_level(Name) when is_atom(Name) ->
- case Name of
- debug -> 7;
- info -> 6;
- notice -> 5;
- warning -> 4;
- error -> 3;
- critical -> 2;
- alert -> 1;
- emergency -> 0;
- _ -> 3
- end.
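Reviewer note: the syslog PRI value is the facility code shifted left by three bits OR-ed with the severity, which is exactly how get_facility/1 and get_level/1 are combined in write/2 above. Worked through with the defaults shown in init/0 (facility local2, an error-level entry):

1> Facility = 18 bsl 3.   %% "local2" maps to 18, shifted left 3 bits
144
2> Facility bor 3.        %% severity for error is 3
147

So the datagram for such an entry begins with <147>1, the trailing 1 being ?SYSLOG_VERSION.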
diff --git a/src/couch_log/test/eunit/couch_log_config_listener_test.erl b/src/couch_log/test/eunit/couch_log_config_listener_test.erl
deleted file mode 100644
index c955972ff..000000000
--- a/src/couch_log/test/eunit/couch_log_config_listener_test.erl
+++ /dev/null
@@ -1,79 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_config_listener_test).
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(TIMEOUT, 1000).
-
-couch_log_config_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- fun check_restart_listener/0,
- fun check_ignore_non_log/0
- ]}.
-
-check_restart_listener() ->
- Listener1 = get_listener(),
- ?assert(is_process_alive(Listener1)),
-
- Handler1 = get_handler(),
- ?assertNotEqual(not_found, Handler1),
- Ref = erlang:monitor(process, Listener1),
- ok = gen_event:delete_handler(config_event, get_handler(), testing),
-
- receive
- {'DOWN', Ref, process, _, _} ->
- ?assertNot(is_process_alive(Listener1))
- after ?TIMEOUT ->
- erlang:error({timeout, config_listener_mon_death})
- end,
-
- NewHandler = test_util:wait(
- fun() ->
- case get_handler() of
- not_found -> wait;
- Reply -> Reply
- end
- end,
- ?TIMEOUT,
- 20
- ),
- ?assertEqual(Handler1, NewHandler),
-
- Listener2 = get_listener(),
- ?assert(is_process_alive(Listener2)),
- ?assertNotEqual(Listener1, Listener2),
- ok.
-
-check_ignore_non_log() ->
- Run = fun() ->
- couch_log_test_util:with_config_listener(fun() ->
- config:set("foo", "bar", "baz"),
- couch_log_test_util:wait_for_config()
- end)
- end,
- ?assertError(config_change_timeout, Run()).
-
-get_handler() ->
- FoldFun = fun
- ({config_listener, {couch_log_sup, _}} = H, not_found) ->
- H;
- (_, Acc) ->
- Acc
- end,
- lists:foldl(FoldFun, not_found, gen_event:which_handlers(config_event)).
-
-get_listener() ->
- Children = supervisor:which_children(couch_log_sup),
- hd([Pid || {config_listener_mon, Pid, _, _} <- Children]).
diff --git a/src/couch_log/test/eunit/couch_log_config_test.erl b/src/couch_log/test/eunit/couch_log_config_test.erl
deleted file mode 100644
index df7cdf977..000000000
--- a/src/couch_log/test/eunit/couch_log_config_test.erl
+++ /dev/null
@@ -1,170 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_config_test).
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(T(Name), {atom_to_list(Name), fun Name/0}).
-
-couch_log_config_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- ?T(check_level),
- ?T(check_max_message_size),
- ?T(check_bad_level),
- ?T(check_bad_max_message_size),
- ?T(check_strip_last_msg),
- ?T(check_bad_strip_last_msg),
- ?T(check_filter_fields),
- ?T(check_bad_filter_fields)
- ]}.
-
-check_level() ->
- % Default level is info
- ?assertEqual(info, couch_log_config:get(level)),
- ?assertEqual(2, couch_log_config:get(level_int)),
-
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "level", "emerg"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(emergency, couch_log_config:get(level)),
- ?assertEqual(8, couch_log_config:get(level_int)),
-
- config:set("log", "level", "debug"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(debug, couch_log_config:get(level)),
- ?assertEqual(1, couch_log_config:get(level_int)),
-
- config:delete("log", "level"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(info, couch_log_config:get(level)),
- ?assertEqual(2, couch_log_config:get(level_int))
- end).
-
-check_max_message_size() ->
- % Default is 16000
- ?assertEqual(16000, couch_log_config:get(max_message_size)),
-
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "max_message_size", "1024"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(1024, couch_log_config:get(max_message_size)),
-
- config:delete("log", "max_message_size"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(16000, couch_log_config:get(max_message_size))
- end).
-
-check_bad_level() ->
- % Default level is info
- ?assertEqual(info, couch_log_config:get(level)),
- ?assertEqual(2, couch_log_config:get(level_int)),
-
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "level", "debug"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(debug, couch_log_config:get(level)),
- ?assertEqual(1, couch_log_config:get(level_int)),
-
- config:set("log", "level", "this is not a valid level name"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(info, couch_log_config:get(level)),
- ?assertEqual(2, couch_log_config:get(level_int)),
-
- config:delete("log", "level"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(info, couch_log_config:get(level)),
- ?assertEqual(2, couch_log_config:get(level_int))
- end).
-
-check_bad_max_message_size() ->
- % Default level is 16000
- ?assertEqual(16000, couch_log_config:get(max_message_size)),
-
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "max_message_size", "1024"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(1024, couch_log_config:get(max_message_size)),
-
- config:set("log", "max_message_size", "this is not a valid size"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(16000, couch_log_config:get(max_message_size)),
-
- config:delete("log", "max_message_size"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(16000, couch_log_config:get(max_message_size))
- end).
-
-check_strip_last_msg() ->
- % Default is true
- ?assertEqual(true, couch_log_config:get(strip_last_msg)),
-
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "strip_last_msg", "false"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(false, couch_log_config:get(strip_last_msg)),
-
- config:delete("log", "strip_last_msg"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(true, couch_log_config:get(strip_last_msg))
- end).
-
-check_bad_strip_last_msg() ->
- % Default is true
- ?assertEqual(true, couch_log_config:get(strip_last_msg)),
-
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "strip_last_msg", "false"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(false, couch_log_config:get(strip_last_msg)),
-
- config:set("log", "strip_last_msg", "this is not a boolean"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(true, couch_log_config:get(strip_last_msg)),
-
- config:delete("log", "strip_last_msg"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(true, couch_log_config:get(strip_last_msg))
- end).
-
-check_filter_fields() ->
- Default = [pid, registered_name, error_info, messages],
- ?assertEqual(Default, couch_log_config:get(filter_fields)),
-
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "filter_fields", "[foo, bar, baz]"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual([foo, bar, baz], couch_log_config:get(filter_fields)),
-
- config:delete("log", "filter_fields"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(Default, couch_log_config:get(filter_fields))
- end).
-
-check_bad_filter_fields() ->
- Default = [pid, registered_name, error_info, messages],
- ?assertEqual(Default, couch_log_config:get(filter_fields)),
-
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "filter_fields", "[foo, bar, baz]"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual([foo, bar, baz], couch_log_config:get(filter_fields)),
-
- config:set("log", "filter_fields", "not a list of atoms"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(Default, couch_log_config:get(filter_fields)),
-
- config:delete("log", "filter_fields"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(Default, couch_log_config:get(filter_fields))
- end).
diff --git a/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl b/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
deleted file mode 100644
index cb053d611..000000000
--- a/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
+++ /dev/null
@@ -1,36 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_error_logger_h_test).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(HANDLER, couch_log_error_logger_h).
-
-couch_log_error_logger_h_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- fun handler_ignores_unknown_messages/0,
- fun coverage_test/0
- ]}.
-
-handler_ignores_unknown_messages() ->
- Handlers1 = gen_event:which_handlers(error_logger),
- ?assert(lists:member(?HANDLER, Handlers1)),
- ?assertEqual(ignored, gen_event:call(error_logger, ?HANDLER, foo)),
-
- error_logger ! this_is_a_message,
- Handlers2 = gen_event:which_handlers(error_logger),
- ?assert(lists:member(?HANDLER, Handlers2)).
-
-coverage_test() ->
- Resp = couch_log_error_logger_h:code_change(foo, bazinga, baz),
- ?assertEqual({ok, bazinga}, Resp).
diff --git a/src/couch_log/test/eunit/couch_log_formatter_test.erl b/src/couch_log/test/eunit/couch_log_formatter_test.erl
deleted file mode 100644
index d516c2bc5..000000000
--- a/src/couch_log/test/eunit/couch_log_formatter_test.erl
+++ /dev/null
@@ -1,966 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_formatter_test).
-
--include("couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-truncate_fmt_test() ->
- Msg = [0 || _ <- lists:seq(1, 1048576)],
- Entry = couch_log_formatter:format(info, self(), "~w", [Msg]),
- ?assert(length(Entry#log_entry.msg) =< 16000).
-
-truncate_test() ->
- Msg = [0 || _ <- lists:seq(1, 1048576)],
- Entry = couch_log_formatter:format(info, self(), Msg),
- ?assert(length(Entry#log_entry.msg) =< 16000).
-
-format_reason_test() ->
- MsgFmt = "This is a reason: ~r",
- Reason = {foo, [{x, k, 3}, {c, d, 2}]},
- Entry = couch_log_formatter:format(info, self(), MsgFmt, [Reason]),
- Formatted = "This is a reason: foo at x:k/3 <= c:d/2",
- ?assertEqual(Formatted, lists:flatten(Entry#log_entry.msg)).
-
-crashing_formatting_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** Generic server and some stuff",
- % not enough args!
- [a_gen_server, {foo, bar}, server_state]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- do_matches(do_format(Event), [
- "Encountered error {error,{badmatch"
- ]).
-
-gen_server_error_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** Generic server and some stuff",
- [a_gen_server, {foo, bar}, server_state, some_reason]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- do_matches(do_format(Event), [
- "gen_server a_gen_server terminated",
- "with reason: some_reason",
- "last msg: redacted",
- "state: server_state",
- "extra: \\[\\]"
- ]).
-
-gen_server_error_with_extra_args_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** Generic server and some stuff",
- [a_gen_server, {foo, bar}, server_state, some_reason, sad, args]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- do_matches(do_format(Event), [
- "gen_server a_gen_server terminated",
- "with reason: some_reason",
- "last msg: redacted",
- "state: server_state",
- "extra: \\[sad,args\\]"
- ]).
-
-gen_fsm_error_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** State machine did a thing",
- [a_gen_fsm, {ohai, there}, state_name, curr_state, barf]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- do_matches(do_format(Event), [
- "gen_fsm a_gen_fsm in state state_name",
- "with reason: barf",
- "last msg: redacted",
- "state: curr_state",
- "extra: \\[\\]"
- ]).
-
-gen_fsm_error_with_extra_args_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** State machine did a thing",
- [a_gen_fsm, {ohai, there}, state_name, curr_state, barf, sad, args]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- do_matches(do_format(Event), [
- "gen_fsm a_gen_fsm in state state_name",
- "with reason: barf",
- "last msg: redacted",
- "state: curr_state",
- "extra: \\[sad,args\\]"
- ]).
-
-gen_event_error_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** gen_event handler did a thing",
- [
- handler_id,
- a_gen_event,
- {ohai, there},
- curr_state,
- barf
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- do_matches(do_format(Event), [
- "gen_event handler_id installed in a_gen_event",
- "reason: barf",
- "last msg: redacted",
- "state: curr_state"
- ]).
-
-emulator_error_test() ->
- Event = {
- error,
- erlang:group_leader(),
- {
- emulator,
- "~s~n",
- ["A process died and stuff\n"]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = emulator,
- msg = "A process died and stuff"
- },
- do_format(Event)
- ).
-
-normal_error_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "format thing: ~w ~w",
- [
- first_arg,
- second_arg
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid,
- msg = "format thing: first_arg second_arg"
- },
- do_format(Event)
- ).
-
-error_report_std_error_test() ->
- Pid = self(),
- Event = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- std_error,
- [foo, {bar, baz}]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid,
- msg = "foo, bar: baz"
- },
- do_format(Event)
- ).
-
-supervisor_report_test() ->
- Pid = self(),
- % A standard supervisor report
- Event1 = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- supervisor_report,
- [
- {supervisor, sup_name},
- {offender, [
- {id, sup_child},
- {pid, list_to_pid("<0.1.0>")},
- {mfargs, {some_mod, some_fun, 3}}
- ]},
- {reason, a_reason},
- {errorContext, some_context}
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event1)
- ),
- do_matches(do_format(Event1), [
- "Supervisor sup_name",
- "had child sup_child started with some_mod:some_fun/3 at <0.1.0> exit",
- "with reason a_reason",
- "in context some_context"
- ]),
- % A slightly older report format that uses name instead of id
- % in the offender blob.
- Event2 = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- supervisor_report,
- [
- {supervisor, sup_name},
- {offender, [
- {name, sup_child},
- {pid, list_to_pid("<0.1.0>")},
- {mfargs, {some_mod, some_fun, 3}}
- ]},
- {reason, a_reason},
- {errorContext, some_context}
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event2)
- ),
- do_matches(do_format(Event2), [
- "Supervisor sup_name",
- "had child sup_child started with some_mod:some_fun/3 at <0.1.0> exit",
- "with reason a_reason",
- "in context some_context"
- ]),
- % A supervisor_bridge
- Event3 = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- supervisor_report,
- [
- {supervisor, sup_name},
- {offender, [
- {mod, bridge_mod},
- {pid, list_to_pid("<0.1.0>")}
- ]},
- {reason, a_reason},
- {errorContext, some_context}
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event3)
- ),
- do_matches(do_format(Event3), [
- "Supervisor sup_name",
- "had child at module bridge_mod at <0.1.0> exit",
- "with reason a_reason",
- "in context some_context"
- ]),
- % Any other supervisor report
- Event4 = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- supervisor_report,
- [foo, {a, thing}, bang]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid,
- msg = "SUPERVISOR REPORT foo, a: thing, bang"
- },
- do_format(Event4)
- ).
-
-crash_report_test() ->
- Pid = self(),
- % A standard crash report
- Event1 = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- crash_report,
- [
- [
- {pid, list_to_pid("<0.2.0>")},
- {error_info,
- {
- exit,
- undef,
- [{mod_name, fun_name, [a, b]}]
- }}
- ],
- [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event1)
- ),
- do_matches(do_format(Event1), [
- "Process <0.2.0>",
- "with 2 neighbors",
- "exited",
- "reason: call to undefined function mod_name:fun_name\\(a, b\\)"
- ]),
- % A registered process crash report
- Event2 = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- crash_report,
- [
- [
- {pid, list_to_pid("<0.2.0>")},
- {registered_name, couch_log_server},
- {error_info,
- {
- exit,
- undef,
- [{mod_name, fun_name, [a, b]}]
- }}
- ],
- [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
- ]
- }
- },
- do_matches(do_format(Event2), [
- "Process couch_log_server \\(<0.2.0>\\)"
- ]),
- % A non-exit crash report
- Event3 = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- crash_report,
- [
- [
- {pid, list_to_pid("<0.2.0>")},
- {registered_name, couch_log_server},
- {error_info,
- {
- killed,
- undef,
- [{mod_name, fun_name, [a, b]}]
- }}
- ],
- [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
- ]
- }
- },
- do_matches(do_format(Event3), [
- "crashed"
- ]),
- % A crash report with extra info
- Event4 = {
- error_report,
- erlang:group_leader(),
- {
- Pid,
- crash_report,
- [
- [
- {pid, list_to_pid("<0.2.0>")},
- {error_info,
- {
- killed,
- undef,
- [{mod_name, fun_name, [a, b]}]
- }},
- {another, entry},
- yep
- ],
- [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
- ]
- }
- },
- do_matches(do_format(Event4), [
- "; another: entry, yep"
- ]).
-
-warning_report_test() ->
- Pid = self(),
- % A warning message
- Event1 = {
- warning_msg,
- erlang:group_leader(),
- {
- Pid,
- "a ~s string ~w",
- ["format", 7]
- }
- },
- ?assertMatch(
- #log_entry{
- level = warning,
- pid = Pid,
- msg = "a format string 7"
- },
- do_format(Event1)
- ),
- % A warning report
- Event2 = {
- warning_report,
- erlang:group_leader(),
- {
- Pid,
- std_warning,
- [list, 'of', {things, indeed}]
- }
- },
- ?assertMatch(
- #log_entry{
- level = warning,
- pid = Pid,
- msg = "list, of, things: indeed"
- },
- do_format(Event2)
- ).
-
-info_report_test() ->
- Pid = self(),
- % An info message
- Event1 = {
- info_msg,
- erlang:group_leader(),
- {
- Pid,
- "an info ~s string ~w",
- ["format", 7]
- }
- },
- ?assertMatch(
- #log_entry{
- level = info,
- pid = Pid,
- msg = "an info format string 7"
- },
- do_format(Event1)
- ),
- % Application exit info
- Event2 = {
- info_report,
- erlang:group_leader(),
- {
- Pid,
- std_info,
- [
- {type, no_idea},
- {application, couch_log},
- {exited, red_sox_are_on}
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = info,
- pid = Pid,
- msg = "Application couch_log exited with reason: red_sox_are_on"
- },
- do_format(Event2)
- ),
- % Any other std_info message
- Event3 = {
- info_report,
- erlang:group_leader(),
- {
- Pid,
- std_info,
- [
- {type, no_idea},
- {application, couch_log}
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = info,
- pid = Pid,
- msg = "type: no_idea, application: couch_log"
- },
- do_format(Event3)
- ),
- % Non-list other report
- Event4 = {
- info_report,
- erlang:group_leader(),
- {
- Pid,
- std_info,
- dang
- }
- },
- ?assertMatch(
- #log_entry{
- level = info,
- pid = Pid,
- msg = "dang"
- },
- do_format(Event4)
- ).
-
-progress_report_test() ->
- Pid = self(),
- % Application started
- Event1 = {
- info_report,
- erlang:group_leader(),
- {
- Pid,
- progress,
- [{started_at, 'nonode@nohost'}, {application, app_name}]
- }
- },
- ?assertMatch(
- #log_entry{
- level = info,
- pid = Pid,
- msg = "Application app_name started on node nonode@nohost"
- },
- do_format(Event1)
- ),
- % Supervisor started child
- Event2 = {
- info_report,
- erlang:group_leader(),
- {
- Pid,
- progress,
- [
- {supervisor, sup_dude},
- {started, [
- {mfargs, {mod_name, fun_name, 1}},
- {pid, list_to_pid("<0.5.0>")}
- ]}
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = debug,
- pid = Pid,
- msg =
- "Supervisor sup_dude started mod_name:fun_name/1"
- " at pid <0.5.0>"
- },
- do_format(Event2)
- ),
- % Other progress report
- Event3 = {
- info_report,
- erlang:group_leader(),
- {
- Pid,
- progress,
- [a, {thing, boop}, here]
- }
- },
- ?assertMatch(
- #log_entry{
- level = info,
- pid = Pid,
- msg = "PROGRESS REPORT a, thing: boop, here"
- },
- do_format(Event3)
- ).
-
-log_unknown_event_test() ->
- Pid = self(),
- ?assertMatch(
- #log_entry{
- level = warning,
- pid = Pid,
- msg = "Unexpected error_logger event an_unknown_event"
- },
- do_format(an_unknown_event)
- ).
-
-format_reason_test_() ->
- Cases = [
- {
- {'function not exported', [{a, b, 2}, {c, d, 1}, {e, f, 2}]},
- "call to unexported function a:b/2 at c:d/1 <= e:f/2"
- },
- {
- {'function not exported', [{a, b, 2, []}, {c, d, 1}, {e, f, 2}]},
- "call to unexported function a:b/2 at c:d/1 <= e:f/2"
- },
- {
- {undef, [{a, b, 2, []}, {c, d, 1}, {e, f, 2}]},
- "call to undefined function a:b/2 at c:d/1 <= e:f/2"
- },
- {
- {bad_return, {{a, b, 2}, {'EXIT', killed}}},
- "bad return value {'EXIT',killed} from a:b/2"
- },
- {
- {bad_return_value, foo},
- "bad return value foo"
- },
- {
- {{bad_return_value, foo}, {h, i, 0}},
- "bad return value foo at h:i/0"
- },
- {
- {{badrecord, {foo, 1, 4}}, [{h, i, 0}, {j, k, [a, b]}]},
- "bad record {foo,1,4} at h:i/0 <= j:k/2"
- },
- {
- {{case_clause, bingo}, [{j, k, 3}, {z, z, 0}]},
- "no case clause matching bingo at j:k/3 <= z:z/0"
- },
- {
- {function_clause, [{j, k, [a, 2]}, {y, x, 1}]},
- "no function clause matching j:k(a, 2) at y:x/1"
- },
- {
- {if_clause, [{j, k, [a, 2]}, {y, x, 1}]},
- "no true branch found while evaluating if expression at j:k/2 <= y:x/1"
- },
- {
- {{try_clause, bango}, [{j, k, [a, 2]}, {y, x, 1}]},
- "no try clause matching bango at j:k/2 <= y:x/1"
- },
- {
- {badarith, [{j, k, [a, 2]}, {y, x, 1}]},
- "bad arithmetic expression at j:k/2 <= y:x/1"
- },
- {
- {{badmatch, bongo}, [{j, k, [a, 2]}, {y, x, 1}]},
- "no match of right hand value bongo at j:k/2 <= y:x/1"
- },
- {
- {emfile, [{j, k, [a, 2]}, {y, x, 1}]},
- "maximum number of file descriptors exhausted, check ulimit -n; j:k/2 <= y:x/1"
- },
- {
- {system_limit, [{erlang, open_port, []}, {y, x, 1}]},
- "system limit: maximum number of ports exceeded at y:x/1"
- },
- {
- {system_limit, [{erlang, spawn, []}, {y, x, 1}]},
- "system limit: maximum number of processes exceeded at y:x/1"
- },
- {
- {system_limit, [{erlang, spawn_opt, []}, {y, x, 1}]},
- "system limit: maximum number of processes exceeded at y:x/1"
- },
- {
- {system_limit, [{erlang, list_to_atom, ["foo"]}, {y, x, 1}]},
- "system limit: tried to create an atom larger than 255, or maximum atom count exceeded at y:x/1"
- },
- {
- {system_limit, [{ets, new, []}, {y, x, 1}]},
- "system limit: maximum number of ETS tables exceeded at y:x/1"
- },
- {
- {system_limit, [{couch_log, totes_logs, []}, {y, x, 1}]},
- "system limit: couch_log:totes_logs() at y:x/1"
- },
- {
- {badarg, [{j, k, [a, 2]}, {y, x, 1}]},
- "bad argument in call to j:k(a, 2) at y:x/1"
- },
- {
- {{badarg, [{j, k, [a, 2]}, {y, x, 1}]}, some_ignored_thing},
- "bad argument in call to j:k(a, 2) at y:x/1"
- },
- {
- {{badarity, {fun erlang:spawn/1, [a, b]}}, [{y, x, 1}]},
- "function called with wrong arity of 2 instead of 1 at y:x/1"
- },
- {
- {noproc, [{y, x, 1}]},
- "no such process or port in call to y:x/1"
- },
- {
- {{badfun, 2}, [{y, x, 1}]},
- "bad function 2 called at y:x/1"
- },
- {
- {a_reason, [{y, x, 1}]},
- "a_reason at y:x/1"
- },
- {
- {a_reason, [{y, x, 1, [{line, 4}]}]},
- "a_reason at y:x/1(line:4)"
- }
- ],
- [
- {Msg, fun() ->
- ?assertEqual(
- Msg,
- lists:flatten(couch_log_formatter:format_reason(Reason))
- )
- end}
- || {Reason, Msg} <- Cases
- ].
-
-coverage_test() ->
- % MFA terms that aren't actually MFAs
- ?assertEqual(["foo"], couch_log_formatter:format_mfa(foo)),
-
- % Traces with line numbers
- Trace = [{x, y, [a], [{line, 4}]}],
- ?assertEqual(
- "x:y/1(line:4)",
- lists:flatten(couch_log_formatter:format_trace(Trace))
- ),
-
- % Exercising print_silly_list with a string report
- ?assertMatch(
- #log_entry{
- level = error,
- msg = "foobar"
- },
- do_format(
- {
- error_report,
- erlang:group_leader(),
- {self(), std_error, "foobar"}
- }
- )
- ),
-
- % Exercising print_silly_list with a non-list term
- ?assertMatch(
- #log_entry{
- level = error,
- msg = "dang"
- },
- do_format(
- {
- error_report,
- erlang:group_leader(),
- {self(), std_error, dang}
- }
- )
- ).
-
-gen_server_error_with_last_msg_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** Generic server and some stuff",
- [a_gen_server, {foo, bar}, server_state, some_reason]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- with_last(fun() ->
- do_matches(do_format(Event), [
- "gen_server a_gen_server terminated",
- "with reason: some_reason",
- "last msg: {foo,bar}",
- "state: server_state",
- "extra: \\[\\]"
- ])
- end).
-
-gen_event_error_with_last_msg_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** gen_event handler did a thing",
- [
- handler_id,
- a_gen_event,
- {ohai, there},
- curr_state,
- barf
- ]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- with_last(fun() ->
- do_matches(do_format(Event), [
- "gen_event handler_id installed in a_gen_event",
- "reason: barf",
- "last msg: {ohai,there}",
- "state: curr_state"
- ])
- end).
-
-gen_fsm_error_with_last_msg_test() ->
- Pid = self(),
- Event = {
- error,
- erlang:group_leader(),
- {
- Pid,
- "** State machine did a thing",
- [a_gen_fsm, {ohai, there}, state_name, curr_state, barf]
- }
- },
- ?assertMatch(
- #log_entry{
- level = error,
- pid = Pid
- },
- do_format(Event)
- ),
- with_last(fun() ->
- do_matches(do_format(Event), [
- "gen_fsm a_gen_fsm in state state_name",
- "with reason: barf",
- "last msg: {ohai,there}",
- "state: curr_state",
- "extra: \\[\\]"
- ])
- end).
-
-with_last(Fun) ->
- meck:new(couch_log_config_dyn, [passthrough]),
- try
- meck:expect(couch_log_config_dyn, get, fun(Case) ->
- case Case of
- strip_last_msg -> false;
- Case -> meck:passthrough([Case])
- end
- end),
- Fun()
- after
- meck:unload(couch_log_config_dyn)
- end.
-
-do_format(Event) ->
- E = couch_log_formatter:format(Event),
- E#log_entry{
- msg = lists:flatten(E#log_entry.msg),
- msg_id = lists:flatten(E#log_entry.msg_id),
- time_stamp = lists:flatten(E#log_entry.time_stamp)
- }.
-
-do_matches(_, []) ->
- ok;
-do_matches(#log_entry{msg = Msg} = E, [Pattern | RestPatterns]) ->
- case re:run(Msg, Pattern) of
- {match, _} ->
- ok;
- nomatch ->
- Err1 = io_lib:format("'~s' does not match '~s'", [Pattern, Msg]),
- Err2 = lists:flatten(Err1),
- ?assertEqual(nomatch, Err2)
- end,
- do_matches(E, RestPatterns).
diff --git a/src/couch_log/test/eunit/couch_log_monitor_test.erl b/src/couch_log/test/eunit/couch_log_monitor_test.erl
deleted file mode 100644
index ceeb98b4e..000000000
--- a/src/couch_log/test/eunit/couch_log_monitor_test.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_monitor_test).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(HANDLER, couch_log_error_logger_h).
-
-couch_log_monitor_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- fun monitor_ignores_unknown_messages/0,
- fun monitor_restarts_handler/0,
- fun coverage_test/0
- ]}.
-
-monitor_ignores_unknown_messages() ->
- Pid1 = get_monitor_pid(),
-
- ?assertEqual(ignored, gen_server:call(Pid1, do_foo_please)),
-
- gen_server:cast(Pid1, do_bar_please),
- Pid1 ! do_baz_please,
- timer:sleep(250),
- ?assert(is_process_alive(Pid1)).
-
-monitor_restarts_handler() ->
- Pid1 = get_monitor_pid(),
- error_logger:delete_report_handler(?HANDLER),
- timer:sleep(250),
-
- ?assert(not is_process_alive(Pid1)),
-
- Pid2 = get_monitor_pid(),
- ?assert(is_process_alive(Pid2)),
-
- Handlers = gen_event:which_handlers(error_logger),
- ?assert(lists:member(?HANDLER, Handlers)).
-
-coverage_test() ->
- Resp = couch_log_monitor:code_change(foo, bazinga, baz),
- ?assertEqual({ok, bazinga}, Resp).
-
-get_monitor_pid() ->
- Children = supervisor:which_children(couch_log_sup),
- [MonPid] = [Pid || {couch_log_monitor, Pid, _, _} <- Children, is_pid(Pid)],
- MonPid.
diff --git a/src/couch_log/test/eunit/couch_log_server_test.erl b/src/couch_log/test/eunit/couch_log_server_test.erl
deleted file mode 100644
index a2334b048..000000000
--- a/src/couch_log/test/eunit/couch_log_server_test.erl
+++ /dev/null
@@ -1,110 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_server_test).
-
--include("couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-couch_log_server_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- fun check_can_reconfigure/0,
- fun check_can_restart/0,
- fun check_can_cast_log_entry/0,
- fun check_logs_ignored_messages/0
- ]}.
-
-check_can_reconfigure() ->
- couch_log:error("a message", []),
- ?assertEqual(0, couch_log_test_util:last_log_key()),
- ?assertEqual(ok, couch_log_server:reconfigure()),
- ?assertEqual('$end_of_table', couch_log_test_util:last_log_key()),
-
- couch_log_test_util:with_config_listener(fun() ->
- couch_log:error("another message", []),
- ?assertEqual(0, couch_log_test_util:last_log_key()),
- config:set("log", "some_key", "some_val"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual('$end_of_table', couch_log_test_util:last_log_key())
- end).
-
-check_can_restart() ->
- Pid1 = whereis(couch_log_server),
- Ref = erlang:monitor(process, Pid1),
- ?assert(is_process_alive(Pid1)),
-
- supervisor:terminate_child(couch_log_sup, couch_log_server),
- supervisor:restart_child(couch_log_sup, couch_log_server),
-
- receive
- {'DOWN', Ref, _, _, _} -> ok
- after 1000 ->
- erlang:error(timeout_restarting_couch_log_server)
- end,
-
- ?assert(not is_process_alive(Pid1)),
-
- Pid2 = whereis(couch_log_server),
- ?assertNotEqual(Pid2, Pid1),
- ?assert(is_process_alive(Pid2)).
-
-check_can_cast_log_entry() ->
- Entry = #log_entry{
- level = critical,
- pid = self(),
- msg = "this will be casted",
- msg_id = "----",
- time_stamp = "2016-07-20-almost-my-birthday"
- },
- ok = gen_server:cast(couch_log_server, {log, Entry}),
- % totes gross
- timer:sleep(500),
- ?assertEqual(Entry, couch_log_test_util:last_log()).
-
-check_logs_ignored_messages() ->
- gen_server:call(couch_log_server, a_call),
- ?assertMatch(
- #log_entry{
- level = error,
- pid = couch_log_server,
- msg = "couch_log_server ignored a_call"
- },
- couch_log_test_util:last_log()
- ),
-
- gen_server:cast(couch_log_server, a_cast),
- % yes gross
- timer:sleep(500),
- ?assertMatch(
- #log_entry{
- level = error,
- pid = couch_log_server,
- msg = "couch_log_server ignored a_cast"
- },
- couch_log_test_util:last_log()
- ),
-
- couch_log_server ! an_info,
- % still gross
- timer:sleep(500),
- ?assertMatch(
- #log_entry{
- level = error,
- pid = couch_log_server,
- msg = "couch_log_server ignored an_info"
- },
- couch_log_test_util:last_log()
- ).
-
-coverage_test() ->
- Resp = couch_log_server:code_change(foo, bazinga, baz),
- ?assertEqual({ok, bazinga}, Resp).
diff --git a/src/couch_log/test/eunit/couch_log_test.erl b/src/couch_log/test/eunit/couch_log_test.erl
deleted file mode 100644
index 1538934b3..000000000
--- a/src/couch_log/test/eunit/couch_log_test.erl
+++ /dev/null
@@ -1,76 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_test).
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-couch_log_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1,
- gen() ++ [fun check_set_level/0]}.
-
-check_set_level() ->
- couch_log:set_level(crit),
- ?assertEqual("crit", config:get("log", "level")).
-
-levels() ->
- [
- debug,
- info,
- notice,
- warning,
- error,
- critical,
- alert,
- emergency,
- none
- ].
-
-gen() ->
- lists:map(
- fun(L) ->
- Name = "Test log level: " ++ couch_log_util:level_to_string(L),
- {Name, fun() -> check_levels(L, levels()) end}
- end,
- levels() -- [none]
- ).
-
-check_levels(_, []) ->
- ok;
-check_levels(TestLevel, [CfgLevel | RestLevels]) ->
- TestInt = couch_log_util:level_to_integer(TestLevel),
- CfgInt = couch_log_util:level_to_integer(CfgLevel),
- Pid = self(),
- Msg = new_msg(),
- LastKey = couch_log_test_util:last_log_key(),
- couch_log_test_util:with_level(CfgLevel, fun() ->
- couch_log:TestLevel(Msg, []),
- case TestInt >= CfgInt of
- true ->
- ?assertMatch(
- #log_entry{
- level = TestLevel,
- pid = Pid,
- msg = Msg
- },
- couch_log_test_util:last_log()
- );
- false ->
- ?assertEqual(LastKey, couch_log_test_util:last_log_key())
- end
- end),
- check_levels(TestLevel, RestLevels).
-
-new_msg() ->
- Bin = list_to_binary([couch_rand:uniform(255) || _ <- lists:seq(1, 16)]),
- couch_util:to_hex(Bin).
diff --git a/src/couch_log/test/eunit/couch_log_test_util.erl b/src/couch_log/test/eunit/couch_log_test_util.erl
deleted file mode 100644
index 9a170bdbd..000000000
--- a/src/couch_log/test/eunit/couch_log_test_util.erl
+++ /dev/null
@@ -1,170 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_test_util).
-
--export([
- start/0,
- stop/1,
- last_log/0,
- last_log_key/0,
- wait_for_config/0,
- with_config_listener/1,
- with_level/2,
- with_meck/2
-]).
-
--include("couch_log.hrl").
-
-start() ->
- remove_error_loggers(),
- application:set_env(config, ini_files, config_files()),
- application:start(config),
- ignore_common_loggers(),
- application:start(couch_log),
- meck:new(couch_stats),
- ok = meck:expect(couch_stats, increment_counter, ['_'], ok).
-
-stop(_) ->
- application:stop(config),
- application:stop(couch_log),
- meck:unload(couch_stats).
-
-with_level(Name, Fun) ->
- with_config_listener(fun() ->
- try
- LevelStr = couch_log_util:level_to_string(Name),
- config:set("log", "level", LevelStr, false),
- wait_for_config(),
- Fun()
- after
- config:delete("log", "level", false)
- end
- end).
-
-with_config_listener(Fun) ->
- Listener = self(),
- try
- add_listener(Listener),
- Fun()
- after
- rem_listener(Listener)
- end.
-
-wait_for_config() ->
- receive
- couch_log_config_change_finished -> ok
- after 1000 ->
- erlang:error(config_change_timeout)
- end.
-
-with_meck(Mods, Fun) ->
- lists:foreach(
- fun(M) ->
- case M of
- {Name, Opts} -> meck:new(Name, Opts);
- Name -> meck:new(Name)
- end
- end,
- Mods
- ),
- try
- Fun()
- after
- lists:foreach(
- fun(M) ->
- case M of
- {Name, _} -> meck:unload(Name);
- Name -> meck:unload(Name)
- end
- end,
- Mods
- )
- end.
-
-ignore_common_loggers() ->
- IgnoreSet = [
- application_controller,
- config,
- config_event
- ],
- lists:foreach(
- fun(Proc) ->
- disable_logs_from(Proc)
- end,
- IgnoreSet
- ).
-
-disable_logs_from(Pid) when is_pid(Pid) ->
- Ignored =
- case application:get_env(couch_log, ignored_pids) of
- {ok, L} when is_list(L) ->
- lists:usort([Pid | L]);
- _E ->
- [Pid]
- end,
- IgnoredAlive = [P || P <- Ignored, is_process_alive(P)],
- application:set_env(couch_log, ignored_pids, IgnoredAlive);
-disable_logs_from(Name) when is_atom(Name) ->
- case whereis(Name) of
- P when is_pid(P) ->
- disable_logs_from(P);
- undefined ->
- erlang:error({unknown_pid_name, Name})
- end.
-
-last_log_key() ->
- ets:last(?COUCH_LOG_TEST_TABLE).
-
-last_log() ->
- [{_, Entry}] = ets:lookup(?COUCH_LOG_TEST_TABLE, last_log_key()),
- Entry.
-
-remove_error_loggers() ->
- ErrorLoggerPid = whereis(error_logger),
- if
- ErrorLoggerPid == undefined ->
- ok;
- true ->
- lists:foreach(
- fun(Handler) ->
- error_logger:delete_report_handler(Handler)
- end,
- gen_event:which_handlers(ErrorLoggerPid)
- )
- end.
-
-config_files() ->
- Path = filename:dirname(code:which(?MODULE)),
- Name = filename:join(Path, "couch_log_test.ini"),
- ok = file:write_file(Name, "[log]\nwriter = ets\n"),
- [Name].
-
-add_listener(Listener) ->
- Listeners =
- case application:get_env(couch_log, config_listeners) of
- {ok, L} when is_list(L) ->
- lists:usort([Listener | L]);
- _ ->
- [Listener]
- end,
- application:set_env(couch_log, config_listeners, Listeners).
-
-rem_listener(Listener) ->
- Listeners =
- case application:get_env(couch_log, config_listeners) of
- {ok, L} when is_list(L) ->
- L -- [Listener];
- _ ->
- []
- end,
- application:set_env(couch_log, config_listeners, Listeners).
diff --git a/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl b/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
deleted file mode 100644
index 8d1fdeffb..000000000
--- a/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
+++ /dev/null
@@ -1,91 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_trunc_io_fmt_test).
-
--include_lib("eunit/include/eunit.hrl").
-
-format_test_() ->
- lists:map(
- fun({Fmt, Args, Expect}) ->
- Name = io_lib:format("~p", [Expect]),
- {
- lists:flatten(Name),
- ?_assertEqual(
- Expect,
- lists:flatten(couch_log_trunc_io_fmt:format(Fmt, Args, 1024))
- )
- }
- end,
- cases()
- ).
-
-chomp_test() ->
- R1 = couch_log_trunc_io_fmt:format("\n", [], 1024, [{chomp, true}]),
- ?assertEqual("", lists:flatten(R1)),
- R2 = couch_log_trunc_io_fmt:format("~n", [], 1024, [{chomp, true}]),
- ?assertEqual("", lists:flatten(R2)).
-
-cases() ->
- [
- {"", [], ""},
- {"stuff\n\t", [], "stuff\n\t"},
- {"~w", [foo], "foo"},
- {"~p", [bar], "bar"},
- {"~W", [{{{2}}}, 2], "{{...}}"},
- {"~P", [{{{ohai}}}, 1], "{...}"},
- {"~s", [[$s, [$t, [$u, [$f, [$f]]]]]], "stuff"},
- {"~4s", ["stuff"], "stuf"},
- {"~8s", ["stuff"], " stuff"},
- {"~.8s", ["stuff"], "stuff "},
- {"~10.4s", ["stuff"], " stuf"},
- {"~10.6s", ["stuff"], " stuff "},
- {"~10.5s", ["stuff"], " stuff"},
- {"~10.10s", ["stuff"], " stuff"},
- {"~r", [{reason, [{x, k, [c, d]}]}], "reason at x:k/2"},
- {"~e", [1.0], "1.00000e+0"},
- {"~f", [1.0], "1.000000"},
- {"~f", [0.000323], "0.000323"},
- {"~f", [31.4], "31.400000"},
- {"~f", [-2.3], "-2.300000"},
- {"~g", [1.0], "1.00000"},
- {"~b", [-15], "-15"},
- {"~b", [15], "15"},
- {"~B", [15], "15"},
- {"~.16b", [15], "f"},
- {"~.16B", [15], "F"},
- {"~.16b", [-15], "-f"},
- {"~.16B", [-15], "-F"},
- {"~.16x", [15, "16#"], "16#f"},
- {"~.16x", [15, '16#'], "16#f"},
- {"~.16x", [-15, "16#"], "-16#f"},
- {"~.16X", [15, "16#"], "16#F"},
- {"~.16X", [15, '16#'], "16#F"},
- {"~.16X", [-15, "16#"], "-16#F"},
- {"~.16#", [15], "16#F"},
- {"~.16+", [15], "16#f"},
- {"~c", [$z], "z"},
- {"~tc", [$g], "g"},
- {"~~", [], "\~"},
- {"~n", [], "\n"},
- {"~2n", [], "\n\n"},
- {"~3n", [], "\n\n\n"},
- {"~i", [ignored], ""},
- {"~2.w", [1], " 1"},
- {"~*w", [2, 1], " 1"},
- {"~-2.w", [1], "1 "},
- {"~2.0. w", [1], " "},
- {"~2.1. w", [1], " 1"},
- {"~2.0.|w", [1], "||"},
- {"~2.1.|w", [1], "|1"},
- {"~2.1.*w", [$q, 1], "q1"}
- ].
diff --git a/src/couch_log/test/eunit/couch_log_util_test.erl b/src/couch_log/test/eunit/couch_log_util_test.erl
deleted file mode 100644
index ade968146..000000000
--- a/src/couch_log/test/eunit/couch_log_util_test.erl
+++ /dev/null
@@ -1,91 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_util_test).
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-get_message_id_test() ->
- ?assertEqual("--------", couch_log_util:get_msg_id()),
- erlang:put(nonce, "deadbeef"),
- ?assertEqual("deadbeef", couch_log_util:get_msg_id()),
- erlang:put(nonce, undefined).
-
-level_to_atom_test() ->
- lists:foreach(
- fun(L) ->
- ?assert(is_atom(couch_log_util:level_to_atom(L))),
- ?assert(is_integer(couch_log_util:level_to_integer(L))),
- ?assert(is_list(couch_log_util:level_to_string(L)))
- end,
- levels()
- ).
-
-string_p_test() ->
- ?assertEqual(false, couch_log_util:string_p([])),
- ?assertEqual(false, couch_log_util:string_p([[false]])),
- ?assertEqual(true, couch_log_util:string_p([$\n])),
- ?assertEqual(true, couch_log_util:string_p([$\r])),
- ?assertEqual(true, couch_log_util:string_p([$\t])),
- ?assertEqual(true, couch_log_util:string_p([$\v])),
- ?assertEqual(true, couch_log_util:string_p([$\b])),
- ?assertEqual(true, couch_log_util:string_p([$\f])),
- ?assertEqual(true, couch_log_util:string_p([$\e])).
-
-levels() ->
- [
- 1,
- 2,
- 3,
- 4,
- 5,
- 6,
- 7,
- 8,
- 9,
- "1",
- "2",
- "3",
- "4",
- "5",
- "6",
- "7",
- "8",
- "9",
- debug,
- info,
- notice,
- warning,
- warn,
- error,
- err,
- critical,
- crit,
- alert,
- emergency,
- emerg,
- none,
- "debug",
- "info",
- "notice",
- "warning",
- "warn",
- "error",
- "err",
- "critical",
- "crit",
- "alert",
- "emergency",
- "emerg",
- "none"
- ].
diff --git a/src/couch_log/test/eunit/couch_log_writer_ets.erl b/src/couch_log/test/eunit/couch_log_writer_ets.erl
deleted file mode 100644
index 7ddb9f39e..000000000
--- a/src/couch_log/test/eunit/couch_log_writer_ets.erl
+++ /dev/null
@@ -1,44 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_ets).
--behaviour(couch_log_writer).
-
--export([
- init/0,
- terminate/2,
- write/2
-]).
-
--include("couch_log.hrl").
-
-init() ->
- ets:new(?COUCH_LOG_TEST_TABLE, [named_table, public, ordered_set]),
- {ok, 0}.
-
-terminate(_, _St) ->
- ets:delete(?COUCH_LOG_TEST_TABLE),
- ok.
-
-write(Entry0, St) ->
- Entry = Entry0#log_entry{
- msg = lists:flatten(Entry0#log_entry.msg),
- time_stamp = lists:flatten(Entry0#log_entry.time_stamp)
- },
- Ignored = application:get_env(couch_log, ignored_pids, []),
- case lists:member(Entry#log_entry.pid, Ignored) of
- true ->
- {ok, St};
- false ->
- ets:insert(?COUCH_LOG_TEST_TABLE, {St, Entry}),
- {ok, St + 1}
- end.
diff --git a/src/couch_log/test/eunit/couch_log_writer_file_test.erl b/src/couch_log/test/eunit/couch_log_writer_file_test.erl
deleted file mode 100644
index 2e40088f4..000000000
--- a/src/couch_log/test/eunit/couch_log_writer_file_test.erl
+++ /dev/null
@@ -1,155 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_file_test).
-
--include_lib("kernel/include/file.hrl").
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(WRITER, couch_log_writer_file).
-
-couch_log_writer_file_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- fun check_init_terminate/0,
- fun() ->
- couch_log_test_util:with_meck(
- [{filelib, [unstick]}],
- fun check_ensure_dir_fail/0
- )
- end,
- fun() ->
- couch_log_test_util:with_meck(
- [{file, [unstick, passthrough]}],
- fun check_open_fail/0
- )
- end,
- fun() ->
- couch_log_test_util:with_meck(
- [{file, [unstick, passthrough]}],
- fun check_read_file_info_fail/0
- )
- end,
- fun check_file_write/0,
- fun check_buffered_file_write/0,
- fun check_reopen/0
- ]}.
-
-check_init_terminate() ->
- {ok, St} = ?WRITER:init(),
- ok = ?WRITER:terminate(stop, St).
-
-check_ensure_dir_fail() ->
- meck:expect(filelib, ensure_dir, 1, {error, eperm}),
- ?assertEqual({error, eperm}, ?WRITER:init()),
- ?assert(meck:called(filelib, ensure_dir, 1)),
- ?assert(meck:validate(filelib)).
-
-check_open_fail() ->
- meck:expect(file, open, 2, {error, enotfound}),
- ?assertEqual({error, enotfound}, ?WRITER:init()),
- ?assert(meck:called(file, open, 2)),
- ?assert(meck:validate(file)).
-
-check_read_file_info_fail() ->
- RFI = fun
- ("./couch.log") -> {error, enoent};
- (Path) -> meck:passthrough([Path])
- end,
- meck:expect(file, read_file_info, RFI),
- ?assertEqual({error, enoent}, ?WRITER:init()),
- ?assert(meck:called(file, read_file_info, 1)),
- ?assert(meck:validate(file)).
-
-check_file_write() ->
- % Make sure we have an empty log for this test
- IsFile = filelib:is_file("./couch.log"),
- if
- not IsFile -> ok;
- true -> file:delete("./couch.log")
- end,
-
- Entry = #log_entry{
- level = info,
- pid = list_to_pid("<0.1.0>"),
- msg = "stuff",
- msg_id = "msg_id",
- time_stamp = "time_stamp"
- },
- {ok, St} = ?WRITER:init(),
- {ok, NewSt} = ?WRITER:write(Entry, St),
- ok = ?WRITER:terminate(stop, NewSt),
-
- {ok, Data} = file:read_file("./couch.log"),
- Expect = <<"[info] time_stamp nonode@nohost <0.1.0> msg_id stuff\n">>,
- ?assertEqual(Expect, Data).
-
-check_buffered_file_write() ->
- % Make sure we have an empty log for this test
- IsFile = filelib:is_file("./couch.log"),
- if
- not IsFile -> ok;
- true -> file:delete("./couch.log")
- end,
-
- config:set("log", "write_buffer", "1024"),
- config:set("log", "write_delay", "10"),
-
- try
- Entry = #log_entry{
- level = info,
- pid = list_to_pid("<0.1.0>"),
- msg = "stuff",
- msg_id = "msg_id",
- time_stamp = "time_stamp"
- },
- {ok, St} = ?WRITER:init(),
- {ok, NewSt} = ?WRITER:write(Entry, St),
- ok = ?WRITER:terminate(stop, NewSt)
- after
- config:delete("log", "write_buffer"),
- config:delete("log", "write_delay")
- end,
-
- {ok, Data} = file:read_file("./couch.log"),
- Expect = <<"[info] time_stamp nonode@nohost <0.1.0> msg_id stuff\n">>,
- ?assertEqual(Expect, Data).
-
-check_reopen() ->
- {ok, St1} = clear_clock(?WRITER:init()),
- {ok, St2} = clear_clock(couch_log_writer_file:maybe_reopen(St1)),
- ?assertEqual(St1, St2),
-
- case os:type() of
- {win32, _} ->
- % Windows file handling doesn't work the same
- % as Unix where you can move or delete an open
- % file so these tests make no sense there.
- yay_we_pass;
- _ ->
- % Delete file
- file:delete("./couch.log"),
- {ok, St3} = clear_clock(couch_log_writer_file:maybe_reopen(St2)),
- ?assert(element(3, St3) /= element(3, St2)),
-
- % Recreate file
- file:delete("./couch.log"),
- file:write_file("./couch.log", ""),
- {ok, St4} = clear_clock(couch_log_writer_file:maybe_reopen(St3)),
- ?assert(element(3, St4) /= element(3, St2))
- end.
-
-clear_clock({ok, St}) ->
- {ok, clear_clock(St)};
-clear_clock(St) ->
- {st, Path, Fd, INode, _} = St,
- {st, Path, Fd, INode, {0, 0, 0}}.
diff --git a/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl b/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl
deleted file mode 100644
index 04f1e9a41..000000000
--- a/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_stderr_test).
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(WRITER, couch_log_writer_stderr).
-
-couch_log_writer_stderr_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- fun check_init_terminate/0,
- fun() ->
- couch_log_test_util:with_meck(
- [{io, [unstick]}],
- fun check_write/0
- )
- end
- ]}.
-
-check_init_terminate() ->
- {ok, St} = ?WRITER:init(),
- ok = ?WRITER:terminate(stop, St).
-
-check_write() ->
- meck:expect(io, format, 3, ok),
-
- Entry = #log_entry{
- level = debug,
- pid = list_to_pid("<0.1.0>"),
- msg = "stuff",
- msg_id = "msg_id",
- time_stamp = "time_stamp"
- },
- {ok, St} = ?WRITER:init(),
- {ok, NewSt} = ?WRITER:write(Entry, St),
- ok = ?WRITER:terminate(stop, NewSt),
-
- ?assert(meck:validate(io)).
diff --git a/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl b/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl
deleted file mode 100644
index 5a3f89520..000000000
--- a/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl
+++ /dev/null
@@ -1,144 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_syslog_test).
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(WRITER, couch_log_writer_syslog).
-
-couch_log_writer_syslog_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- fun check_init_terminate/0,
- fun() ->
- couch_log_test_util:with_meck(
- [{io, [unstick]}],
- fun check_stderr_write/0
- )
- end,
- fun() ->
- couch_log_test_util:with_meck(
- [{gen_udp, [unstick]}],
- fun check_udp_send/0
- )
- end
- ]}.
-
-check_init_terminate() ->
- {ok, St} = ?WRITER:init(),
- ok = ?WRITER:terminate(stop, St).
-
-check_stderr_write() ->
- meck:expect(io, format, 3, ok),
-
- Entry = #log_entry{
- level = debug,
- pid = list_to_pid("<0.1.0>"),
- msg = "stuff",
- msg_id = "msg_id",
- time_stamp = "time_stamp"
- },
- {ok, St} = ?WRITER:init(),
- {ok, NewSt} = ?WRITER:write(Entry, St),
- ok = ?WRITER:terminate(stop, NewSt),
-
- ?assert(meck:called(io, format, 3)),
- ?assert(meck:validate(io)).
-
-check_udp_send() ->
- meck:expect(gen_udp, open, 1, {ok, socket}),
- meck:expect(gen_udp, send, 4, ok),
- meck:expect(gen_udp, close, fun(socket) -> ok end),
-
- config:set("log", "syslog_host", "localhost"),
- try
- Entry = #log_entry{
- level = debug,
- pid = list_to_pid("<0.1.0>"),
- msg = "stuff",
- msg_id = "msg_id",
- time_stamp = "time_stamp"
- },
- {ok, St} = ?WRITER:init(),
- {ok, NewSt} = ?WRITER:write(Entry, St),
- ok = ?WRITER:terminate(stop, NewSt)
- after
- config:delete("log", "syslog_host")
- end,
-
- ?assert(meck:called(gen_udp, open, 1)),
- ?assert(meck:called(gen_udp, send, 4)),
- ?assert(meck:called(gen_udp, close, 1)),
- ?assert(meck:validate(gen_udp)).
-
-facility_test() ->
- Names = [
- "kern",
- "user",
- "mail",
- "daemon",
- "auth",
- "syslog",
- "lpr",
- "news",
- "uucp",
- "clock",
- "authpriv",
- "ftp",
- "ntp",
- "audit",
- "alert",
- "cron",
- "local0",
- "local1",
- "local2",
- "local3",
- "local4",
- "local5",
- "local6",
- "local7"
- ],
- lists:foldl(
- fun(Name, Id) ->
- IdStr = lists:flatten(io_lib:format("~w", [Id])),
- ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(Name)),
- ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(IdStr)),
- Id + 1
- end,
- 0,
- Names
- ),
- ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("foo")),
- ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("-1")),
- ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("24")).
-
-level_test() ->
- Levels = [
- emergency,
- alert,
- critical,
- error,
- warning,
- notice,
- info,
- debug
- ],
- lists:foldl(
- fun(Name, Id) ->
- ?assertEqual(Id, couch_log_writer_syslog:get_level(Name)),
- Id + 1
- end,
- 0,
- Levels
- ),
- ?assertEqual(3, couch_log_writer_syslog:get_level(foo)).
diff --git a/src/couch_log/test/eunit/couch_log_writer_test.erl b/src/couch_log/test/eunit/couch_log_writer_test.erl
deleted file mode 100644
index e758c9f60..000000000
--- a/src/couch_log/test/eunit/couch_log_writer_test.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_writer_test).
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-couch_log_writer_test_() ->
- {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
- fun check_writer_change/0
- ]}.
-
-check_writer_change() ->
- % Change to file and back
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "writer", "file"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(undefined, ets:info(?COUCH_LOG_TEST_TABLE)),
- ?assert(is_pid(whereis(couch_log_server))),
-
- config:set("log", "writer", "couch_log_writer_ets"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(0, ets:info(?COUCH_LOG_TEST_TABLE, size))
- end),
-
- % Using a bad setting doesn't break things
- couch_log_test_util:with_config_listener(fun() ->
- config:set("log", "writer", "hopefully not an atom or module"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(undefined, ets:info(?COUCH_LOG_TEST_TABLE)),
- ?assert(is_pid(whereis(couch_log_server))),
-
- config:set("log", "writer", "couch_log_writer_ets"),
- couch_log_test_util:wait_for_config(),
- ?assertEqual(0, ets:info(?COUCH_LOG_TEST_TABLE, size))
- end).
diff --git a/src/couch_mrview/LICENSE b/src/couch_mrview/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_mrview/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_mrview/include/couch_mrview.hrl b/src/couch_mrview/include/couch_mrview.hrl
deleted file mode 100644
index b31463c53..000000000
--- a/src/couch_mrview/include/couch_mrview.hrl
+++ /dev/null
@@ -1,112 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(mrst, {
- sig=nil,
- fd=nil,
- fd_monitor,
- db_name,
- idx_name,
- language,
- design_opts=[],
- partitioned=false,
- lib,
- views,
- id_btree=nil,
- update_seq=0,
- purge_seq=0,
- first_build,
- partial_resp_pid,
- doc_acc,
- doc_queue,
- write_queue,
- qserver=nil,
- view_info=#{}
-}).
-
-
--record(mrview, {
- id_num,
- update_seq=0,
- purge_seq=0,
- map_names=[],
- reduce_funs=[],
- def,
- btree=nil,
- options=[]
-}).
-
-
--record(mrheader, {
- seq=0,
- purge_seq=0,
- id_btree_state=nil,
- view_info=#{}, % replaces log btree in versions < 3.x
- view_states=nil
-}).
-
--define(MAX_VIEW_LIMIT, 16#10000000).
-
--record(mrargs, {
- view_type,
- reduce,
-
- preflight_fun,
-
- start_key,
- start_key_docid,
- end_key,
- end_key_docid,
- keys,
-
- direction = fwd,
- limit = ?MAX_VIEW_LIMIT,
- skip = 0,
- group_level = 0,
- group = undefined,
- stable = false,
- update = true,
- multi_get = false,
- inclusive_end = true,
- include_docs = false,
- doc_options = [],
- update_seq=false,
- conflicts,
- callback,
- sorted = true,
- extra = []
-}).
-
--record(vacc, {
- db,
- req,
- resp,
- prepend,
- etag,
- should_close = false,
- buffer = [],
- bufsize = 0,
- threshold = 1490,
- row_sent = false,
- meta_sent = false
-}).
-
--record(lacc, {
- db,
- req,
- resp,
- qserver,
- lname,
- etag,
- code,
- headers
-}).
diff --git a/src/couch_mrview/priv/stats_descriptions.cfg b/src/couch_mrview/priv/stats_descriptions.cfg
deleted file mode 100644
index 95634670d..000000000
--- a/src/couch_mrview/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-% Style guide for descriptions: Start with a lowercase letter & do not add
-% a trailing full-stop / period
-% Please keep this in alphabetical order
-
-{[couchdb, mrview, map_doc], [
- {type, counter},
- {desc, <<"number of documents mapped in the view server">>}
-]}.
-{[couchdb, mrview, emits], [
- {type, counter},
- {desc, <<"number of invocations of `emit' in map functions in the view server">>}
-]}.
diff --git a/src/couch_mrview/rebar.config b/src/couch_mrview/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/couch_mrview/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/couch_mrview/src/couch_mrview.app.src b/src/couch_mrview/src/couch_mrview.app.src
deleted file mode 100644
index 735d1f8a0..000000000
--- a/src/couch_mrview/src/couch_mrview.app.src
+++ /dev/null
@@ -1,18 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_mrview, [
- {description, "CouchDB Map/Reduce Views"},
- {vsn, git},
- {registered, []},
- {applications, [kernel, stdlib, couch_index, couch_stats, ioq]}
-]}.
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
deleted file mode 100644
index d8640c903..000000000
--- a/src/couch_mrview/src/couch_mrview.erl
+++ /dev/null
@@ -1,748 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview).
-
--export([validate/2]).
--export([query_all_docs/2, query_all_docs/4]).
--export([query_view/3, query_view/4, query_view/6, get_view_index_pid/4]).
--export([get_info/2]).
--export([trigger_update/2, trigger_update/3]).
--export([get_view_info/3]).
--export([refresh/2]).
--export([compact/2, compact/3, cancel_compaction/2]).
--export([cleanup/1]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--record(mracc, {
- db,
- meta_sent = false,
- total_rows,
- offset,
- limit,
- skip,
- group_level,
- doc_info,
- callback,
- user_acc,
- last_go = ok,
- reduce_fun,
- finalizer,
- update_seq,
- args
-}).
-
-validate_ddoc_fields(DDoc) ->
- MapFuncType = map_function_type(DDoc),
- lists:foreach(
- fun(Path) ->
- validate_ddoc_fields(DDoc, Path)
- end,
- [
- [{<<"filters">>, object}, {any, [object, string]}],
- [{<<"language">>, string}],
- [{<<"lists">>, object}, {any, [object, string]}],
- [{<<"options">>, object}],
- [{<<"options">>, object}, {<<"include_design">>, boolean}],
- [{<<"options">>, object}, {<<"local_seq">>, boolean}],
- [{<<"options">>, object}, {<<"partitioned">>, boolean}],
- [{<<"rewrites">>, [string, array]}],
- [{<<"shows">>, object}, {any, [object, string]}],
- [{<<"updates">>, object}, {any, [object, string]}],
- [{<<"validate_doc_update">>, string}],
- [{<<"views">>, object}, {<<"lib">>, object}],
- [{<<"views">>, object}, {any, object}, {<<"map">>, MapFuncType}],
- [{<<"views">>, object}, {any, object}, {<<"reduce">>, string}]
- ]
- ),
- require_map_function_for_views(DDoc),
- ok.
-
-require_map_function_for_views({Props}) ->
- case couch_util:get_value(<<"views">>, Props) of
- undefined ->
- ok;
- {Views} ->
- lists:foreach(
- fun
- ({<<"lib">>, _}) ->
- ok;
- ({Key, {Value}}) ->
- case couch_util:get_value(<<"map">>, Value) of
- undefined ->
- throw(
- {invalid_design_doc,
- <<"View `", Key/binary, "` must contain map function">>}
- );
- _ ->
- ok
- end
- end,
- Views
- ),
- ok
- end.
-
-validate_ddoc_fields(DDoc, Path) ->
- case validate_ddoc_fields(DDoc, Path, []) of
- ok ->
- ok;
- {error, {FailedPath0, Type0}} ->
- FailedPath = iolist_to_binary(join(FailedPath0, <<".">>)),
- Type = format_type(Type0),
- throw(
- {invalid_design_doc,
- <<"`", FailedPath/binary, "` field must have ", Type/binary, " type">>}
- )
- end.
-
-validate_ddoc_fields(undefined, _, _) ->
- ok;
-validate_ddoc_fields(_, [], _) ->
- ok;
-validate_ddoc_fields({KVS} = Props, [{any, Type} | Rest], Acc) ->
- lists:foldl(
- fun
- ({Key, _}, ok) ->
- validate_ddoc_fields(Props, [{Key, Type} | Rest], Acc);
- ({_, _}, {error, _} = Error) ->
- Error
- end,
- ok,
- KVS
- );
-validate_ddoc_fields({KVS} = Props, [{Key, Type} | Rest], Acc) ->
- case validate_ddoc_field(Props, {Key, Type}) of
- ok ->
- validate_ddoc_fields(
- couch_util:get_value(Key, KVS),
- Rest,
- [Key | Acc]
- );
- error ->
- {error, {[Key | Acc], Type}};
- {error, Key1} ->
- {error, {[Key1 | Acc], Type}}
- end.
-
-validate_ddoc_field(undefined, Type) when is_atom(Type) ->
- ok;
-validate_ddoc_field(_, any) ->
- ok;
-validate_ddoc_field(Value, Types) when is_list(Types) ->
- lists:foldl(
- fun
- (_, ok) -> ok;
- (Type, _) -> validate_ddoc_field(Value, Type)
- end,
- error,
- Types
- );
-validate_ddoc_field(Value, string) when is_binary(Value) ->
- ok;
-validate_ddoc_field(Value, array) when is_list(Value) ->
- ok;
-validate_ddoc_field({Value}, object) when is_list(Value) ->
- ok;
-validate_ddoc_field(Value, boolean) when is_boolean(Value) ->
- ok;
-validate_ddoc_field({Props}, {any, Type}) ->
- validate_ddoc_field1(Props, Type);
-validate_ddoc_field({Props}, {Key, Type}) ->
- validate_ddoc_field(couch_util:get_value(Key, Props), Type);
-validate_ddoc_field(_, _) ->
- error.
-
-validate_ddoc_field1([], _) ->
- ok;
-validate_ddoc_field1([{Key, Value} | Rest], Type) ->
- case validate_ddoc_field(Value, Type) of
- ok ->
- validate_ddoc_field1(Rest, Type);
- error ->
- {error, Key}
- end.
-
-map_function_type({Props}) ->
- case couch_util:get_value(<<"language">>, Props) of
- <<"query">> -> object;
- _ -> string
- end.
-
-format_type(Type) when is_atom(Type) ->
- ?l2b(atom_to_list(Type));
-format_type(Types) when is_list(Types) ->
- iolist_to_binary(join(lists:map(fun atom_to_list/1, Types), <<" or ">>)).
-
-join(L, Sep) ->
- join(L, Sep, []).
-join([H | []], _, Acc) ->
- [H | Acc];
-join([H | T], Sep, Acc) ->
- join(T, Sep, [Sep, H | Acc]).
-
-validate(Db, DDoc) ->
- ok = validate_ddoc_fields(DDoc#doc.body),
- GetName = fun
- (#mrview{map_names = [Name | _]}) -> Name;
- (#mrview{reduce_funs = [{Name, _} | _]}) -> Name;
- (_) -> null
- end,
- ValidateView = fun(Proc, #mrview{def = MapSrc, reduce_funs = Reds} = View) ->
- couch_query_servers:try_compile(Proc, map, GetName(View), MapSrc),
- lists:foreach(
- fun
- ({_RedName, <<"_sum", _/binary>>}) ->
- ok;
- ({_RedName, <<"_count", _/binary>>}) ->
- ok;
- ({_RedName, <<"_stats", _/binary>>}) ->
- ok;
- ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
- ok;
- ({_RedName, <<"_", _/binary>> = Bad}) ->
- Msg = ["`", Bad, "` is not a supported reduce function."],
- throw({invalid_design_doc, Msg});
- ({RedName, RedSrc}) ->
- couch_query_servers:try_compile(Proc, reduce, RedName, RedSrc)
- end,
- Reds
- )
- end,
- {ok, #mrst{
- language = Lang,
- views = Views,
- partitioned = Partitioned
- }} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
-
- case {couch_db:is_partitioned(Db), Partitioned} of
- {false, true} ->
- throw(
- {invalid_design_doc, <<
- "partitioned option cannot be true in a "
- "non-partitioned database."
- >>}
- );
- {_, _} ->
- ok
- end,
-
- try Views =/= [] andalso couch_query_servers:get_os_process(Lang) of
- false ->
- ok;
- Proc ->
- try
- lists:foreach(fun(V) -> ValidateView(Proc, V) end, Views)
- after
- couch_query_servers:ret_os_process(Proc)
- end
- catch
- {unknown_query_language, _Lang} ->
- %% Allow users to save ddocs written in unknown languages
- ok
- end.
-
-query_all_docs(Db, Args) ->
- query_all_docs(Db, Args, fun default_cb/2, []).
-
-query_all_docs(Db, Args, Callback, Acc) when is_list(Args) ->
- query_all_docs(Db, to_mrargs(Args), Callback, Acc);
-query_all_docs(Db, Args0, Callback, Acc) ->
- Sig = couch_util:with_db(Db, fun(WDb) ->
- {ok, Info} = couch_db:get_db_info(WDb),
- couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Info)))
- end),
- Args1 = Args0#mrargs{view_type = map},
- Args2 = couch_mrview_util:validate_all_docs_args(Db, Args1),
- {ok, Acc1} =
- case Args2#mrargs.preflight_fun of
- PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc);
- _ -> {ok, Acc}
- end,
- all_docs_fold(Db, Args2, Callback, Acc1).
-
-query_view(Db, DDoc, VName) ->
- query_view(Db, DDoc, VName, #mrargs{}).
-
-query_view(Db, DDoc, VName, Args) when is_list(Args) ->
- query_view(Db, DDoc, VName, to_mrargs(Args), fun default_cb/2, []);
-query_view(Db, DDoc, VName, Args) ->
- query_view(Db, DDoc, VName, Args, fun default_cb/2, []).
-
-query_view(Db, DDoc, VName, Args, Callback, Acc) when is_list(Args) ->
- query_view(Db, DDoc, VName, to_mrargs(Args), Callback, Acc);
-query_view(Db, DDoc, VName, Args0, Callback, Acc0) ->
- case couch_mrview_util:get_view(Db, DDoc, VName, Args0) of
- {ok, VInfo, Sig, Args} ->
- {ok, Acc1} =
- case Args#mrargs.preflight_fun of
- PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc0);
- _ -> {ok, Acc0}
- end,
- query_view(Db, VInfo, Args, Callback, Acc1);
- ddoc_updated ->
- Callback(ok, ddoc_updated)
- end.
-
-get_view_index_pid(Db, DDoc, ViewName, Args0) ->
- couch_mrview_util:get_view_index_pid(Db, DDoc, ViewName, Args0).
-
-query_view(Db, {Type, View, Ref}, Args, Callback, Acc) ->
- try
- case Type of
- map -> map_fold(Db, View, Args, Callback, Acc);
- red -> red_fold(Db, View, Args, Callback, Acc)
- end
- after
- erlang:demonitor(Ref, [flush])
- end.
-
-get_info(Db, DDoc) ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- couch_index:get_info(Pid).
-
-trigger_update(Db, DDoc) ->
- trigger_update(Db, DDoc, couch_db:get_update_seq(Db)).
-
-trigger_update(Db, DDoc, UpdateSeq) ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- couch_index:trigger_update(Pid, UpdateSeq).
-
-%% get information on a view
-get_view_info(Db, DDoc, VName) ->
- {ok, {_, View, _}, _, _Args} = couch_mrview_util:get_view(
- Db,
- DDoc,
- VName,
- #mrargs{}
- ),
-
- %% get the total number of rows
- {ok, TotalRows} = couch_mrview_util:get_row_count(View),
-
- {ok, [
- {update_seq, View#mrview.update_seq},
- {purge_seq, View#mrview.purge_seq},
- {total_rows, TotalRows}
- ]}.
-
-%% @doc refresh a view index
-refresh(DbName, DDoc) when is_binary(DbName) ->
- UpdateSeq = couch_util:with_db(DbName, fun(WDb) ->
- couch_db:get_update_seq(WDb)
- end),
-
- case couch_index_server:get_index(couch_mrview_index, DbName, DDoc) of
- {ok, Pid} ->
- case catch couch_index:get_state(Pid, UpdateSeq) of
- {ok, _} -> ok;
- Error -> {error, Error}
- end;
- Error ->
- {error, Error}
- end;
-refresh(Db, DDoc) ->
- refresh(couch_db:name(Db), DDoc).
-
-compact(Db, DDoc) ->
- compact(Db, DDoc, []).
-
-compact(Db, DDoc, Opts) ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- couch_index:compact(Pid, Opts).
-
-cancel_compaction(Db, DDoc) ->
- {ok, IPid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- {ok, CPid} = couch_index:get_compactor_pid(IPid),
- ok = couch_index_compactor:cancel(CPid),
-
- % Cleanup the compaction file if it exists
- {ok, #mrst{sig = Sig, db_name = DbName}} = couch_index:get_state(IPid, 0),
- couch_mrview_util:delete_compaction_file(DbName, Sig),
- ok.
-
-cleanup(Db) ->
- couch_mrview_cleanup:run(Db).
-
-all_docs_fold(Db, #mrargs{keys = undefined} = Args, Callback, UAcc) ->
- ReduceFun = get_reduce_fun(Args),
- Total = get_total_rows(Db, Args),
- UpdateSeq = get_update_seq(Db, Args),
- Acc = #mracc{
- db = Db,
- total_rows = Total,
- limit = Args#mrargs.limit,
- skip = Args#mrargs.skip,
- callback = Callback,
- user_acc = UAcc,
- reduce_fun = ReduceFun,
- update_seq = UpdateSeq,
- args = Args
- },
- [Opts1] = couch_mrview_util:all_docs_key_opts(Args),
- % TODO: This is a terrible hack for now. We'll probably have
- % to rewrite _all_docs to not be part of mrview and not expect
- % a btree. For now non-btrees will just have to pass 0 or
- % some fake reductions to get an offset.
- Opts2 = [include_reductions | Opts1],
- FunName =
- case couch_util:get_value(namespace, Args#mrargs.extra) of
- <<"_design">> -> fold_design_docs;
- <<"_local">> -> fold_local_docs;
- _ -> fold_docs
- end,
- {ok, Offset, FinalAcc} = couch_db:FunName(Db, fun map_fold/3, Acc, Opts2),
- finish_fold(FinalAcc, [{total, Total}, {offset, Offset}]);
-all_docs_fold(Db, #mrargs{direction = Dir, keys = Keys0} = Args, Callback, UAcc) ->
- ReduceFun = get_reduce_fun(Args),
- Total = get_total_rows(Db, Args),
- UpdateSeq = get_update_seq(Db, Args),
- Acc = #mracc{
- db = Db,
- total_rows = Total,
- limit = Args#mrargs.limit,
- skip = Args#mrargs.skip,
- callback = Callback,
- user_acc = UAcc,
- reduce_fun = ReduceFun,
- update_seq = UpdateSeq,
- args = Args
- },
- % Backwards compatibility hack. The old _all_docs iterates keys
- % in reverse if descending=true was passed. Here we'll just
- % reverse the list instead.
- Keys =
- if
- Dir =:= fwd -> Keys0;
- true -> lists:reverse(Keys0)
- end,
-
- FoldFun = fun(Key, Acc0) ->
- DocInfo = (catch couch_db:get_doc_info(Db, Key)),
- {Doc, Acc1} =
- case DocInfo of
- {ok, #doc_info{id = Id, revs = [RevInfo | _RestRevs]} = DI} ->
- Rev = couch_doc:rev_to_str(RevInfo#rev_info.rev),
- Props =
- [{rev, Rev}] ++
- case RevInfo#rev_info.deleted of
- true -> [{deleted, true}];
- false -> []
- end,
- {{{Id, Id}, {Props}}, Acc0#mracc{doc_info = DI}};
- not_found ->
- {{{Key, error}, not_found}, Acc0}
- end,
- {_, Acc2} = map_fold(Doc, {[], [{0, 0, 0}]}, Acc1),
- Acc2
- end,
- FinalAcc = lists:foldl(FoldFun, Acc, Keys),
- finish_fold(FinalAcc, [{total, Total}]).
-
-map_fold(Db, View, Args, Callback, UAcc) ->
- {ok, Total} = couch_mrview_util:get_row_count(View),
- Acc = #mracc{
- db = Db,
- total_rows = Total,
- limit = Args#mrargs.limit,
- skip = Args#mrargs.skip,
- callback = Callback,
- user_acc = UAcc,
- reduce_fun = fun couch_mrview_util:reduce_to_count/1,
- update_seq = View#mrview.update_seq,
- args = Args
- },
- OptList = couch_mrview_util:key_opts(Args),
- {Reds, Acc2} = lists:foldl(
- fun(Opts, {_, Acc0}) ->
- {ok, R, A} = couch_mrview_util:fold(View, fun map_fold/3, Acc0, Opts),
- {R, A}
- end,
- {nil, Acc},
- OptList
- ),
- Offset = couch_mrview_util:reduce_to_count(Reds),
- finish_fold(Acc2, [{total, Total}, {offset, Offset}]).
-
-map_fold(#full_doc_info{} = FullDocInfo, OffsetReds, Acc) ->
- % matches for _all_docs and translates #full_doc_info{} -> KV pair
- case couch_doc:to_doc_info(FullDocInfo) of
- #doc_info{id = Id, revs = [#rev_info{deleted = false, rev = Rev} | _]} = DI ->
- Value = {[{rev, couch_doc:rev_to_str(Rev)}]},
- NS = couch_util:get_value(namespace, Acc#mracc.args#mrargs.extra),
- case Id of
- <<?DESIGN_DOC_PREFIX, _/binary>> when NS =:= <<"_non_design">> -> {ok, Acc};
- _ -> map_fold({{Id, Id}, Value}, OffsetReds, Acc#mracc{doc_info = DI})
- end;
- #doc_info{revs = [#rev_info{deleted = true} | _]} ->
- {ok, Acc}
- end;
-map_fold(_KV, _Offset, #mracc{skip = N} = Acc) when N > 0 ->
- {ok, Acc#mracc{skip = N - 1, last_go = ok}};
-map_fold(KV, OffsetReds, #mracc{offset = undefined} = Acc) ->
- #mracc{
- total_rows = Total,
- callback = Callback,
- user_acc = UAcc0,
- reduce_fun = Reduce,
- update_seq = UpdateSeq,
- args = Args
- } = Acc,
- Offset = Reduce(OffsetReds),
- Meta = make_meta(Args, UpdateSeq, [{total, Total}, {offset, Offset}]),
- {Go, UAcc1} = Callback(Meta, UAcc0),
- Acc1 = Acc#mracc{meta_sent = true, offset = Offset, user_acc = UAcc1, last_go = Go},
- case Go of
- ok -> map_fold(KV, OffsetReds, Acc1);
- stop -> {stop, Acc1}
- end;
-map_fold(_KV, _Offset, #mracc{limit = 0} = Acc) ->
- {stop, Acc};
-map_fold({{Key, Id}, Val}, _Offset, Acc) ->
- #mracc{
- db = Db,
- limit = Limit,
- doc_info = DI,
- callback = Callback,
- user_acc = UAcc0,
- args = Args
- } = Acc,
- Doc =
- case DI of
- #doc_info{} -> couch_mrview_util:maybe_load_doc(Db, DI, Args);
- _ -> couch_mrview_util:maybe_load_doc(Db, Id, Val, Args)
- end,
- Row = [{id, Id}, {key, Key}, {value, Val}] ++ Doc,
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{
- limit = Limit - 1,
- doc_info = undefined,
- user_acc = UAcc1,
- last_go = Go
- }};
-map_fold(#doc{id = <<"_local/", _/binary>>} = Doc, _Offset, #mracc{} = Acc) ->
- #mracc{
- limit = Limit,
- callback = Callback,
- user_acc = UAcc0,
- args = Args
- } = Acc,
- #doc{
- id = DocId,
- revs = {Pos, [RevId | _]}
- } = Doc,
- Rev = {Pos, RevId},
- Row =
- [
- {id, DocId},
- {key, DocId},
- {value, {[{rev, couch_doc:rev_to_str(Rev)}]}}
- ] ++
- if
- not Args#mrargs.include_docs -> [];
- true -> [{doc, couch_doc:to_json_obj(Doc, Args#mrargs.doc_options)}]
- end,
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{
- limit = Limit - 1,
- reduce_fun = undefined,
- doc_info = undefined,
- user_acc = UAcc1,
- last_go = Go
- }}.
-
-red_fold(Db, {NthRed, _Lang, View} = RedView, Args, Callback, UAcc) ->
- Finalizer =
- case couch_util:get_value(finalizer, Args#mrargs.extra) of
- undefined ->
- {_, FunSrc} = lists:nth(NthRed, View#mrview.reduce_funs),
- FunSrc;
- CustomFun ->
- CustomFun
- end,
- Acc = #mracc{
- db = Db,
- total_rows = null,
- limit = Args#mrargs.limit,
- skip = Args#mrargs.skip,
- group_level = Args#mrargs.group_level,
- callback = Callback,
- user_acc = UAcc,
- update_seq = View#mrview.update_seq,
- finalizer = Finalizer,
- args = Args
- },
- Grouping = {key_group_level, Args#mrargs.group_level},
- OptList = couch_mrview_util:key_opts(Args, [Grouping]),
- Acc2 = lists:foldl(
- fun(Opts, Acc0) ->
- {ok, Acc1} =
- couch_mrview_util:fold_reduce(RedView, fun red_fold/3, Acc0, Opts),
- Acc1
- end,
- Acc,
- OptList
- ),
- finish_fold(Acc2, []).
-
-red_fold({p, _Partition, Key}, Red, Acc) ->
- red_fold(Key, Red, Acc);
-red_fold(_Key, _Red, #mracc{skip = N} = Acc) when N > 0 ->
- {ok, Acc#mracc{skip = N - 1, last_go = ok}};
-red_fold(Key, Red, #mracc{meta_sent = false} = Acc) ->
- #mracc{
- args = Args,
- callback = Callback,
- user_acc = UAcc0,
- update_seq = UpdateSeq
- } = Acc,
- Meta = make_meta(Args, UpdateSeq, []),
- {Go, UAcc1} = Callback(Meta, UAcc0),
- Acc1 = Acc#mracc{user_acc = UAcc1, meta_sent = true, last_go = Go},
- case Go of
- ok -> red_fold(Key, Red, Acc1);
- _ -> {Go, Acc1}
- end;
-red_fold(_Key, _Red, #mracc{limit = 0} = Acc) ->
- {stop, Acc};
-red_fold(_Key, Red, #mracc{group_level = 0} = Acc) ->
- #mracc{
- finalizer = Finalizer,
- limit = Limit,
- callback = Callback,
- user_acc = UAcc0
- } = Acc,
- Row = [{key, null}, {value, maybe_finalize(Red, Finalizer)}],
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc = UAcc1, limit = Limit - 1, last_go = Go}};
-red_fold(Key, Red, #mracc{group_level = exact} = Acc) ->
- #mracc{
- finalizer = Finalizer,
- limit = Limit,
- callback = Callback,
- user_acc = UAcc0
- } = Acc,
- Row = [{key, Key}, {value, maybe_finalize(Red, Finalizer)}],
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc = UAcc1, limit = Limit - 1, last_go = Go}};
-red_fold(K, Red, #mracc{group_level = I} = Acc) when I > 0, is_list(K) ->
- #mracc{
- finalizer = Finalizer,
- limit = Limit,
- callback = Callback,
- user_acc = UAcc0
- } = Acc,
- Row = [{key, lists:sublist(K, I)}, {value, maybe_finalize(Red, Finalizer)}],
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc = UAcc1, limit = Limit - 1, last_go = Go}};
-red_fold(K, Red, #mracc{group_level = I} = Acc) when I > 0 ->
- #mracc{
- finalizer = Finalizer,
- limit = Limit,
- callback = Callback,
- user_acc = UAcc0
- } = Acc,
- Row = [{key, K}, {value, maybe_finalize(Red, Finalizer)}],
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc = UAcc1, limit = Limit - 1, last_go = Go}}.
-
-maybe_finalize(Red, null) ->
- Red;
-maybe_finalize(Red, RedSrc) ->
- {ok, Finalized} = couch_query_servers:finalize(RedSrc, Red),
- Finalized.
-
-finish_fold(#mracc{last_go = ok, update_seq = UpdateSeq} = Acc, ExtraMeta) ->
- #mracc{callback = Callback, user_acc = UAcc, args = Args} = Acc,
-    % Possibly send meta info
- Meta = make_meta(Args, UpdateSeq, ExtraMeta),
- {Go, UAcc1} =
- case Acc#mracc.meta_sent of
- false -> Callback(Meta, UAcc);
- _ -> {ok, Acc#mracc.user_acc}
- end,
- % Notify callback that the fold is complete.
- {_, UAcc2} =
- case Go of
- ok -> Callback(complete, UAcc1);
- _ -> {ok, UAcc1}
- end,
- {ok, UAcc2};
-finish_fold(#mracc{user_acc = UAcc}, _ExtraMeta) ->
- {ok, UAcc}.
-
-make_meta(Args, UpdateSeq, Base) ->
- case Args#mrargs.update_seq of
- true -> {meta, Base ++ [{update_seq, UpdateSeq}]};
- _ -> {meta, Base}
- end.
-
-get_reduce_fun(#mrargs{extra = Extra}) ->
- case couch_util:get_value(namespace, Extra) of
- <<"_local">> ->
- fun(_) -> null end;
- _ ->
- fun couch_mrview_util:all_docs_reduce_to_count/1
- end.
-
-get_total_rows(Db, #mrargs{extra = Extra}) ->
- case couch_util:get_value(namespace, Extra) of
- <<"_local">> ->
- null;
- <<"_design">> ->
- {ok, N} = couch_db:get_design_doc_count(Db),
- N;
- <<"_non_design">> ->
- {ok, N} = couch_db:get_design_doc_count(Db),
- {ok, Info} = couch_db:get_db_info(Db),
- couch_util:get_value(doc_count, Info) - N;
- _ ->
- {ok, Info} = couch_db:get_db_info(Db),
- couch_util:get_value(doc_count, Info)
- end.
-
-get_update_seq(Db, #mrargs{extra = Extra}) ->
- case couch_util:get_value(namespace, Extra) of
- <<"_local">> ->
- null;
- _ ->
- couch_db:get_update_seq(Db)
- end.
-
-default_cb(complete, Acc) ->
- {ok, lists:reverse(Acc)};
-default_cb({final, Info}, []) ->
- {ok, [Info]};
-default_cb({final, _}, Acc) ->
- {ok, Acc};
-default_cb(ok, ddoc_updated) ->
- {ok, ddoc_updated};
-default_cb(Row, Acc) ->
- {ok, [Row | Acc]}.
-
-to_mrargs(KeyList) ->
- lists:foldl(
- fun({Key, Value}, Acc) ->
- Index = lookup_index(couch_util:to_existing_atom(Key)),
- setelement(Index, Acc, Value)
- end,
- #mrargs{},
- KeyList
- ).
-
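-% Map an #mrargs field name to its position in the record tuple; element 1 is
-% the record tag, so fields start at position 2.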
-lookup_index(Key) ->
- Index = lists:zip(
- record_info(fields, mrargs), lists:seq(2, record_info(size, mrargs))
- ),
- couch_util:get_value(Key, Index).
diff --git a/src/couch_mrview/src/couch_mrview_cleanup.erl b/src/couch_mrview/src/couch_mrview_cleanup.erl
deleted file mode 100644
index 417605c55..000000000
--- a/src/couch_mrview/src/couch_mrview_cleanup.erl
+++ /dev/null
@@ -1,68 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_cleanup).
-
--export([run/1]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-run(Db) ->
- RootDir = couch_index_util:root_dir(),
- DbName = couch_db:name(Db),
-
- {ok, DesignDocs} = couch_db:get_design_docs(Db),
- SigFiles = lists:foldl(
- fun(DDocInfo, SFAcc) ->
- {ok, DDoc} = couch_db:open_doc_int(Db, DDocInfo, [ejson_body]),
- {ok, InitState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- Sig = InitState#mrst.sig,
- IFName = couch_mrview_util:index_file(DbName, Sig),
- CFName = couch_mrview_util:compaction_file(DbName, Sig),
- [IFName, CFName | SFAcc]
- end,
- [],
- [DD || DD <- DesignDocs, DD#full_doc_info.deleted == false]
- ),
-
- IdxDir = couch_index_util:index_dir(mrview, DbName),
- DiskFiles = filelib:wildcard(filename:join(IdxDir, "*")),
-
- % We need to delete files that have no ddoc.
- ToDelete = DiskFiles -- SigFiles,
-
- lists:foreach(
- fun(FN) ->
- couch_log:debug("Deleting stale view file: ~s", [FN]),
- couch_file:delete(RootDir, FN, [sync]),
- case couch_mrview_util:verify_view_filename(FN) of
- true ->
- Sig = couch_mrview_util:get_signature_from_filename(FN),
- DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
- case couch_db:open_doc(Db, DocId, []) of
- {ok, LocalPurgeDoc} ->
- couch_db:update_doc(
- Db,
- LocalPurgeDoc#doc{deleted = true},
- [?ADMIN_CTX]
- );
- {not_found, _} ->
- ok
- end;
- false ->
- ok
- end
- end,
- ToDelete
- ),
- ok.
diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl
deleted file mode 100644
index 28e5a9b3d..000000000
--- a/src/couch_mrview/src/couch_mrview_compactor.erl
+++ /dev/null
@@ -1,317 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_compactor).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([compact/3, swap_compacted/2, remove_compacted/1]).
-
--record(acc, {
- btree = nil,
- last_id = nil,
- kvs = [],
- kvs_size = 0,
- changes = 0,
- total_changes
-}).
-
--define(DEFAULT_RECOMPACT_RETRY_COUNT, 3).
-
-compact(_Db, State, Opts) ->
- case lists:member(recompact, Opts) of
- false -> compact(State);
- true -> recompact(State)
- end.
-
-compact(State) ->
- #mrst{
- db_name = DbName,
- idx_name = IdxName,
- sig = Sig,
- update_seq = Seq,
- id_btree = IdBtree,
- views = Views
- } = State,
- erlang:put(io_priority, {view_compact, DbName, IdxName}),
-
- {EmptyState, NumDocIds} = couch_util:with_db(DbName, fun(Db) ->
- CompactFName = couch_mrview_util:compaction_file(DbName, Sig),
- {ok, Fd} = couch_mrview_util:open_file(CompactFName),
- ESt = couch_mrview_util:reset_index(Db, Fd, State),
-
- {ok, Count} = couch_db:get_doc_count(Db),
-
- {ESt, Count}
- end),
-
- #mrst{
- id_btree = EmptyIdBtree,
- views = EmptyViews
- } = EmptyState,
-
- TotalChanges = lists:foldl(
- fun(View, Acc) ->
- {ok, Kvs} = couch_mrview_util:get_row_count(View),
- Acc + Kvs
- end,
- NumDocIds,
- Views
- ),
-
- couch_task_status:add_task([
- {type, view_compaction},
- {database, DbName},
- {design_document, IdxName},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ]),
-
- BufferSize0 = config:get(
- "view_compaction", "keyvalue_buffer_size", "2097152"
- ),
- BufferSize = list_to_integer(BufferSize0),
-
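-    % Copy the id btree in buffered batches. Consecutive entries for the same
-    % DocId are merged (and logged as dupes) before being written out.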
- FoldFun = fun({DocId, ViewIdKeys} = KV, Acc) ->
- #acc{btree = Bt, kvs = Kvs, kvs_size = KvsSize} = Acc,
- NewKvs =
- case Kvs of
- [{DocId, OldViewIdKeys} | Rest] ->
- couch_log:error(
- "Dupes of ~s in ~s ~s",
- [DocId, DbName, IdxName]
- ),
- [{DocId, ViewIdKeys ++ OldViewIdKeys} | Rest];
- _ ->
- [KV | Kvs]
- end,
- KvsSize2 = KvsSize + ?term_size(KV),
- case KvsSize2 >= BufferSize of
- true ->
- {ok, Bt2} = couch_btree:add(Bt, lists:reverse(NewKvs)),
- Acc2 = update_task(Acc, length(NewKvs)),
- {ok, Acc2#acc{
- btree = Bt2, kvs = [], kvs_size = 0, last_id = DocId
- }};
- _ ->
- {ok, Acc#acc{
- kvs = NewKvs, kvs_size = KvsSize2, last_id = DocId
- }}
- end
- end,
-
- InitAcc = #acc{total_changes = TotalChanges, btree = EmptyIdBtree},
- {ok, _, FinalAcc} = couch_btree:foldl(IdBtree, FoldFun, InitAcc),
- #acc{btree = Bt3, kvs = Uncopied} = FinalAcc,
- {ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
- FinalAcc2 = update_task(FinalAcc, length(Uncopied)),
-
- {NewViews, _} = lists:mapfoldl(
- fun({View, EmptyView}, Acc) ->
- compact_view(View, EmptyView, BufferSize, Acc)
- end,
- FinalAcc2,
- lists:zip(Views, EmptyViews)
- ),
-
- unlink(EmptyState#mrst.fd),
- {ok, EmptyState#mrst{
- id_btree = NewIdBtree,
- views = NewViews,
- update_seq = Seq
- }}.
-
-recompact(State) ->
- recompact(State, recompact_retry_count()).
-
-recompact(#mrst{db_name = DbName, idx_name = IdxName}, 0) ->
- erlang:error({exceeded_recompact_retry_count, [{db_name, DbName}, {idx_name, IdxName}]});
-recompact(State, RetryCount) ->
- Self = self(),
- link(State#mrst.fd),
- {Pid, Ref} = erlang:spawn_monitor(fun() ->
- couch_index_updater:update(Self, couch_mrview_index, State)
- end),
- recompact_loop(Pid, Ref, State, RetryCount).
-
-recompact_loop(Pid, Ref, State, RetryCount) ->
- receive
- {'$gen_cast', {new_state, State2}} ->
- % We've made progress so reset RetryCount
- recompact_loop(Pid, Ref, State2, recompact_retry_count());
- {'DOWN', Ref, _, _, {updated, Pid, State2}} ->
- unlink(State#mrst.fd),
- {ok, State2};
- {'DOWN', Ref, _, _, Reason} ->
- unlink(State#mrst.fd),
-            couch_log:warning("Error during recompaction: ~p", [Reason]),
- recompact(State, RetryCount - 1)
- end.
-
-recompact_retry_count() ->
- config:get_integer(
- "view_compaction",
- "recompact_retry_count",
- ?DEFAULT_RECOMPACT_RETRY_COUNT
- ).
-
-%% @spec compact_view(View, EmptyView, BufferSize, Acc) -> {CompactView, NewAcc}
-compact_view(#mrview{id_num = VID} = View, EmptyView, BufferSize, Acc0) ->
- {NewBt, FinalAcc} = compact_view_btree(
- View#mrview.btree,
- EmptyView#mrview.btree,
- VID,
- BufferSize,
- Acc0
- ),
-
- {
- EmptyView#mrview{
- btree = NewBt,
- update_seq = View#mrview.update_seq,
- purge_seq = View#mrview.purge_seq
- },
- FinalAcc
- }.
-
-compact_view_btree(Btree, EmptyBtree, VID, BufferSize, Acc0) ->
- Fun = fun(KV, #acc{btree = Bt, kvs = Kvs, kvs_size = KvsSize} = Acc) ->
- KvsSize2 = KvsSize + ?term_size(KV),
- if
- KvsSize2 >= BufferSize ->
- {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV | Kvs])),
- Acc2 = update_task(VID, Acc, 1 + length(Kvs)),
- {ok, Acc2#acc{btree = Bt2, kvs = [], kvs_size = 0}};
- true ->
- {ok, Acc#acc{kvs = [KV | Kvs], kvs_size = KvsSize2}}
- end
- end,
-
- InitAcc = Acc0#acc{kvs = [], kvs_size = 0, btree = EmptyBtree},
- {ok, _, FinalAcc} = couch_btree:foldl(Btree, Fun, InitAcc),
- #acc{btree = Bt3, kvs = Uncopied} = FinalAcc,
- {ok, NewBt} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
- FinalAcc2 = update_task(VID, FinalAcc, length(Uncopied)),
- {NewBt, FinalAcc2}.
-
-update_task(Acc, ChangesInc) ->
- update_task(null, Acc, ChangesInc).
-
-update_task(VID, #acc{changes = Changes, total_changes = Total} = Acc, ChangesInc) ->
- Phase =
- if
- is_integer(VID) -> view;
- true -> ids
- end,
- Changes2 = Changes + ChangesInc,
- Progress =
- if
- Total == 0 -> 0;
- true -> (Changes2 * 100) div Total
- end,
- couch_task_status:update([
- {phase, Phase},
- {view, VID},
- {changes_done, Changes2},
- {total_changes, Total},
- {progress, Progress}
- ]),
- Acc#acc{changes = Changes2}.
-
-swap_compacted(OldState, NewState) ->
- #mrst{
- fd = Fd
- } = OldState,
- #mrst{
- sig = Sig,
- db_name = DbName,
- fd = NewFd
- } = NewState,
-
- link(NewState#mrst.fd),
- Ref = erlang:monitor(process, NewState#mrst.fd),
-
- RootDir = couch_index_util:root_dir(),
- IndexFName = couch_mrview_util:index_file(DbName, Sig),
- CompactFName = couch_mrview_util:compaction_file(DbName, Sig),
-
- {ok, Pre} = couch_file:bytes(Fd),
- {ok, Post} = couch_file:bytes(NewFd),
- couch_log:notice("Compaction swap for view ~s ~p ~p", [
- IndexFName,
- Pre,
- Post
- ]),
- ok = couch_file:delete(RootDir, IndexFName),
- ok = file:rename(CompactFName, IndexFName),
-
- unlink(OldState#mrst.fd),
- erlang:demonitor(OldState#mrst.fd_monitor, [flush]),
-
- {ok, NewState#mrst{fd_monitor = Ref}}.
-
-remove_compacted(#mrst{sig = Sig, db_name = DbName} = State) ->
- RootDir = couch_index_util:root_dir(),
- CompactFName = couch_mrview_util:compaction_file(DbName, Sig),
- ok = couch_file:delete(RootDir, CompactFName),
- {ok, State}.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- meck:new(couch_index_updater),
- meck:new(couch_log).
-
-teardown_all(_) ->
- meck:unload().
-
-recompact_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- recompact_success_after_progress(),
- recompact_exceeded_retry_count()
- ]
- }.
-
-recompact_success_after_progress() ->
- ?_test(begin
- ok = meck:expect(couch_index_updater, update, fun(Pid, _, #mrst{update_seq = 0} = State) ->
- Pid ! {'$gen_cast', {new_state, State#mrst{update_seq = 1}}},
- timer:sleep(100),
- exit({updated, self(), State#mrst{update_seq = 2}})
- end),
- State = #mrst{fd = self(), update_seq = 0},
- ?assertEqual({ok, State#mrst{update_seq = 2}}, recompact(State))
- end).
-
-recompact_exceeded_retry_count() ->
- ?_test(begin
- ok = meck:expect(
- couch_index_updater,
- update,
- fun(_, _, _) ->
- exit(error)
- end
- ),
- ok = meck:expect(couch_log, warning, fun(_, _) -> ok end),
- State = #mrst{fd = self(), db_name = foo, idx_name = bar},
- ExpectedError = {exceeded_recompact_retry_count, [{db_name, foo}, {idx_name, bar}]},
- ?assertError(ExpectedError, recompact(State))
- end).
-
--endif.
diff --git a/src/couch_mrview/src/couch_mrview_debug.erl b/src/couch_mrview/src/couch_mrview_debug.erl
deleted file mode 100644
index a4203d49d..000000000
--- a/src/couch_mrview/src/couch_mrview_debug.erl
+++ /dev/null
@@ -1,50 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_debug).
-
--export([
- help/0,
- help/1
-]).
-
--export([
- view_signature/2
-]).
-
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-help() ->
- [
- view_signature
- ].
-
--spec help(Function :: atom()) -> ok.
-%% erlfmt-ignore
-help(view_signature) ->
- io:format("
- view_signature(ShardName, DDocName)
- --------------
- Returns a view signature for given ddoc for a given (non clustered) database.
- ---
- ", []);
-help(Unknown) ->
- io:format("Unknown function: `~p`. Please try one of the following:~n", [Unknown]),
- [io:format(" - ~s~n", [Function]) || Function <- help()],
- io:format(" ---~n", []),
- ok.
-
-view_signature(DbName, DDocName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc_int(Db, <<"_design/", DDocName/binary>>, []),
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DDocName, DDoc),
- couch_util:to_hex(IdxState#mrst.sig).
diff --git a/src/couch_mrview/src/couch_mrview_http.erl b/src/couch_mrview/src/couch_mrview_http.erl
deleted file mode 100644
index fa3fab386..000000000
--- a/src/couch_mrview/src/couch_mrview_http.erl
+++ /dev/null
@@ -1,674 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_http).
-
--export([
- handle_all_docs_req/2,
- handle_local_docs_req/2,
- handle_design_docs_req/2,
- handle_reindex_req/3,
- handle_view_req/3,
- handle_temp_view_req/2,
- handle_info_req/3,
- handle_compact_req/3,
- handle_cleanup_req/2
-]).
-
--export([
- parse_boolean/1,
- parse_int/1,
- parse_pos_int/1,
- prepend_val/1,
- parse_body_and_query/2,
- parse_body_and_query/3,
- parse_params/2,
- parse_params/3,
- parse_params/4,
- view_cb/2,
- row_to_json/1,
- row_to_json/2,
- check_view_etag/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-handle_all_docs_req(#httpd{method = 'GET'} = Req, Db) ->
- all_docs_req(Req, Db, undefined);
-handle_all_docs_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
- all_docs_req(Req, Db, Keys);
-handle_all_docs_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_local_docs_req(#httpd{method = 'GET'} = Req, Db) ->
- all_docs_req(Req, Db, undefined, <<"_local">>);
-handle_local_docs_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
- all_docs_req(Req, Db, Keys, <<"_local">>);
-handle_local_docs_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_design_docs_req(#httpd{method = 'GET'} = Req, Db) ->
- all_docs_req(Req, Db, undefined, <<"_design">>);
-handle_design_docs_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
- all_docs_req(Req, Db, Keys, <<"_design">>);
-handle_design_docs_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_reindex_req(
- #httpd{
- method = 'POST',
- path_parts = [_, _, DName, <<"_reindex">>]
- } = Req,
- Db,
- _DDoc
-) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_db:check_is_admin(Db),
- couch_mrview:trigger_update(Db, <<"_design/", DName/binary>>),
- chttpd:send_json(Req, 201, {[{<<"ok">>, true}]});
-handle_reindex_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-handle_view_req(
- #httpd{
- method = 'GET',
- path_parts = [_, _, DDocName, _, VName, <<"_info">>]
- } = Req,
- Db,
- _DDoc
-) ->
- DbName = couch_db:name(Db),
- DDocId = <<"_design/", DDocName/binary>>,
- {ok, Info} = couch_mrview:get_view_info(DbName, DDocId, VName),
-
- FinalInfo =
- [
- {db_name, DbName},
- {ddoc, DDocId},
- {view, VName}
- ] ++ Info,
- chttpd:send_json(Req, 200, {FinalInfo});
-handle_view_req(#httpd{method = 'GET'} = Req, Db, DDoc) ->
- [_, _, _, _, ViewName] = Req#httpd.path_parts,
- couch_stats:increment_counter([couchdb, httpd, view_reads]),
- design_doc_view(Req, Db, DDoc, ViewName, undefined);
-handle_view_req(#httpd{method = 'POST'} = Req, Db, DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- [_, _, _, _, ViewName] = Req#httpd.path_parts,
- Props = chttpd:json_body_obj(Req),
- Keys = couch_mrview_util:get_view_keys(Props),
- Queries = couch_mrview_util:get_view_queries(Props),
- case {Queries, Keys} of
- {Queries, undefined} when is_list(Queries) ->
- IncrBy = length(Queries),
- couch_stats:increment_counter([couchdb, httpd, view_reads], IncrBy),
- multi_query_view(Req, Db, DDoc, ViewName, Queries);
- {undefined, Keys} when is_list(Keys) ->
- couch_stats:increment_counter([couchdb, httpd, view_reads]),
- design_doc_view(Req, Db, DDoc, ViewName, Keys);
- {undefined, undefined} ->
- throw({
- bad_request,
- "POST body must contain `keys` or `queries` field"
- });
- {_, _} ->
- throw({bad_request, "`keys` and `queries` are mutually exclusive"})
- end;
-handle_view_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_temp_view_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_db:check_is_admin(Db),
- {Body} = chttpd:json_body_obj(Req),
- DDoc = couch_mrview_util:temp_view_to_ddoc({Body}),
- Keys = couch_mrview_util:get_view_keys({Body}),
- couch_stats:increment_counter([couchdb, httpd, temporary_view_reads]),
- design_doc_view(Req, Db, DDoc, <<"temp">>, Keys);
-handle_temp_view_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-handle_info_req(#httpd{method = 'GET'} = Req, Db, DDoc) ->
- [_, _, Name, _] = Req#httpd.path_parts,
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- chttpd:send_json(
- Req,
- 200,
- {[
- {name, Name},
- {view_index, {Info}}
- ]}
- );
-handle_info_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-handle_compact_req(#httpd{method = 'POST'} = Req, Db, DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_db:check_is_admin(Db),
- ok = couch_mrview:compact(Db, DDoc),
- chttpd:send_json(Req, 202, {[{ok, true}]});
-handle_compact_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-handle_cleanup_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_db:check_is_admin(Db),
- ok = couch_mrview:cleanup(Db),
- chttpd:send_json(Req, 202, {[{ok, true}]});
-handle_cleanup_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-all_docs_req(Req, Db, Keys) ->
- all_docs_req(Req, Db, Keys, undefined).
-
-all_docs_req(Req, Db, Keys, NS) ->
- case is_restricted(Db, NS) of
- true ->
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- do_all_docs_req(Req, Db, Keys, NS);
- _ when NS == <<"_local">> ->
- throw({forbidden, <<"Only admins can access _local_docs">>});
- _ ->
- case is_public_fields_configured(Db) of
- true ->
- do_all_docs_req(Req, Db, Keys, NS);
- false ->
- throw(
- {forbidden,
- <<"Only admins can access _all_docs", " of system databases.">>}
- )
- end
- end;
- false ->
- do_all_docs_req(Req, Db, Keys, NS)
- end.
-
-is_restricted(_Db, <<"_local">>) ->
- true;
-is_restricted(Db, _) ->
- couch_db:is_system_db(Db).
-
-is_public_fields_configured(Db) ->
- DbName = ?b2l(couch_db:name(Db)),
- case config:get("couch_httpd_auth", "authentication_db", "_users") of
- DbName ->
- UsersDbPublic = chttpd_util:get_chttpd_auth_config(
- "users_db_public", "false"
- ),
- PublicFields = chttpd_util:get_chttpd_auth_config("public_fields"),
- case {UsersDbPublic, PublicFields} of
- {"true", PublicFields} when PublicFields =/= undefined ->
- true;
- {_, _} ->
- false
- end;
- _ ->
- false
- end.
-
-do_all_docs_req(Req, Db, Keys, NS) ->
- Args0 = couch_mrview_http:parse_body_and_query(Req, Keys),
- Args1 = set_namespace(NS, Args0),
- ETagFun = fun(Sig, Acc0) ->
- check_view_etag(Sig, Acc0, Req)
- end,
- Args = Args1#mrargs{preflight_fun = ETagFun},
- {ok, Resp} = couch_httpd:etag_maybe(Req, fun() ->
- Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db = Db, req = Req, threshold = Max},
- DbName = ?b2l(couch_db:name(Db)),
- UsersDbName = config:get(
- "couch_httpd_auth",
- "authentication_db",
- "_users"
- ),
- IsAdmin = is_admin(Db),
- Callback = get_view_callback(DbName, UsersDbName, IsAdmin),
- couch_mrview:query_all_docs(Db, Args, Callback, VAcc0)
- end),
- case is_record(Resp, vacc) of
- true -> {ok, Resp#vacc.resp};
- _ -> {ok, Resp}
- end.
-
-set_namespace(NS, #mrargs{extra = Extra} = Args) ->
- Args#mrargs{extra = [{namespace, NS} | Extra]}.
-
-is_admin(Db) ->
- case catch couch_db:check_is_admin(Db) of
- {unauthorized, _} ->
- false;
- ok ->
- true
- end.
-
-% admin users always get all fields
-get_view_callback(_, _, true) ->
- fun view_cb/2;
-% if we are operating on the users db and we aren't
-% admin, filter the view
-get_view_callback(_DbName, _DbName, false) ->
- fun filtered_view_cb/2;
-% non _users databases get all fields
-get_view_callback(_, _, _) ->
- fun view_cb/2.
-
-design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
- Args0 = parse_params(Req, Keys),
- ETagFun = fun(Sig, Acc0) ->
- check_view_etag(Sig, Acc0, Req)
- end,
- Args = Args0#mrargs{preflight_fun = ETagFun},
- {ok, Resp} = couch_httpd:etag_maybe(Req, fun() ->
- Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db = Db, req = Req, threshold = Max},
- couch_mrview:query_view(Db, DDoc, ViewName, Args, fun view_cb/2, VAcc0)
- end),
- case is_record(Resp, vacc) of
- true -> {ok, Resp#vacc.resp};
- _ -> {ok, Resp}
- end.
-
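-% Run each query in turn against the same view and stream the combined output
-% as a single {"results":[...]} response.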
-multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
- Args0 = parse_params(Req, undefined),
- {ok, _, _, Args1} = couch_mrview_util:get_view(Db, DDoc, ViewName, Args0),
- ArgQueries = lists:map(
- fun({Query}) ->
- QueryArg = parse_params(Query, undefined, Args1),
- couch_mrview_util:validate_args(Db, DDoc, QueryArg)
- end,
- Queries
- ),
- {ok, Resp2} = couch_httpd:etag_maybe(Req, fun() ->
- Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db = Db, req = Req, prepend = "\r\n", threshold = Max},
- %% TODO: proper calculation of etag
- Etag = [$", couch_uuids:new(), $"],
- Headers = [{"ETag", Etag}],
- FirstChunk = "{\"results\":[",
- {ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, Headers, FirstChunk),
- VAcc1 = VAcc0#vacc{resp = Resp0},
- VAcc2 = lists:foldl(
- fun(Args, Acc0) ->
- {ok, Acc1} = couch_mrview:query_view(Db, DDoc, ViewName, Args, fun view_cb/2, Acc0),
- Acc1
- end,
- VAcc1,
- ArgQueries
- ),
- {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
- {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, VAcc2#vacc{resp = Resp2}}
- end),
- case is_record(Resp2, vacc) of
- true -> {ok, Resp2#vacc.resp};
- _ -> {ok, Resp2}
- end.
-
-filtered_view_cb({row, Row0}, Acc) ->
- Row1 = lists:map(
- fun
- ({doc, null}) ->
- {doc, null};
- ({doc, Body}) ->
- Doc = couch_users_db:strip_non_public_fields(#doc{body = Body}),
- {doc, Doc#doc.body};
- (KV) ->
- KV
- end,
- Row0
- ),
- view_cb({row, Row1}, Acc);
-filtered_view_cb(Obj, Acc) ->
- view_cb(Obj, Acc).
-
-%% these clauses start (and possibly end) the response
-view_cb({error, Reason}, #vacc{resp = undefined} = Acc) ->
- {ok, Resp} = chttpd:send_error(Acc#vacc.req, Reason),
- {ok, Acc#vacc{resp = Resp}};
-view_cb(complete, #vacc{resp = undefined} = Acc) ->
- % Nothing in view
- {ok, Resp} = chttpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
- {ok, Acc#vacc{resp = Resp}};
-view_cb(Msg, #vacc{resp = undefined} = Acc) ->
- %% Start response
- Headers = [],
- {ok, Resp} = chttpd:start_delayed_json_response(Acc#vacc.req, 200, Headers),
- view_cb(Msg, Acc#vacc{resp = Resp, should_close = true});
-%% ---------------------------------------------------
-
-%% From here on down, the response has been started.
-
-view_cb({error, Reason}, #vacc{resp = Resp} = Acc) ->
- {ok, Resp1} = chttpd:send_delayed_error(Resp, Reason),
- {ok, Acc#vacc{resp = Resp1}};
-view_cb(complete, #vacc{resp = Resp, buffer = Buf, threshold = Max} = Acc) ->
- % Finish view output and possibly end the response
- {ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, "\r\n]}", Max),
- case Acc#vacc.should_close of
- true ->
- {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp = Resp2}};
- _ ->
- {ok, Acc#vacc{
- resp = Resp1,
- meta_sent = false,
- row_sent = false,
- prepend = ",\r\n",
- buffer = [],
- bufsize = 0
- }}
- end;
-view_cb({meta, Meta}, #vacc{meta_sent = false, row_sent = false} = Acc) ->
- % Sending metadata as we've not sent it or any row yet
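-    % e.g. this produces a chunk like: {"total_rows":3,"offset":0,"rows":[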
- Parts =
- case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [io_lib:format("\"total_rows\":~p", [Total])]
- end ++
- case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [io_lib:format("\"offset\":~p", [Offset])]
- end ++
- case couch_util:get_value(update_seq, Meta) of
- undefined ->
- [];
- null ->
- ["\"update_seq\":null"];
- UpdateSeq when is_integer(UpdateSeq) ->
- [io_lib:format("\"update_seq\":~B", [UpdateSeq])];
- UpdateSeq when is_binary(UpdateSeq) ->
- [io_lib:format("\"update_seq\":\"~s\"", [UpdateSeq])]
- end ++ ["\"rows\":["],
- Chunk = [prepend_val(Acc), "{", string:join(Parts, ","), "\r\n"],
- {ok, AccOut} = maybe_flush_response(Acc, Chunk, iolist_size(Chunk)),
- {ok, AccOut#vacc{prepend = "", meta_sent = true}};
-view_cb({meta, _Meta}, #vacc{} = Acc) ->
- %% ignore metadata
- {ok, Acc};
-view_cb({row, Row}, #vacc{meta_sent = false} = Acc) ->
- %% sorted=false and row arrived before meta
- % Adding another row
- Chunk = [prepend_val(Acc), "{\"rows\":[\r\n", row_to_json(Row)],
- maybe_flush_response(Acc#vacc{meta_sent = true, row_sent = true}, Chunk, iolist_size(Chunk));
-view_cb({row, Row}, #vacc{meta_sent = true} = Acc) ->
- % Adding another row
- Chunk = [prepend_val(Acc), row_to_json(Row)],
- maybe_flush_response(Acc#vacc{row_sent = true}, Chunk, iolist_size(Chunk)).
-
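-% Rows are buffered until the accumulated size would exceed the threshold, then
-% the buffer is flushed as a single delayed chunk.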
-maybe_flush_response(#vacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
- Size > 0 andalso (Size + Len) > Max
-->
- #vacc{buffer = Buffer, resp = Resp} = Acc,
- {ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
- {ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
-maybe_flush_response(Acc0, Data, Len) ->
- #vacc{buffer = Buf, bufsize = Size} = Acc0,
- Acc = Acc0#vacc{
- prepend = ",\r\n",
- buffer = [Buf | Data],
- bufsize = Size + Len
- },
- {ok, Acc}.
-
-prepend_val(#vacc{prepend = Prepend}) ->
- case Prepend of
- undefined ->
- "";
- _ ->
- Prepend
- end.
-
-row_to_json(Row) ->
- Id = couch_util:get_value(id, Row),
- row_to_json(Id, Row).
-
-row_to_json(error, Row) ->
- % Special case for _all_docs request with KEYS to
- % match prior behavior.
- Key = couch_util:get_value(key, Row),
- Val = couch_util:get_value(value, Row),
- Reason = couch_util:get_value(reason, Row),
- ReasonProp =
- if
- Reason == undefined -> [];
- true -> [{reason, Reason}]
- end,
- Obj = {[{key, Key}, {error, Val}] ++ ReasonProp},
- ?JSON_ENCODE(Obj);
-row_to_json(Id0, Row) ->
- Id =
- case Id0 of
- undefined -> [];
- Id0 -> [{id, Id0}]
- end,
- Key = couch_util:get_value(key, Row, null),
- Val = couch_util:get_value(value, Row),
- Doc =
- case couch_util:get_value(doc, Row) of
- undefined -> [];
- Doc0 -> [{doc, Doc0}]
- end,
- Obj = {Id ++ [{key, Key}, {value, Val}] ++ Doc},
- ?JSON_ENCODE(Obj).
-
-parse_params(#httpd{} = Req, Keys) ->
- parse_params(chttpd:qs(Req), Keys);
-parse_params(Props, Keys) ->
- Args = #mrargs{},
- parse_params(Props, Keys, Args).
-
-parse_params(Props, Keys, Args) ->
- parse_params(Props, Keys, Args, []).
-
-parse_params(Props, Keys, #mrargs{} = Args0, Options) ->
- IsDecoded = lists:member(decoded, Options),
- Args1 =
- case lists:member(keep_group_level, Options) of
- true ->
- Args0;
- _ ->
- % group_level set to undefined to detect if explicitly set by user
- Args0#mrargs{keys = Keys, group = undefined, group_level = undefined}
- end,
- lists:foldl(
- fun({K, V}, Acc) ->
- parse_param(K, V, Acc, IsDecoded)
- end,
- Args1,
- Props
- ).
-
-parse_body_and_query(#httpd{method = 'POST'} = Req, Keys) ->
- Props = chttpd:json_body_obj(Req),
- parse_body_and_query(Req, Props, Keys);
-parse_body_and_query(Req, Keys) ->
- parse_params(
- chttpd:qs(Req),
- Keys,
- #mrargs{
- keys = Keys,
- group = undefined,
- group_level = undefined
- },
- [keep_group_level]
- ).
-
-parse_body_and_query(Req, {Props}, Keys) ->
- Args = #mrargs{keys = Keys, group = undefined, group_level = undefined},
- BodyArgs = parse_params(Props, Keys, Args, [decoded]),
- parse_params(chttpd:qs(Req), Keys, BodyArgs, [keep_group_level]).
-
-parse_param(Key, Val, Args, IsDecoded) when is_binary(Key) ->
- parse_param(binary_to_list(Key), Val, Args, IsDecoded);
-parse_param(Key, Val, Args, IsDecoded) ->
- case Key of
- "" ->
- Args;
- "reduce" ->
- Args#mrargs{reduce = parse_boolean(Val)};
- "key" when IsDecoded ->
- Args#mrargs{start_key = Val, end_key = Val};
- "key" ->
- JsonKey = ?JSON_DECODE(Val),
- Args#mrargs{start_key = JsonKey, end_key = JsonKey};
- "keys" when IsDecoded ->
- Args#mrargs{keys = Val};
- "keys" ->
- Args#mrargs{keys = ?JSON_DECODE(Val)};
- "startkey" when IsDecoded ->
- Args#mrargs{start_key = Val};
- "start_key" when IsDecoded ->
- Args#mrargs{start_key = Val};
- "startkey" ->
- Args#mrargs{start_key = ?JSON_DECODE(Val)};
- "start_key" ->
- Args#mrargs{start_key = ?JSON_DECODE(Val)};
- "startkey_docid" ->
- Args#mrargs{start_key_docid = couch_util:to_binary(Val)};
- "start_key_doc_id" ->
- Args#mrargs{start_key_docid = couch_util:to_binary(Val)};
- "endkey" when IsDecoded ->
- Args#mrargs{end_key = Val};
- "end_key" when IsDecoded ->
- Args#mrargs{end_key = Val};
- "endkey" ->
- Args#mrargs{end_key = ?JSON_DECODE(Val)};
- "end_key" ->
- Args#mrargs{end_key = ?JSON_DECODE(Val)};
- "endkey_docid" ->
- Args#mrargs{end_key_docid = couch_util:to_binary(Val)};
- "end_key_doc_id" ->
- Args#mrargs{end_key_docid = couch_util:to_binary(Val)};
- "limit" ->
- Args#mrargs{limit = parse_pos_int(Val)};
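-        % The legacy stale parameter maps onto the newer stable/update options.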
- "stale" when Val == "ok" orelse Val == <<"ok">> ->
- Args#mrargs{stable = true, update = false};
- "stale" when Val == "update_after" orelse Val == <<"update_after">> ->
- Args#mrargs{stable = true, update = lazy};
- "stale" ->
- throw({query_parse_error, <<"Invalid value for `stale`.">>});
- "stable" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
- Args#mrargs{stable = true};
- "stable" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
- Args#mrargs{stable = false};
- "stable" ->
- throw({query_parse_error, <<"Invalid value for `stable`.">>});
- "update" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
- Args#mrargs{update = true};
- "update" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
- Args#mrargs{update = false};
- "update" when Val == "lazy" orelse Val == <<"lazy">> ->
- Args#mrargs{update = lazy};
- "update" ->
- throw({query_parse_error, <<"Invalid value for `update`.">>});
- "descending" ->
- case parse_boolean(Val) of
- true -> Args#mrargs{direction = rev};
- _ -> Args#mrargs{direction = fwd}
- end;
- "skip" ->
- Args#mrargs{skip = parse_pos_int(Val)};
- "group" ->
- Args#mrargs{group = parse_boolean(Val)};
- "group_level" ->
- Args#mrargs{group_level = parse_pos_int(Val)};
- "inclusive_end" ->
- Args#mrargs{inclusive_end = parse_boolean(Val)};
- "include_docs" ->
- Args#mrargs{include_docs = parse_boolean(Val)};
- "attachments" ->
- case parse_boolean(Val) of
- true ->
- Opts = Args#mrargs.doc_options,
- Args#mrargs{doc_options = [attachments | Opts]};
- false ->
- Args
- end;
- "att_encoding_info" ->
- case parse_boolean(Val) of
- true ->
- Opts = Args#mrargs.doc_options,
- Args#mrargs{doc_options = [att_encoding_info | Opts]};
- false ->
- Args
- end;
- "update_seq" ->
- Args#mrargs{update_seq = parse_boolean(Val)};
- "conflicts" ->
- Args#mrargs{conflicts = parse_boolean(Val)};
- "callback" ->
- Args#mrargs{callback = couch_util:to_binary(Val)};
- "sorted" ->
- Args#mrargs{sorted = parse_boolean(Val)};
- "partition" ->
- Partition = couch_util:to_binary(Val),
- couch_partition:validate_partition(Partition),
- couch_mrview_util:set_extra(Args, partition, Partition);
- _ ->
- BKey = couch_util:to_binary(Key),
- BVal = couch_util:to_binary(Val),
- Args#mrargs{extra = [{BKey, BVal} | Args#mrargs.extra]}
- end.
-
-parse_boolean(true) ->
- true;
-parse_boolean(false) ->
- false;
-parse_boolean(Val) when is_binary(Val) ->
- parse_boolean(?b2l(Val));
-parse_boolean(Val) ->
- case string:to_lower(Val) of
- "true" ->
- true;
- "false" ->
- false;
- _ ->
- Msg = io_lib:format("Invalid boolean parameter: ~p", [Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_int(Val) when is_integer(Val) ->
- Val;
-parse_int(Val) ->
- case (catch list_to_integer(Val)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_pos_int(Val) ->
- case parse_int(Val) of
- IntVal when IntVal >= 0 ->
- IntVal;
- _ ->
- Fmt = "Invalid value for positive integer: ~p",
- Msg = io_lib:format(Fmt, [Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-check_view_etag(Sig, Acc0, Req) ->
- ETag = chttpd:make_etag(Sig),
- case chttpd:etag_match(Req, ETag) of
- true -> throw({etag_match, ETag});
- false -> {ok, Acc0#vacc{etag = ETag}}
- end.
diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl
deleted file mode 100644
index 1bfdb2818..000000000
--- a/src/couch_mrview/src/couch_mrview_index.erl
+++ /dev/null
@@ -1,362 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_index).
-
--export([get/2]).
--export([init/2, open/2, close/1, reset/1, delete/1, shutdown/1]).
--export([start_update/4, purge/4, process_doc/3, finish_update/1, commit/1]).
--export([compact/3, swap_compacted/2, remove_compacted/1]).
--export([index_file_exists/1]).
--export([update_local_purge_doc/2, verify_index_exists/2]).
--export([ensure_local_purge_docs/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-get(db_name, #mrst{db_name = DbName}) ->
- DbName;
-get(idx_name, #mrst{idx_name = IdxName}) ->
- IdxName;
-get(signature, #mrst{sig = Signature}) ->
- Signature;
-get(update_seq, #mrst{update_seq = UpdateSeq}) ->
- UpdateSeq;
-get(purge_seq, #mrst{purge_seq = PurgeSeq}) ->
- PurgeSeq;
-get(update_options, #mrst{design_opts = Opts}) ->
- IncDesign = couch_util:get_value(<<"include_design">>, Opts, false),
- LocalSeq = couch_util:get_value(<<"local_seq">>, Opts, false),
- Partitioned = couch_util:get_value(<<"partitioned">>, Opts, false),
- if
- IncDesign -> [include_design];
- true -> []
- end ++
- if
- LocalSeq -> [local_seq];
- true -> []
- end ++
- if
- Partitioned -> [partitioned];
- true -> []
- end;
-get(fd, #mrst{fd = Fd}) ->
- Fd;
-get(language, #mrst{language = Language}) ->
- Language;
-get(views, #mrst{views = Views}) ->
- Views;
-get(info, State) ->
- #mrst{
- fd = Fd,
- sig = Sig,
- id_btree = IdBtree,
- language = Lang,
- update_seq = UpdateSeq,
- purge_seq = PurgeSeq,
- views = Views,
- view_info = ViewInfo
- } = State,
- {ok, FileSize} = couch_file:bytes(Fd),
- {ok, ExternalSize} = couch_mrview_util:calculate_external_size(Views),
- {ok, ActiveViewSize} = couch_mrview_util:calculate_active_size(Views),
- ActiveSize = couch_btree:size(IdBtree) + ActiveViewSize,
-
- UpdateOptions0 = get(update_options, State),
- UpdateOptions = [atom_to_binary(O, latin1) || O <- UpdateOptions0],
- CollVsTups = couch_mrview_util:get_collator_versions(ViewInfo),
- CollVsBins = [couch_util:version_to_binary(V) || V <- CollVsTups],
- {ok, [
- {signature, list_to_binary(couch_index_util:hexsig(Sig))},
- {language, Lang},
- {sizes,
- {[
- {file, FileSize},
- {active, ActiveSize},
- {external, ExternalSize}
- ]}},
- {update_seq, UpdateSeq},
- {purge_seq, PurgeSeq},
- {update_options, UpdateOptions},
- {collator_versions, CollVsBins}
- ]};
-get(Other, _) ->
- throw({unknown_index_property, Other}).
-
-init(Db, DDoc) ->
- {ok, State} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
- {ok, set_partitioned(Db, State)}.
-
-open(Db, State0) ->
- #mrst{
- db_name = DbName,
- sig = Sig
- } = State = set_partitioned(Db, State0),
- IndexFName = couch_mrview_util:index_file(DbName, Sig),
-
- % If we are upgrading from <= 2.x, we upgrade the view
- % index file on the fly, avoiding an index reset.
-    % We commit with the new state right after
-    % the upgrade to ensure that the header has
-    % a proper sig the next time the view is
-    % opened
- %
- % OldSig is `ok` if no upgrade happened.
- %
- % To remove support for 2.x auto-upgrades in the
- % future, just remove the next line and the code
- % between "upgrade code for <= 2.x" and
- % "end of upgrade code for <= 2.x" and the corresponding
- % code in couch_mrview_util
-
- OldSig = couch_mrview_util:maybe_update_index_file(State),
-
- case couch_mrview_util:open_file(IndexFName) of
- {ok, Fd} ->
- case couch_file:read_header(Fd) of
- % upgrade code for <= 2.x
- {ok, {OldSig, Header}} ->
- % Matching view signatures.
- NewSt = init_and_upgrade_state(Db, Fd, State, Header),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt};
- % end of upgrade code for <= 2.x
- {ok, {Sig, Header}} ->
- % Matching view signatures.
- NewSt = init_and_upgrade_state(Db, Fd, State, Header),
- ensure_local_purge_doc(Db, NewSt),
- check_collator_versions(DbName, NewSt),
- {ok, NewSt};
- {ok, {WrongSig, _}} ->
- couch_log:error(
- "~s has the wrong signature: expected: ~p but got ~p",
- [IndexFName, Sig, WrongSig]
- ),
- NewSt = couch_mrview_util:reset_index(Db, Fd, State),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt};
- {ok, Else} ->
- couch_log:error(
- "~s has a bad header: got ~p",
- [IndexFName, Else]
- ),
- NewSt = couch_mrview_util:reset_index(Db, Fd, State),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt};
- no_valid_header ->
- NewSt = couch_mrview_util:reset_index(Db, Fd, State),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt}
- end;
- {error, Reason} = Error ->
- couch_log:error(
- "Failed to open view file '~s': ~s",
- [IndexFName, file:format_error(Reason)]
- ),
- Error
- end.
-
-close(State) ->
- erlang:demonitor(State#mrst.fd_monitor, [flush]),
- couch_file:close(State#mrst.fd).
-
-% This is called after a ddoc_updated event occurs and
-% before we shut down the couch_index process.
-% We unlink couch_index from the corresponding couch_file and demonitor it.
-% This allows all outstanding queries that are currently streaming
-% data from couch_file to finish successfully.
-% couch_file will be closed automatically after all
-% outstanding queries are done.
-shutdown(State) ->
- erlang:demonitor(State#mrst.fd_monitor, [flush]),
- unlink(State#mrst.fd).
-
-delete(#mrst{db_name = DbName, sig = Sig} = State) ->
- couch_file:close(State#mrst.fd),
- catch couch_mrview_util:delete_files(DbName, Sig).
-
-reset(State) ->
- couch_util:with_db(State#mrst.db_name, fun(Db) ->
- NewState = couch_mrview_util:reset_index(Db, State#mrst.fd, State),
- {ok, NewState}
- end).
-
-start_update(PartialDest, State, NumChanges, NumChangesDone) ->
- couch_mrview_updater:start_update(
- PartialDest,
- State,
- NumChanges,
- NumChangesDone
- ).
-
-purge(Db, PurgeSeq, PurgedIdRevs, State) ->
- couch_mrview_updater:purge(Db, PurgeSeq, PurgedIdRevs, State).
-
-process_doc(Doc, Seq, State) ->
- couch_mrview_updater:process_doc(Doc, Seq, State).
-
-finish_update(State) ->
- couch_mrview_updater:finish_update(State).
-
-commit(State) ->
- Header = {State#mrst.sig, couch_mrview_util:make_header(State)},
- couch_file:write_header(State#mrst.fd, Header).
-
-compact(Db, State, Opts) ->
- couch_mrview_compactor:compact(Db, State, Opts).
-
-swap_compacted(OldState, NewState) ->
- couch_mrview_compactor:swap_compacted(OldState, NewState).
-
-remove_compacted(State) ->
- couch_mrview_compactor:remove_compacted(State).
-
-index_file_exists(State) ->
- #mrst{
- db_name = DbName,
- sig = Sig
- } = State,
- IndexFName = couch_mrview_util:index_file(DbName, Sig),
- filelib:is_file(IndexFName).
-
-verify_index_exists(DbName, Props) ->
- try
- Type = couch_util:get_value(<<"type">>, Props),
- if
- Type =/= <<"mrview">> ->
- false;
- true ->
- DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
- couch_util:with_db(DbName, fun(Db) ->
- case couch_db:get_design_doc(Db, DDocId) of
- {ok, #doc{} = DDoc} ->
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(
- DbName, DDoc
- ),
- IdxSig = IdxState#mrst.sig,
- SigInLocal = couch_util:get_value(
- <<"signature">>, Props
- ),
- couch_index_util:hexsig(IdxSig) == SigInLocal;
- {not_found, _} ->
- false
- end
- end)
- end
- catch
- _:_ ->
- false
- end.
-
-set_partitioned(Db, State) ->
- #mrst{
- design_opts = DesignOpts
- } = State,
- DbPartitioned = couch_db:is_partitioned(Db),
- ViewPartitioned = couch_util:get_value(
- <<"partitioned">>, DesignOpts, DbPartitioned
- ),
- IsPartitioned = DbPartitioned andalso ViewPartitioned,
- State#mrst{partitioned = IsPartitioned}.
-
-ensure_local_purge_docs(DbName, DDocs) ->
- couch_util:with_db(DbName, fun(Db) ->
- lists:foreach(
- fun(DDoc) ->
- try couch_mrview_util:ddoc_to_mrst(DbName, DDoc) of
- {ok, MRSt} ->
- ensure_local_purge_doc(Db, MRSt)
- catch
- _:_ ->
- ok
- end
- end,
- DDocs
- )
- end).
-
-ensure_local_purge_doc(Db, #mrst{} = State) ->
- Sig = couch_index_util:hexsig(get(signature, State)),
- DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
- case couch_db:open_doc(Db, DocId, []) of
- {not_found, _} ->
- create_local_purge_doc(Db, State);
- {ok, _} ->
- ok
- end.
-
-create_local_purge_doc(Db, State) ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- update_local_purge_doc(Db, State, PurgeSeq).
-
-update_local_purge_doc(Db, State) ->
- update_local_purge_doc(Db, State, get(purge_seq, State)).
-
-update_local_purge_doc(Db, State, PSeq) ->
- Sig = couch_index_util:hexsig(State#mrst.sig),
- DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- BaseDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DocId},
- {<<"type">>, <<"mrview">>},
- {<<"purge_seq">>, PSeq},
- {<<"updated_on">>, NowSecs},
- {<<"ddoc_id">>, get(idx_name, State)},
- {<<"signature">>, Sig}
- ]}
- ),
- Doc =
- case couch_db:open_doc(Db, DocId, []) of
- {ok, #doc{revs = Revs}} ->
- BaseDoc#doc{revs = Revs};
- {not_found, _} ->
- BaseDoc
- end,
- couch_db:update_doc(Db, Doc, []).
-
-init_and_upgrade_state(Db, Fd, State, Header) ->
- {Commit, #mrst{} = Mrst} = couch_mrview_util:init_state(Db, Fd, State, Header),
- case Commit of
- true ->
- case couch_mrview_util:commit_on_header_upgrade() of
- true ->
- LogMsg = "~p : Index ~s ~s was upgraded",
- DbName = couch_db:name(Db),
- IdxName = State#mrst.idx_name,
- couch_log:warning(LogMsg, [?MODULE, DbName, IdxName]),
- ok = commit(Mrst),
- Mrst;
- false ->
- Mrst
- end;
- false ->
- Mrst
- end.
-
-% Check if there are multiple collator versions used to build this view
-check_collator_versions(DbName, #mrst{} = Mrst) ->
- case couch_mrview_util:compact_on_collator_upgrade() of
- true ->
- #mrst{view_info = ViewInfo, idx_name = IdxName} = Mrst,
- Vers = couch_mrview_util:get_collator_versions(ViewInfo),
- case length(Vers) >= 2 of
- true ->
- Event = {index_collator_upgrade, IdxName},
- couch_event:notify(DbName, Event);
- false ->
- ok
- end;
- false ->
- ok
- end.
diff --git a/src/couch_mrview/src/couch_mrview_show.erl b/src/couch_mrview/src/couch_mrview_show.erl
deleted file mode 100644
index 3e95be9cc..000000000
--- a/src/couch_mrview/src/couch_mrview_show.erl
+++ /dev/null
@@ -1,515 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_show).
-
--export([
- handle_doc_show_req/3,
- handle_doc_update_req/3,
- handle_view_list_req/3,
- list_cb/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-% /db/_design/foo/_show/bar/docid
-% show converts a json doc to a response of any content-type.
-% it looks up the doc and then passes it to the query server.
-% then it sends the response from the query server to the http client.
-
-maybe_open_doc(Db, DocId) ->
- case catch couch_httpd_db:couch_doc_open(Db, DocId, nil, [conflicts]) of
- #doc{} = Doc -> Doc;
- {not_found, _} -> nil
- end.
-
-handle_doc_show_req(
- #httpd{
- path_parts = [_, _, _, _, ShowName, DocId]
- } = Req,
- Db,
- DDoc
-) ->
- % open the doc
- Doc = maybe_open_doc(Db, DocId),
-
- % we don't handle revs here b/c they are an internal api
- % returns 404 if there is no doc with DocId
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-handle_doc_show_req(
- #httpd{
- path_parts = [_, _, _, _, ShowName, DocId | Rest]
- } = Req,
- Db,
- DDoc
-) ->
- DocParts = [DocId | Rest],
- DocId1 = ?l2b(string:join([?b2l(P) || P <- DocParts], "/")),
-
- % open the doc
- Doc = maybe_open_doc(Db, DocId1),
-
- % we don't handle revs here b/c they are an internal api
- % pass 404 docs to the show function
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-handle_doc_show_req(
- #httpd{
- path_parts = [_, _, _, _, ShowName]
- } = Req,
- Db,
- DDoc
-) ->
- % with no docid the doc is nil
- handle_doc_show(Req, Db, DDoc, ShowName, nil);
-handle_doc_show_req(Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
- % get responder for ddoc/showname
- CurrentEtag = show_etag(Req, Doc, DDoc, []),
- chttpd:etag_respond(Req, CurrentEtag, fun() ->
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- [<<"resp">>, ExternalResp] =
- couch_query_servers:ddoc_prompt(
- DDoc,
- [<<"shows">>, ShowName],
- [JsonDoc, JsonReq]
- ),
- JsonResp = apply_etag(ExternalResp, CurrentEtag),
- chttpd_external:send_external_response(Req, JsonResp)
- end).
-
-show_etag(#httpd{user_ctx = UserCtx} = Req, Doc, DDoc, More) ->
- Accept = chttpd:header_value(Req, "Accept"),
- DocPart =
- case Doc of
- nil -> nil;
- Doc -> chttpd:doc_etag(Doc)
- end,
- chttpd:make_etag({
- chttpd:doc_etag(DDoc),
- DocPart,
- Accept,
- {UserCtx#user_ctx.name, UserCtx#user_ctx.roles},
- More
- }).
-
-% updates a doc based on a request
-% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
-% % anything but GET
-% send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
-
-% This call is creating a new doc using an _update function to
-% modify the provided request body.
-% /db/_design/foo/_update/bar
-handle_doc_update_req(
- #httpd{
- path_parts = [_, _, _, _, UpdateName]
- } = Req,
- Db,
- DDoc
-) ->
- send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-% /db/_design/foo/_update/bar/docid
-handle_doc_update_req(
- #httpd{
- path_parts = [_, _, _, _, UpdateName | DocIdParts]
- } = Req,
- Db,
- DDoc
-) ->
- DocId = ?l2b(string:join([?b2l(P) || P <- DocIdParts], "/")),
- Doc = maybe_open_doc(Db, DocId),
- send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-handle_doc_update_req(Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
-
-send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- Cmd = [<<"updates">>, UpdateName],
- UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
- JsonResp =
- case UpdateResp of
- [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
- case
- chttpd:header_value(
- Req, "X-Couch-Full-Commit", "false"
- )
- of
- "true" ->
- Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
- _ ->
- Options = [{user_ctx, Req#httpd.user_ctx}]
- end,
- NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
- {ok, NewRev} = couch_db:update_doc(Db, NewDoc, Options),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- {JsonResp1} = apply_headers(JsonResp0, [
- {<<"X-Couch-Update-NewRev">>, NewRevStr},
- {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
- ]),
- {[{<<"code">>, 201} | JsonResp1]};
- [<<"up">>, _Other, {JsonResp0}] ->
- {[{<<"code">>, 200} | JsonResp0]}
- end,
- % todo set location field
- chttpd_external:send_external_response(Req, JsonResp).
-
-handle_view_list_req(#httpd{method = Method} = Req, Db, DDoc) when
- Method =:= 'GET' orelse Method =:= 'OPTIONS'
-->
- case Req#httpd.path_parts of
- [_, _, _DName, _, LName, VName] ->
- % Same design doc for view and list
- handle_view_list(Req, Db, DDoc, LName, DDoc, VName, undefined);
- [_, _, _, _, LName, DName, VName] ->
- % Different design docs for view and list
- VDocId = <<"_design/", DName/binary>>,
- {ok, VDDoc} = couch_db:open_doc(Db, VDocId, [ejson_body]),
- handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, undefined);
- _ ->
- chttpd:send_error(Req, 404, <<"list_error">>, <<"Bad path.">>)
- end;
-handle_view_list_req(#httpd{method = 'POST'} = Req, Db, DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- {Props} = chttpd:json_body_obj(Req),
- Keys = proplists:get_value(<<"keys">>, Props),
- case Req#httpd.path_parts of
- [_, _, _DName, _, LName, VName] ->
- handle_view_list(Req, Db, DDoc, LName, DDoc, VName, Keys);
- [_, _, _, _, LName, DName, VName] ->
- % Different design docs for view and list
- VDocId = <<"_design/", DName/binary>>,
- {ok, VDDoc} = couch_db:open_doc(Db, VDocId, [ejson_body]),
- handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, Keys);
- _ ->
- chttpd:send_error(Req, 404, <<"list_error">>, <<"Bad path.">>)
- end;
-handle_view_list_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, Keys) ->
- Args0 = couch_mrview_http:parse_body_and_query(Req, Keys),
- ETagFun = fun(BaseSig, Acc0) ->
- UserCtx = Req#httpd.user_ctx,
- Name = UserCtx#user_ctx.name,
- Roles = UserCtx#user_ctx.roles,
- Accept = chttpd:header_value(Req, "Accept"),
- Parts = {chttpd:doc_etag(DDoc), Accept, {Name, Roles}},
- ETag = chttpd:make_etag({BaseSig, Parts}),
- case chttpd:etag_match(Req, ETag) of
- true -> throw({etag_match, ETag});
- false -> {ok, Acc0#lacc{etag = ETag}}
- end
- end,
- Args = Args0#mrargs{preflight_fun = ETagFun},
- couch_httpd:etag_maybe(Req, fun() ->
- couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
- Acc = #lacc{db = Db, req = Req, qserver = QServer, lname = LName},
- case VName of
- <<"_all_docs">> ->
- couch_mrview:query_all_docs(Db, Args, fun list_cb/2, Acc);
- _ ->
- couch_mrview:query_view(Db, VDDoc, VName, Args, fun list_cb/2, Acc)
- end
- end)
- end).
-
-list_cb({meta, Meta}, #lacc{code = undefined} = Acc) ->
- MetaProps =
- case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [{total_rows, Total}]
- end ++
- case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [{offset, Offset}]
- end ++
- case couch_util:get_value(update_seq, Meta) of
- undefined -> [];
- UpdateSeq -> [{update_seq, UpdateSeq}]
- end,
- start_list_resp({MetaProps}, Acc);
-list_cb({row, Row}, #lacc{code = undefined} = Acc) ->
- {ok, NewAcc} = start_list_resp({[]}, Acc),
- send_list_row(Row, NewAcc);
-list_cb({row, Row}, Acc) ->
- send_list_row(Row, Acc);
-list_cb(complete, Acc) ->
- #lacc{qserver = {Proc, _}, req = Req, resp = Resp0} = Acc,
- if
- Resp0 =:= nil ->
- {ok, #lacc{resp = Resp}} = start_list_resp({[]}, Acc);
- true ->
- Resp = Resp0
- end,
- case couch_query_servers:proc_prompt(Proc, [<<"list_end">>]) of
- [<<"end">>, Data, Headers] ->
- Acc2 = fixup_headers(Headers, Acc#lacc{resp = Resp}),
- #lacc{resp = Resp2} = send_non_empty_chunk(Acc2, Data);
- [<<"end">>, Data] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(Acc#lacc{resp = Resp}, Data)
- end,
- last_chunk(Req, Resp2),
- {ok, Resp2}.
-
-start_list_resp(Head, Acc) ->
- #lacc{db = Db, req = Req, qserver = QServer, lname = LName} = Acc,
- JsonReq = json_req_obj(Req, Db),
-
- [<<"start">>, Chunk, JsonResp] = couch_query_servers:ddoc_proc_prompt(
- QServer,
- [<<"lists">>, LName],
- [Head, JsonReq]
- ),
- Acc2 = send_non_empty_chunk(fixup_headers(JsonResp, Acc), Chunk),
- {ok, Acc2}.
-
-fixup_headers(Headers, #lacc{etag = ETag} = Acc) ->
- Headers2 = apply_etag(Headers, ETag),
- #extern_resp_args{
- code = Code,
- ctype = CType,
- headers = ExtHeaders
- } = chttpd_external:parse_external_response(Headers2),
- Headers3 = chttpd_external:default_or_content_type(CType, ExtHeaders),
- Headers4 = chttpd_util:maybe_add_csp_header("showlist", Headers3, "sandbox"),
- Acc#lacc{code = Code, headers = Headers4}.
-
-send_list_row(Row, #lacc{qserver = {Proc, _}, req = Req, resp = Resp} = Acc) ->
- RowObj =
- case couch_util:get_value(id, Row) of
- undefined -> [];
- Id -> [{id, Id}]
- end ++
- case couch_util:get_value(key, Row) of
- undefined -> [];
- Key -> [{key, Key}]
- end ++
- case couch_util:get_value(value, Row) of
- undefined -> [];
- Val -> [{value, Val}]
- end ++
- case couch_util:get_value(doc, Row) of
- undefined -> [];
- Doc -> [{doc, Doc}]
- end,
- try couch_query_servers:proc_prompt(Proc, [<<"list_row">>, {RowObj}]) of
- [<<"chunks">>, Chunk, Headers] ->
- Acc2 = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
- {ok, Acc2};
- [<<"chunks">>, Chunk] ->
- Acc2 = send_non_empty_chunk(Acc, Chunk),
- {ok, Acc2};
- [<<"end">>, Chunk, Headers] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
- {ok, Resp3} = last_chunk(Req, Resp2),
- {stop, Resp3};
- [<<"end">>, Chunk] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(Acc, Chunk),
- {ok, Resp3} = last_chunk(Req, Resp2),
- {stop, Resp3}
- catch
- Error ->
- {ok, Resp2} =
- case Resp of
- undefined ->
- {Code, _, _} = chttpd:error_info(Error),
- #lacc{req = Req, headers = Headers} = Acc,
- chttpd:start_chunked_response(Req, Code, Headers);
- _ ->
- {ok, Resp}
- end,
- {ok, Resp3} = chttpd:send_chunked_error(Resp2, Error),
- {stop, Resp3}
- end.
-
-send_non_empty_chunk(Acc, []) ->
- Acc;
-send_non_empty_chunk(#lacc{resp = undefined} = Acc, Chunk) ->
- #lacc{req = Req, code = Code, headers = Headers} = Acc,
- {ok, Resp} = chttpd:start_chunked_response(Req, Code, Headers),
- send_non_empty_chunk(Acc#lacc{resp = Resp}, Chunk);
-send_non_empty_chunk(#lacc{resp = Resp} = Acc, Chunk) ->
- chttpd:send_chunk(Resp, Chunk),
- Acc.
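-
-% Illustrative summary of the query server list protocol as handled by
-% the functions above (shapes taken from the prompts used in this
-% module, not an addition of behavior):
-%
-%   ddoc_proc_prompt(QServer, [<<"lists">>, LName], [Head, JsonReq])
-%       -> [<<"start">>, Chunks, JsonResp]
-%   proc_prompt(Proc, [<<"list_row">>, {RowObj}])
-%       -> [<<"chunks">>, Chunks] | [<<"chunks">>, Chunks, Headers]
-%        | [<<"end">>, Chunks]    | [<<"end">>, Chunks, Headers]
-%   proc_prompt(Proc, [<<"list_end">>])
-%       -> [<<"end">>, Data] | [<<"end">>, Data, Headers]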
-
-apply_etag(JsonResp, undefined) ->
- JsonResp;
-apply_etag({ExternalResponse}, CurrentEtag) ->
- % Here we embark on the delicate task of replacing or creating the
- % headers on the JsonResponse object. We need to control the Etag and
- % Vary headers. If the external function controls the Etag, we'd have to
- % run it to check for a match, which sort of defeats the purpose.
- apply_headers(ExternalResponse, [
- {<<"ETag">>, CurrentEtag},
- {<<"Vary">>, <<"Accept">>}
- ]).
-
-apply_headers(JsonResp, []) ->
- JsonResp;
-apply_headers(JsonResp, NewHeaders) ->
- case couch_util:get_value(<<"headers">>, JsonResp) of
- undefined ->
- {[{<<"headers">>, {NewHeaders}} | JsonResp]};
- JsonHeaders ->
- Headers = apply_headers1(JsonHeaders, NewHeaders),
- NewKV = {<<"headers">>, Headers},
- {lists:keyreplace(<<"headers">>, 1, JsonResp, NewKV)}
- end.
-apply_headers1(JsonHeaders, [{Key, Value} | Rest]) ->
- NewJsonHeaders = json_apply_field({Key, Value}, JsonHeaders),
- apply_headers1(NewJsonHeaders, Rest);
-apply_headers1(JsonHeaders, []) ->
- JsonHeaders.
-
-% Maybe this is in the proplists API
-% todo move to couch_util
-json_apply_field(H, {L}) ->
- json_apply_field(H, L, []).
-
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
- % drop matching keys
- json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
- % something else is next, leave it alone.
- json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
- % end of list, add ours
- {[{Key, NewValue} | Acc]}.
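-
-% Illustrative example (not from the original source): json_apply_field/2
-% drops any existing entries for the key and prepends the new pair, e.g.
-%
-%   json_apply_field({<<"ETag">>, <<"\"new\"">>},
-%       {[{<<"ETag">>, <<"\"old\"">>}, {<<"Vary">>, <<"Accept">>}]})
-%   %=> {[{<<"ETag">>, <<"\"new\"">>}, {<<"Vary">>, <<"Accept">>}]}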
-
-% This loads the db info if we have a fully loaded db record. We might not have
-% the db locally on this node, in which case the info is loaded through fabric.
-json_req_obj(Req, Db) ->
- case couch_db:is_clustered(Db) of
- true ->
- % use a separate process because we're already in a receive loop,
- % and json_req_obj calls fabric:get_db_info()
- JRO = fun() -> exit(chttpd_external:json_req_obj(Req, Db)) end,
- {Pid, Ref} = spawn_monitor(JRO),
- receive
- {'DOWN', Ref, process, Pid, JsonReq} -> JsonReq
- end;
- false ->
- chttpd_external:json_req_obj(Req, Db)
- end.
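-
-% The clustered branch above uses a common Erlang idiom: run a blocking
-% call in a short-lived process and collect its result from the 'DOWN'
-% message's exit reason, so the caller's own receive loop is not
-% disturbed. A minimal sketch of the idiom (illustrative only;
-% do_blocking_call/0 is a hypothetical placeholder):
-%
-%   {Pid, Ref} = spawn_monitor(fun() -> exit(do_blocking_call()) end),
-%   receive
-%       {'DOWN', Ref, process, Pid, Result} -> Result
-%   end.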
-
-last_chunk(Req, undefined) ->
- chttpd:send_response(Req, 200, [], <<"">>);
-last_chunk(_Req, Resp) ->
- chttpd:send_chunk(Resp, []).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-apply_headers_test_() ->
- [
- should_apply_headers(),
- should_apply_headers_with_merge(),
- should_apply_headers_with_merge_overwrite()
- ].
-
-should_apply_headers() ->
- ?_test(begin
- JsonResp = [{<<"code">>, 201}],
- Headers = [{<<"foo">>, <<"bar">>}],
- {Props} = apply_headers(JsonResp, Headers),
- JsonHeaders = couch_util:get_value(<<"headers">>, Props),
- ?assertEqual({Headers}, JsonHeaders)
- end).
-
-should_apply_headers_with_merge() ->
- ?_test(begin
- BaseHeaders = [{<<"bar">>, <<"baz">>}],
- NewHeaders = [{<<"foo">>, <<"bar">>}],
- JsonResp = [
- {<<"code">>, 201},
- {<<"headers">>, {BaseHeaders}}
- ],
- {Props} = apply_headers(JsonResp, NewHeaders),
- JsonHeaders = couch_util:get_value(<<"headers">>, Props),
- ExpectedHeaders = {NewHeaders ++ BaseHeaders},
- ?assertEqual(ExpectedHeaders, JsonHeaders)
- end).
-
-should_apply_headers_with_merge_overwrite() ->
- ?_test(begin
- BaseHeaders = [{<<"foo">>, <<"bar">>}],
- NewHeaders = [{<<"foo">>, <<"baz">>}],
- JsonResp = [
- {<<"code">>, 201},
- {<<"headers">>, {BaseHeaders}}
- ],
- {Props} = apply_headers(JsonResp, NewHeaders),
- JsonHeaders = couch_util:get_value(<<"headers">>, Props),
- ?assertEqual({NewHeaders}, JsonHeaders)
- end).
-
-send_list_row_test_() ->
- Cases = couch_tests_combinatorics:product([
- [
- {"[<<\"end\">>, [], []]", fun(_, _) -> [<<"end">>, [], []] end},
- {"[<<\"end\">>, []]", fun(_, _) -> [<<"end">>, []] end},
- {"throw(timeout)", fun(_, _) -> throw(timeout) end}
- ],
- [
- req,
- undefined
- ]
- ]),
- {"Ensure send_list_row returns a valid response on end or error",
- {setup, fun setup/0, fun(_) -> meck:unload() end, [
- {
- lists:flatten(io_lib:format("~s -- ~p", [N, R])),
- should_return_valid_response(F, R)
- }
- || [{N, F}, R] <- Cases
- ]}}.
-
-setup() ->
- ok = application:start(config, permanent),
- ok = meck:expect(
- chttpd,
- send_chunk,
- fun(Resp, _) -> {ok, Resp} end
- ),
- ok = meck:expect(
- chttpd,
- send_chunked_error,
- fun(Resp, _) -> {ok, Resp} end
- ),
- ok = meck:expect(
- chttpd,
- start_chunked_response,
- fun(_, _, _) -> {ok, resp} end
- ),
- ok = meck:expect(
- chttpd_external,
- parse_external_response,
- 1,
- #extern_resp_args{headers = []}
- ).
-
-should_return_valid_response(Spec, Req) ->
- ?_test(begin
- ok = meck:expect(couch_query_servers, proc_prompt, Spec),
- Acc = #lacc{qserver = {proc, undefined}, req = Req, resp = resp},
- ?assertEqual({stop, resp}, send_list_row([], Acc))
- end).
-
--endif.
diff --git a/src/couch_mrview/src/couch_mrview_test_util.erl b/src/couch_mrview/src/couch_mrview_test_util.erl
deleted file mode 100644
index 918988ea3..000000000
--- a/src/couch_mrview/src/couch_mrview_test_util.erl
+++ /dev/null
@@ -1,136 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_test_util).
-
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
-init_db(Name, Type) ->
- init_db(Name, Type, 10).
-
-init_db(Name, Type, Count) ->
- {ok, Db} = new_db(Name, Type),
- Docs = make_docs(Type, Count),
- save_docs(Db, Docs).
-
-new_db(Name, Type) when Type == local; Type == design ->
- couch_server:delete(Name, [?ADMIN_CTX]),
- couch_db:create(Name, [?ADMIN_CTX]);
-new_db(Name, Type) ->
- couch_server:delete(Name, [?ADMIN_CTX]),
- {ok, Db} = couch_db:create(Name, [?ADMIN_CTX]),
- save_docs(Db, [ddoc(Type)]).
-
-delete_db(Name) ->
- couch_server:delete(Name, [?ADMIN_CTX]).
-
-save_docs(Db, Docs) ->
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- couch_db:reopen(Db).
-
-make_docs(local, Count) ->
- [local_doc(I) || I <- lists:seq(1, Count)];
-make_docs(design, Count) ->
- lists:foldl(
- fun(I, Acc) ->
- [doc(I), ddoc(I) | Acc]
- end,
- [],
- lists:seq(1, Count)
- );
-make_docs(_, Count) ->
- [doc(I) || I <- lists:seq(1, Count)].
-
-make_docs(_, Since, Count) ->
- [doc(I) || I <- lists:seq(Since, Count)].
-
-ddoc(map) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/bar">>},
- {<<"views">>,
- {[
- {<<"baz">>,
- {[
- {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
- ]}},
- {<<"bing">>,
- {[
- {<<"map">>, <<"function(doc) {}">>}
- ]}},
- {<<"zing">>,
- {[
- {<<"map">>, <<
- "function(doc) {\n"
- " if(doc.foo !== undefined)\n"
- " emit(doc.foo, 0);\n"
- "}"
- >>}
- ]}}
- ]}}
- ]}
- );
-ddoc(red) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/red">>},
- {<<"views">>,
- {[
- {<<"baz">>,
- {[
- {<<"map">>, <<
- "function(doc) {\n"
- " emit([doc.val % 2, doc.val], doc.val);\n"
- "}\n"
- >>},
- {<<"reduce">>, <<"function(keys, vals) {return sum(vals);}">>}
- ]}},
- {<<"zing">>,
- {[
- {<<"map">>, <<
- "function(doc) {\n"
- " if(doc.foo !== undefined)\n"
- " emit(doc.foo, null);\n"
- "}"
- >>},
- {<<"reduce">>, <<"_count">>}
- ]}}
- ]}}
- ]}
- );
-ddoc(Id) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, list_to_binary(io_lib:format("_design/bar~2..0b", [Id]))},
- {<<"views">>, {[]}}
- ]}
- ).
-
-doc(Id) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, list_to_binary(integer_to_list(Id))},
- {<<"val">>, Id}
- ]}
- ).
-
-local_doc(Id) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, list_to_binary(io_lib:format("_local/~b", [Id]))},
- {<<"val">>, Id}
- ]}
- ).
diff --git a/src/couch_mrview/src/couch_mrview_update_notifier.erl b/src/couch_mrview/src/couch_mrview_update_notifier.erl
deleted file mode 100644
index ac91131a0..000000000
--- a/src/couch_mrview/src/couch_mrview_update_notifier.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_update_notifier).
-
--behaviour(gen_event).
-
--export([start_link/1, notify/1]).
--export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3, stop/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link(Exec) ->
- couch_event_sup:start_link(
- couch_mrview_update, {couch_mrview_update_notifier, make_ref()}, Exec
- ).
-
-notify(Event) ->
- gen_event:notify(couch_mrview_update, Event).
-
-stop(Pid) ->
- couch_event_sup:stop(Pid).
-
-init(Fun) ->
- {ok, Fun}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_event(Event, Fun) ->
- Fun(Event),
- {ok, Fun}.
-
-handle_call(_Request, State) ->
- {ok, ok, State}.
-
-handle_info({'EXIT', Pid, Reason}, Pid) ->
- couch_log:error("View update notification process ~p died: ~p", [Pid, Reason]),
- remove_handler.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl
deleted file mode 100644
index 969a82028..000000000
--- a/src/couch_mrview/src/couch_mrview_updater.erl
+++ /dev/null
@@ -1,383 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_updater).
-
--export([start_update/4, purge/4, process_doc/3, finish_update/1]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(REM_VAL, removed).
-
-start_update(Partial, State, NumChanges, NumChangesDone) ->
- MaxSize = config:get_integer("view_updater", "queue_memory_cap", 100000),
- MaxItems = config:get_integer("view_updater", "queue_item_cap", 500),
- QueueOpts = [{max_size, MaxSize}, {max_items, MaxItems}],
- {ok, DocQueue} = couch_work_queue:new(QueueOpts),
- {ok, WriteQueue} = couch_work_queue:new(QueueOpts),
- InitState = State#mrst{
- first_build = State#mrst.update_seq == 0,
- partial_resp_pid = Partial,
- doc_acc = [],
- doc_queue = DocQueue,
- write_queue = WriteQueue
- },
-
- Self = self(),
-
- MapFun = fun() ->
- erlang:put(
- io_priority,
- {view_update, State#mrst.db_name, State#mrst.idx_name}
- ),
- Progress =
- case NumChanges of
- 0 -> 0;
- _ -> (NumChangesDone * 100) div NumChanges
- end,
- couch_task_status:add_task([
- {indexer_pid, ?l2b(pid_to_list(Partial))},
- {type, indexer},
- {database, State#mrst.db_name},
- {design_document, State#mrst.idx_name},
- {progress, Progress},
- {changes_done, NumChangesDone},
- {total_changes, NumChanges}
- ]),
- couch_task_status:set_update_frequency(500),
- map_docs(Self, InitState)
- end,
- WriteFun = fun() ->
- erlang:put(
- io_priority,
- {view_update, State#mrst.db_name, State#mrst.idx_name}
- ),
- write_results(Self, InitState)
- end,
- spawn_link(MapFun),
- spawn_link(WriteFun),
-
- {ok, InitState}.
-
-purge(_Db, PurgeSeq, PurgedIdRevs, State) ->
- #mrst{
- id_btree = IdBtree,
- views = Views,
- partitioned = Partitioned
- } = State,
-
- Ids = [Id || {Id, _Revs} <- PurgedIdRevs],
- {ok, Lookups, IdBtree2} = couch_btree:query_modify(IdBtree, Ids, [], Ids),
-
- MakeDictFun = fun
- ({ok, {DocId, ViewNumRowKeys}}, DictAcc) ->
- FoldFun = fun
- ({ViewNum, {Key, Seq, _Op}}, DictAcc2) ->
- dict:append(ViewNum, {Key, Seq, DocId}, DictAcc2);
- ({ViewNum, RowKey0}, DictAcc2) ->
- RowKey =
- if
- not Partitioned ->
- RowKey0;
- true ->
- [{RK, _}] = inject_partition([{RowKey0, DocId}]),
- RK
- end,
- dict:append(ViewNum, {RowKey, DocId}, DictAcc2)
- end,
- lists:foldl(FoldFun, DictAcc, ViewNumRowKeys);
- ({not_found, _}, DictAcc) ->
- DictAcc
- end,
- KeysToRemove = lists:foldl(MakeDictFun, dict:new(), Lookups),
-
- RemKeysFun = fun(#mrview{id_num = ViewId} = View) ->
- ToRem = couch_util:dict_find(ViewId, KeysToRemove, []),
- {ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, [], ToRem),
- NewPurgeSeq =
- case VBtree2 =/= View#mrview.btree of
- true -> PurgeSeq;
- _ -> View#mrview.purge_seq
- end,
- View#mrview{btree = VBtree2, purge_seq = NewPurgeSeq}
- end,
-
- Views2 = lists:map(RemKeysFun, Views),
- {ok, State#mrst{
- id_btree = IdBtree2,
- views = Views2,
- purge_seq = PurgeSeq
- }}.
-
-process_doc(Doc, Seq, #mrst{doc_acc = Acc} = State) when length(Acc) > 100 ->
- couch_work_queue:queue(State#mrst.doc_queue, lists:reverse(Acc)),
- process_doc(Doc, Seq, State#mrst{doc_acc = []});
-process_doc(nil, Seq, #mrst{doc_acc = Acc} = State) ->
- {ok, State#mrst{doc_acc = [{nil, Seq, nil} | Acc]}};
-process_doc(#doc{id = Id, deleted = true}, Seq, #mrst{doc_acc = Acc} = State) ->
- {ok, State#mrst{doc_acc = [{Id, Seq, deleted} | Acc]}};
-process_doc(#doc{id = Id} = Doc, Seq, #mrst{doc_acc = Acc} = State) ->
- {ok, State#mrst{doc_acc = [{Id, Seq, Doc} | Acc]}}.
-
-finish_update(#mrst{doc_acc = Acc} = State) ->
- if
- Acc /= [] ->
- couch_work_queue:queue(State#mrst.doc_queue, Acc);
- true ->
- ok
- end,
- couch_work_queue:close(State#mrst.doc_queue),
- receive
- {new_state, NewState} ->
- {ok, NewState#mrst{
- first_build = undefined,
- partial_resp_pid = undefined,
- doc_acc = undefined,
- doc_queue = undefined,
- write_queue = undefined,
- qserver = nil
- }}
- end.
-
-map_docs(Parent, #mrst{db_name = DbName, idx_name = IdxName} = State0) ->
- erlang:put(io_priority, {view_update, DbName, IdxName}),
- case couch_work_queue:dequeue(State0#mrst.doc_queue) of
- closed ->
- couch_query_servers:stop_doc_map(State0#mrst.qserver),
- couch_work_queue:close(State0#mrst.write_queue);
- {ok, Dequeued} ->
- % Run all the non-deleted docs through the view engine and
- % then pass the results on to the writer process.
- State1 =
- case State0#mrst.qserver of
- nil -> start_query_server(State0);
- _ -> State0
- end,
- QServer = State1#mrst.qserver,
- DocFun = fun
- ({nil, Seq, _}, {SeqAcc, Results}) ->
- {erlang:max(Seq, SeqAcc), Results};
- ({Id, Seq, deleted}, {SeqAcc, Results}) ->
- {erlang:max(Seq, SeqAcc), [{Id, []} | Results]};
- ({Id, Seq, Doc}, {SeqAcc, Results}) ->
- couch_stats:increment_counter([couchdb, mrview, map_doc]),
- {ok, Res} = couch_query_servers:map_doc_raw(QServer, Doc),
- {erlang:max(Seq, SeqAcc), [{Id, Res} | Results]}
- end,
- FoldFun = fun(Docs, Acc) ->
- update_task(length(Docs)),
- lists:foldl(DocFun, Acc, Docs)
- end,
- Results = lists:foldl(FoldFun, {0, []}, Dequeued),
- couch_work_queue:queue(State1#mrst.write_queue, Results),
- map_docs(Parent, State1)
- end.
-
-write_results(Parent, #mrst{} = State) ->
- case accumulate_writes(State, State#mrst.write_queue, nil) of
- stop ->
- Parent ! {new_state, State};
- {Go, {Seq, ViewKVs, DocIdKeys}} ->
- NewState = write_kvs(State, Seq, ViewKVs, DocIdKeys),
- if
- Go == stop ->
- Parent ! {new_state, NewState};
- true ->
- send_partial(NewState#mrst.partial_resp_pid, NewState),
- write_results(Parent, NewState)
- end
- end.
-
-start_query_server(State) ->
- #mrst{
- language = Language,
- lib = Lib,
- views = Views
- } = State,
- Defs = [View#mrview.def || View <- Views],
- {ok, QServer} = couch_query_servers:start_doc_map(Language, Defs, Lib),
- State#mrst{qserver = QServer}.
-
-accumulate_writes(State, W, Acc0) ->
- {Seq, ViewKVs, DocIdKVs} =
- case Acc0 of
- nil -> {0, [{V#mrview.id_num, []} || V <- State#mrst.views], []};
- _ -> Acc0
- end,
- case couch_work_queue:dequeue(W) of
- closed when Seq == 0 ->
- stop;
- closed ->
- {stop, {Seq, ViewKVs, DocIdKVs}};
- {ok, Info} ->
- {_, _, NewIds} = Acc = merge_results(Info, Seq, ViewKVs, DocIdKVs),
- case accumulate_more(length(NewIds), Acc) of
- true -> accumulate_writes(State, W, Acc);
- false -> {ok, Acc}
- end
- end.
-
-accumulate_more(NumDocIds, Acc) ->
- % check if we have enough items now
- MinItems = config:get("view_updater", "min_writer_items", "100"),
- MinSize = config:get("view_updater", "min_writer_size", "16777216"),
- CurrMem = ?term_size(Acc),
- NumDocIds < list_to_integer(MinItems) andalso
- CurrMem < list_to_integer(MinSize).
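-
-% Illustrative note, derived from the expression above: accumulation
-% stops (and a write is triggered) once either threshold is reached,
-% i.e. at least min_writer_items doc ids or min_writer_size bytes of
-% accumulated results (defaults 100 and 16777216).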
-
-merge_results([], SeqAcc, ViewKVs, DocIdKeys) ->
- {SeqAcc, ViewKVs, DocIdKeys};
-merge_results([{Seq, Results} | Rest], SeqAcc, ViewKVs, DocIdKeys) ->
- Fun = fun(RawResults, {VKV, DIK}) ->
- merge_results(RawResults, VKV, DIK)
- end,
- {ViewKVs1, DocIdKeys1} = lists:foldl(Fun, {ViewKVs, DocIdKeys}, Results),
- merge_results(Rest, erlang:max(Seq, SeqAcc), ViewKVs1, DocIdKeys1).
-
-merge_results({DocId, []}, ViewKVs, DocIdKeys) ->
- {ViewKVs, [{DocId, []} | DocIdKeys]};
-merge_results({DocId, RawResults}, ViewKVs, DocIdKeys) ->
- JsonResults = couch_query_servers:raw_to_ejson(RawResults),
- Results = [[list_to_tuple(Res) || Res <- FunRs] || FunRs <- JsonResults],
- case lists:flatten(Results) of
- [] ->
- {ViewKVs, [{DocId, []} | DocIdKeys]};
- _ ->
- {ViewKVs1, ViewIdKeys} = insert_results(DocId, Results, ViewKVs, [], []),
- {ViewKVs1, [ViewIdKeys | DocIdKeys]}
- end.
-
-insert_results(DocId, [], [], ViewKVs, ViewIdKeys) ->
- {lists:reverse(ViewKVs), {DocId, ViewIdKeys}};
-insert_results(DocId, [KVs | RKVs], [{Id, VKVs} | RVKVs], VKVAcc, VIdKeys) ->
- CombineDupesFun = fun
- ({Key, Val}, {[{Key, {dups, Vals}} | Rest], IdKeys}) ->
- {[{Key, {dups, [Val | Vals]}} | Rest], IdKeys};
- ({Key, Val1}, {[{Key, Val2} | Rest], IdKeys}) ->
- {[{Key, {dups, [Val1, Val2]}} | Rest], IdKeys};
- ({Key, Value}, {Rest, IdKeys}) ->
- {[{Key, Value} | Rest], [{Id, Key} | IdKeys]}
- end,
- InitAcc = {[], VIdKeys},
- couch_stats:increment_counter([couchdb, mrview, emits], length(KVs)),
- {Duped, VIdKeys0} = lists:foldl(
- CombineDupesFun,
- InitAcc,
- lists:sort(KVs)
- ),
- FinalKVs = [{{Key, DocId}, Val} || {Key, Val} <- Duped] ++ VKVs,
- insert_results(DocId, RKVs, RVKVs, [{Id, FinalKVs} | VKVAcc], VIdKeys0).
-
-write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys) ->
- #mrst{
- id_btree = IdBtree,
- first_build = FirstBuild,
- partitioned = Partitioned
- } = State,
-
- {ok, ToRemove, IdBtree2} = update_id_btree(IdBtree, DocIdKeys, FirstBuild),
- ToRemByView = collapse_rem_keys(ToRemove, dict:new()),
-
- UpdateView = fun(#mrview{id_num = ViewId} = View, {ViewId, KVs0}) ->
- ToRem0 = couch_util:dict_find(ViewId, ToRemByView, []),
- {KVs, ToRem} =
- case Partitioned of
- true ->
- KVs1 = inject_partition(KVs0),
- ToRem1 = inject_partition(ToRem0),
- {KVs1, ToRem1};
- false ->
- {KVs0, ToRem0}
- end,
- {ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, KVs, ToRem),
- NewUpdateSeq =
- case VBtree2 =/= View#mrview.btree of
- true -> UpdateSeq;
- _ -> View#mrview.update_seq
- end,
-
- View2 = View#mrview{btree = VBtree2, update_seq = NewUpdateSeq},
- maybe_notify(State, View2, KVs, ToRem),
- View2
- end,
-
- State#mrst{
- views = lists:zipwith(UpdateView, State#mrst.views, ViewKVs),
- update_seq = UpdateSeq,
- id_btree = IdBtree2
- }.
-
-inject_partition(Rows) ->
- lists:map(
- fun
- ({{Key, DocId}, Value}) ->
- % Adding a row to the view
- {Partition, _} = couch_partition:extract(DocId),
- {{{p, Partition, Key}, DocId}, Value};
- ({Key, DocId}) ->
- % Removing a row based on values in id_tree
- {Partition, _} = couch_partition:extract(DocId),
- {{p, Partition, Key}, DocId}
- end,
- Rows
- ).
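-
-% Illustrative example (assuming the partitioned doc id layout
-% <<"Partition:DocId">>): for DocId = <<"sensors:abc">>,
-% couch_partition:extract/1 yields {<<"sensors">>, <<"abc">>}, so an
-% emitted row {{Key, DocId}, Value} is stored as
-% {{{p, <<"sensors">>, Key}, DocId}, Value}.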
-
-update_id_btree(Btree, DocIdKeys, true) ->
- ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
- couch_btree:query_modify(Btree, [], ToAdd, []);
-update_id_btree(Btree, DocIdKeys, _) ->
- ToFind = [Id || {Id, _} <- DocIdKeys],
- ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
- ToRem = [Id || {Id, DIKeys} <- DocIdKeys, DIKeys == []],
- couch_btree:query_modify(Btree, ToFind, ToAdd, ToRem).
-
-collapse_rem_keys([], Acc) ->
- Acc;
-collapse_rem_keys([{ok, {DocId, ViewIdKeys}} | Rest], Acc) ->
- NewAcc = lists:foldl(
- fun({ViewId, Key}, Acc2) ->
- dict:append(ViewId, {Key, DocId}, Acc2)
- end,
- Acc,
- ViewIdKeys
- ),
- collapse_rem_keys(Rest, NewAcc);
-collapse_rem_keys([{not_found, _} | Rest], Acc) ->
- collapse_rem_keys(Rest, Acc).
-
-send_partial(Pid, State) when is_pid(Pid) ->
- gen_server:cast(Pid, {new_state, State});
-send_partial(_, _) ->
- ok.
-
-update_task(NumChanges) ->
- [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
- Changes2 = Changes + NumChanges,
- Progress =
- case Total of
- 0 ->
- % updater restart after compaction finishes
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
- couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]).
-
-maybe_notify(State, View, KVs, ToRem) ->
- Updated = fun() ->
- [Key || {{Key, _}, _} <- KVs]
- end,
- Removed = fun() ->
- [Key || {Key, _DocId} <- ToRem]
- end,
- couch_index_plugin:index_update(State, View, Updated, Removed).
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
deleted file mode 100644
index 9e3d292ed..000000000
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ /dev/null
@@ -1,1292 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_util).
-
--export([get_view/4, get_view_index_pid/4]).
--export([get_local_purge_doc_id/1, get_value_from_options/2]).
--export([verify_view_filename/1, get_signature_from_filename/1]).
--export([ddoc_to_mrst/2, init_state/4, reset_index/3]).
--export([make_header/1]).
--export([index_file/2, compaction_file/2, open_file/1]).
--export([delete_files/2, delete_index_file/2, delete_compaction_file/2]).
--export([get_row_count/1, all_docs_reduce_to_count/1, reduce_to_count/1]).
--export([all_docs_key_opts/1, all_docs_key_opts/2, key_opts/1, key_opts/2]).
--export([fold/4, fold_reduce/4]).
--export([temp_view_to_ddoc/1]).
--export([calculate_external_size/1]).
--export([calculate_active_size/1]).
--export([validate_all_docs_args/2, validate_args/1, validate_args/3]).
--export([maybe_load_doc/3, maybe_load_doc/4]).
--export([maybe_update_index_file/1]).
--export([extract_view/4, extract_view_reduce/1]).
--export([get_view_keys/1, get_view_queries/1]).
--export([set_view_type/3]).
--export([set_extra/3, get_extra/2, get_extra/3]).
--export([get_collator_versions/1]).
--export([compact_on_collator_upgrade/0]).
--export([commit_on_header_upgrade/0]).
-
--define(MOD, couch_mrview_index).
--define(GET_VIEW_RETRY_COUNT, 1).
--define(GET_VIEW_RETRY_DELAY, 50).
--define(LOWEST_KEY, null).
--define(HIGHEST_KEY, {<<255, 255, 255, 255>>}).
--define(LOWEST(A, B),
- (if
- A < B -> A;
- true -> B
- end)
-).
--define(HIGHEST(A, B),
- (if
- A > B -> A;
- true -> B
- end)
-).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-get_local_purge_doc_id(Sig) ->
- ?l2b(?LOCAL_DOC_PREFIX ++ "purge-mrview-" ++ Sig).
-
-get_value_from_options(Key, Options) ->
- case couch_util:get_value(Key, Options) of
- undefined ->
- Reason = <<"'", Key/binary, "' must exists in options.">>,
- throw({bad_request, Reason});
- Value ->
- Value
- end.
-
-verify_view_filename(FileName) ->
- FilePathList = filename:split(FileName),
- PureFN = lists:last(FilePathList),
- case filename:extension(PureFN) of
- ".view" ->
- Sig = filename:basename(PureFN),
- case
- [
- Ch
- || Ch <- Sig,
- not (((Ch >= $0) and (Ch =< $9)) orelse
- ((Ch >= $a) and (Ch =< $f)) orelse
- ((Ch >= $A) and (Ch =< $F)))
- ] == []
- of
- true -> true;
- false -> false
- end;
- _ ->
- false
- end.
-
-get_signature_from_filename(FileName) ->
- FilePathList = filename:split(FileName),
- PureFN = lists:last(FilePathList),
- filename:basename(PureFN, ".view").
-
-get_view(Db, DDoc, ViewName, Args0) ->
- case get_view_index_state(Db, DDoc, ViewName, Args0) of
- {ok, State, Args2} ->
- Ref = erlang:monitor(process, State#mrst.fd),
- #mrst{language = Lang, views = Views} = State,
- {Type, View, Args3} = extract_view(Lang, Args2, ViewName, Views),
- check_range(Args3, view_cmp(View)),
- Sig = view_sig(Db, State, View, Args3),
- {ok, {Type, View, Ref}, Sig, Args3};
- ddoc_updated ->
- ddoc_updated
- end.
-
-get_view_index_pid(Db, DDoc, ViewName, Args0) ->
- ArgCheck = fun(InitState) ->
- Args1 = set_view_type(Args0, ViewName, InitState#mrst.views),
- {ok, validate_args(InitState, Args1)}
- end,
- couch_index_server:get_index(?MOD, Db, DDoc, ArgCheck).
-
-get_view_index_state(Db, DDoc, ViewName, Args0) ->
- get_view_index_state(Db, DDoc, ViewName, Args0, ?GET_VIEW_RETRY_COUNT).
-
-get_view_index_state(_, DDoc, _, _, RetryCount) when RetryCount < 0 ->
- couch_log:warning("DDoc '~s' recreated too frequently", [DDoc#doc.id]),
- throw({get_view_state, exceeded_retry_count});
-get_view_index_state(Db, DDoc, ViewName, Args0, RetryCount) ->
- try
- {ok, Pid, Args} = get_view_index_pid(Db, DDoc, ViewName, Args0),
- UpdateSeq = couch_util:with_db(Db, fun(WDb) ->
- couch_db:get_update_seq(WDb)
- end),
- State =
- case Args#mrargs.update of
- lazy ->
- spawn(fun() ->
- catch couch_index:get_state(Pid, UpdateSeq)
- end),
- couch_index:get_state(Pid, 0);
- false ->
- couch_index:get_state(Pid, 0);
- _ ->
- couch_index:get_state(Pid, UpdateSeq)
- end,
- case State of
- {ok, State0} -> {ok, State0, Args};
- ddoc_updated -> ddoc_updated;
- Else -> throw(Else)
- end
- catch
- exit:{Reason, _} when Reason == noproc; Reason == normal ->
- timer:sleep(?GET_VIEW_RETRY_DELAY),
- get_view_index_state(Db, DDoc, ViewName, Args0, RetryCount - 1);
- error:{badmatch, Error} ->
- throw(Error);
- Error ->
- throw(Error)
- end.
-
-ddoc_to_mrst(DbName, #doc{id = Id, body = {Fields}}) ->
- MakeDict = fun
- ({Name, {MRFuns}}, DictBySrcAcc) ->
- case couch_util:get_value(<<"map">>, MRFuns) of
- MapSrc when MapSrc /= undefined ->
- RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
- {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
- View =
- case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
- {ok, View0} -> View0;
- error -> #mrview{def = MapSrc, options = ViewOpts}
- end,
- {MapNames, RedSrcs} =
- case RedSrc of
- null ->
- MNames = [Name | View#mrview.map_names],
- {MNames, View#mrview.reduce_funs};
- _ ->
- RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
- {View#mrview.map_names, RedFuns}
- end,
- View2 = View#mrview{map_names = MapNames, reduce_funs = RedSrcs},
- dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
- undefined ->
- DictBySrcAcc
- end;
- ({Name, Else}, DictBySrcAcc) ->
- couch_log:error(
- "design_doc_to_view_group ~s views ~p",
- [Name, Else]
- ),
- DictBySrcAcc
- end,
- {DesignOpts} = proplists:get_value(<<"options">>, Fields, {[]}),
- Partitioned = proplists:get_value(<<"partitioned">>, DesignOpts, false),
-
- {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}),
- BySrc = lists:foldl(MakeDict, dict:new(), RawViews),
-
- NumViews = fun({_, View}, N) ->
- {View#mrview{id_num = N}, N + 1}
- end,
- {Views, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
-
- Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
- Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
-
- IdxState = #mrst{
- db_name = DbName,
- idx_name = Id,
- lib = Lib,
- views = Views,
- language = Language,
- design_opts = DesignOpts,
- partitioned = Partitioned
- },
- SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
- {ok, IdxState#mrst{sig = couch_hash:md5_hash(term_to_binary(SigInfo))}}.
-
-set_view_type(_Args, _ViewName, []) ->
- throw({not_found, missing_named_view});
-set_view_type(Args, ViewName, [View | Rest]) ->
- RedNames = [N || {N, _} <- View#mrview.reduce_funs],
- case lists:member(ViewName, RedNames) of
- true ->
- case Args#mrargs.reduce of
- false -> Args#mrargs{view_type = map};
- _ -> Args#mrargs{view_type = red}
- end;
- false ->
- case lists:member(ViewName, View#mrview.map_names) of
- true -> Args#mrargs{view_type = map};
- false -> set_view_type(Args, ViewName, Rest)
- end
- end.
-
-set_extra(#mrargs{} = Args, Key, Value) ->
- Extra0 = Args#mrargs.extra,
- Extra1 = lists:ukeysort(1, [{Key, Value} | Extra0]),
- Args#mrargs{extra = Extra1}.
-
-get_extra(#mrargs{} = Args, Key) ->
- couch_util:get_value(Key, Args#mrargs.extra).
-
-get_extra(#mrargs{} = Args, Key, Default) ->
- couch_util:get_value(Key, Args#mrargs.extra, Default).
-
-extract_view(_Lang, _Args, _ViewName, []) ->
- throw({not_found, missing_named_view});
-extract_view(Lang, #mrargs{view_type = map} = Args, Name, [View | Rest]) ->
- Names = View#mrview.map_names ++ [N || {N, _} <- View#mrview.reduce_funs],
- case lists:member(Name, Names) of
- true -> {map, View, Args};
- _ -> extract_view(Lang, Args, Name, Rest)
- end;
-extract_view(Lang, #mrargs{view_type = red} = Args, Name, [View | Rest]) ->
- RedNames = [N || {N, _} <- View#mrview.reduce_funs],
- case lists:member(Name, RedNames) of
- true -> {red, {index_of(Name, RedNames), Lang, View}, Args};
- false -> extract_view(Lang, Args, Name, Rest)
- end.
-
-view_sig(Db, State, View, #mrargs{include_docs = true} = Args) ->
- BaseSig = view_sig(Db, State, View, Args#mrargs{include_docs = false}),
- UpdateSeq = couch_db:get_update_seq(Db),
- PurgeSeq = couch_db:get_purge_seq(Db),
- Term = view_sig_term(BaseSig, UpdateSeq, PurgeSeq),
- couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Term)));
-view_sig(Db, State, {_Nth, _Lang, View}, Args) ->
- view_sig(Db, State, View, Args);
-view_sig(_Db, State, View, Args0) ->
- Sig = State#mrst.sig,
- UpdateSeq = View#mrview.update_seq,
- PurgeSeq = View#mrview.purge_seq,
- Args = Args0#mrargs{
- preflight_fun = undefined,
- extra = []
- },
- Term = view_sig_term(Sig, UpdateSeq, PurgeSeq, Args),
- couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Term))).
-
-view_sig_term(BaseSig, UpdateSeq, PurgeSeq) ->
- {BaseSig, UpdateSeq, PurgeSeq}.
-
-view_sig_term(BaseSig, UpdateSeq, PurgeSeq, Args) ->
- {BaseSig, UpdateSeq, PurgeSeq, Args}.
-
-init_state(Db, Fd, #mrst{views = Views} = State, nil) ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- Header = #mrheader{
- seq = 0,
- purge_seq = PurgeSeq,
- id_btree_state = nil,
- view_info = update_collator_versions(#{}),
- view_states = [make_view_state(#mrview{}) || _ <- Views]
- },
- init_state(Db, Fd, State, Header);
-init_state(Db, Fd, State, Header) ->
- #mrst{
- language = Lang,
- views = Views
- } = State,
-
- {ShouldCommit, #mrheader{
- seq = Seq,
- purge_seq = PurgeSeq,
- id_btree_state = IdBtreeState,
- view_info = ViewInfo,
- view_states = ViewStates
- }} = maybe_update_header(Header),
-
- IdBtOpts = [
- {compression, couch_compress:get_compression_method()}
- ],
- {ok, IdBtree} = couch_btree:open(IdBtreeState, Fd, IdBtOpts),
-
- OpenViewFun = fun(St, View) -> open_view(Db, Fd, Lang, St, View) end,
- Views2 = lists:zipwith(OpenViewFun, ViewStates, Views),
-
- {ShouldCommit, State#mrst{
- fd = Fd,
- fd_monitor = erlang:monitor(process, Fd),
- update_seq = Seq,
- purge_seq = PurgeSeq,
- id_btree = IdBtree,
- views = Views2,
- view_info = ViewInfo
- }}.
-
-open_view(_Db, Fd, Lang, ViewState, View) ->
- ReduceFun = make_reduce_fun(Lang, View#mrview.reduce_funs),
- LessFun = maybe_define_less_fun(View),
- Compression = couch_compress:get_compression_method(),
- BTState = get_key_btree_state(ViewState),
- ViewBtOpts = [
- {less, LessFun},
- {reduce, ReduceFun},
- {compression, Compression}
- ],
- {ok, Btree} = couch_btree:open(BTState, Fd, ViewBtOpts),
-
- View#mrview{
- btree = Btree,
- update_seq = get_update_seq(ViewState),
- purge_seq = get_purge_seq(ViewState)
- }.
-
-temp_view_to_ddoc({Props}) ->
- Language = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
- Options = couch_util:get_value(<<"options">>, Props, {[]}),
- View0 = [{<<"map">>, couch_util:get_value(<<"map">>, Props)}],
- View1 =
- View0 ++
- case couch_util:get_value(<<"reduce">>, Props) of
- RedSrc when is_binary(RedSrc) -> [{<<"reduce">>, RedSrc}];
- _ -> []
- end,
- DDoc =
- {[
- {<<"_id">>, couch_uuids:random()},
- {<<"language">>, Language},
- {<<"options">>, Options},
- {<<"views">>,
- {[
- {<<"temp">>, {View1}}
- ]}}
- ]},
- couch_doc:from_json_obj(DDoc).
-
-get_row_count(#mrview{btree = Bt}) ->
- Count =
- case couch_btree:full_reduce(Bt) of
- {ok, {Count0, _Reds, _}} -> Count0;
- {ok, {Count0, _Reds}} -> Count0
- end,
- {ok, Count}.
-
-all_docs_reduce_to_count(Reductions) ->
- Reduce = fun couch_bt_engine:id_tree_reduce/2,
- {Count, _, _} = couch_btree:final_reduce(Reduce, Reductions),
- Count.
-
-reduce_to_count(nil) ->
- 0;
-reduce_to_count(Reductions) ->
- CountReduceFun = fun count_reduce/2,
- FinalReduction = couch_btree:final_reduce(CountReduceFun, Reductions),
- get_count(FinalReduction).
-
-fold(#mrview{btree = Bt}, Fun, Acc, Opts) ->
- WrapperFun = fun(KV, Reds, Acc2) ->
- fold_fun(Fun, expand_dups([KV], []), Reds, Acc2)
- end,
- {ok, _LastRed, _Acc} = couch_btree:fold(Bt, WrapperFun, Acc, Opts).
-
-fold_fun(_Fun, [], _, Acc) ->
- {ok, Acc};
-fold_fun(Fun, [KV | Rest], {KVReds, Reds}, Acc) ->
- case Fun(KV, {KVReds, Reds}, Acc) of
- {ok, Acc2} ->
- fold_fun(Fun, Rest, {[KV | KVReds], Reds}, Acc2);
- {stop, Acc2} ->
- {stop, Acc2}
- end.
-
-fold_reduce({NthRed, Lang, View}, Fun, Acc, Options) ->
- #mrview{
- btree = Bt,
- reduce_funs = RedFuns
- } = View,
-
- ReduceFun = make_user_reds_reduce_fun(Lang, RedFuns, NthRed),
-
- WrapperFun = fun({GroupedKey, _}, PartialReds, Acc0) ->
- FinalReduction = couch_btree:final_reduce(ReduceFun, PartialReds),
- UserReductions = get_user_reds(FinalReduction),
- Fun(GroupedKey, lists:nth(NthRed, UserReductions), Acc0)
- end,
-
- couch_btree:fold_reduce(Bt, WrapperFun, Acc, Options).
-
-validate_args(Db, DDoc, Args0) ->
- {ok, State} = couch_mrview_index:init(Db, DDoc),
- Args1 = apply_limit(State#mrst.partitioned, Args0),
- validate_args(State, Args1).
-
-validate_args(#mrst{} = State, Args0) ->
- Args = validate_args(Args0),
-
- ViewPartitioned = State#mrst.partitioned,
- Partition = get_extra(Args, partition),
-
- case {ViewPartitioned, Partition} of
- {true, undefined} ->
- Msg1 = <<
- "`partition` parameter is mandatory "
- "for queries to this view."
- >>,
- mrverror(Msg1);
- {true, _} ->
- apply_partition(Args, Partition);
- {false, undefined} ->
- Args;
- {false, Value} when is_binary(Value) ->
- Msg2 = <<
- "`partition` parameter is not "
- "supported in this design doc"
- >>,
- mrverror(Msg2)
- end.
-
-apply_limit(ViewPartitioned, Args) ->
- Options = Args#mrargs.extra,
- IgnorePQLimit = lists:keyfind(ignore_partition_query_limit, 1, Options),
- LimitType =
- case {ViewPartitioned, IgnorePQLimit} of
- {true, false} -> "partition_query_limit";
- {true, _} -> "query_limit";
- {false, _} -> "query_limit"
- end,
-
- MaxLimit = config:get_integer(
- "query_server_config",
- LimitType,
- ?MAX_VIEW_LIMIT
- ),
-
- % Set the highest limit possible if a user has not
- % specified a limit
- Args1 =
- case Args#mrargs.limit == ?MAX_VIEW_LIMIT of
- true -> Args#mrargs{limit = MaxLimit};
- false -> Args
- end,
-
- if
- Args1#mrargs.limit =< MaxLimit ->
- Args1;
- true ->
- Fmt = "Limit is too large, must not exceed ~p",
- mrverror(io_lib:format(Fmt, [MaxLimit]))
- end.
-
-validate_all_docs_args(Db, Args0) ->
- Args = validate_args(Args0),
-
- DbPartitioned = couch_db:is_partitioned(Db),
- Partition = get_extra(Args, partition),
-
- case {DbPartitioned, Partition} of
- {false, <<_/binary>>} ->
- mrverror(<<"`partition` parameter is not supported on this db">>);
- {_, <<_/binary>>} ->
- Args1 = apply_limit(true, Args),
- apply_all_docs_partition(Args1, Partition);
- _ ->
- Args
- end.
-
-validate_args(Args) ->
- GroupLevel = determine_group_level(Args),
- Reduce = Args#mrargs.reduce,
- case Reduce == undefined orelse is_boolean(Reduce) of
- true -> ok;
- _ -> mrverror(<<"Invalid `reduce` value.">>)
- end,
-
- case {Args#mrargs.view_type, Reduce} of
- {map, true} -> mrverror(<<"Reduce is invalid for map-only views.">>);
- _ -> ok
- end,
-
- case {Args#mrargs.view_type, GroupLevel, Args#mrargs.keys} of
- {red, exact, _} ->
- ok;
- {red, _, KeyList} when is_list(KeyList) ->
- Msg = <<"Multi-key fetchs for reduce views must use `group=true`">>,
- mrverror(Msg);
- _ ->
- ok
- end,
-
- case Args#mrargs.keys of
- Keys when is_list(Keys) -> ok;
- undefined -> ok;
- _ -> mrverror(<<"`keys` must be an array of strings.">>)
- end,
-
- case {Args#mrargs.keys, Args#mrargs.start_key, Args#mrargs.end_key} of
- {undefined, _, _} ->
- ok;
- {[], _, _} ->
- ok;
- {[_ | _], undefined, undefined} ->
- ok;
- _ ->
- mrverror(<<
- "`keys` is incompatible with `key`"
- ", `start_key` and `end_key`"
- >>)
- end,
-
- case Args#mrargs.start_key_docid of
- undefined -> ok;
- SKDocId0 when is_binary(SKDocId0) -> ok;
- _ -> mrverror(<<"`start_key_docid` must be a string.">>)
- end,
-
- case Args#mrargs.end_key_docid of
- undefined -> ok;
- EKDocId0 when is_binary(EKDocId0) -> ok;
- _ -> mrverror(<<"`end_key_docid` must be a string.">>)
- end,
-
- case Args#mrargs.direction of
- fwd -> ok;
- rev -> ok;
- _ -> mrverror(<<"Invalid direction.">>)
- end,
-
- case {Args#mrargs.limit >= 0, Args#mrargs.limit == undefined} of
- {true, _} -> ok;
- {_, true} -> ok;
- _ -> mrverror(<<"`limit` must be a positive integer.">>)
- end,
-
- case Args#mrargs.skip < 0 of
- true -> mrverror(<<"`skip` must be >= 0">>);
- _ -> ok
- end,
-
- case {Args#mrargs.view_type, GroupLevel} of
- {red, exact} -> ok;
- {_, 0} -> ok;
- {red, Int} when is_integer(Int), Int >= 0 -> ok;
- {red, _} -> mrverror(<<"`group_level` must be >= 0">>);
- {map, _} -> mrverror(<<"Invalid use of grouping on a map view.">>)
- end,
-
- case Args#mrargs.stable of
- true -> ok;
- false -> ok;
- _ -> mrverror(<<"Invalid value for `stable`.">>)
- end,
-
- case Args#mrargs.update of
- true -> ok;
- false -> ok;
- lazy -> ok;
- _ -> mrverror(<<"Invalid value for `update`.">>)
- end,
-
- case is_boolean(Args#mrargs.inclusive_end) of
- true -> ok;
- _ -> mrverror(<<"Invalid value for `inclusive_end`.">>)
- end,
-
- case {Args#mrargs.view_type, Args#mrargs.include_docs} of
- {red, true} -> mrverror(<<"`include_docs` is invalid for reduce">>);
- {_, ID} when is_boolean(ID) -> ok;
- _ -> mrverror(<<"Invalid value for `include_docs`">>)
- end,
-
- case {Args#mrargs.view_type, Args#mrargs.conflicts} of
- {_, undefined} -> ok;
- {map, V} when is_boolean(V) -> ok;
- {red, undefined} -> ok;
- {map, _} -> mrverror(<<"Invalid value for `conflicts`.">>);
- {red, _} -> mrverror(<<"`conflicts` is invalid for reduce views.">>)
- end,
-
- SKDocId =
- case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
- {fwd, undefined} -> <<>>;
- {rev, undefined} -> <<255>>;
- {_, SKDocId1} -> SKDocId1
- end,
-
- EKDocId =
- case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
- {fwd, undefined} -> <<255>>;
- {rev, undefined} -> <<>>;
- {_, EKDocId1} -> EKDocId1
- end,
-
- case is_boolean(Args#mrargs.sorted) of
- true -> ok;
- _ -> mrverror(<<"Invalid value for `sorted`.">>)
- end,
-
- case get_extra(Args, partition) of
- undefined -> ok;
- Partition when is_binary(Partition), Partition /= <<>> -> ok;
- _ -> mrverror(<<"Invalid value for `partition`.">>)
- end,
-
- Args#mrargs{
- start_key_docid = SKDocId,
- end_key_docid = EKDocId,
- group_level = GroupLevel
- }.
-
-determine_group_level(#mrargs{group = undefined, group_level = undefined}) ->
- 0;
-determine_group_level(#mrargs{group = false, group_level = undefined}) ->
- 0;
-determine_group_level(#mrargs{group = false, group_level = Level}) when Level > 0 ->
- mrverror(<<"Can't specify group=false and group_level>0 at the same time">>);
-determine_group_level(#mrargs{group = true, group_level = undefined}) ->
- exact;
-determine_group_level(#mrargs{group_level = GroupLevel}) ->
- GroupLevel.
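-
-% Illustrative mapping, derived from the clauses above:
-%   group unset or false, group_level unset -> 0
-%   group = true,         group_level unset -> exact
-%   group = false,        group_level > 0   -> query_parse_error
-%   otherwise                               -> group_level as given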
-
-apply_partition(#mrargs{keys = [{p, _, _} | _]} = Args, _Partition) ->
- % already applied
- Args;
-apply_partition(#mrargs{keys = Keys} = Args, Partition) when Keys /= undefined ->
- Args#mrargs{keys = [{p, Partition, K} || K <- Keys]};
-apply_partition(#mrargs{start_key = {p, _, _}, end_key = {p, _, _}} = Args, _Partition) ->
- % already applied.
- Args;
-apply_partition(Args, Partition) ->
- #mrargs{
- direction = Dir,
- start_key = StartKey,
- end_key = EndKey
- } = Args,
-
- {DefSK, DefEK} =
- case Dir of
- fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
- rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
- end,
-
- SK0 =
- if
- StartKey /= undefined -> StartKey;
- true -> DefSK
- end,
- EK0 =
- if
- EndKey /= undefined -> EndKey;
- true -> DefEK
- end,
-
- Args#mrargs{
- start_key = {p, Partition, SK0},
- end_key = {p, Partition, EK0}
- }.
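-
-% Illustrative example, derived from the clauses above: with
-% direction = fwd, Partition = <<"sensors">> and no explicit range,
-% the arguments become
-%   start_key = {p, <<"sensors">>, null}
-%   end_key   = {p, <<"sensors">>, {<<255, 255, 255, 255>>}}
-% i.e. the full key space of that partition (?LOWEST_KEY/?HIGHEST_KEY).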
-
-%% all_docs is special as it's not really a view and is already
-%% effectively partitioned as the partition is a prefix of all keys.
-apply_all_docs_partition(#mrargs{} = Args, Partition) ->
- #mrargs{
- direction = Dir,
- start_key = StartKey,
- end_key = EndKey
- } = Args,
-
- {DefSK, DefEK} =
- case Dir of
- fwd ->
- {
- couch_partition:start_key(Partition),
- couch_partition:end_key(Partition)
- };
- rev ->
- {
- couch_partition:end_key(Partition),
- couch_partition:start_key(Partition)
- }
- end,
-
- SK0 =
- if
- StartKey == undefined -> DefSK;
- true -> StartKey
- end,
- EK0 =
- if
- EndKey == undefined -> DefEK;
- true -> EndKey
- end,
-
- {SK1, EK1} =
- case Dir of
- fwd -> {?HIGHEST(DefSK, SK0), ?LOWEST(DefEK, EK0)};
- rev -> {?LOWEST(DefSK, SK0), ?HIGHEST(DefEK, EK0)}
- end,
-
- Args#mrargs{
- start_key = SK1,
- end_key = EK1
- }.
-
-check_range(#mrargs{start_key = undefined}, _Cmp) ->
- ok;
-check_range(#mrargs{end_key = undefined}, _Cmp) ->
- ok;
-check_range(#mrargs{start_key = K, end_key = K}, _Cmp) ->
- ok;
-check_range(Args, Cmp) ->
- #mrargs{
- direction = Dir,
- start_key = SK,
- start_key_docid = SKD,
- end_key = EK,
- end_key_docid = EKD
- } = Args,
- case {Dir, Cmp({SK, SKD}, {EK, EKD})} of
- {fwd, false} ->
- throw(
- {query_parse_error,
- <<"No rows can match your key range, reverse your ",
- "start_key and end_key or set descending=true">>}
- );
- {rev, true} ->
- throw(
- {query_parse_error,
- <<"No rows can match your key range, reverse your ",
- "start_key and end_key or set descending=false">>}
- );
- _ ->
- ok
- end.
-
-view_cmp({_Nth, _Lang, View}) ->
- view_cmp(View);
-view_cmp(View) ->
- fun(A, B) -> couch_btree:less(View#mrview.btree, A, B) end.
-
-make_header(State) ->
- #mrst{
- update_seq = Seq,
- purge_seq = PurgeSeq,
- id_btree = IdBtree,
- views = Views,
- view_info = ViewInfo
- } = State,
-
- #mrheader{
- seq = Seq,
- purge_seq = PurgeSeq,
- id_btree_state = get_btree_state(IdBtree),
- view_info = ViewInfo,
- view_states = [make_disk_view_state(V) || V <- Views]
- }.
-
-index_file(DbName, Sig) ->
- FileName = couch_index_util:hexsig(Sig) ++ ".view",
- couch_index_util:index_file(mrview, DbName, FileName).
-
-compaction_file(DbName, Sig) ->
- FileName = couch_index_util:hexsig(Sig) ++ ".compact.view",
- couch_index_util:index_file(mrview, DbName, FileName).
-
-open_file(FName) ->
- case couch_file:open(FName, [nologifmissing]) of
- {ok, Fd} -> {ok, Fd};
- {error, enoent} -> couch_file:open(FName, [create]);
- Error -> Error
- end.
-
-delete_files(DbName, Sig) ->
- delete_index_file(DbName, Sig),
- delete_compaction_file(DbName, Sig).
-
-delete_index_file(DbName, Sig) ->
- delete_file(index_file(DbName, Sig)).
-
-delete_compaction_file(DbName, Sig) ->
- delete_file(compaction_file(DbName, Sig)).
-
-delete_file(FName) ->
- case filelib:is_file(FName) of
- true ->
- RootDir = couch_index_util:root_dir(),
- couch_file:delete(RootDir, FName);
- _ ->
- ok
- end.
-
-reset_index(Db, Fd, #mrst{sig = Sig} = State) ->
- ok = couch_file:truncate(Fd, 0),
- ok = couch_file:write_header(Fd, {Sig, nil}),
- {_Commit, NewSt} = init_state(Db, Fd, reset_state(State), nil),
- NewSt.
-
-reset_state(State) ->
- State#mrst{
- fd = nil,
- qserver = nil,
- update_seq = 0,
- id_btree = nil,
- views = [View#mrview{btree = nil} || View <- State#mrst.views],
- view_info = #{}
- }.
-
-all_docs_key_opts(#mrargs{extra = Extra} = Args) ->
- all_docs_key_opts(Args, Extra).
-
-all_docs_key_opts(#mrargs{keys = undefined} = Args, Extra) ->
- all_docs_key_opts(Args#mrargs{keys = []}, Extra);
-all_docs_key_opts(#mrargs{keys = [], direction = Dir} = Args, Extra) ->
- [[{dir, Dir}] ++ ad_skey_opts(Args) ++ ad_ekey_opts(Args) ++ Extra];
-all_docs_key_opts(#mrargs{keys = Keys, direction = Dir} = Args, Extra) ->
- lists:map(
- fun(K) ->
- [{dir, Dir}] ++
- ad_skey_opts(Args#mrargs{start_key = K}) ++
- ad_ekey_opts(Args#mrargs{end_key = K}) ++
- Extra
- end,
- Keys
- ).
-
-ad_skey_opts(#mrargs{start_key = SKey}) when is_binary(SKey) ->
- [{start_key, SKey}];
-ad_skey_opts(#mrargs{start_key_docid = SKeyDocId}) ->
- [{start_key, SKeyDocId}].
-
-ad_ekey_opts(#mrargs{end_key = EKey} = Args) when is_binary(EKey) ->
- Type =
- if
- Args#mrargs.inclusive_end -> end_key;
- true -> end_key_gt
- end,
- [{Type, EKey}];
-ad_ekey_opts(#mrargs{end_key_docid = EKeyDocId} = Args) ->
- Type =
- if
- Args#mrargs.inclusive_end -> end_key;
- true -> end_key_gt
- end,
- [{Type, EKeyDocId}].
-
-key_opts(Args) ->
- key_opts(Args, []).
-
-key_opts(#mrargs{keys = undefined, direction = Dir} = Args, Extra) ->
- [[{dir, Dir}] ++ skey_opts(Args) ++ ekey_opts(Args) ++ Extra];
-key_opts(#mrargs{keys = Keys, direction = Dir} = Args, Extra) ->
- lists:map(
- fun(K) ->
- [{dir, Dir}] ++
- skey_opts(Args#mrargs{start_key = K}) ++
- ekey_opts(Args#mrargs{end_key = K}) ++
- Extra
- end,
- Keys
- ).
-
-skey_opts(#mrargs{start_key = undefined}) ->
- [];
-skey_opts(#mrargs{start_key = SKey, start_key_docid = SKeyDocId}) ->
- [{start_key, {SKey, SKeyDocId}}].
-
-ekey_opts(#mrargs{end_key = undefined}) ->
- [];
-ekey_opts(#mrargs{end_key = EKey, end_key_docid = EKeyDocId} = Args) ->
- case Args#mrargs.inclusive_end of
- true -> [{end_key, {EKey, EKeyDocId}}];
- false -> [{end_key_gt, {EKey, reverse_key_default(EKeyDocId)}}]
- end.
-
-reverse_key_default(<<>>) -> <<255>>;
-reverse_key_default(<<255>>) -> <<>>;
-reverse_key_default(Key) -> Key.
-
-reduced_external_size(Tree) ->
- case couch_btree:full_reduce(Tree) of
- {ok, {_, _, Size}} -> Size;
- % return 0 for versions of the reduce function without Size
- {ok, {_, _}} -> 0
- end.
-
-calculate_external_size(Views) ->
- SumFun = fun
- (#mrview{btree = nil}, Acc) ->
- Acc;
- (#mrview{btree = Bt}, Acc) ->
- Acc + reduced_external_size(Bt)
- end,
- {ok, lists:foldl(SumFun, 0, Views)}.
-
-calculate_active_size(Views) ->
- FoldFun = fun
- (#mrview{btree = nil}, Acc) ->
- Acc;
- (#mrview{btree = Bt}, Acc) ->
- Acc + couch_btree:size(Bt)
- end,
- {ok, lists:foldl(FoldFun, 0, Views)}.
-
-detuple_kvs([], Acc) ->
- lists:reverse(Acc);
-detuple_kvs([KV | Rest], Acc) ->
- {{Key, Id}, Value} = KV,
- NKV = [[Key, Id], Value],
- detuple_kvs(Rest, [NKV | Acc]).
-
-expand_dups([], Acc) ->
- lists:reverse(Acc);
-expand_dups([{Key, {dups, Vals}} | Rest], Acc) ->
- Expanded = [{Key, Val} || Val <- Vals],
- expand_dups(Rest, Expanded ++ Acc);
-expand_dups([KV | Rest], Acc) ->
- expand_dups(Rest, [KV | Acc]).
-
-maybe_load_doc(_Db, _DI, #mrargs{include_docs = false}) ->
- [];
-maybe_load_doc(Db, #doc_info{} = DI, #mrargs{conflicts = true, doc_options = Opts}) ->
- doc_row(couch_index_util:load_doc(Db, DI, [conflicts]), Opts);
-maybe_load_doc(Db, #doc_info{} = DI, #mrargs{doc_options = Opts}) ->
- doc_row(couch_index_util:load_doc(Db, DI, []), Opts).
-
-maybe_load_doc(_Db, _Id, _Val, #mrargs{include_docs = false}) ->
- [];
-maybe_load_doc(Db, Id, Val, #mrargs{conflicts = true, doc_options = Opts}) ->
- doc_row(couch_index_util:load_doc(Db, docid_rev(Id, Val), [conflicts]), Opts);
-maybe_load_doc(Db, Id, Val, #mrargs{doc_options = Opts}) ->
- doc_row(couch_index_util:load_doc(Db, docid_rev(Id, Val), []), Opts).
-
-doc_row(null, _Opts) ->
- [{doc, null}];
-doc_row(Doc, Opts) ->
- [{doc, couch_doc:to_json_obj(Doc, Opts)}].
-
-docid_rev(Id, {Props}) ->
- DocId = couch_util:get_value(<<"_id">>, Props, Id),
- Rev =
- case couch_util:get_value(<<"_rev">>, Props, nil) of
- nil -> nil;
- Rev0 -> couch_doc:parse_rev(Rev0)
- end,
- {DocId, Rev};
-docid_rev(Id, _) ->
- {Id, nil}.
-
-index_of(Key, List) ->
- index_of(Key, List, 1).
-
-index_of(_, [], _) ->
- throw({error, missing_named_view});
-index_of(Key, [Key | _], Idx) ->
- Idx;
-index_of(Key, [_ | Rest], Idx) ->
- index_of(Key, Rest, Idx + 1).
-
-mrverror(Mesg) ->
- throw({query_parse_error, Mesg}).
-
-%% Updates 2.x view files to 3.x or later view files
-%% transparently, the first time the 2.x view file is opened by
-%% 3.x or later.
-%%
-%% Here's how it works:
-%%
-%% Before opening a view index,
-%% If no matching index file is found in the new location:
-%% calculate the <= 2.x view signature
-%% if a file with that signature lives in the old location
-%% rename it to the new location with the new signature in the name.
-%% Then proceed to open the view index as usual.
-
-maybe_update_index_file(State) ->
- DbName = State#mrst.db_name,
- NewIndexFile = index_file(DbName, State#mrst.sig),
- % open in read-only mode so we don't create
- % the file if it doesn't exist.
- case file:open(NewIndexFile, [read, raw]) of
- {ok, Fd_Read} ->
- % The new index file exists; there is nothing to do here.
- file:close(Fd_Read);
- _Error ->
- update_index_file(State)
- end.
-
-update_index_file(State) ->
- Sig = sig_vsn_2x(State),
- DbName = State#mrst.db_name,
- FileName = couch_index_util:hexsig(Sig) ++ ".view",
- IndexFile = couch_index_util:index_file("mrview", DbName, FileName),
-
- % If we have an old index, rename it to the new position.
- case file:read_file_info(IndexFile) of
- {ok, _FileInfo} ->
- % Crash if the rename fails for any reason.
- % If the target already exists, the next request will find the
- % new file and we are good. We might need to catch this
- % further up to avoid a full server crash.
- NewIndexFile = index_file(DbName, State#mrst.sig),
- couch_log:notice(
- "Attempting to update legacy view index file"
- " from ~p to ~s",
- [IndexFile, NewIndexFile]
- ),
- ok = filelib:ensure_dir(NewIndexFile),
- ok = file:rename(IndexFile, NewIndexFile),
- couch_log:notice(
- "Successfully updated legacy view index file"
- " ~s",
- [IndexFile]
- ),
- Sig;
- {error, enoent} ->
- % Ignore missing index file
- ok;
- {error, Reason} ->
- couch_log:error(
- "Failed to update legacy view index file"
- " ~s : ~s",
- [IndexFile, file:format_error(Reason)]
- ),
- ok
- end.
-
-sig_vsn_2x(State) ->
- #mrst{
- lib = Lib,
- language = Language,
- design_opts = DesignOpts
- } = State,
- SI = proplists:get_value(<<"seq_indexed">>, DesignOpts, false),
- KSI = proplists:get_value(<<"keyseq_indexed">>, DesignOpts, false),
- Views = [old_view_format(V, SI, KSI) || V <- State#mrst.views],
- SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
- couch_hash:md5_hash(term_to_binary(SigInfo)).
-
-old_view_format(View, SI, KSI) ->
- {
- mrview,
- View#mrview.id_num,
- View#mrview.update_seq,
- View#mrview.purge_seq,
- View#mrview.map_names,
- View#mrview.reduce_funs,
- View#mrview.def,
- View#mrview.btree,
- nil,
- nil,
- SI,
- KSI,
- View#mrview.options
- }.
-
-maybe_update_header(#mrheader{view_info = Info} = Header) when is_map(Info) ->
- % Latest (3.2.1+) version. The size of the record is the same as
- % the <2.3.1 version. The main difference is that the LogBt field
- % is now a map. This trick allows for easy downgrading back to
- % version 3.2.1 and then upgrading back to 3.2.1+ if needed.
- {false, Header#mrheader{
- view_info = update_collator_versions(Info),
- view_states = [make_view_state(S) || S <- Header#mrheader.view_states]
- }};
-maybe_update_header({mrheader, Seq, PSeq, IDBt, ViewStates}) ->
- % Versions >2.3.1 and =<3.2.1 (no view info map)
- {true, #mrheader{
- seq = Seq,
- purge_seq = PSeq,
- id_btree_state = IDBt,
- view_info = update_collator_versions(#{}),
- view_states = [make_view_state(S) || S <- ViewStates]
- }};
-maybe_update_header({mrheader, Seq, PSeq, IDBt, _LogBt, ViewStates}) ->
- % Versions <2.3.1.
- {true, #mrheader{
- seq = Seq,
- purge_seq = PSeq,
- id_btree_state = IDBt,
- view_info = update_collator_versions(#{}),
- view_states = [make_view_state(S) || S <- ViewStates]
- }}.
-
-%% End of <= 2.x upgrade code.
-
-% Used for creating new view states or reading (upgrading) them from
-% disk. On disk, the state will be a 5-tuple with nil values in
-% positions 2 and 3 to allow downgrading between the current version
-% and =<3.2.1 views.
-%
-make_view_state(#mrview{} = View) ->
- BTState = get_btree_state(View#mrview.btree),
- {
- BTState,
- View#mrview.update_seq,
- View#mrview.purge_seq
- };
-make_view_state({BTState, UpdateSeq, PurgeSeq}) ->
- % Versions >2.x and =<3.2.1
- {BTState, UpdateSeq, PurgeSeq};
-make_view_state({BTState, _SeqBTOrNil, _KSeqBTOrNil, UpdateSeq, PurgeSeq}) ->
- % Current disk version and version 2.x views
- {BTState, UpdateSeq, PurgeSeq};
-make_view_state(nil) ->
- {nil, 0, 0}.
-
-% Used by make_header/1 before committing to disk. The two added nil
-% values in positions 2 and 3 make the state on disk look like a 2.x
-% view, where those fields used to be SeqBTState and KSeqBTState,
-% respectively. This allows easy downgrading between the current
-% version and >2.x, =<3.2.1 views.
-%
-make_disk_view_state(#mrview{} = View) ->
- BTState = get_btree_state(View#mrview.btree),
- {
- BTState,
- nil,
- nil,
- View#mrview.update_seq,
- View#mrview.purge_seq
- };
-make_disk_view_state({BTState, UpdateSeq, PurgeSeq}) ->
- {BTState, nil, nil, UpdateSeq, PurgeSeq};
-make_disk_view_state(nil) ->
- {nil, nil, nil, 0, 0}.
-
-get_key_btree_state(ViewState) ->
- element(1, ViewState).
-
-get_update_seq(ViewState) ->
- element(2, ViewState).
-
-get_purge_seq(ViewState) ->
- element(3, ViewState).
-
-get_count(Reduction) ->
- element(1, Reduction).
-
-get_user_reds(Reduction) ->
- element(2, Reduction).
-
-% This is for backwards compatibility with seq btree reduces
-get_external_size_reds(Reduction) when is_integer(Reduction) ->
- 0;
-get_external_size_reds(Reduction) when tuple_size(Reduction) == 2 ->
- 0;
-get_external_size_reds(Reduction) when tuple_size(Reduction) == 3 ->
- element(3, Reduction).
-
-make_reduce_fun(Lang, ReduceFuns) ->
- FunSrcs = [FunSrc || {_, FunSrc} <- ReduceFuns],
- fun
- (reduce, KVs0) ->
- KVs = detuple_kvs(expand_dups(KVs0, []), []),
- {ok, Result} = couch_query_servers:reduce(Lang, FunSrcs, KVs),
- ExternalSize = kv_external_size(KVs, Result),
- {length(KVs), Result, ExternalSize};
- (rereduce, Reds) ->
- ExtractFun = fun(Red, {CountsAcc0, URedsAcc0, ExtAcc0}) ->
- CountsAcc = CountsAcc0 + get_count(Red),
- URedsAcc = lists:append(URedsAcc0, [get_user_reds(Red)]),
- ExtAcc = ExtAcc0 + get_external_size_reds(Red),
- {CountsAcc, URedsAcc, ExtAcc}
- end,
- {Counts, UReds, ExternalSize} = lists:foldl(
- ExtractFun,
- {0, [], 0},
- Reds
- ),
- {ok, Result} = couch_query_servers:rereduce(Lang, FunSrcs, UReds),
- {Counts, Result, ExternalSize}
- end.
-
-maybe_define_less_fun(#mrview{options = Options}) ->
- case couch_util:get_value(<<"collation">>, Options) of
- <<"raw">> -> undefined;
- _ -> fun couch_ejson_compare:less_json_ids/2
- end.
-
-count_reduce(reduce, KVs) ->
- CountFun = fun
- ({_, {dups, Vals}}, Acc) -> Acc + length(Vals);
- (_, Acc) -> Acc + 1
- end,
- Count = lists:foldl(CountFun, 0, KVs),
- {Count, []};
-count_reduce(rereduce, Reds) ->
- CountFun = fun(Red, Acc) ->
- Acc + get_count(Red)
- end,
- Count = lists:foldl(CountFun, 0, Reds),
- {Count, []}.
-
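A hedged eunit-style sketch of the two phases above (assuming it sits next to count_reduce/2 in this module; keys and values are made up): the reduce phase counts raw KVs, expanding {dups, Vals} entries, while the rereduce phase sums counts already computed for child btree nodes.

    count_reduce_sketch_test() ->
        % reduce: one plain KV plus a 3-element dups entry -> 4
        ?assertEqual({4, []}, count_reduce(reduce, [{k1, v1}, {k2, {dups, [a, b, c]}}])),
        % rereduce: sums the first element of each child reduction -> 7
        ?assertEqual({7, []}, count_reduce(rereduce, [{4, []}, {3, []}])).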
-make_user_reds_reduce_fun(Lang, ReduceFuns, NthRed) ->
- LPad = lists:duplicate(NthRed - 1, []),
- RPad = lists:duplicate(length(ReduceFuns) - NthRed, []),
- {_, FunSrc} = lists:nth(NthRed, ReduceFuns),
- fun
- (reduce, KVs0) ->
- KVs = detuple_kvs(expand_dups(KVs0, []), []),
- {ok, Result} = couch_query_servers:reduce(Lang, [FunSrc], KVs),
- {0, LPad ++ Result ++ RPad};
- (rereduce, Reds) ->
- ExtractFun = fun(Reds0) ->
- [lists:nth(NthRed, get_user_reds(Reds0))]
- end,
- UReds = lists:map(ExtractFun, Reds),
- {ok, Result} = couch_query_servers:rereduce(Lang, [FunSrc], UReds),
- {0, LPad ++ Result ++ RPad}
- end.
-
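The LPad/RPad padding above keeps the single recomputed reduction aligned with its position in the full reduce-function list; a small sketch with three reduce functions and NthRed = 2 (the function name is illustrative only):

    pad_single_reduction(Result) ->
        NthRed = 2,
        NumFuns = 3,
        LPad = lists:duplicate(NthRed - 1, []),       % [[]]
        RPad = lists:duplicate(NumFuns - NthRed, []), % [[]]
        LPad ++ Result ++ RPad.                       % [[], R, []] when Result = [R]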
-get_btree_state(nil) ->
- nil;
-get_btree_state(#btree{} = Btree) ->
- couch_btree:get_state(Btree).
-
-extract_view_reduce({red, {N, _Lang, #mrview{reduce_funs = Reds}}, _Ref}) ->
- {_Name, FunSrc} = lists:nth(N, Reds),
- FunSrc.
-
-get_view_keys({Props}) ->
- case couch_util:get_value(<<"keys">>, Props) of
- undefined ->
- undefined;
- Keys when is_list(Keys) ->
- Keys;
- _ ->
- throw({bad_request, "`keys` member must be an array."})
- end.
-
-get_view_queries({Props}) ->
- case couch_util:get_value(<<"queries">>, Props) of
- undefined ->
- undefined;
- Queries when is_list(Queries) ->
- Queries;
- _ ->
- throw({bad_request, "`queries` member must be an array."})
- end.
-
-kv_external_size(KVList, Reduction) ->
- lists:foldl(
- fun([[Key, _], Value], Acc) ->
- ?term_size(Key) + ?term_size(Value) + Acc
- end,
- ?term_size(Reduction),
- KVList
- ).
-
-update_collator_versions(#{} = ViewInfo) ->
- Versions = maps:get(ucol_vs, ViewInfo, []),
- Ver = tuple_to_list(couch_ejson_compare:get_collator_version()),
- ViewInfo#{ucol_vs => lists:usort([Ver | Versions])}.
-
-get_collator_versions(#{ucol_vs := Versions}) when is_list(Versions) ->
- Versions;
-get_collator_versions(#{}) ->
- [].
-
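To illustrate how the collator bookkeeping above behaves, a brief sketch (illustrative only): recording the running ICU collator version twice leaves a single, sorted entry in the ucol_vs list.

    collator_versions_sketch() ->
        Info0 = update_collator_versions(#{}),   % ucol_vs now holds the current version
        Info1 = update_collator_versions(Info0), % same version again; usort keeps one copy
        get_collator_versions(Info1).            % -> [CurrentVersionAsList]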
-compact_on_collator_upgrade() ->
- config:get_boolean("view_upgrade", "compact_on_collator_upgrade", true).
-
-commit_on_header_upgrade() ->
- config:get_boolean("view_upgrade", "commit_on_header_upgrade", true).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
deleted file mode 100644
index 1a81d4f0a..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
+++ /dev/null
@@ -1,145 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_all_docs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-all_docs_test_() ->
- {
- "_all_docs view tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_query/1,
- fun should_query_with_range/1,
- fun should_query_with_range_rev/1,
- fun should_query_with_limit_and_skip/1,
- fun should_query_with_include_docs/1,
- fun should_query_empty_views/1
- ]
- }
- }
- }.
-
-should_query(Db) ->
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 11}, {offset, 0}]},
- mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>),
- mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>),
- mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>),
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
- mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>),
- mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>),
- mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>),
- mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range(Db) ->
- Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]),
- Expect =
- {ok, [
- {meta, [{total, 11}, {offset, 3}]},
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range_rev(Db) ->
- Result = run_query(Db, [
- {direction, rev},
- {start_key, <<"5">>},
- {end_key, <<"3">>},
- {inclusive_end, true}
- ]),
- Expect =
- {ok, [
- {meta, [{total, 11}, {offset, 5}]},
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_limit_and_skip(Db) ->
- Result = run_query(Db, [
- {start_key, <<"2">>},
- {limit, 3},
- {skip, 3}
- ]),
- Expect =
- {ok, [
- {meta, [{total, 11}, {offset, 5}]},
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
- mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_include_docs(Db) ->
- Result = run_query(Db, [
- {start_key, <<"8">>},
- {end_key, <<"8">>},
- {include_docs, true}
- ]),
- Doc =
- {[
- {<<"_id">>, <<"8">>},
- {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
- {<<"val">>, 8}
- ]},
- Val = {[{rev, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}]},
- Expect =
- {ok, [
- {meta, [{total, 11}, {offset, 8}]},
- {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_empty_views(Db) ->
- Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
- Expect =
- {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-mk_row(Id, Rev) ->
- {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
-
-run_query(Db, Opts) ->
- couch_mrview:query_all_docs(Db, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
deleted file mode 100644
index c00b97b33..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
+++ /dev/null
@@ -1,236 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_collation_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
--define(VALUES, [
- null,
- false,
- true,
-
- 1,
- 2,
- 3.0,
- 4,
-
- <<"a">>,
- <<"A">>,
- <<"aa">>,
- <<"b">>,
- <<"B">>,
- <<"ba">>,
- <<"bb">>,
-
- % U+200B is a zero-width space, which will be ignored by ICU but will cause
- % the raw collator to treat these as three distinct keys
- <<"c">>,
- unicode:characters_to_binary([$c, 16#200B]),
- unicode:characters_to_binary([$c, 16#200B, 16#200B]),
-
- [<<"a">>],
- [<<"b">>],
- [<<"b">>, <<"c">>],
- [<<"b">>, <<"c">>, <<"a">>],
- [<<"b">>, <<"d">>],
- [<<"b">>, <<"d">>, <<"e">>],
-
- {[{<<"a">>, 1}]},
- {[{<<"a">>, 2}]},
- {[{<<"b">>, 1}]},
- {[{<<"b">>, 2}]},
- {[{<<"b">>, 2}, {<<"a">>, 1}]},
- {[{<<"b">>, 2}, {<<"c">>, 2}]},
-
- % Values with depth > 10 trigger the erlang collation fallback in couch_ejson_compare
- {[{<<"x">>, [[[[[[[[[[[<<"y">>]]]]]]]]]]]}]}
-]).
-
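As the comment in the list above notes, the zero-width-space keys are where ICU and raw collation diverge; a minimal sketch of the equality check these tests lean on (the same couch_ejson_compare:less/2 call used by find_offset/2 below), assuming the default ICU collation:

    zwsp_collation_sketch() ->
        C = <<"c">>,
        CZwsp = unicode:characters_to_binary([$c, 16#200B]),
        % 0 means "collates equal"; a raw byte-wise comparison would not return 0
        0 = couch_ejson_compare:less(C, CZwsp),
        ok.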
-setup() ->
- {ok, Db1} = couch_mrview_test_util:new_db(?tempdb(), map),
- Docs = [couch_mrview_test_util:ddoc(red) | make_docs()],
- {ok, Db2} = couch_mrview_test_util:save_docs(Db1, Docs),
- Db2.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-collation_test_() ->
- {
- "Collation tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_collate_fwd/1,
- fun should_collate_rev/1,
- fun should_collate_range_/1,
- fun should_collate_with_inclusive_end_fwd/1,
- fun should_collate_with_inclusive_end_rev/1,
- fun should_collate_without_inclusive_end_fwd/1,
- fun should_collate_without_inclusive_end_rev/1,
- fun should_collate_with_endkey_docid/1,
- fun should_use_collator_for_reduce_grouping/1
- ]
- }
- }
- }.
-
-should_collate_fwd(Db) ->
- {ok, Results} = run_query(Db, []),
- Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ rows(),
- ?_assertEquiv(Expect, Results).
-
-should_collate_rev(Db) ->
- {ok, Results} = run_query(Db, [{direction, rev}]),
- Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ lists:reverse(rows()),
- ?_assertEquiv(Expect, Results).
-
-should_collate_range_(Db) ->
- Index = lists:zip(lists:seq(0, length(?VALUES) - 1), ?VALUES),
- lists:map(
- fun(V) ->
- {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
- Expect = [
- {meta, [{total, length(?VALUES)}, find_offset(Index, V)]}
- | find_matching_rows(Index, V)
- ],
- ?_assertEquiv(Expect, Results)
- end,
- ?VALUES
- ).
-
-find_offset(Index, Value) ->
- [{Offset, _} | _] = lists:dropwhile(
- fun({_, V}) ->
- couch_ejson_compare:less(Value, V) =/= 0
- end,
- Index
- ),
- {offset, Offset}.
-
-find_matching_rows(Index, Value) ->
- Matches = lists:filter(
- fun({_, V}) ->
- couch_ejson_compare:less(Value, V) =:= 0
- end,
- Index
- ),
- lists:map(
- fun({Id, V}) ->
- {row, [{id, list_to_binary(integer_to_list(Id))}, {key, V}, {value, 0}]}
- end,
- Matches
- ).
-
-should_collate_with_inclusive_end_fwd(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, true}],
- {ok, Rows0} = run_query(Db, Opts),
- LastRow = lists:last(Rows0),
- Expect = {row, [{id, <<"10">>}, {key, <<"b">>}, {value, 0}]},
- ?_assertEqual(Expect, LastRow).
-
-should_collate_with_inclusive_end_rev(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, true}, {direction, rev}],
- {ok, Rows} = run_query(Db, Opts),
- LastRow = lists:last(Rows),
- Expect = {row, [{id, <<"10">>}, {key, <<"b">>}, {value, 0}]},
- ?_assertEqual(Expect, LastRow).
-
-should_collate_without_inclusive_end_fwd(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, false}],
- {ok, Rows0} = run_query(Db, Opts),
- LastRow = lists:last(Rows0),
- Expect = {row, [{id, <<"9">>}, {key, <<"aa">>}, {value, 0}]},
- ?_assertEqual(Expect, LastRow).
-
-should_collate_without_inclusive_end_rev(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, false}, {direction, rev}],
- {ok, Rows} = run_query(Db, Opts),
- LastRow = lists:last(Rows),
- Expect = {row, [{id, <<"11">>}, {key, <<"B">>}, {value, 0}]},
- ?_assertEqual(Expect, LastRow).
-
-should_collate_with_endkey_docid(Db) ->
- ?_test(begin
- {ok, Rows0} = run_query(Db, [
- {end_key, <<"b">>},
- {end_key_docid, <<"10">>},
- {inclusive_end, false}
- ]),
- Result0 = lists:last(Rows0),
- Expect0 = {row, [{id, <<"9">>}, {key, <<"aa">>}, {value, 0}]},
- ?assertEqual(Expect0, Result0),
-
- {ok, Rows1} = run_query(Db, [
- {end_key, <<"b">>},
- {end_key_docid, <<"11">>},
- {inclusive_end, false}
- ]),
- Result1 = lists:last(Rows1),
- Expect1 = {row, [{id, <<"10">>}, {key, <<"b">>}, {value, 0}]},
- ?assertEqual(Expect1, Result1)
- end).
-
-should_use_collator_for_reduce_grouping(Db) ->
- UniqueKeys = lists:usort(
- fun(A, B) ->
- not couch_ejson_compare:less_json(B, A)
- end,
- ?VALUES
- ),
- {ok, [{meta, _} | Rows]} = reduce_query(Db, [{group_level, exact}]),
- ?_assertEqual(length(UniqueKeys), length(Rows)).
-
-make_docs() ->
- {Docs, _} = lists:foldl(
- fun(V, {Docs0, Count}) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, list_to_binary(integer_to_list(Count))},
- {<<"foo">>, V}
- ]}
- ),
- {[Doc | Docs0], Count + 1}
- end,
- {[], 0},
- ?VALUES
- ),
- Docs.
-
-rows() ->
- {Rows, _} = lists:foldl(
- fun(V, {Rows0, Count}) ->
- Id = list_to_binary(integer_to_list(Count)),
- Row = {row, [{id, Id}, {key, V}, {value, 0}]},
- {[Row | Rows0], Count + 1}
- end,
- {[], 0},
- ?VALUES
- ),
- lists:reverse(Rows).
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts).
-
-reduce_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/red">>, <<"zing">>, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
deleted file mode 100644
index df035c649..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
+++ /dev/null
@@ -1,130 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_compact_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 1000),
- ok = meck:new(couch_mrview_compactor, [passthrough]),
- Db.
-
-teardown(Db) ->
- meck:unload(),
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-compaction_test_() ->
- {
- "Compaction tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_swap/1,
- fun should_remove/1
- ]
- }
- }
- }.
-
-should_swap(Db) ->
- ?_test(begin
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
- {ok, QPid} = start_query(Db),
- {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]),
- receive
- {'DOWN', MonRef, process, _, _} -> ok
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "compaction failed"}
- ]}
- )
- end,
- QPid ! {self(), continue},
- receive
- {QPid, Count} ->
- ?assertEqual(1000, Count)
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "query failed"}
- ]}
- )
- end
- end).
-
-should_remove(Db) ->
- ?_test(begin
- DDoc = <<"_design/bar">>,
- {ok, _Results} = couch_mrview:query_view(Db, DDoc, <<"baz">>),
- {ok, IndexPid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- ok = couch_index:compact(IndexPid, []),
- {ok, CompactorPid} = couch_index:get_compactor_pid(IndexPid),
- {ok, CompactingPid} = couch_index_compactor:get_compacting_pid(CompactorPid),
- MonRef = erlang:monitor(process, CompactingPid),
- exit(CompactingPid, crash),
- receive
- {'DOWN', MonRef, process, _, crash} ->
- meck:wait(couch_mrview_compactor, remove_compacted, '_', 100),
- ?assertEqual(
- 1,
- meck:num_calls(
- couch_mrview_compactor, remove_compacted, '_', IndexPid
- )
- ),
- ?assert(is_process_alive(IndexPid)),
- ?assert(is_process_alive(CompactorPid))
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "compaction didn't exit :/"}
- ]}
- )
- end
- end).
-
-start_query(Db) ->
- Self = self(),
- Pid = spawn(fun() ->
- CB = fun
- (_, wait) ->
- receive
- {Self, continue} -> {ok, 0}
- end;
- ({row, _}, Count) ->
- {ok, Count + 1};
- (_, Count) ->
- {ok, Count}
- end,
- {ok, Result} =
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
- Self ! {self(), Result}
- end),
- {ok, Pid}.
diff --git a/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
deleted file mode 100644
index 91b24e336..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
+++ /dev/null
@@ -1,157 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_ddoc_updated_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- Name = ?tempdb(),
- couch_server:delete(Name, [?ADMIN_CTX]),
- {ok, Db} = couch_db:create(Name, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/bar">>},
- {<<"views">>,
- {[
- {<<"baz">>,
- {[
- {<<"map">>, <<
- "function(doc) {\n"
- " emit(doc.val, doc.val);\n"
- "}"
- >>}
- ]}}
- ]}}
- ]}
- ),
- [Doc1 | Docs999] = couch_mrview_test_util:make_docs(map, 100),
- {ok, _} = couch_db:update_docs(Db, [DDoc, Doc1], []),
- {ok, Db2} = couch_db:reopen(Db),
-
- % run a query with 1 doc to initialize couch_index process
- CB = fun
- ({row, _}, Count) -> {ok, Count + 1};
- (_, Count) -> {ok, Count}
- end,
- {ok, _} =
- couch_mrview:query_view(Db2, <<"_design/bar">>, <<"baz">>, [], CB, 0),
-
- meck:new(couch_index_updater, [passthrough]),
- meck:expect(couch_index_updater, update, fun(Idx, Mod, IdxSt) ->
- timer:sleep(5000),
- meck:passthrough([Idx, Mod, IdxSt])
- end),
-
- % add more docs
- {ok, _} = couch_db:update_docs(Db2, Docs999, []),
- {ok, Db3} = couch_db:reopen(Db2),
- Db3.
-
-teardown(Db) ->
- meck:unload(couch_index_updater),
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-ddoc_update_test_() ->
- {
- "Check ddoc update actions",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun check_indexing_stops_on_ddoc_change/1
- ]
- }
- }
- }.
-
-check_indexing_stops_on_ddoc_change(Db) ->
- ?_test(begin
- DDocID = <<"_design/bar">>,
-
- IndexesBefore = get_indexes_by_ddoc(couch_db:name(Db), DDocID, 1),
- ?assertEqual(1, length(IndexesBefore)),
- AliveBefore = lists:filter(fun erlang:is_process_alive/1, IndexesBefore),
- ?assertEqual(1, length(AliveBefore)),
-
- {ok, DDoc} = couch_db:open_doc(Db, DDocID, [ejson_body, ?ADMIN_CTX]),
- DDocJson2 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DDocID},
- {<<"_deleted">>, true},
- {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)}
- ]}
- ),
-
- % spawn a process for query
- Self = self(),
- QPid = spawn(fun() ->
- {ok, Result} = couch_mrview:query_view(
- Db, <<"_design/bar">>, <<"baz">>, []
- ),
- Self ! {self(), Result}
- end),
-
- % while indexing for the query is in progress, delete DDoc
- {ok, _} = couch_db:update_doc(Db, DDocJson2, []),
- receive
- {QPid, Msg} ->
- ?assertEqual(Msg, ddoc_updated)
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "test failed"}
- ]}
- )
- end,
-
- %% assert that previously running indexes are gone
- IndexesAfter = get_indexes_by_ddoc(couch_db:name(Db), DDocID, 0),
- ?assertEqual(0, length(IndexesAfter)),
- AliveAfter = lists:filter(fun erlang:is_process_alive/1, IndexesBefore),
- ?assertEqual(0, length(AliveAfter))
- end).
-
-get_indexes_by_ddoc(DbName0, DDocID, N) ->
- Indexes = test_util:wait(fun() ->
- Indxs = ets:match_object(
- couch_index_server:by_db(DbName0), {'$1', {DDocID, '$2'}}
- ),
- case length(Indxs) == N of
- true ->
- Indxs;
- false ->
- wait
- end
- end),
- lists:foldl(
- fun({DbName, {_DDocID, Sig}}, Acc) ->
- case ets:lookup(couch_index_server:by_sig(DbName), {DbName, Sig}) of
- [{_, Pid}] -> [Pid | Acc];
- _ -> Acc
- end
- end,
- [],
- Indexes
- ).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
deleted file mode 100644
index 3e4cbc84f..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
+++ /dev/null
@@ -1,557 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_ddoc_validation_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(LIB, {[{<<"mylib">>, {[{<<"lib1">>, <<"x=42">>}]}}]}).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-ddoc_validation_test_() ->
- {
- "ddoc validation tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_reject_invalid_js_map/1,
- fun should_reject_invalid_js_reduce/1,
- fun should_reject_invalid_builtin_reduce/1,
- fun should_reject_non_object_options/1,
- fun should_reject_non_object_filters/1,
- fun should_accept_obj_in_filters/1,
- fun should_reject_non_object_lists/1,
- fun should_accept_obj_in_lists/1,
- fun should_reject_non_object_shows/1,
- fun should_accept_obj_in_shows/1,
- fun should_reject_non_object_updates/1,
- fun should_accept_obj_in_updates/1,
- fun should_reject_non_object_views/1,
- fun should_reject_non_string_language/1,
- fun should_reject_non_string_validate_doc_update/1,
- fun should_accept_string_rewrites/1,
- fun should_reject_bad_rewrites/1,
- fun should_accept_option/1,
- fun should_accept_any_option/1,
- fun should_accept_filter/1,
- fun should_reject_non_string_or_obj_filter_function/1,
- fun should_accept_list/1,
- fun should_reject_non_string_or_obj_list_function/1,
- fun should_accept_show/1,
- fun should_reject_non_string_or_obj_show_function/1,
- fun should_accept_update/1,
- fun should_reject_non_string_or_obj_update_function/1,
- fun should_accept_view/1,
- fun should_accept_view_with_reduce/1,
- fun should_accept_view_with_lib/1,
- fun should_reject_view_that_is_not_an_object/1,
- fun should_reject_view_without_map_function/1,
- fun should_reject_view_with_non_string_map_function/1,
- fun should_reject_view_with_non_string_reduce_function/1,
- fun should_accept_any_in_lib/1,
- fun should_accept_map_object_for_queries/1,
- fun should_reject_map_non_objects_for_queries/1
- ]
- }
- }
- }.
-
-should_reject_invalid_js_map(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_invalid_js_map">>},
- {<<"views">>,
- {[
- {<<"foo">>,
- {[
- {<<"map">>, <<"function(doc) }{">>}
- ]}}
- ]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, compilation_error, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_invalid_js_reduce(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_invalid_js_reduce">>},
- {<<"views">>,
- {[
- {<<"foo">>,
- {[
- {<<"map">>, <<"function(doc) { emit(null); }">>},
- {<<"reduce">>, <<"function(k, v, r) }{}">>}
- ]}}
- ]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, compilation_error, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_invalid_builtin_reduce(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_invalid_builtin_reduce">>},
- {<<"views">>,
- {[
- {<<"foo">>,
- {[
- {<<"map">>, <<"function(doc) { emit(null); }">>},
- {<<"reduce">>, <<"_foobar">>}
- ]}}
- ]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_non_object_options(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_object_options">>},
- {<<"options">>, <<"invalid">>}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_non_object_filters(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_object_filters">>},
- {<<"filters">>, <<"invalid">>}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_obj_in_filters(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_obj_in_filters">>},
- {<<"filters">>, ?LIB}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_lists(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_object_lists">>},
- {<<"lists">>, <<"invalid">>}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_non_object_shows(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_object_shows">>},
- {<<"shows">>, <<"invalid">>}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_obj_in_shows(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_obj_in_shows">>},
- {<<"shows">>, ?LIB}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_updates(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_object_updates">>},
- {<<"updates">>, <<"invalid">>}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_obj_in_updates(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_obj_in_updates">>},
- {<<"updates">>, ?LIB}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_views(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_object_views">>},
- {<<"views">>, <<"invalid">>}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_non_string_language(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_string_language">>},
- {<<"language">>, 1}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_non_string_validate_doc_update(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_string_vdu">>},
- {<<"validate_doc_update">>, 1}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_string_rewrites(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>},
- {<<"rewrites">>, <<"function(req){}">>}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_bad_rewrites(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>},
- {<<"rewrites">>, 42}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_option(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_options">>},
- {<<"options">>, {[{<<"option1">>, <<"function(doc,req){}">>}]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_any_option(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_any_option">>},
- {<<"options">>, {[{<<"option1">>, true}]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_filter(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_filters">>},
- {<<"filters">>, {[{<<"filter1">>, <<"function(doc,req){}">>}]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_or_obj_filter_function(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_filter_function">>},
- {<<"filters">>, {[{<<"filter1">>, 1}]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_list(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_lists">>},
- {<<"lists">>, {[{<<"list1">>, <<"function(doc,req){}">>}]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_or_obj_list_function(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_list_function">>},
- {<<"lists">>, {[{<<"list1">>, 1}]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_obj_in_lists(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_obj_in_lists">>},
- {<<"lists">>, ?LIB}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_show(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_shows">>},
- {<<"shows">>, {[{<<"show1">>, <<"function(doc,req){}">>}]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_or_obj_show_function(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_show_function">>},
- {<<"shows">>, {[{<<"show1">>, 1}]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_update(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_updates">>},
- {<<"updates">>, {[{<<"update1">>, <<"function(doc,req){}">>}]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_or_obj_update_function(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_update_function">>},
- {<<"updates">>, {[{<<"update1">>, 1}]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_view(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_view">>},
- {<<"views">>,
- {[
- {<<"view1">>, {[{<<"map">>, <<"function(d){}">>}]}}
- ]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_view_with_reduce(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_view_with_reduce">>},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>, <<"function(d){}">>},
- {<<"reduce">>, <<"function(d){}">>}
- ]}}
- ]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_view_with_lib(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_view_with_lib">>},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>, <<"function(d){}">>}
- ]}},
- {<<"lib">>,
- {[
- {<<"lib1">>, <<"x=42">>}
- ]}}
- ]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_view_that_is_not_an_object(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_non_object_view">>},
- {<<"views">>, {[{<<"view1">>, <<"thisisbad">>}]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_view_without_map_function(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_view_without_map">>},
- {<<"views">>,
- {[
- {<<"view1">>, {[]}}
- ]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_view_with_non_string_map_function(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_view_with_nonstr_map">>},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>, {[]}}
- ]}}
- ]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_reject_view_with_non_string_reduce_function(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_view_with_nonstr_reduce">>},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>, <<"function(d){}">>},
- {<<"reduce">>, 1}
- ]}}
- ]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
-
-should_accept_any_in_lib(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_any_in_lib">>},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>, <<"function(d){}">>}
- ]}},
- {<<"lib">>, {[{<<"lib1">>, {[]}}]}}
- ]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_map_object_for_queries(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_accept_map_objects_for_queries">>},
- {<<"language">>, <<"query">>},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>,
- {[
- {<<"x">>, <<"y">>}
- ]}}
- ]}}
- ]}}
- ]}
- ),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_map_non_objects_for_queries(Db) ->
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/should_reject_map_non_objects__with_nonstr_reduce">>},
- {<<"language">>, <<"query">>},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>, <<"function(d){}">>}
- ]}}
- ]}}
- ]}
- ),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])
- ).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
deleted file mode 100644
index b1de2839d..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
+++ /dev/null
@@ -1,144 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_design_docs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), design),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-design_docs_test_() ->
- {
- "_design_docs view tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_query/1,
- fun should_query_with_range/1,
- fun should_query_with_range_rev/1,
- fun should_query_with_limit_and_skip/1,
- fun should_query_with_include_docs/1
- ]
- }
- }
- }.
-
-should_query(Db) ->
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 10}]},
- mk_row(<<"_design/bar01">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar02">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar08">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar09">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar10">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_design/bar03">>},
- {end_key, <<"_design/bar05">>}
- ]),
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 12}]},
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range_rev(Db) ->
- Result = run_query(Db, [
- {direction, rev},
- {start_key, <<"_design/bar05">>},
- {end_key, <<"_design/bar03">>},
- {inclusive_end, true}
- ]),
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 5}]},
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_limit_and_skip(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_design/bar02">>},
- {limit, 3},
- {skip, 3}
- ]),
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 14}]},
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_include_docs(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_design/bar08">>},
- {end_key, <<"_design/bar08">>},
- {include_docs, true}
- ]),
- Doc =
- {[
- {<<"_id">>, <<"_design/bar08">>},
- {<<"_rev">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>},
- {<<"views">>, {[]}}
- ]},
- Val = {[{rev, <<"1-0b24e44a44af45e51e562fd124ce3007">>}]},
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 17}]},
- {row, [
- {id, <<"_design/bar08">>},
- {key, <<"_design/bar08">>},
- {value, Val},
- {doc, Doc}
- ]}
- ]},
- ?_assertEqual(Expect, Result).
-
-mk_row(Id, Rev) ->
- {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
-
-run_query(Db, Opts0) ->
- Opts = [{extra, [{namespace, <<"_design">>}]} | Opts0],
- couch_mrview:query_all_docs(Db, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
deleted file mode 100644
index bfa4965a4..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
+++ /dev/null
@@ -1,37 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_http_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-mrview_http_test_() ->
- [
- ?_assertEqual(
- #mrargs{group_level = undefined, group = true},
- couch_mrview_http:parse_params(
- [{"group", "true"}],
- undefined,
- #mrargs{}
- )
- ),
-
- ?_assertEqual(
- #mrargs{group_level = 1, group = undefined},
- couch_mrview_http:parse_params(
- [{"group_level", "1"}],
- undefined,
- #mrargs{}
- )
- )
- ].
diff --git a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
deleted file mode 100644
index bf64eaea3..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_index_info_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
- {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
- {Db, Info}.
-
-teardown({Db, _}) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-view_info_test_() ->
- {
- "Views index tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun sig_is_binary/1,
- fun language_is_js/1,
- fun file_size_is_non_neg_int/1,
- fun active_size_is_non_neg_int/1,
- fun external_size_is_non_neg_int/1,
- fun active_size_less_than_file_size/1,
- fun update_seq_is_non_neg_int/1,
- fun purge_seq_is_non_neg_int/1,
- fun update_opts_is_bin_list/1
- ]
- }
- }
- }.
-
-sig_is_binary({_, Info}) ->
- ?_assert(is_binary(prop(signature, Info))).
-
-language_is_js({_, Info}) ->
- ?_assertEqual(<<"javascript">>, prop(language, Info)).
-
-file_size_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int([sizes, file], Info)).
-
-active_size_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int([sizes, active], Info)).
-
-external_size_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int([sizes, external], Info)).
-
-active_size_less_than_file_size({_, Info}) ->
- ?_assert(prop([sizes, active], Info) < prop([sizes, file], Info)).
-
-update_seq_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int(update_seq, Info)).
-
-purge_seq_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int(purge_seq, Info)).
-
-update_opts_is_bin_list({_, Info}) ->
- Opts = prop(update_options, Info),
- ?_assert(
- is_list(Opts) andalso
- (Opts == [] orelse lists:all(fun erlang:is_binary/1, Opts))
- ).
-
-check_non_neg_int(Key, Info) ->
- Size = prop(Key, Info),
- is_integer(Size) andalso Size >= 0.
-
-prop(Key, {Props}) when is_list(Props) ->
- prop(Key, Props);
-prop([Key], Info) ->
- prop(Key, Info);
-prop([Key | Rest], Info) ->
- prop(Rest, prop(Key, Info));
-prop(Key, Info) when is_atom(Key), is_list(Info) ->
- couch_util:get_value(Key, Info).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
deleted file mode 100644
index 7c812eeb0..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
+++ /dev/null
@@ -1,158 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_local_docs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), local),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-all_docs_test_() ->
- {
- "_local_docs view tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_query/1,
- fun should_query_with_range/1,
- fun should_query_with_range_rev/1,
- fun should_query_with_limit_and_skip/1,
- fun should_query_with_include_docs/1,
- fun should_query_with_update_seq/1
- ]
- }
- }
- }.
-
-should_query(Db) ->
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(1),
- mk_row(10),
- mk_row(2),
- mk_row(3),
- mk_row(4),
- mk_row(5),
- mk_row(6),
- mk_row(7),
- mk_row(8),
- mk_row(9)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_local/3">>},
- {end_key, <<"_local/5">>}
- ]),
- Expect =
- {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(3),
- mk_row(4),
- mk_row(5)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range_rev(Db) ->
- Result = run_query(Db, [
- {direction, rev},
- {start_key, <<"_local/5">>},
- {end_key, <<"_local/3">>},
- {inclusive_end, true}
- ]),
- Expect =
- {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(5),
- mk_row(4),
- mk_row(3)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_limit_and_skip(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_local/2">>},
- {limit, 3},
- {skip, 3}
- ]),
- Expect =
- {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(5),
- mk_row(6),
- mk_row(7)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_include_docs(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_local/8">>},
- {end_key, <<"_local/8">>},
- {include_docs, true}
- ]),
- {row, Doc0} = mk_row(8),
- Doc =
- Doc0 ++
- [
- {doc,
- {[
- {<<"_id">>, <<"_local/8">>},
- {<<"_rev">>, <<"0-1">>},
- {<<"val">>, 8}
- ]}}
- ],
- Expect =
- {ok, [
- {meta, [{total, null}, {offset, null}]},
- {row, Doc}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_update_seq(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_local/2">>},
- {limit, 1},
- {update_seq, true}
- ]),
- Expect =
- {ok, [
- {meta, [{total, null}, {offset, null}, {update_seq, null}]},
- mk_row(2)
- ]},
- ?_assertEqual(Expect, Result).
-
-mk_row(IntId) ->
- Id = list_to_binary(io_lib:format("_local/~b", [IntId])),
- {row, [{id, Id}, {key, Id}, {value, {[{rev, <<"0-1">>}]}}]}.
-
-run_query(Db, Opts0) ->
- Opts = [{extra, [{namespace, <<"_local">>}]} | Opts0],
- couch_mrview:query_all_docs(Db, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
deleted file mode 100644
index 0f8357a98..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
+++ /dev/null
@@ -1,152 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_map_views_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-map_views_test_() ->
- {
- "Map views",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_map/1,
- fun should_map_with_range/1,
- fun should_map_with_limit_and_skip/1,
- fun should_map_with_include_docs/1,
- fun should_map_empty_views/1,
- fun should_give_ext_size_seq_indexed_test/1
- ]
- }
- }
- }.
-
-should_map(Db) ->
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
- {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
- {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
- {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
- {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_map_with_range(Db) ->
- Result = run_query(Db, [
- {direction, rev},
- {start_key, 5},
- {end_key, 3},
- {inclusive_end, true}
- ]),
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 5}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_map_with_limit_and_skip(Db) ->
- Result = run_query(Db, [
- {start_key, 2},
- {limit, 3},
- {skip, 3}
- ]),
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
- {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_map_with_include_docs(Db) ->
- Result = run_query(Db, [
- {start_key, 8},
- {end_key, 8},
- {include_docs, true}
- ]),
- Doc =
- {[
- {<<"_id">>, <<"8">>},
- {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
- {<<"val">>, 8}
- ]},
- Expect =
- {ok, [
- {meta, [{total, 10}, {offset, 7}]},
- {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_map_empty_views(Db) ->
- Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
- Expect =
- {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_give_ext_size_seq_indexed_test(Db) ->
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/seqdoc">>},
- {<<"options">>, {[{<<"seq_indexed">>, true}]}},
- {<<"views">>,
- {[
- {<<"view1">>,
- {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>}
- ]}}
- ]}}
- ]}
- ),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- {ok, Db1} = couch_db:open_int(couch_db:name(Db), []),
- {ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]),
- couch_mrview:query_view(Db1, DDoc1, <<"view1">>, [{update, true}]),
- {ok, Info} = couch_mrview:get_info(Db1, DDoc),
- Size = couch_util:get_nested_json_value({Info}, [sizes, external]),
- ok = couch_db:close(Db1),
- ?_assert(is_number(Size)).
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
deleted file mode 100644
index 3207a3da3..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
+++ /dev/null
@@ -1,314 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_purge_docs_fabric_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-% seconds
--define(TIMEOUT, 60).
-
-setup_all() ->
- Ctx = test_util:start_couch([fabric, mem3]),
- meck:new(couch_mrview_index, [passthrough]),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-setup() ->
- DbName = ?tempdb(),
- ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]),
- meck:reset([couch_mrview_index]),
- meck:expect(couch_mrview_index, ensure_local_purge_docs, fun(A, B) ->
- meck:passthrough([A, B])
- end),
- DbName.
-
-teardown(DbName) ->
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
-
-view_purge_fabric_test_() ->
- {
- "Map views",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun test_purge_verify_index/1,
- fun test_purge_hook_before_compaction/1
- ]
- }
- }
- }.
-
-test_purge_verify_index(DbName) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Docs1 = couch_mrview_test_util:make_docs(normal, 5),
- {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(
- DbName,
- couch_mrview_test_util:ddoc(map),
- [?ADMIN_CTX]
- ),
-
- Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect1 =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect1, Result1),
-
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(0, couch_util:get_value(<<"purge_seq">>, Props1)),
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest] = ShardNames,
- ?assertEqual(
- true,
- couch_mrview_index:verify_index_exists(
- ShardDbName, Props1
- )
- ),
-
- purge_docs(DbName, [<<"1">>]),
-
- Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect2 =
- {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2),
-
- {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
- ?assertEqual(
- true,
- couch_mrview_index:verify_index_exists(
- ShardDbName, Props2
- )
- )
- end)}.
-
-test_purge_hook_before_compaction(DbName) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Docs1 = couch_mrview_test_util:make_docs(normal, 5),
- {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(
- DbName,
- couch_mrview_test_util:ddoc(map),
- [?ADMIN_CTX]
- ),
-
- Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect1 =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect1, Result1),
-
- purge_docs(DbName, [<<"1">>]),
-
- Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect2 =
- {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2),
-
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
-
- [ShardName | _] = local_shards(DbName),
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(
- ok,
- meck:wait(
- 1,
- couch_mrview_index,
- ensure_local_purge_docs,
- '_',
- 5000
- )
- ),
-
-            % Make sure compaction didn't change the purge seq
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
-
- purge_docs(DbName, [<<"2">>]),
-
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(
- ok,
- meck:wait(
- 2,
- couch_mrview_index,
- ensure_local_purge_docs,
- '_',
- 5000
- )
- ),
-
- % Make sure compaction after a purge didn't overwrite
- % the local purge doc for the index
- {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
-
-            % Force another update to ensure that we update
-            % the local doc appropriately after compaction
- Result3 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect3 =
- {ok, [
- {meta, [{total, 3}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect3, Result3),
-
- {ok, #doc{body = {Props3}}} = get_local_purge_doc(DbName),
- ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props3)),
-
-            % Check that if the local doc doesn't exist, one
-            % is created for the index on compaction
- delete_local_purge_doc(DbName),
- ?assertMatch({not_found, _}, get_local_purge_doc(DbName)),
-
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(
- ok,
- meck:wait(
- 3,
- couch_mrview_index,
- ensure_local_purge_docs,
- '_',
- 5000
- )
- ),
-
- {ok, #doc{body = {Props4}}} = get_local_purge_doc(DbName),
- ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props4))
- end)}.
-
-get_local_purge_doc(DbName) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []),
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- Sig = IdxState#mrst.sig,
- HexSig = list_to_binary(couch_index_util:hexsig(Sig)),
- DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
- [ShardName | _] = local_shards(DbName),
- couch_util:with_db(ShardName, fun(Db) ->
- couch_db:open_doc(Db, DocId, [])
- end).
-
-delete_local_purge_doc(DbName) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []),
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- Sig = IdxState#mrst.sig,
- HexSig = list_to_binary(couch_index_util:hexsig(Sig)),
- DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
- NewDoc = #doc{id = DocId, deleted = true},
- [ShardName | _] = local_shards(DbName),
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:update_doc(Db, NewDoc, [])
- end).
-
-get_rev(#full_doc_info{} = FDI) ->
- #doc_info{
- revs = [#rev_info{} = PrevRev | _]
- } = couch_doc:to_doc_info(FDI),
- PrevRev#rev_info.rev.
-
-purge_docs(DbName, DocIds) ->
- lists:foreach(
- fun(DocId) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- Rev = get_rev(FDI),
- {ok, [{ok, _}]} = fabric:purge_docs(DbName, [{DocId, [Rev]}], [])
- end,
- DocIds
- ).
-
-wait_compaction(DbName, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, 10000) of
- timeout ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for database compaction"}
- ]}
- );
- _ ->
- ok
- end.
-
-is_compaction_running(DbName) ->
- {ok, DbInfo} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:get_db_info(Db)
- end),
- couch_util:get_value(compact_running, DbInfo).
-
-local_shards(DbName) ->
- try
- [ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)]
- catch
- error:database_does_not_exist ->
- []
- end.
diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
deleted file mode 100644
index 63c5de458..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
+++ /dev/null
@@ -1,607 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_purge_docs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- meck:new(couch_index_updater, [passthrough]),
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 5),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- meck:unload(),
- ok.
-
-view_purge_test_() ->
- {
- "Map views",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun test_purge_single/1,
- fun test_purge_partial/1,
- fun test_purge_complete/1,
- fun test_purge_nochange/1,
- fun test_purge_index_reset/1,
- fun test_purge_compact_size_check/1,
- fun test_purge_single_for_docid_with_list/1,
- fun test_purge_complete_for_docid_with_list/1,
- fun test_purge_compact_for_stale_purge_cp_without_client/1,
- fun test_purge_compact_for_stale_purge_cp_with_client/1
- ]
- }
- }
- }.
-
-test_purge_single(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev = get_rev(FDI),
- {ok, [{ok, _PRevs}]} = couch_db:purge_docs(
- Db,
- [{<<"UUID1">>, <<"1">>, [Rev]}]
- ),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 =
- {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-test_purge_single_for_docid_with_list(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev = get_rev(FDI),
- {ok, [{ok, _PRevs}]} = couch_db:purge_docs(
- Db,
- [{<<"UUID1">>, "1", [Rev]}]
- ),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 =
- {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-test_purge_partial(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev1 = get_rev(FDI1),
- Update =
- {[
- {'_id', <<"1">>},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {'val', 1.2}
- ]},
- {ok, [_Rev2]} = save_docs(Db, [Update], [replicated_changes]),
-
- PurgeInfos = [{<<"UUID1">>, <<"1">>, [Rev1]}],
-
- {ok, _} = couch_db:purge_docs(Db, PurgeInfos),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1.2}, {value, 1.2}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-test_purge_complete(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev1 = get_rev(FDI1),
- FDI2 = couch_db:get_full_doc_info(Db, <<"2">>),
- Rev2 = get_rev(FDI2),
- FDI5 = couch_db:get_full_doc_info(Db, <<"5">>),
- Rev5 = get_rev(FDI5),
-
- PurgeInfos = [
- {<<"UUID1">>, <<"1">>, [Rev1]},
- {<<"UUID2">>, <<"2">>, [Rev2]},
- {<<"UUID5">>, <<"5">>, [Rev5]}
- ],
- {ok, _} = couch_db:purge_docs(Db, PurgeInfos),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 =
- {ok, [
- {meta, [{total, 2}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-test_purge_complete_for_docid_with_list(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev1 = get_rev(FDI1),
- FDI2 = couch_db:get_full_doc_info(Db, <<"2">>),
- Rev2 = get_rev(FDI2),
- FDI5 = couch_db:get_full_doc_info(Db, <<"5">>),
- Rev5 = get_rev(FDI5),
-
- PurgeInfos = [
- {<<"UUID1">>, "1", [Rev1]},
- {<<"UUID2">>, "2", [Rev2]},
- {<<"UUID5">>, "5", [Rev5]}
- ],
- {ok, _} = couch_db:purge_docs(Db, PurgeInfos),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 =
- {ok, [
- {meta, [{total, 2}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-test_purge_nochange(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev1 = get_rev(FDI1),
-
- PurgeInfos = [
- {<<"UUID1">>, <<"6">>, [Rev1]}
- ],
- {ok, _} = couch_db:purge_docs(Db, PurgeInfos),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-test_purge_index_reset(Db) ->
- ?_test(begin
- ok = couch_db:set_purge_infos_limit(Db, 2),
- {ok, Db1} = couch_db:reopen(Db),
-
- Result = run_query(Db1, []),
- Expect =
- {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- PurgeInfos = lists:map(
- fun(I) ->
- DocId = list_to_binary(integer_to_list(I)),
- FDI = couch_db:get_full_doc_info(Db, DocId),
- Rev = get_rev(FDI),
- {couch_uuids:random(), DocId, [Rev]}
- end,
- lists:seq(1, 5)
- ),
- {ok, _} = couch_db:purge_docs(Db1, PurgeInfos),
-
- {ok, Db2} = couch_db:reopen(Db1),
-
- % Forcibly set the purge doc to a newer purge
- % sequence to force an index reset. This should
- % never happen in real life but the reset
- % is required for correctness.
- {ok, #doc{body = {OldProps}} = LocalDoc} = get_local_purge_doc(Db2),
- NewPurgeSeq = {<<"purge_seq">>, 5},
- NewProps = lists:keyreplace(<<"purge_seq">>, 1, OldProps, NewPurgeSeq),
- RewindDoc = LocalDoc#doc{body = {NewProps}},
- {ok, _} = couch_db:update_doc(Db2, RewindDoc, []),
-
- % Compact the database to remove purge infos
- {ok, _} = couch_db:start_compact(Db2),
- wait_compaction(couch_db:name(Db), "database", ?LINE),
-
- {ok, Db3} = couch_db:reopen(Db2),
- Result2 = run_query(Db3, []),
- Expect2 =
- {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
- ?assertEqual(Expect2, Result2),
-
- % Assert that we had a reset
- meck:wait(
- 1,
- couch_index_updater,
- handle_info,
- [{'EXIT', '_', {reset, '_'}}, '_'],
- 5000
- )
- end).
-
-test_purge_compact_size_check(Db) ->
- ?_test(begin
- DbName = couch_db:name(Db),
- Docs = couch_mrview_test_util:make_docs(normal, 6, 200),
- {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs),
- _Result = run_query(Db1, []),
- DiskSizeBefore = db_disk_size(DbName),
-
- PurgedDocsNum = 150,
- IdsRevs = lists:foldl(
- fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end,
- [],
- lists:seq(1, PurgedDocsNum)
- ),
- {ok, _} = couch_db:purge_docs(Db1, IdsRevs),
-
- {ok, Db2} = couch_db:reopen(Db1),
- _Result1 = run_query(Db2, []),
- {ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsNum, length(PurgedIdRevs)),
- config:set("couchdb", "file_compression", "snappy", false),
-
- {ok, Db3} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db3),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db3),
- DiskSizeAfter = db_disk_size(DbName),
- ?assert(DiskSizeBefore > DiskSizeAfter)
- end).
-
-test_purge_compact_for_stale_purge_cp_without_client(Db) ->
- ?_test(begin
- DbName = couch_db:name(Db),
- % add more documents to database for purge
- Docs = couch_mrview_test_util:make_docs(normal, 6, 200),
- {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs),
-
-        % lower PurgedDocsLimit from the default 1000 to 10
-        % to avoid an eunit test timeout
- PurgedDocsLimit = 10,
- couch_db:set_purge_infos_limit(Db1, PurgedDocsLimit),
-
- % purge 150 documents
- PurgedDocsNum = 150,
- PurgeInfos = lists:foldl(
- fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end,
- [],
- lists:seq(1, PurgedDocsNum)
- ),
- {ok, _} = couch_db:purge_docs(Db1, PurgeInfos),
-
- {ok, Db2} = couch_db:reopen(Db1),
- {ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsNum, length(PurgedIdRevs)),
-
- % run compaction to trigger pruning of purge tree
- {ok, Db3} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db3),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db3),
-
- % check the remaining purge requests in purge tree
- {ok, Db4} = couch_db:reopen(Db3),
- OldestPSeq = couch_db:get_oldest_purge_seq(Db4),
- {ok, PurgedIdRevs2} = couch_db:fold_purge_infos(
- Db4,
- OldestPSeq - 1,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2))
- end).
-
-test_purge_compact_for_stale_purge_cp_with_client(Db) ->
- ?_test(begin
- DbName = couch_db:name(Db),
- % add more documents to database for purge
- Docs = couch_mrview_test_util:make_docs(normal, 6, 200),
- {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs),
-
-        % lower PurgedDocsLimit from the default 1000 to 10
-        % to avoid an eunit test timeout
- PurgedDocsLimit = 10,
- couch_db:set_purge_infos_limit(Db1, PurgedDocsLimit),
- _Result = run_query(Db1, []),
-
- % first purge 30 documents
- PurgedDocsNum1 = 30,
- IdsRevs = lists:foldl(
- fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end,
- [],
- lists:seq(1, PurgedDocsNum1)
- ),
- {ok, _} = couch_db:purge_docs(Db1, IdsRevs),
-
- {ok, Db2} = couch_db:reopen(Db1),
- % run query again to reflect purge request to mrview
- _Result1 = run_query(Db2, []),
- {ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsNum1, length(PurgedIdRevs)),
-
- % then purge 120 documents
- PurgedDocsNum2 = 150,
- IdsRevs2 = lists:foldl(
- fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end,
- [],
- lists:seq(PurgedDocsNum1 + 1, PurgedDocsNum2)
- ),
- {ok, _} = couch_db:purge_docs(Db2, IdsRevs2),
-
- % run compaction to trigger pruning of purge tree
- % only the first 30 purge requests are pruned
- {ok, Db3} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db3),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db3),
-
- % check the remaining purge requests in purge tree
- {ok, Db4} = couch_db:reopen(Db3),
- OldestPSeq = couch_db:get_oldest_purge_seq(Db4),
- {ok, PurgedIdRevs2} = couch_db:fold_purge_infos(
- Db4,
- OldestPSeq - 1,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsNum2 - PurgedDocsNum1, length(PurgedIdRevs2))
- end).
-
-get_local_purge_doc(Db) ->
- {ok, DDoc} = couch_db:open_doc(Db, <<"_design/bar">>, []),
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
- Sig = IdxState#mrst.sig,
- HexSig = list_to_binary(couch_index_util:hexsig(Sig)),
- DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
- couch_db:open_doc(Db, DocId, []).
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
-
-save_docs(Db, JsonDocs, Options) ->
- Docs = lists:map(
- fun(JDoc) ->
- couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc)))
- end,
- JsonDocs
- ),
- Opts = [full_commit | Options],
- case lists:member(replicated_changes, Options) of
- true ->
- {ok, []} = couch_db:update_docs(
- Db, Docs, Opts, replicated_changes
- ),
- {ok,
- lists:map(
- fun(Doc) ->
- {Pos, [RevId | _]} = Doc#doc.revs,
- {Pos, RevId}
- end,
- Docs
- )};
- false ->
- {ok, Resp} = couch_db:update_docs(Db, Docs, Opts),
- {ok, [Rev || {ok, Rev} <- Resp]}
- end.
-
-get_rev(#full_doc_info{} = FDI) ->
- #doc_info{
- revs = [#rev_info{} = PrevRev | _]
- } = couch_doc:to_doc_info(FDI),
- PrevRev#rev_info.rev.
-
-db_disk_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- active_size(Info).
-
-active_size(Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, active]).
-
-wait_compaction(DbName, Kind, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, 10000) of
- timeout ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, Line},
- {reason,
- "Timeout waiting for " ++
- Kind ++
- " database compaction"}
- ]}
- );
- _ ->
- ok
- end.
-
-is_compaction_running(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- couch_db:close(Db),
- couch_util:get_value(compact_running, DbInfo).
-
-fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
- {ok, [{Id, Revs} | Acc]}.
-
-docid(I) ->
- list_to_binary(integer_to_list(I)).
-
-uuid(I) ->
- Str = io_lib:format("UUID~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
deleted file mode 100644
index b6042b6c7..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
+++ /dev/null
@@ -1,97 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_red_views_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), red),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-reduce_views_test_() ->
- {
- "Reduce views",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_reduce_basic/1,
- fun should_reduce_key_range/1,
- fun should_reduce_with_group_level/1,
- fun should_reduce_with_group_exact/1
- ]
- }
- }
- }.
-
-should_reduce_basic(Db) ->
- Result = run_query(Db, []),
- Expect =
- {ok, [
- {meta, []},
- {row, [{key, null}, {value, 55}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_reduce_key_range(Db) ->
- Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]),
- Expect =
- {ok, [
- {meta, []},
- {row, [{key, null}, {value, 6}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_reduce_with_group_level(Db) ->
- Result = run_query(Db, [{group_level, 1}]),
- Expect =
- {ok, [
- {meta, []},
- {row, [{key, [0]}, {value, 30}]},
- {row, [{key, [1]}, {value, 25}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_reduce_with_group_exact(Db) ->
- Result = run_query(Db, [{group_level, exact}]),
- Expect =
- {ok, [
- {meta, []},
- {row, [{key, [0, 2]}, {value, 2}]},
- {row, [{key, [0, 4]}, {value, 4}]},
- {row, [{key, [0, 6]}, {value, 6}]},
- {row, [{key, [0, 8]}, {value, 8}]},
- {row, [{key, [0, 10]}, {value, 10}]},
- {row, [{key, [1, 1]}, {value, 1}]},
- {row, [{key, [1, 3]}, {value, 3}]},
- {row, [{key, [1, 5]}, {value, 5}]},
- {row, [{key, [1, 7]}, {value, 7}]},
- {row, [{key, [1, 9]}, {value, 9}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/red">>, <<"baz">>, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
deleted file mode 100644
index a495fd82c..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
+++ /dev/null
@@ -1,37 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_util_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-couch_mrview_util_test_() ->
- [
- ?_assertEqual(0, validate_group_level(undefined, undefined)),
- ?_assertEqual(exact, validate_group_level(true, undefined)),
- ?_assertEqual(0, validate_group_level(false, undefined)),
- ?_assertEqual(1, validate_group_level(undefined, 1)),
- ?_assertEqual(0, validate_group_level(true, 0)),
- ?_assertEqual(0, validate_group_level(undefined, 0)),
- ?_assertEqual(1, validate_group_level(true, 1)),
- ?_assertEqual(0, validate_group_level(false, 0)),
- ?_assertThrow(
- {query_parse_error, <<"Can't specify group=false and group_level>0 at the same time">>},
- validate_group_level(false, 1)
- )
- ].
-
-validate_group_level(Group, GroupLevel) ->
- Args0 = #mrargs{group = Group, group_level = GroupLevel, view_type = red},
- Args1 = couch_mrview_util:validate_args(Args0),
- Args1#mrargs.group_level.
diff --git a/src/couch_peruser/.gitignore b/src/couch_peruser/.gitignore
deleted file mode 100644
index 93fc2e28b..000000000
--- a/src/couch_peruser/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-/.eunit
-/doc
-/ebin
-/deps
-/.rebar
-/couchperuser-*
-erl_crash.dump
-TEST-*.xml
-*.beam
diff --git a/src/couch_peruser/LICENSE b/src/couch_peruser/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/src/couch_peruser/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_peruser/README.md b/src/couch_peruser/README.md
deleted file mode 100644
index 64a05188c..000000000
--- a/src/couch_peruser/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# couch_peruser [![Build Status](https://travis-ci.org/apache/couchdb-peruser.svg?branch=master)](https://travis-ci.org/apache/couchdb-peruser)
-
-couch_peruser is a CouchDB application that ensures that a private per-user
-database exists for each document in _users. These databases are
-writable only by the corresponding user. Databases are in the form:
-
- userdb-{hex encoded username}
-
-For example, the user `bob` will have a database named `userdb-626f62`.
-
-The reason for hex encoding is that CouchDB usernames have no restrictions,
-but CouchDB databases have restrictions. Hex encoding the UTF-8
-representation of the username is a transformation that's easy to
-correctly implement in just about any language, especially JavaScript
-and Erlang. Other encodings would be possible, but would require
-additional client and server-side code to support that encoding. This
-is the simplest scheme that is obviously correct.
-
-## Implementation Notes
-
-The module itself is a `gen_server` and it implements the `mem3_cluster`
-behaviour.
-
-In a CouchDB cluster, the module runs on each node in the cluster. On startup,
-it launches a changes listener for each shard of the `authentication_db`
-(`_users`).
-
-In a cluster, when a change notification comes in (after a user doc has been
-created/updated/deleted), each node independently calculates if it should
-handle the notification based on the current list of active nodes in the
-cluster. This ensures that we avoid trying to update the internal `_dbs`
-concurrently and causing conflicts. It also ensures that at least one node
-does handle a notification. The mechanism that handles this does survive
-cluster reconfigurations transparently.
diff --git a/src/couch_peruser/src/couch_peruser.app.src b/src/couch_peruser/src/couch_peruser.app.src
deleted file mode 100644
index 6cfaf4421..000000000
--- a/src/couch_peruser/src/couch_peruser.app.src
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_peruser, [
- {description, "couch_peruser - maintains per-user databases in CouchDB"},
- {vsn, git},
- {registered, [couch_peruser, couch_peruser_sup]},
- {applications, [kernel, stdlib, config, couch, fabric, mem3]},
- {mod, {couch_peruser_app, []}},
- {env, []}
-]}.
diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
deleted file mode 100644
index c87ffb2fb..000000000
--- a/src/couch_peruser/src/couch_peruser.erl
+++ /dev/null
@@ -1,505 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_peruser).
--behaviour(gen_server).
--behaviour(mem3_cluster).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-% gen_server callbacks
--export([
- start_link/0,
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
--export([init_changes_handler/1, changes_handler/3]).
-
-% mem3_cluster callbacks
--export([
- cluster_stable/1,
- cluster_unstable/1
-]).
-
--record(changes_state, {
- parent :: pid(),
- db_name :: binary(),
- delete_dbs :: boolean(),
- changes_pid :: pid(),
- changes_ref :: reference(),
- q_for_peruser_db :: integer(),
- peruser_dbname_prefix :: binary()
-}).
-
--record(state, {
- parent :: pid(),
- db_name :: binary(),
- delete_dbs :: boolean(),
- states :: list(),
- mem3_cluster_pid :: pid(),
- cluster_stable :: boolean(),
- q_for_peruser_db :: integer(),
- peruser_dbname_prefix :: binary()
-}).
-
--define(DEFAULT_USERDB_PREFIX, "userdb-").
--define(RELISTEN_DELAY, 5000).
-% seconds
--define(DEFAULT_QUIET_PERIOD, 60).
-% seconds
--define(DEFAULT_START_PERIOD, 5).
-
-%%
-%% Please leave in the commented-out couch_log:debug calls, thanks! — Jan
-%%
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
--spec init_state() -> #state{}.
-init_state() ->
- couch_log:debug("peruser: starting on node ~p in pid ~p", [node(), self()]),
- case config:get_boolean("couch_peruser", "enable", false) of
- false ->
- couch_log:debug("peruser: disabled on node ~p", [node()]),
- #state{};
- true ->
- couch_log:debug("peruser: enabled on node ~p", [node()]),
- DbName = ?l2b(
- config:get(
- "couch_httpd_auth", "authentication_db", "_users"
- )
- ),
- DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
- Q = config:get_integer("couch_peruser", "q", 1),
- Prefix = config:get("couch_peruser", "database_prefix", ?DEFAULT_USERDB_PREFIX),
- case couch_db:validate_dbname(Prefix) of
- ok ->
- ok;
- Error ->
- couch_log:error(
- "couch_peruser can't proceed as illegal database prefix ~p.\n"
- " Error: ~p",
- [Prefix, Error]
- ),
- throw(Error)
- end,
-
- % set up cluster-stable listener
- Period = abs(
- config:get_integer(
- "couch_peruser",
- "cluster_quiet_period",
- ?DEFAULT_QUIET_PERIOD
- )
- ),
- StartPeriod = abs(
- config:get_integer(
- "couch_peruser",
- "cluster_start_period",
- ?DEFAULT_START_PERIOD
- )
- ),
-
- {ok, Mem3Cluster} = mem3_cluster:start_link(
- ?MODULE,
- self(),
- StartPeriod,
- Period
- ),
-
- #state{
- parent = self(),
- db_name = DbName,
- delete_dbs = DeleteDbs,
- mem3_cluster_pid = Mem3Cluster,
- cluster_stable = false,
- q_for_peruser_db = Q,
- peruser_dbname_prefix = ?l2b(Prefix)
- }
- end.
-
--spec start_listening(State :: #state{}) -> #state{} | ok.
-start_listening(#state{states = ChangesStates} = State) when
- length(ChangesStates) > 0
-->
- % couch_log:debug("peruser: start_listening() already run on node ~p in pid ~p", [node(), self()]),
- State;
-start_listening(
- #state{
- db_name = DbName,
- delete_dbs = DeleteDbs,
- q_for_peruser_db = Q,
- peruser_dbname_prefix = Prefix
- } = State
-) ->
- % couch_log:debug("peruser: start_listening() on node ~p", [node()]),
- try
- States = lists:map(
- fun(A) ->
- S = #changes_state{
- parent = State#state.parent,
- db_name = A#shard.name,
- delete_dbs = DeleteDbs,
- q_for_peruser_db = Q,
- peruser_dbname_prefix = Prefix
- },
- {Pid, Ref} = spawn_opt(
- ?MODULE, init_changes_handler, [S], [link, monitor]
- ),
- S#changes_state{changes_pid = Pid, changes_ref = Ref}
- end,
- mem3:local_shards(DbName)
- ),
- % couch_log:debug("peruser: start_listening() States ~p", [States]),
-
- State#state{states = States, cluster_stable = true}
- catch
- error:database_does_not_exist ->
- couch_log:warning(
- "couch_peruser can't proceed as underlying database (~s) is missing, disables itself.",
- [DbName]
- ),
- config:set(
- "couch_peruser",
- "enable",
- "false",
- lists:concat([binary_to_list(DbName), " is missing"])
- )
- end.
-
--spec init_changes_handler(ChangesState :: #changes_state{}) -> ok.
-init_changes_handler(#changes_state{db_name = DbName} = ChangesState) ->
- % couch_log:debug("peruser: init_changes_handler() on DbName ~p", [DbName]),
- try
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX, sys_db]),
- FunAcc = {fun ?MODULE:changes_handler/3, ChangesState},
- (couch_changes:handle_db_changes(
- #changes_args{feed = "continuous", timeout = infinity},
- {json_req, null},
- Db
- ))(
- FunAcc
- )
- catch
- error:database_does_not_exist ->
- ok
- end.
-
--type db_change() :: {atom(), tuple(), binary()}.
--spec changes_handler(
- Change :: db_change(),
- ResultType :: any(),
- ChangesState :: #changes_state{}
-) -> #changes_state{}.
-changes_handler(
- {change, {Doc}, _Prepend},
- _ResType,
- ChangesState = #changes_state{
- db_name = DbName,
- q_for_peruser_db = Q,
- peruser_dbname_prefix = Prefix
- }
-) ->
- % couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]),
-
- case couch_util:get_value(<<"id">>, Doc) of
- <<"org.couchdb.user:", User/binary>> = DocId ->
- case should_handle_doc(DbName, DocId) of
- true ->
- case couch_util:get_value(<<"deleted">>, Doc, false) of
- false ->
- UserDb = ensure_user_db(Prefix, User, Q),
- ok = ensure_security(User, UserDb, fun add_user/3),
- ChangesState;
- true ->
- case ChangesState#changes_state.delete_dbs of
- true ->
- _UserDb = delete_user_db(Prefix, User),
- ChangesState;
- false ->
- UserDb = user_db_name(Prefix, User),
- ok = ensure_security(User, UserDb, fun remove_user/3),
- ChangesState
- end
- end;
- false ->
- ChangesState
- end;
- _ ->
- ChangesState
- end;
-changes_handler(_Event, _ResType, ChangesState) ->
- ChangesState.
-
--spec should_handle_doc(ShardName :: binary(), DocId :: binary()) -> boolean().
-should_handle_doc(ShardName, DocId) ->
- case is_stable() of
- false ->
- % when the cluster is unstable, we have already stopped all Listeners
- % the next stable event will restart all listeners and pick up this
- % doc change
- couch_log:debug(
- "peruser: skipping, cluster unstable ~s/~s",
- [ShardName, DocId]
- ),
- false;
- true ->
- should_handle_doc_int(ShardName, DocId)
- end.
-
--spec should_handle_doc_int(
- ShardName :: binary(),
- DocId :: binary()
-) -> boolean().
-should_handle_doc_int(ShardName, DocId) ->
- DbName = mem3:dbname(ShardName),
- Live = [erlang:node() | erlang:nodes()],
- Shards = mem3:shards(DbName, DocId),
- Nodes = [N || #shard{node = N} <- Shards, lists:member(N, Live)],
- case mem3:owner(DbName, DocId, Nodes) of
- ThisNode when ThisNode =:= node() ->
- couch_log:debug("peruser: handling ~s/~s", [DbName, DocId]),
- % do the database action
- true;
- _OtherNode ->
- couch_log:debug("peruser: skipping ~s/~s", [DbName, DocId]),
- false
- end.
-
--spec delete_user_db(Prefix :: binary(), User :: binary()) -> binary().
-delete_user_db(Prefix, User) ->
- UserDb = user_db_name(Prefix, User),
- try
- case fabric:delete_db(UserDb, [?ADMIN_CTX]) of
- ok -> ok;
- accepted -> ok
- end
- catch
- error:database_does_not_exist ->
- ok
- end,
- UserDb.
-
--spec ensure_user_db(Prefix :: binary(), User :: binary(), Q :: integer()) -> binary().
-ensure_user_db(Prefix, User, Q) ->
- UserDb = user_db_name(Prefix, User),
- try
- {ok, _DbInfo} = fabric:get_db_info(UserDb)
- catch
- error:database_does_not_exist ->
- case fabric:create_db(UserDb, [?ADMIN_CTX, {q, integer_to_list(Q)}]) of
- {error, file_exists} -> ok;
- ok -> ok;
- accepted -> ok
- end
- end,
- UserDb.
-
--spec add_user(
- User :: binary(),
- Properties :: tuple(),
- Acc :: tuple()
-) -> tuple().
-add_user(User, Prop, {Modified, SecProps}) ->
- {PropValue} = couch_util:get_value(Prop, SecProps, {[]}),
- Names = couch_util:get_value(<<"names">>, PropValue, []),
- case lists:member(User, Names) of
- true ->
- {Modified, SecProps};
- false ->
- {true,
- lists:keystore(
- Prop,
- 1,
- SecProps,
- {Prop,
- {lists:keystore(
- <<"names">>,
- 1,
- PropValue,
- {<<"names">>, [User | Names]}
- )}}
- )}
- end.
-
--spec remove_user(
- User :: binary(),
- Properties :: tuple(),
- Acc :: tuple()
-) -> tuple().
-remove_user(User, Prop, {Modified, SecProps}) ->
- {PropValue} = couch_util:get_value(Prop, SecProps, {[]}),
- Names = couch_util:get_value(<<"names">>, PropValue, []),
- case lists:member(User, Names) of
- false ->
- {Modified, SecProps};
- true ->
- {true,
- lists:keystore(
- Prop,
- 1,
- SecProps,
- {Prop,
- {lists:keystore(
- <<"names">>,
- 1,
- PropValue,
- {<<"names">>, lists:delete(User, Names)}
- )}}
- )}
- end.
-
--spec ensure_security(
- User :: binary(),
- UserDb :: binary(),
- TransformFun :: fun()
-) -> ok.
-ensure_security(User, UserDb, TransformFun) ->
- case fabric:get_all_security(UserDb, [?ADMIN_CTX]) of
- {error, no_majority} ->
- % TODO: make sure this is still true: single node, ignore
- ok;
- {ok, Shards} ->
- {_ShardInfo, {SecProps}} = hd(Shards),
- % assert that shards have the same security object
- true = lists:all(
- fun({_, {SecProps1}}) ->
- SecProps =:= SecProps1
- end,
- Shards
- ),
- case
- lists:foldl(
- fun(Prop, SAcc) -> TransformFun(User, Prop, SAcc) end,
- {false, SecProps},
- [<<"admins">>, <<"members">>]
- )
- of
- {false, _} ->
- ok;
- {true, SecProps1} ->
- ok = fabric:set_security(UserDb, {SecProps1}, [?ADMIN_CTX])
- end
- end.
-
--spec user_db_name(Prefix :: binary(), User :: binary()) -> binary().
-user_db_name(Prefix, User) ->
- HexUser = list_to_binary(
- [string:to_lower(integer_to_list(X, 16)) || <<X>> <= User]
- ),
- <<Prefix/binary, HexUser/binary>>.
-
--spec exit_changes(State :: #state{}) -> ok.
-exit_changes(State) ->
- lists:foreach(
- fun(ChangesState) ->
- demonitor(ChangesState#changes_state.changes_ref, [flush]),
- unlink(ChangesState#changes_state.changes_pid),
- exit(ChangesState#changes_state.changes_pid, kill)
- end,
- State#state.states
- ).
-
--spec is_stable() -> true | false.
-is_stable() ->
- gen_server:call(?MODULE, is_stable).
-
--spec subscribe_for_changes() -> ok.
-subscribe_for_changes() ->
- config:subscribe_for_changes([
- {"couch_httpd_auth", "authentication_db"},
- "couch_peruser"
- ]).
-
-% Mem3 cluster callbacks
-
-% TODO: find out what type Server is
--spec cluster_unstable(Server :: any()) -> any().
-cluster_unstable(Server) ->
- gen_server:cast(Server, cluster_unstable),
- Server.
-
-% TODO: find out what type Server is
--spec cluster_stable(Server :: any()) -> any().
-cluster_stable(Server) ->
- gen_server:cast(Server, cluster_stable),
- Server.
-
-%% gen_server callbacks
--spec init(Options :: list()) -> {ok, #state{}}.
-init([]) ->
- ok = subscribe_for_changes(),
- {ok, init_state()}.
-
-handle_call(is_stable, _From, #state{cluster_stable = IsStable} = State) ->
- {reply, IsStable, State};
-handle_call(_Msg, _From, State) ->
- {reply, error, State}.
-
-handle_cast(update_config, State) when State#state.states =/= undefined ->
- exit_changes(State),
- {noreply, init_state()};
-handle_cast(update_config, _) ->
- {noreply, init_state()};
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(cluster_unstable, State) when State#state.states =/= undefined ->
- exit_changes(State),
- {noreply, init_state()};
-handle_cast(cluster_unstable, _) ->
- {noreply, init_state()};
-handle_cast(cluster_stable, State) ->
- {noreply, start_listening(State)};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'DOWN', _Ref, _, _, _Reason}, State) ->
- {stop, normal, State};
-handle_info({config_change, "couch_peruser", _, _, _}, State) ->
- handle_cast(update_config, State);
-handle_info(
- {
- config_change,
- "couch_httpd_auth",
- "authentication_db",
- _,
- _
- },
- State
-) ->
- handle_cast(update_config, State);
-handle_info({gen_event_EXIT, _Handler, _Reason}, State) ->
- erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
- {noreply, State};
-handle_info({'EXIT', _Pid, _Reason}, State) ->
- erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
- {noreply, State};
-handle_info(restart_config_listener, State) ->
- ok = subscribe_for_changes(),
- {noreply, State};
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- %% Everything should be linked or monitored, let nature
- %% take its course.
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch_peruser/src/couch_peruser_app.erl b/src/couch_peruser/src/couch_peruser_app.erl
deleted file mode 100644
index ab0e04444..000000000
--- a/src/couch_peruser/src/couch_peruser_app.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_peruser_app).
-
--behaviour(application).
-
--export([start/2, stop/1]).
-
-start(_Type, _StartArgs) ->
- couch_peruser_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/couch_peruser/src/couch_peruser_sup.erl b/src/couch_peruser/src/couch_peruser_sup.erl
deleted file mode 100644
index a9a789ce6..000000000
--- a/src/couch_peruser/src/couch_peruser_sup.erl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_peruser_sup).
-
--behaviour(supervisor).
-
--export([start_link/0, init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
- {ok, {{one_for_one, 5, 10}, [?CHILD(couch_peruser, worker)]}}.
diff --git a/src/couch_peruser/test/eunit/couch_peruser_test.erl b/src/couch_peruser/test/eunit/couch_peruser_test.erl
deleted file mode 100644
index 3ba344e9d..000000000
--- a/src/couch_peruser/test/eunit/couch_peruser_test.erl
+++ /dev/null
@@ -1,569 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_peruser_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(ADMIN_USERNAME, "admin").
--define(ADMIN_PASSWORD, "secret").
-
--define(WAIT_FOR_USER_DELETE_TIMEOUT, 1000).
-
-setup_all() ->
- TestCtx = test_util:start_couch([chttpd]),
- ok = application:start(couch_peruser),
- Hashed = couch_passwords:hash_admin_password(?ADMIN_PASSWORD),
- ok = config:set("admins", ?ADMIN_USERNAME, ?b2l(Hashed), _Persist = false),
- TestCtx.
-
-teardown_all(TestCtx) ->
- config:delete("admins", ?ADMIN_USERNAME),
- ok = application:stop(couch_peruser),
- test_util:stop_couch(TestCtx).
-
-setup() ->
- TestAuthDb = ?tempdb(),
- do_request(put, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- set_config("couch_httpd_auth", "authentication_db", ?b2l(TestAuthDb)),
- set_config("couch_peruser", "cluster_quiet_period", "0"),
- set_config("couch_peruser", "cluster_start_period", "0"),
- set_config("couch_peruser", "enable", "true"),
- set_config("cluster", "n", "1"),
- TestAuthDb.
-
-teardown(TestAuthDb) ->
- set_config("couch_peruser", "enable", "false"),
- set_config("couch_peruser", "delete_dbs", "false"),
- set_config("couch_httpd_auth", "authentication_db", "_users"),
- set_config("couch_peruser", "cluster_quiet_period", "60"),
- set_config("couch_peruser", "cluster_start_period", "5"),
- set_config("cluster", "n", "3"),
- do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- do_request(delete, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- lists:foreach(
- fun(DbName) ->
- case binary:part(DbName, 0, 7) of
- <<"userdb-">> -> delete_db(DbName);
- _ -> ok
- end
- end,
- all_dbs()
- ).
-
-set_config(Section, Key, Value) ->
- ok = config:set(Section, Key, Value, _Persist = false).
-
-delete_config(Section, Key) ->
- ok = config:delete(Section, Key, _Persist = false).
-
-do_request(Method, Url) ->
- Headers = [{basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}],
- {ok, _, _, _} = test_request:request(Method, Url, Headers).
-
-do_request(Method, Url, Body) ->
- Headers = [
- {basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}},
- {"Content-Type", "application/json"}
- ],
- {ok, _, _, _} = test_request:request(Method, Url, Headers, Body).
-
-do_anon_request(Method, Url, Body) ->
- Headers = [
- {"Content-Type", "application/json"}
- ],
- {ok, _, _, _} = test_request:request(Method, Url, Headers, Body).
-
-create_db(DbName) ->
- {ok, _, _, _} = do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)).
-
-delete_db(DbName) ->
- {ok, _, _, _} = do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)).
-
-create_user(AuthDb, Name) ->
- Body =
- "{\"name\":\"" ++ Name ++
- "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
- Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name
- ]),
- {ok, 201, _, _} = do_request(put, Url, Body).
-
-create_anon_user(AuthDb, Name) ->
- Body =
- "{\"name\":\"" ++ Name ++
- "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
- Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name
- ]),
- {ok, 201, _, _} = do_anon_request(put, Url, Body).
-
-delete_user(AuthDb, Name) ->
- Url = lists:concat([
- get_cluster_base_url(),
- "/",
- ?b2l(AuthDb),
- "/org.couchdb.user:",
- Name
- ]),
- {ok, 200, _, Body} = do_request(get, Url),
- {DocProps} = jiffy:decode(Body),
- Rev = proplists:get_value(<<"_rev">>, DocProps),
- {ok, 200, _, _} = do_request(delete, Url ++ "?rev=" ++ ?b2l(Rev)).
-
-get_security(DbName) ->
- Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(DbName), "/_security"
- ]),
- test_util:wait(fun() ->
- {ok, 200, _, Body} = do_request(get, Url),
- case jiffy:decode(Body) of
- {[]} -> wait;
- {SecurityProperties} -> SecurityProperties
- end
- end).
-
-set_security(DbName, SecurityProperties) ->
- Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(DbName), "/_security"
- ]),
- Body = jiffy:encode({SecurityProperties}),
- {ok, 200, _, _} = do_request(put, Url, Body).
-
-all_dbs() ->
- {ok, 200, _, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"),
- jiffy:decode(Body).
-
-all_dbs_with_errors() ->
- {Result, StatusCode, _Headers, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"),
- {Result, StatusCode, _Headers, jiffy:decode(Body)}.
-
-get_base_url() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
-get_cluster_base_url() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
-should_create_user_db_with_default(TestAuthDb) ->
- ?_test(begin
- create_user(TestAuthDb, "foo"),
- wait_for_db_create(<<"userdb-666f6f">>),
- {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
- {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
- ?assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
- ?assertEqual(1, couch_util:get_value(q, ClusterInfo))
- end).
-
-should_create_user_db_with_custom_prefix(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "database_prefix", "newuserdb-"),
- create_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"newuserdb-666f6f6f">>),
- delete_config("couch_peruser", "database_prefix"),
- ?assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs()))
- end).
-
-should_create_user_db_with_custom_special_prefix(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
- create_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
- delete_config("couch_peruser", "database_prefix"),
- ?assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs()))
- end).
-
-should_create_anon_user_db_with_default(TestAuthDb) ->
- ?_test(begin
- create_anon_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"userdb-666f6f6f">>),
- {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
- {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
- ?assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
- ?assertEqual(1, couch_util:get_value(q, ClusterInfo))
- end).
-
-should_create_anon_user_db_with_custom_prefix(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "database_prefix", "newuserdb-"),
- create_anon_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"newuserdb-666f6f6f">>),
- delete_config("couch_peruser", "database_prefix"),
- ?assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs()))
- end).
-
-should_create_anon_user_db_with_custom_special_prefix(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
- create_anon_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
- delete_config("couch_peruser", "database_prefix"),
- ?assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs()))
- end).
-
-should_create_user_db_with_q4(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "q", "4"),
- create_user(TestAuthDb, "foo"),
- wait_for_db_create(<<"userdb-666f6f">>),
- {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
- {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
- delete_config("couch_peruser", "q"),
- ?assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
- ?assertEqual(4, couch_util:get_value(q, ClusterInfo))
- end).
-
-should_create_anon_user_db_with_q4(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "q", "4"),
- create_anon_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"userdb-666f6f6f">>),
- {ok, TargetInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
- {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
- delete_config("couch_peruser", "q"),
- ?assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
- ?assertEqual(4, couch_util:get_value(q, ClusterInfo))
- end).
-
-should_not_delete_user_db(TestAuthDb) ->
- ?_test(begin
- User = "foo",
- UserDbName = <<"userdb-666f6f">>,
- create_user(TestAuthDb, User),
- wait_for_db_create(<<"userdb-666f6f">>),
- AfterCreate = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- AfterDelete = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate),
- ?assert(AfterDelete)
- end).
-
-should_delete_user_db(TestAuthDb) ->
- ?_test(begin
- User = "bar",
- UserDbName = <<"userdb-626172">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- AfterDelete = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate),
- ?assertNot(AfterDelete)
- end).
-
-should_delete_user_db_with_custom_prefix(TestAuthDb) ->
- ?_test(begin
- User = "bar",
- UserDbName = <<"newuserdb-626172">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- set_config("couch_peruser", "database_prefix", "newuserdb-"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- delete_config("couch_peruser", "database_prefix"),
- AfterDelete = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate),
- ?assertNot(AfterDelete)
- end).
-
-should_delete_user_db_with_custom_special_prefix(TestAuthDb) ->
- ?_test(begin
- User = "bar",
- UserDbName = <<"userdb_$()+--/626172">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- delete_config("couch_peruser", "database_prefix"),
- AfterDelete = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate),
- ?assertNot(AfterDelete)
- end).
-
-should_reflect_config_changes(TestAuthDb) ->
- {timeout, 10000,
- ?_test(begin
- User = "baz",
- UserDbName = <<"userdb-62617a">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate1 = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- wait_for_db_delete(UserDbName),
- AfterDelete1 = lists:member(UserDbName, all_dbs()),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate2 = lists:member(UserDbName, all_dbs()),
- set_config("couch_peruser", "delete_dbs", "false"),
- delete_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- AfterDelete2 = lists:member(UserDbName, all_dbs()),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- set_config("couch_peruser", "delete_dbs", "true"),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- AfterDelete3 = lists:member(UserDbName, all_dbs()),
- set_config("couch_peruser", "enable", "false"),
- create_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- AfterCreate3 = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate1),
- ?assertNot(AfterDelete1),
- ?assert(AfterCreate2),
- ?assert(AfterDelete2),
- ?assertNot(AfterDelete3),
- ?assertNot(AfterCreate3)
- end)}.
-
-should_add_user_to_db_admins(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- ?assertEqual(
- {[{<<"names">>, [<<"qux">>]}]},
- proplists:get_value(<<"admins">>, get_security(UserDbName))
- )
- end).
-
-should_add_user_to_db_members(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- ?assertEqual(
- {[{<<"names">>, [<<"qux">>]}]},
- proplists:get_value(<<"members">>, get_security(UserDbName))
- )
- end).
-
-should_not_remove_existing_db_admins(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- SecurityProperties = [
- {<<"admins">>, {[{<<"names">>, [<<"foo">>, <<"bar">>]}]}},
- {<<"members">>, {[{<<"names">>, [<<"baz">>, <<"pow">>]}]}}
- ],
- create_db(UserDbName),
- set_security(UserDbName, SecurityProperties),
- create_user(TestAuthDb, User),
- wait_for_security_create(<<"admins">>, User, UserDbName),
- {AdminProperties} = proplists:get_value(
- <<"admins">>,
- get_security(UserDbName)
- ),
- AdminNames = proplists:get_value(<<"names">>, AdminProperties),
- ?assert(lists:member(<<"foo">>, AdminNames)),
- ?assert(lists:member(<<"bar">>, AdminNames)),
- ?assert(lists:member(<<"qux">>, AdminNames))
- end).
-
-should_not_remove_existing_db_members(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- SecurityProperties = [
- {<<"admins">>, {[{<<"names">>, [<<"pow">>, <<"wow">>]}]}},
- {<<"members">>, {[{<<"names">>, [<<"pow">>, <<"wow">>]}]}}
- ],
- create_db(UserDbName),
- set_security(UserDbName, SecurityProperties),
- create_user(TestAuthDb, User),
- wait_for_security_create(<<"members">>, User, UserDbName),
- {MemberProperties} = proplists:get_value(
- <<"members">>,
- get_security(UserDbName)
- ),
- MemberNames = proplists:get_value(<<"names">>, MemberProperties),
- ?assert(lists:member(<<"pow">>, MemberNames)),
- ?assert(lists:member(<<"wow">>, MemberNames)),
- ?assert(lists:member(<<"qux">>, MemberNames))
- end).
-
-should_remove_user_from_db_admins(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- SecurityProperties = [
- {<<"admins">>, {[{<<"names">>, [<<"foo">>, <<"bar">>]}]}},
- {<<"members">>, {[{<<"names">>, [<<"baz">>, <<"pow">>]}]}}
- ],
- create_db(UserDbName),
- set_security(UserDbName, SecurityProperties),
- create_user(TestAuthDb, User),
- wait_for_security_create(<<"admins">>, User, UserDbName),
- {AdminProperties} = proplists:get_value(
- <<"admins">>,
- get_security(UserDbName)
- ),
- AdminNames = proplists:get_value(<<"names">>, AdminProperties),
- FooBefore = lists:member(<<"foo">>, AdminNames),
- BarBefore = lists:member(<<"bar">>, AdminNames),
- QuxBefore = lists:member(<<"qux">>, AdminNames),
- delete_user(TestAuthDb, User),
- wait_for_security_delete(<<"admins">>, User, UserDbName),
- {NewAdminProperties} = proplists:get_value(
- <<"admins">>,
- get_security(UserDbName)
- ),
- NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties),
- FooAfter = lists:member(<<"foo">>, NewAdminNames),
- BarAfter = lists:member(<<"bar">>, NewAdminNames),
- QuxAfter = lists:member(<<"qux">>, NewAdminNames),
- ?assert(FooBefore),
- ?assert(BarBefore),
- ?assert(QuxBefore),
- ?assert(FooAfter),
- ?assert(BarAfter),
- ?assertNot(QuxAfter)
- end).
-
-should_remove_user_from_db_members(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- SecurityProperties = [
- {<<"admins">>, {[{<<"names">>, [<<"pow">>, <<"wow">>]}]}},
- {<<"members">>, {[{<<"names">>, [<<"pow">>, <<"wow">>]}]}}
- ],
- create_db(UserDbName),
- set_security(UserDbName, SecurityProperties),
- create_user(TestAuthDb, User),
- wait_for_security_create(<<"members">>, User, UserDbName),
- {MemberProperties} = proplists:get_value(
- <<"members">>,
- get_security(UserDbName)
- ),
- MemberNames = proplists:get_value(<<"names">>, MemberProperties),
- PowBefore = lists:member(<<"pow">>, MemberNames),
- WowBefore = lists:member(<<"wow">>, MemberNames),
- QuxBefore = lists:member(<<"qux">>, MemberNames),
- delete_user(TestAuthDb, User),
- wait_for_security_delete(<<"members">>, User, UserDbName),
- {NewMemberProperties} = proplists:get_value(
- <<"members">>,
- get_security(UserDbName)
- ),
- NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties),
- PowAfter = lists:member(<<"pow">>, NewMemberNames),
- WowAfter = lists:member(<<"wow">>, NewMemberNames),
- QuxAfter = lists:member(<<"qux">>, NewMemberNames),
- ?assert(PowBefore),
- ?assert(WowBefore),
- ?assert(QuxBefore),
- ?assert(PowAfter),
- ?assert(WowAfter),
- ?assertNot(QuxAfter)
- end).
-
-wait_for_db_create(UserDbName) ->
- test_util:wait(fun() ->
- case all_dbs_with_errors() of
- {error, _, _, _} ->
- wait;
- {ok, _, _, AllDbs} ->
- case lists:member(UserDbName, AllDbs) of
- true -> true;
- false -> wait
- end
- end
- end).
-
-wait_for_db_delete(UserDbName) ->
- test_util:wait(fun() ->
- case all_dbs_with_errors() of
- {ok, 500, _, _} ->
- wait;
- {ok, _, _, AllDbs} ->
- case not lists:member(UserDbName, AllDbs) of
- true -> true;
- false -> wait
- end
- end
- end).
-
-wait_for_security_create(Type, User0, UserDbName) ->
- User = ?l2b(User0),
- test_util:wait(fun() ->
- {Props} = proplists:get_value(Type, get_security(UserDbName)),
- Names = proplists:get_value(<<"names">>, Props),
- case lists:member(User, Names) of
- true -> true;
- false -> wait
- end
- end).
-
-wait_for_security_delete(Type, User0, UserDbName) ->
- User = ?l2b(User0),
- test_util:wait(fun() ->
- {Props} = proplists:get_value(Type, get_security(UserDbName)),
- Names = proplists:get_value(<<"names">>, Props),
- case not lists:member(User, Names) of
- true -> true;
- false -> wait
- end
- end).
-
-couch_peruser_test_() ->
- {
- "couch_peruser test",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_create_anon_user_db_with_default/1,
- fun should_create_anon_user_db_with_custom_prefix/1,
- fun should_create_anon_user_db_with_custom_special_prefix/1,
- fun should_create_user_db_with_default/1,
- fun should_create_user_db_with_custom_prefix/1,
- fun should_create_user_db_with_custom_special_prefix/1,
- fun should_create_user_db_with_q4/1,
- fun should_create_anon_user_db_with_q4/1,
- fun should_not_delete_user_db/1,
- fun should_delete_user_db/1,
- fun should_delete_user_db_with_custom_prefix/1,
- fun should_delete_user_db_with_custom_special_prefix/1,
- fun should_reflect_config_changes/1,
- fun should_add_user_to_db_admins/1,
- fun should_add_user_to_db_members/1,
- fun should_not_remove_existing_db_admins/1,
- fun should_not_remove_existing_db_members/1,
- fun should_remove_user_from_db_admins/1,
- fun should_remove_user_from_db_members/1
- ]
- }
- }
- }.
diff --git a/src/couch_plugins/LICENSE b/src/couch_plugins/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_plugins/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_plugins/Makefile.am b/src/couch_plugins/Makefile.am
deleted file mode 100644
index 37cd9d5c1..000000000
--- a/src/couch_plugins/Makefile.am
+++ /dev/null
@@ -1,40 +0,0 @@
-## Licensed under the Apache License, Version 2.0 (the "License"); you may not
-## use this file except in compliance with the License. You may obtain a copy of
-## the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-## License for the specific language governing permissions and limitations under
-## the License.
-
-couch_pluginslibdir = $(localerlanglibdir)/couch_plugins-0.1
-couch_pluginsebindir = $(couch_pluginslibdir)/ebin
-
-couch_pluginsebin_DATA = $(compiled_files)
-
-
-source_files = \
- src/couch_plugins.app.src \
- src/couch_plugins.erl \
- src/couch_plugins_httpd.erl
-
-compiled_files = \
- ebin/couch_plugins.app \
- ebin/couch_plugins.beam \
- ebin/couch_plugins_httpd.beam
-
-EXTRA_DIST = $(source_files) README.md
-CLEANFILES = $(compiled_files)
-
-ebin/%.app: src/%.app.src
- @mkdir -p ebin/
- sed -e "s|%version%|@version@|g" \
- < $< > $@
-
-ebin/%.beam: src/%.erl $(include_files)
- @mkdir -p ebin/
- $(ERLC) -Wall -I$(top_srcdir)/src -I$(top_srcdir)/src/couchdb \
- -o ebin/ $(ERLC_FLAGS) ${TEST} $<;
diff --git a/src/couch_plugins/README.md b/src/couch_plugins/README.md
deleted file mode 100644
index b00a080c1..000000000
--- a/src/couch_plugins/README.md
+++ /dev/null
@@ -1,159 +0,0 @@
-Heya,
-
-I couldn’t help myself thinking about plugin stuff and ended up
-whipping up a proof of concept.
-
-Here’s a <1 minute demo video:
-
- https://dl.dropboxusercontent.com/u/82149/couchdb-plugins-demo.mov
-
-Alternative encoding:
-
- https://dl.dropboxusercontent.com/u/82149/couchdb-plugins-demo.m4v
-
-
-In my head the whole plugin idea is a very wide area, but I was so
-intrigued by the idea of getting something running with a click on a
-button in Futon. So I looked for a minimally viable plugin system.
-
-
-## Design principles
-
-It took me a day to put this all together and this was only possible
-because I took a lot of shortcuts. I believe they are all viable for a
-first iteration of a plugins system:
-
-1. Install with one click on a button in Futon (or HTTP call)
-2. Only pure Erlang plugins are allowed.
-3. The plugin author must provide a binary package for each Erlang (and,
- later, each CouchDB version).
-4. Complete trust-based system. You trust me to not do any nasty things
- when you click on the install button. No crypto, no nothing. Only
- people who can commit to Futon can release new versions of plugins.
-5. Minimal user-friendliness: won’t install plugins that don’t match
-   the current Erlang version, gives semi-sensible error messages
-   (wrapped in an HTTP 500 response :)
-6. Require a pretty strict format for binary releases.
-
-
-## Roadmap
-
-Here’s a list of things this first iteration does and doesn’t do:
-
-- Pure Erlang plugins only. No C-dependencies, no JavaScript, no nothing.
-- No C-dependencies.
-- Install a plugin via Futon (or HTTP call). Admin only.
-- A hardcoded list of plugins in Futon.
-- Loads a pre-packaged, pre-compiled .tar.gz file from a URL.
-- Only installs if Erlang version matches.
-- No security checking of binaries.
-- No identity checking of binaries.
-- Register installed plugins in the config system.
-- Make sure plugins start with the next restart of CouchDB.
-- Uninstall a plugin via Futon (or HTTP call). Admin only.
-- Show when a particular plugin is installed.
-- Only installs if CouchDB version matches.
-- Serve static web assets (for Futon/Fauxton) from `/_plugins/<name>/`.
-
-I hope you agree we can ship this with a few warnings so people can get
-the hang of it.
-
-
-A roadmap, progress and issues can be found here:
-
-https://issues.apache.org/jira/issues/?jql=component+%3D+Plugins+AND+project+%3D+COUCHDB+AND+resolution+%3D+Unresolved+ORDER+BY+priority+DESC
-
-
-
-## How it works
-
-This plugin system lives in `src/couch_plugins` and is a tiny CouchDB
-module.
-
-It exposes one new API endpoint `/_plugins` that an admin user can
-POST to.
-
-The additional Futon page lives at `/_utils/plugins.html`; it is
-hardcoded.
-
-Futon (or you) post an object to `/_plugins` with four properties:
-
- {
- "name": "geocouch", // name of the plugin, must be unique
- "url": "http://people.apache.org/~jan", // “base URL” for plugin releases (see below)
- "version": "couchdb1.2.x_v0.3.0-11-g4ea0bea", // whatever version internal to the plugin
- "checksums": {
- "R15B03": "ZetgdHj2bY2w37buulWVf3USOZs=" // base64’d sha hash over the binary
- }
- }
-
-`couch_plugins` then attempts to download a .tar.gz from this
-location:
-
- http://people.apache.org/~jan/geocouch-couchdb1.2.x_v0.3.0-12-g4ea0bea-R15B03.tar.gz
-
-It should be obvious how the URL is constructed from the POST data.
-(This URL is live, feel free to play around with this tarball).
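-
-As a rough sketch (the helper name here is illustrative, not an actual
-`couch_plugins` function), the filename is the plugin name, the version and
-the running Erlang/OTP release joined with dashes:
-
-    %% e.g. plugin_url("geocouch", "http://people.apache.org/~jan",
-    %%                 "couchdb1.2.x_v0.3.0-12-g4ea0bea") yields the
-    %% .tar.gz URL shown above when running on R15B03.
-    plugin_url(Name, BaseUrl, Version) ->
-        OTPRelease = erlang:system_info(otp_release),
-        BaseUrl ++ "/" ++ string:join([Name, Version, OTPRelease], "-") ++ ".tar.gz".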
-
-Next it calculates the sha hash for the downloaded .tar.gz file and
-matches it against the correct version in the `checksums` parameter.
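-
-A minimal sketch of that check, assuming the base64’d SHA-1 scheme the module
-later in this commit uses (the function name is illustrative):
-
-    %% Returns ok when the base64'd SHA-1 of the downloaded file matches
-    %% the checksum from the POST body, {error, checksum_mismatch} otherwise.
-    check_sha(Filename, Expected) ->
-        {ok, Data} = file:read_file(Filename),
-        case binary_to_list(base64:encode(crypto:hash(sha, Data))) of
-            Expected -> ok;
-            _Other -> {error, checksum_mismatch}
-        end.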
-
-If that succeeds, we unpack the .tar.gz file (currently in `/tmp`,
-need to find a better place for this), add the extracted directory
-to the Erlang code path
-(`code:add_path("/tmp/couchdb_plugins/geocouch-couchdb1.2.x_v0.3.0-12-g4ea0bea-R15B03/ebin")`)
-and load the included application (`application:load(geocouch)`).
-
-Then it looks into the `./priv/default.d` directory that lives next to
-`ebin/` in the plugin directory for configuration `.ini` files and loads them.
-On next startup these configuration files are loaded after global defaults,
-and before any local configuration.
-
-If that all goes to plan, we report success back to the HTTP caller.
-
-That’s it! :)
-
-It’s deceptively simple, probably does a few things very wrong and
-leaves a few things open (see above).
-
-One open question I’d like an answer for is finding a good location to
-unpack & install the plugin files that isn’t `tmp`. If the answer is
-different for a pre-BigCouch/rcouch-merge and post-BigCouch/rcouch-
-merge world, I’d love to know :)
-
-
-## Code
-
-The main branch for this is 1867-feature-plugins:
-
- ASF: https://git-wip-us.apache.org/repos/asf?p=couchdb.git;a=log;h=refs/heads/1867-feature-plugins
- GitHub: https://github.com/janl/couchdb/compare/apache:master...1867-feature-plugins
-
-I created a branch on GeoCouch that adds a few lines to its `Makefile`
-that shows how a binary package is built:
-
- https://github.com/janl/geocouch/compare/couchbase:couchdb1.3.x...couchdb1.3.x-plugins
-
-
-## Build
-
-Build CouchDB as usual:
-
- ./bootstrap
- ./configure
- make
- make dev
- ./utils/run
-
-* * *
-
-I hope you like this :) Please comment and improve heavily!
-
-Let me know if you have any questions :)
-
-If you have any criticism, please phrase it in a way that we can use
-to improve this, thanks!
-
-Best,
-Jan
---
diff --git a/src/couch_plugins/src/couch_plugins.app.src b/src/couch_plugins/src/couch_plugins.app.src
deleted file mode 100644
index 07d6b14d6..000000000
--- a/src/couch_plugins/src/couch_plugins.app.src
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-{application, couch_plugins,
- [
- {description, "A CouchDB Plugin Installer"},
- {vsn, git},
- {registered, []},
- {applications, [
- kernel,
- stdlib
- ]},
- {env, []}
- ]}.
diff --git a/src/couch_plugins/src/couch_plugins.erl b/src/couch_plugins/src/couch_plugins.erl
deleted file mode 100644
index 97834134b..000000000
--- a/src/couch_plugins/src/couch_plugins.erl
+++ /dev/null
@@ -1,322 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_plugins).
--include_lib("couch/include/couch_db.hrl").
--export([install/1, uninstall/1]).
-
-% couch_plugins:install({"geocouch", "http://127.0.0.1:8000", "1.0.0", [{"R15B03", "+XOJP6GSzmuO2qKdnjO+mWckXVs="}]}).
-% couch_plugins:install({"geocouch", "http://people.apache.org/~jan/", "couchdb1.2.x_v0.3.0-11-gd83ba22", [{"R15B03", "ZetgdHj2bY2w37buulWVf3USOZs="}]}).
-
-plugin_dir() ->
- couch_config:get("couchdb", "plugin_dir").
-
-log(T) ->
- couch_log:debug("[couch_plugins] ~p ~n", [T]).
-
-%% "geocouch", "http://localhost:8000/dist", "1.0.0"
--type plugin() :: {string(), string(), string(), list()}.
--spec install(plugin()) -> ok | {error, string()}.
-install({Name, _BaseUrl, Version, Checksums} = Plugin) ->
- log("Installing " ++ Name),
-
- {ok, LocalFilename} = download(Plugin),
- log("downloaded to " ++ LocalFilename),
-
- ok = verify_checksum(LocalFilename, Checksums),
- log("checksum verified"),
-
- ok = untargz(LocalFilename),
- log("extraction done"),
-
- ok = add_code_path(Name, Version),
- log("added code path"),
-
- ok = register_plugin(Name, Version),
- log("registered plugin"),
-
- load_config(Name, Version),
- log("loaded config"),
-
- ok.
-
-% Idempotent uninstall: if you uninstall a non-existent
-% plugin, you get an `ok`.
--spec uninstall(plugin()) -> ok | {error, string()}.
-uninstall({Name, _BaseUrl, Version, _Checksums}) ->
- % unload config
- ok = unload_config(Name, Version),
- log("config unloaded"),
-
- % delete files
- ok = delete_files(Name, Version),
- log("files deleted"),
-
- % delete code path
- ok = del_code_path(Name, Version),
- log("deleted code path"),
-
- % unregister plugin
- ok = unregister_plugin(Name),
- log("unregistered plugin"),
-
- % done
- ok.
-
-%% * * *
-
-%% Plugin Registration
-%% On install:
-%% - add plugins/name = version to config
-%% On uninstall:
-%% - remove plugins/name from config
-
--spec register_plugin(string(), string()) -> ok.
-register_plugin(Name, Version) ->
- couch_config:set("plugins", Name, Version).
-
--spec unregister_plugin(string()) -> ok.
-unregister_plugin(Name) ->
- couch_config:delete("plugins", Name).
-
-%% * * *
-
-%% Load Config
-%% Parses <plugindir>/priv/default.d/<pluginname>.ini and applies
-%% the contents to the config system, or removes them on uninstall
-
--spec load_config(string(), string()) -> ok.
-load_config(Name, Version) ->
- loop_config(Name, Version, fun set_config/1).
-
--spec unload_config(string(), string()) -> ok.
-unload_config(Name, Version) ->
- loop_config(Name, Version, fun delete_config/1).
-
--spec loop_config(string(), string(), function()) -> ok.
-loop_config(Name, Version, Fun) ->
- lists:foreach(
- fun(File) -> load_config_file(File, Fun) end,
- filelib:wildcard(file_names(Name, Version))
- ).
-
--spec load_config_file(string(), function()) -> ok.
-load_config_file(File, Fun) ->
- {ok, Config} = couch_config:parse_ini_file(File),
- lists:foreach(Fun, Config).
-
--spec set_config({{string(), string()}, string()}) -> ok.
-set_config({{Section, Key}, Value}) ->
- ok = couch_config:set(Section, Key, Value).
-
--spec delete_config({{string(), string()}, _Value}) -> ok.
-delete_config({{Section, Key}, _Value}) ->
- ok = couch_config:delete(Section, Key).
-
--spec file_names(string(), string()) -> string().
-file_names(Name, Version) ->
- filename:join(
- [
- plugin_dir(),
- get_file_slug(Name, Version),
- "priv",
- "default.d",
- "*.ini"
- ]
- ).
-
-%% * * *
-
-%% Code Path Management
-%% The Erlang code path is where the Erlang runtime looks for `.beam`
-%% files to load on, say, `application:load()`. Since plugin directories
-%% are created on demand and named after CouchDB and Erlang versions,
-%% we manage the Erlang code path semi-automatically here.
-
--spec add_code_path(string(), string()) -> ok | {error, bad_directory}.
-add_code_path(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version) ++ "/ebin",
- case code:add_path(PluginPath) of
- true ->
- ok;
- Else ->
- couch_log:error("Failed to add PluginPath: '~s'", [PluginPath]),
- Else
- end.
-
--spec del_code_path(string(), string()) -> ok | {error, atom()}.
-del_code_path(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version) ++ "/ebin",
- case code:del_path(PluginPath) of
- true ->
- ok;
- _Else ->
- couch_log:debug(
- "Failed to delete PluginPath: '~s', ignoring",
- [PluginPath]
- ),
- ok
- end.
-
-%% * * *
-
--spec untargz(string()) -> {ok, string()} | {error, string()}.
-untargz(Filename) ->
- % read .gz file
- {ok, GzData} = file:read_file(Filename),
- % gunzip
- log("unzipped"),
- TarData = zlib:gunzip(GzData),
- ok = filelib:ensure_dir(plugin_dir()),
- % untar
- erl_tar:extract({binary, TarData}, [{cwd, plugin_dir()}, keep_old_files]).
-
--spec delete_files(string(), string()) -> ok | {error, atom()}.
-delete_files(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version),
- mochitemp:rmtempdir(PluginPath).
-
-% downloads a plugin .tar.gz into a local plugins directory
--spec download(plugin()) -> {ok, string()} | {error, term()}.
-download({Name, _BaseUrl, Version, _Checksums} = Plugin) ->
- TargetFile = filename:join(mochitemp:gettempdir(), get_filename(Name, Version)),
- case file_exists(TargetFile) of
- %% wipe and redownload
- true -> file:delete(TargetFile);
- _Else -> ok
- end,
- Url = get_url(Plugin),
- HTTPOptions = [
- % 30 seconds
- {connect_timeout, 30 * 1000},
- % 30 seconds
- {timeout, 30 * 1000}
- ],
- % todo: windows
- Options = [
- % /tmp/something
- {stream, TargetFile},
- {body_format, binary},
- {full_result, false}
- ],
- % todo: reduce to just httpc:request()
- case httpc:request(get, {Url, []}, HTTPOptions, Options) of
- {ok, _Result} ->
- log("downloading " ++ Url),
- {ok, TargetFile};
- Error ->
- Error
- end.
-
--spec verify_checksum(string(), list()) -> ok | {error, string()}.
-verify_checksum(Filename, Checksums) ->
- CouchDBVersion = couchdb_version(),
- case proplists:get_value(CouchDBVersion, Checksums) of
- undefined ->
- couch_log:error(
- "[couch_plugins] Can't find checksum for CouchDB Version"
- " '~s'",
- [CouchDBVersion]
- ),
- {error, no_couchdb_checksum};
- OTPChecksum ->
- OTPRelease = erlang:system_info(otp_release),
- case proplists:get_value(OTPRelease, OTPChecksum) of
- undefined ->
- couch_log:error(
- "[couch_plugins] Can't find checksum for Erlang Version"
- " '~s'",
- [OTPRelease]
- ),
- {error, no_erlang_checksum};
- Checksum ->
- do_verify_checksum(Filename, Checksum)
- end
- end.
-
--spec do_verify_checksum(string(), string()) -> ok | {error, string()}.
-do_verify_checksum(Filename, Checksum) ->
- couch_log:debug("Checking Filename: ~s", [Filename]),
- case file:read_file(Filename) of
- {ok, Data} ->
- ComputedChecksum = binary_to_list(base64:encode(crypto:hash(sha, Data))),
- case ComputedChecksum of
- Checksum ->
- ok;
- _Else ->
- couch_log:error(
- "Checksum mismatch. Wanted: '~p'. Got '~p'",
- [Checksum, ComputedChecksum]
- ),
- {error, checksum_mismatch}
- end;
- Error ->
- Error
- end.
-
-%% utils
-
--spec get_url(plugin()) -> string().
-get_url({Name, BaseUrl, Version, _Checksums}) ->
- BaseUrl ++ "/" ++ get_filename(Name, Version).
-
--spec get_filename(string(), string()) -> string().
-get_filename(Name, Version) ->
- get_file_slug(Name, Version) ++ ".tar.gz".
-
--spec get_file_slug(string(), string()) -> string().
-get_file_slug(Name, Version) ->
- % OtpRelease does not include patch levels like the -1 in R15B03-1
- OTPRelease = erlang:system_info(otp_release),
- CouchDBVersion = couchdb_version(),
- string:join([Name, Version, OTPRelease, CouchDBVersion], "-").
-
--spec file_exists(string()) -> boolean().
-file_exists(Filename) ->
- does_file_exist(file:read_file_info(Filename)).
--spec does_file_exist(term()) -> boolean().
-does_file_exist({error, enoent}) -> false;
-does_file_exist(_Else) -> true.
-
-couchdb_version() ->
- couch_server:get_version(short).
-
-% installing a plugin:
-% - POST /_plugins -d {plugin-def}
-% - get plugin definition
-% - get download URL (matching erlang version)
-% - download archive
-% - match checksum
-% - untar-gz archive into a plugins dir
-% - code:add_path(“geocouch-{geocouch_version}-{erlang_version}/ebin”)
-% - [cp geocouch-{geocouch_version}-{erlang_version}/etc/ ]
-% - application:start(geocouch)
-% - register plugin in plugin registry
-
-% Plugin registry impl:
-% - _plugins database
-% - pro: known db ops
-% - con: no need for replication, needs to be system db etc.
-% - _config/plugins namespace in config
-% - pro: lightweight, fits rarely-changing nature better
-% - con: potentially not flexible enough
-
-
-
-% /geocouch
-% /geocouch/dist/
-% /geocouch/dist/geocouch-{geocouch_version}-{erlang_version}.tar.gz
-
-% tar.gz includes:
-% geocouch-{geocouch_version}-{erlang_version}/
-% geocouch-{geocouch_version}-{erlang_version}/ebin
-% [geocouch-{geocouch_version}-{erlang_version}/config/config.erlt]
-% [geocouch-{geocouch_version}-{erlang_version}/share/]
diff --git a/src/couch_plugins/src/couch_plugins_httpd.erl b/src/couch_plugins/src/couch_plugins_httpd.erl
deleted file mode 100644
index 784f040fc..000000000
--- a/src/couch_plugins/src/couch_plugins_httpd.erl
+++ /dev/null
@@ -1,69 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_plugins_httpd).
-
--export([handle_req/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-handle_req(#httpd{method = 'POST'} = Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- couch_httpd:validate_ctype(Req, "application/json"),
-
- {PluginSpec} = couch_httpd:json_body_obj(Req),
- Url = binary_to_list(couch_util:get_value(<<"url">>, PluginSpec)),
- Name = binary_to_list(couch_util:get_value(<<"name">>, PluginSpec)),
- Version = binary_to_list(couch_util:get_value(<<"version">>, PluginSpec)),
- Delete = couch_util:get_value(<<"delete">>, PluginSpec),
- {Checksums0} = couch_util:get_value(<<"checksums">>, PluginSpec),
- Checksums = parse_checksums(Checksums0),
-
- Plugin = {Name, Url, Version, Checksums},
- case do_install(Delete, Plugin) of
- ok ->
- couch_httpd:send_json(Req, 202, {[{ok, true}]});
- Error ->
- couch_log:debug("Plugin Spec: ~p", [PluginSpec]),
- couch_httpd:send_error(Req, {bad_request, Error})
- end;
-% handles /_plugins/<pluginname>/<file>
-% serves <plugin_dir>/<pluginname>-<pluginversion>-<otpversion>-<couchdbversion>/<file>
-handle_req(#httpd{method = 'GET', path_parts = [_, Name0 | Path0]} = Req) ->
- Name = ?b2l(Name0),
- Path = lists:map(fun binary_to_list/1, Path0),
- OTPRelease = erlang:system_info(otp_release),
- PluginVersion = couch_config:get("plugins", Name),
- CouchDBVersion = couch_server:get_version(short),
- FullName = string:join([Name, PluginVersion, OTPRelease, CouchDBVersion], "-"),
- FullPath = filename:join([FullName, "priv", "www", string:join(Path, "/")]) ++ "/",
- couch_log:debug("Serving ~p from ~p", [FullPath, plugin_dir()]),
- couch_httpd:serve_file(Req, FullPath, plugin_dir());
-handle_req(Req) ->
- couch_httpd:send_method_not_allowed(Req, "POST").
-
-plugin_dir() ->
- couch_config:get("couchdb", "plugin_dir").
-do_install(false, Plugin) ->
- couch_plugins:install(Plugin);
-do_install(true, Plugin) ->
- couch_plugins:uninstall(Plugin).
-
-parse_checksums(Checksums) ->
- lists:map(
- fun
- ({K, {V}}) ->
- {binary_to_list(K), parse_checksums(V)};
- ({K, V}) ->
- {binary_to_list(K), binary_to_list(V)}
- end,
- Checksums
- ).
diff --git a/src/couch_prometheus/src/couch_prometheus.app.src b/src/couch_prometheus/src/couch_prometheus.app.src
deleted file mode 100644
index bf49e59d2..000000000
--- a/src/couch_prometheus/src/couch_prometheus.app.src
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_prometheus, [
- {description, "Aggregated metrics info for Prometheus consumption"},
- {vsn, git},
- {registered, []},
- {applications, [kernel, stdlib, folsom, couch_stats, couch_log]},
- {mod, {couch_prometheus_app, []}},
- {env, []}
-]}.
diff --git a/src/couch_prometheus/src/couch_prometheus.hrl b/src/couch_prometheus/src/couch_prometheus.hrl
deleted file mode 100644
index 0970f4469..000000000
--- a/src/couch_prometheus/src/couch_prometheus.hrl
+++ /dev/null
@@ -1,15 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(REFRESH_INTERVAL, 5).
--define(PROMETHEUS_VERSION, "2.0").
-
diff --git a/src/couch_prometheus/src/couch_prometheus_app.erl b/src/couch_prometheus/src/couch_prometheus_app.erl
deleted file mode 100644
index 232c16a8a..000000000
--- a/src/couch_prometheus/src/couch_prometheus_app.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_prometheus_app).
-
--behaviour(application).
-
--export([start/2, stop/1]).
-
-start(_StartType, _StartArgs) ->
- couch_prometheus_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/couch_prometheus/src/couch_prometheus_http.erl b/src/couch_prometheus/src/couch_prometheus_http.erl
deleted file mode 100644
index b3df1ea4b..000000000
--- a/src/couch_prometheus/src/couch_prometheus_http.erl
+++ /dev/null
@@ -1,112 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_prometheus_http).
-
--compile(tuple_calls).
-
--export([
- start_link/0,
- handle_request/1
-]).
-
--include("couch_prometheus.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-start_link() ->
- IP =
- case config:get("prometheus", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
- Port = config:get("prometheus", "port"),
- ok = couch_httpd:validate_bind_address(IP),
-
- Options = [
- {name, ?MODULE},
- {loop, fun ?MODULE:handle_request/1},
- {ip, IP},
- {port, Port}
- ],
- case mochiweb_http:start(Options) of
- {ok, Pid} ->
- {ok, Pid};
- {error, Reason} ->
- io:format("Failure to start Mochiweb: ~s~n", [Reason]),
- {error, Reason}
- end.
-
-handle_request(MochiReq) ->
- RawUri = MochiReq:get(raw_path),
- {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
- PathParts = string:tokens(Path, "/"),
- try
- case PathParts of
- ["_node", Node, "_prometheus"] ->
- send_prometheus(MochiReq, Node);
- _ ->
- send_error(MochiReq, 404, <<"not_found">>, <<>>)
- end
- catch
- T:R ->
- Body = list_to_binary(io_lib:format("~p:~p", [T, R])),
- send_error(MochiReq, 500, <<"server_error">>, Body)
- end.
-
-send_prometheus(MochiReq, Node) ->
- Type = "text/plain; version=" ++ ?PROMETHEUS_VERSION,
- Headers =
- couch_httpd:server_header() ++
- [
- {<<"Content-Type">>, ?l2b(Type)}
- ],
- Body = call_node(Node, couch_prometheus_server, scrape, []),
- send_resp(MochiReq, 200, Headers, Body).
-
-send_resp(MochiReq, Status, ExtraHeaders, Body) ->
- Headers = couch_httpd:server_header() ++ ExtraHeaders,
- MochiReq:respond({Status, Headers, Body}).
-
-send_error(MochiReq, Code, Error, Reason) ->
- Headers =
- couch_httpd:server_header() ++
- [
- {<<"Content-Type">>, <<"application/json">>}
- ],
- JsonError =
- {[
- {<<"error">>, Error},
- {<<"reason">>, Reason}
- ]},
- Body = ?JSON_ENCODE(JsonError),
- MochiReq:respond({Code, Headers, Body}).
-
-call_node("_local", Mod, Fun, Args) ->
- call_node(node(), Mod, Fun, Args);
-call_node(Node0, Mod, Fun, Args) when is_list(Node0) ->
- Node1 =
- try
- list_to_existing_atom(Node0)
- catch
- error:badarg ->
- NoNode = list_to_binary(Node0),
- throw({not_found, <<"no such node: ", NoNode/binary>>})
- end,
- call_node(Node1, Mod, Fun, Args);
-call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
- case rpc:call(Node, Mod, Fun, Args) of
- {badrpc, nodedown} ->
- Reason = list_to_binary(io_lib:format("~s is down", [Node])),
- throw({error, {nodedown, Reason}});
- Else ->
- Else
- end.
diff --git a/src/couch_prometheus/src/couch_prometheus_server.erl b/src/couch_prometheus/src/couch_prometheus_server.erl
deleted file mode 100644
index 701483a38..000000000
--- a/src/couch_prometheus/src/couch_prometheus_server.erl
+++ /dev/null
@@ -1,189 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_prometheus_server).
-
--behaviour(gen_server).
-
--import(couch_prometheus_util, [
- couch_to_prom/3,
- to_prom/3,
- to_prom_summary/2
-]).
-
--export([
- scrape/0,
- version/0
-]).
-
--export([
- start_link/0,
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- terminate/2
-]).
-
--include("couch_prometheus.hrl").
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
--record(st, {
- metrics,
- refresh
-}).
-
-init([]) ->
- Metrics = refresh_metrics(),
- RT = update_refresh_timer(),
- {ok, #st{metrics = Metrics, refresh = RT}}.
-
-scrape() ->
- {ok, Metrics} = gen_server:call(?MODULE, scrape),
- Metrics.
-
-version() ->
- ?PROMETHEUS_VERSION.
-
-handle_call(scrape, _from, #st{metrics = Metrics} = State) ->
- {reply, {ok, Metrics}, State};
-handle_call(refresh, _from, #st{refresh = OldRT} = State) ->
- timer:cancel(OldRT),
- Metrics = refresh_metrics(),
- RT = update_refresh_timer(),
- {reply, ok, State#st{metrics = Metrics, refresh = RT}};
-handle_call(Msg, _From, State) ->
- {stop, {unknown_call, Msg}, error, State}.
-
-handle_cast(Msg, State) ->
- {stop, {unknown_cast, Msg}, State}.
-
-handle_info(refresh, State) ->
- Metrics = refresh_metrics(),
- RT = update_refresh_timer(),
- {noreply, State#st{metrics = Metrics, refresh = RT}};
-handle_info(Msg, State) ->
- {stop, {unknown_info, Msg}, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-refresh_metrics() ->
- CouchDB = get_couchdb_stats(),
- System = couch_stats_httpd:to_ejson(get_system_stats()),
- couch_prometheus_util:to_bin(
- lists:map(
- fun(Line) ->
- io_lib:format("~s~n", [Line])
- end,
- CouchDB ++ System
- )
- ).
-
-get_couchdb_stats() ->
- Stats = lists:sort(couch_stats:fetch()),
- lists:flatmap(
- fun({Path, Info}) ->
- couch_to_prom(Path, Info, Stats)
- end,
- Stats
- ).
-
-get_system_stats() ->
-    lists:flatten([
-        get_uptime_stat(),
-        get_vm_stats(),
-        get_io_stats(),
-        get_message_queue_stats(),
-        get_run_queue_stats(),
-        get_ets_stats()
-    ]).
-
-get_uptime_stat() ->
- to_prom(uptime_seconds, counter, couch_app:uptime() div 1000).
-
-get_vm_stats() ->
- MemLabels = lists:map(
- fun({Type, Value}) ->
- {[{memory_type, Type}], Value}
- end,
- erlang:memory()
- ),
- {NumGCs, WordsReclaimed, _} = erlang:statistics(garbage_collection),
- CtxSwitches = element(1, erlang:statistics(context_switches)),
- Reds = element(1, erlang:statistics(reductions)),
- ProcCount = erlang:system_info(process_count),
- ProcLimit = erlang:system_info(process_limit),
- [
- to_prom(erlang_memory_bytes, gauge, MemLabels),
- to_prom(erlang_gc_collections_total, counter, NumGCs),
- to_prom(erlang_gc_words_reclaimed_total, counter, WordsReclaimed),
- to_prom(erlang_context_switches_total, counter, CtxSwitches),
- to_prom(erlang_reductions_total, counter, Reds),
- to_prom(erlang_processes, gauge, ProcCount),
- to_prom(erlang_process_limit, gauge, ProcLimit)
- ].
-
-get_io_stats() ->
- {{input, In}, {output, Out}} = erlang:statistics(io),
- [
- to_prom(erlang_io_recv_bytes_total, counter, In),
- to_prom(erlang_io_sent_bytes_total, counter, Out)
- ].
-
-get_message_queue_stats() ->
- Queues = lists:map(
- fun(Name) ->
- case process_info(whereis(Name), message_queue_len) of
- {message_queue_len, N} ->
- N;
- _ ->
- 0
- end
- end,
- registered()
- ),
- [
- to_prom(erlang_message_queues, gauge, lists:sum(Queues)),
- to_prom(erlang_message_queue_min, gauge, lists:min(Queues)),
- to_prom(erlang_message_queue_max, gauge, lists:max(Queues))
- ].
-
-get_run_queue_stats() ->
- %% Workaround for https://bugs.erlang.org/browse/ERL-1355
- {Normal, Dirty} =
- case erlang:system_info(dirty_cpu_schedulers) > 0 of
- false ->
- {statistics(run_queue), 0};
- true ->
- [DCQ | SQs] = lists:reverse(statistics(run_queue_lengths)),
- {lists:sum(SQs), DCQ}
- end,
- [
- to_prom(erlang_scheduler_queues, gauge, Normal),
- to_prom(erlang_dirty_cpu_scheduler_queues, gauge, Dirty)
- ].
-
-get_ets_stats() ->
- NumTabs = length(ets:all()),
- to_prom(erlang_ets_table, gauge, NumTabs).
-
-update_refresh_timer() ->
- RefreshTime = 1000 * config:get_integer("couch_prometheus", "interval", ?REFRESH_INTERVAL),
- erlang:send_after(RefreshTime, self(), refresh).
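The server above re-renders the metrics on its own timer and simply hands back the cached binary on each scrape, so collection cost is independent of scrape frequency. For orientation, poking at it from an attached shell looks roughly like this (assuming the couch_prometheus application is running):

    %% Cached exposition text, refreshed every [couch_prometheus] interval seconds.
    couch_prometheus_server:scrape().
    %% Prometheus text-format version, the same value used in the Content-Type header.
    couch_prometheus_server:version().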
diff --git a/src/couch_prometheus/src/couch_prometheus_sup.erl b/src/couch_prometheus/src/couch_prometheus_sup.erl
deleted file mode 100644
index 45a884fad..000000000
--- a/src/couch_prometheus/src/couch_prometheus_sup.erl
+++ /dev/null
@@ -1,40 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_prometheus_sup).
-
--behaviour(supervisor).
-
--export([
- start_link/0,
- init/1
-]).
-
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
- {ok, {
- {one_for_one, 5, 10},
- [
- ?CHILD(couch_prometheus_server, worker)
- ] ++ maybe_start_prometheus_http()
- }}.
-
-maybe_start_prometheus_http() ->
- case config:get("prometheus", "additional_port", "false") of
- "false" -> [];
- "true" -> [?CHILD(couch_prometheus_http, worker)];
- _ -> []
- end.
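For reference, the ?CHILD macro above expands to an old-style supervisor child spec; the always-present worker, for example, becomes:

    {couch_prometheus_server,
        {couch_prometheus_server, start_link, []},
        permanent, 5000, worker, [couch_prometheus_server]}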
diff --git a/src/couch_prometheus/src/couch_prometheus_util.erl b/src/couch_prometheus/src/couch_prometheus_util.erl
deleted file mode 100644
index ea2cdf737..000000000
--- a/src/couch_prometheus/src/couch_prometheus_util.erl
+++ /dev/null
@@ -1,169 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_prometheus_util).
-
--export([
- couch_to_prom/3,
- to_bin/1,
- to_prom/3,
- to_prom_summary/2
-]).
-
--include("couch_prometheus.hrl").
-
-couch_to_prom([couch_log, level, alert], Info, _All) ->
- to_prom(couch_log_requests_total, counter, {[{level, alert}], val(Info)});
-couch_to_prom([couch_log, level, Level], Info, _All) ->
- to_prom(couch_log_requests_total, {[{level, Level}], val(Info)});
-couch_to_prom([couch_replicator, checkpoints, failure], Info, _All) ->
- to_prom(couch_replicator_checkpoints_failure_total, counter, val(Info));
-couch_to_prom([couch_replicator, checkpoints, success], Info, All) ->
- Total = val(Info) + val([couch_replicator, checkpoints, failure], All),
- to_prom(couch_replicator_checkpoints_total, counter, Total);
-couch_to_prom([couch_replicator, responses, failure], Info, _All) ->
- to_prom(couch_replicator_responses_failure_total, counter, val(Info));
-couch_to_prom([couch_replicator, responses, success], Info, All) ->
- Total = val(Info) + val([couch_replicator, responses, failure], All),
- to_prom(couch_replicator_responses_total, counter, Total);
-couch_to_prom([couch_replicator, stream_responses, failure], Info, _All) ->
- to_prom(couch_replicator_stream_responses_failure_total, counter, val(Info));
-couch_to_prom([couch_replicator, stream_responses, success], Info, All) ->
- Total = val(Info) + val([couch_replicator, stream_responses, failure], All),
- to_prom(couch_replicator_stream_responses_total, counter, Total);
-couch_to_prom([couchdb, auth_cache_hits], Info, All) ->
- Total = val(Info) + val([couchdb, auth_cache_misses], All),
- to_prom(auth_cache_requests_total, counter, Total);
-couch_to_prom([couchdb, auth_cache_misses], Info, _All) ->
- to_prom(auth_cache_misses_total, counter, val(Info));
-couch_to_prom([couchdb, httpd_request_methods, 'COPY'], Info, _All) ->
- to_prom(httpd_request_methods, counter, {[{method, 'COPY'}], val(Info)});
-couch_to_prom([couchdb, httpd_request_methods, Method], Info, _All) ->
- to_prom(httpd_request_methods, {[{method, Method}], val(Info)});
-couch_to_prom([couchdb, httpd_status_codes, Code], Info, _All) ->
- to_prom(httpd_status_codes, {[{code, Code}], val(Info)});
-couch_to_prom([ddoc_cache, hit], Info, All) ->
- Total = val(Info) + val([ddoc_cache, miss], All),
- to_prom(ddoc_cache_requests_total, counter, Total);
-couch_to_prom([ddoc_cache, miss], Info, _All) ->
- to_prom(ddoc_cache_requests_failures_total, counter, val(Info));
-couch_to_prom([ddoc_cache, recovery], Info, _All) ->
- to_prom(ddoc_cache_requests_recovery_total, counter, val(Info));
-couch_to_prom([fabric, read_repairs, failure], Info, _All) ->
- to_prom(fabric_read_repairs_failures_total, counter, val(Info));
-couch_to_prom([fabric, read_repairs, success], Info, All) ->
- Total = val(Info) + val([fabric, read_repairs, failure], All),
- to_prom(fabric_read_repairs_total, counter, Total);
-couch_to_prom([rexi, streams, timeout, init_stream], Info, _All) ->
- to_prom(rexi_streams_timeout_total, counter, {[{stage, init_stream}], val(Info)});
-couch_to_prom([rexi, streams, timeout, Stage], Info, _All) ->
- to_prom(rexi_streams_timeout_total, {[{stage, Stage}], val(Info)});
-couch_to_prom([couchdb | Rest], Info, All) ->
- couch_to_prom(Rest, Info, All);
-couch_to_prom(Path, Info, _All) ->
- case lists:keyfind(type, 1, Info) of
- {type, counter} ->
- Metric = counter_metric(Path),
- to_prom(Metric, counter, val(Info));
- {type, gauge} ->
- to_prom(path_to_name(Path), gauge, val(Info));
- {type, histogram} ->
- to_prom_summary(Path, Info)
- end.
-
-to_prom(Metric, Type, Data) ->
- TypeStr = to_bin(io_lib:format("# TYPE ~s ~s", [to_prom_name(Metric), Type])),
- [TypeStr] ++ to_prom(Metric, Data).
-
-to_prom(Metric, Instances) when is_list(Instances) ->
- lists:flatmap(fun(Inst) -> to_prom(Metric, Inst) end, Instances);
-to_prom(Metric, {Labels, Value}) ->
- LabelParts = lists:map(
- fun({K, V}) ->
- lists:flatten(io_lib:format("~s=\"~s\"", [to_bin(K), to_bin(V)]))
- end,
- Labels
- ),
- MetricStr =
- case length(LabelParts) > 0 of
- true ->
- LabelStr = string:join(LabelParts, ", "),
- lists:flatten(io_lib:format("~s{~s}", [to_prom_name(Metric), LabelStr]));
- false ->
- lists:flatten(io_lib:format("~s", [to_prom_name(Metric)]))
- end,
- [to_bin(io_lib:format("~s ~p", [MetricStr, Value]))];
-to_prom(Metric, Value) ->
- [to_bin(io_lib:format("~s ~p", [to_prom_name(Metric), Value]))].
-
-to_prom_summary(Path, Info) ->
- Metric = path_to_name(Path ++ ["seconds"]),
- {value, Value} = lists:keyfind(value, 1, Info),
- {arithmetic_mean, Mean} = lists:keyfind(arithmetic_mean, 1, Value),
- {percentile, Percentiles} = lists:keyfind(percentile, 1, Value),
- {n, Count} = lists:keyfind(n, 1, Value),
- Quantiles = lists:map(
- fun({Perc, Val0}) ->
-            % Prometheus uses seconds, so we need to convert milliseconds to seconds
- Val = Val0 / 1000,
- case Perc of
- 50 -> {[{quantile, <<"0.5">>}], Val};
- 75 -> {[{quantile, <<"0.75">>}], Val};
- 90 -> {[{quantile, <<"0.9">>}], Val};
- 95 -> {[{quantile, <<"0.95">>}], Val};
- 99 -> {[{quantile, <<"0.99">>}], Val};
- 999 -> {[{quantile, <<"0.999">>}], Val}
- end
- end,
- Percentiles
- ),
- SumMetric = path_to_name(Path ++ ["seconds", "sum"]),
- SumStat = to_prom(SumMetric, Count * Mean),
- CountMetric = path_to_name(Path ++ ["seconds", "count"]),
- CountStat = to_prom(CountMetric, Count),
- to_prom(Metric, summary, Quantiles) ++ [SumStat, CountStat].
-
-to_prom_name(Metric) ->
- to_bin(io_lib:format("couchdb_~s", [Metric])).
-
-path_to_name(Path) ->
- Parts = lists:map(
- fun(Part) ->
- io_lib:format("~s", [Part])
- end,
- Path
- ),
- string:join(Parts, "_").
-
-counter_metric(Path) ->
- Name = path_to_name(Path),
- case string:find(Name, <<"_total">>, trailing) == <<"_total">> of
- true -> Name;
- false -> to_bin(io_lib:format("~s_total", [Name]))
- end.
-
-to_bin(Data) when is_list(Data) ->
- iolist_to_binary(Data);
-to_bin(Data) when is_atom(Data) ->
- atom_to_binary(Data, utf8);
-to_bin(Data) when is_integer(Data) ->
- integer_to_binary(Data);
-to_bin(Data) when is_binary(Data) ->
- Data.
-
-val(Data) ->
- {value, V} = lists:keyfind(value, 1, Data),
- V.
-
-val(Key, Stats) ->
- {Key, Data} = lists:keyfind(Key, 1, Stats),
- val(Data).
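To make the exposition format concrete: to_prom/3 emits one # TYPE line followed by a sample line per instance, with labels rendered inside braces. For a labelled counter the output is roughly the following two lines (the sample line is the exact binary asserted in couch_prometheus_util_tests further down):

    # TYPE couchdb_httpd_status_codes counter
    couchdb_httpd_status_codes{code="200"} 3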
diff --git a/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl b/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
deleted file mode 100644
index f986fc6b0..000000000
--- a/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
+++ /dev/null
@@ -1,151 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_prometheus_e2e_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "prometheus_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(PROM_PORT, "17986").
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
-start() ->
- test_util:start_couch([chttpd, couch_prometheus]).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- ok = config:set_integer("stats", "interval", 2),
- ok = config:set_integer("couch_prometheus", "interval", 1),
- Port = mochiweb_socket_server:get(chttpd, port),
- construct_url(Port).
-
-teardown(_) ->
- ok.
-
-couch_prometheus_e2e_test_() ->
- {
- "Prometheus E2E Tests",
- {
- setup,
- fun start/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun node_call_chttpd/1,
- fun node_call_prometheus_http/1,
- fun deny_prometheus_http/1,
- fun node_see_updated_metrics/1
- ]
- }
- }
- }.
-
-% normal chttpd path via cluster port
-node_call_chttpd(Url) ->
- {ok, RC1, _, _} = test_request:get(
- Url,
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?_assertEqual(200, RC1).
-
-% normal chttpd path via cluster port
-node_see_updated_metrics(Url) ->
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- DbUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- create_db(DbUrl),
-    [create_doc(DbUrl, "testdoc" ++ integer_to_list(I)) || I <- lists:seq(1, 100)],
- delete_db(DbUrl),
- InitMetrics = wait_for_metrics(Url, "couchdb_httpd_requests_total 0", 5000),
- UpdatedMetrics = wait_for_metrics(Url, "couchdb_httpd_requests_total", 10000),
-    % since the puts happen so fast, we can't rely on an exact
-    % total of requests given the scraping interval, so we just want to
-    % confirm that a change has occurred
- ?_assertNotEqual(InitMetrics, UpdatedMetrics).
-
-% dedicated prometheus port, started via the additional_port setting
-node_call_prometheus_http(_) ->
- maybe_start_http_server("true"),
- Url = construct_url(?PROM_PORT),
- {ok, RC1, _, _} = test_request:get(
- Url,
- [?CONTENT_JSON, ?AUTH]
- ),
- % since this port doesn't require auth, this should work
- {ok, RC2, _, _} = test_request:get(
- Url,
- [?CONTENT_JSON]
- ),
- delete_db(Url),
- ?_assertEqual({200, 200}, {RC1, RC2}).
-
-% we don't start the http server
-deny_prometheus_http(_) ->
- maybe_start_http_server("false"),
- Url = construct_url(?PROM_PORT),
- Response = test_request:get(
- Url,
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?_assertEqual({error, {conn_failed, {error, econnrefused}}}, Response).
-
-maybe_start_http_server(Additional) ->
- test_util:stop_applications([couch_prometheus, chttpd]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- ok = config:set("prometheus", "additional_port", Additional),
- ok = config:set("prometheus", "port", ?PROM_PORT),
- test_util:start_applications([couch_prometheus, chttpd]).
-
-construct_url(Port) ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- lists:concat(["http://", Addr, ":", Port, "/_node/_local/_prometheus"]).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-create_doc(Url, Id) ->
- test_request:put(
- Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH],
- "{\"mr\": \"rockoartischocko\"}"
- ).
-
-wait_for_metrics(Url, Value, Timeout) ->
- test_util:wait(
- fun() ->
- {ok, _, _, Body} = test_request:get(
- Url,
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- case string:find(Body, Value) of
- nomatch -> wait;
- M -> M
- end
- end,
- Timeout
- ).
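Together these fixtures cover both ways of reaching the metrics: the clustered chttpd port (admin auth required) and the optional dedicated port (no auth). A rough sketch of the request the tests issue, assuming default chttpd settings and hypothetical admin credentials:

    Url = "http://127.0.0.1:5984/_node/_local/_prometheus",
    {ok, 200, _Headers, Body} = test_request:get(Url, [{basic_auth, {"admin", "pass"}}]).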
diff --git a/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl b/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl
deleted file mode 100644
index 65828db62..000000000
--- a/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl
+++ /dev/null
@@ -1,75 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_prometheus_util_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--import(couch_prometheus_util, [
- to_prom/3,
- to_prom_summary/2
-]).
-
-couch_prometheus_util_test_() ->
- [
- ?_assertEqual(
- <<"couchdb_ddoc_cache 10">>,
- test_to_prom_output(ddoc_cache, counter, 10)
- ),
- ?_assertEqual(
- <<"couchdb_httpd_status_codes{code=\"200\"} 3">>,
- test_to_prom_output(httpd_status_codes, counter, {[{code, 200}], 3})
- ),
- ?_assertEqual(
- <<"couchdb_temperature_celsius 36">>,
- test_to_prom_output(temperature_celsius, gauge, 36)
- ),
- ?_assertEqual(
- <<"couchdb_mango_query_time_seconds{quantile=\"0.75\"} 4.5">>,
- test_to_prom_sum_output([mango_query_time], [
- {value, [
- {min, 0.0},
- {max, 0.0},
- {arithmetic_mean, 0.0},
- {geometric_mean, 0.0},
- {harmonic_mean, 0.0},
- {median, 0.0},
- {variance, 0.0},
- {standard_deviation, 0.0},
- {skewness, 0.0},
- {kurtosis, 0.0},
- {percentile, [
- {50, 0.0},
- {75, 4500},
- {90, 0.0},
- {95, 0.0},
- {99, 0.0},
- {999, 0.0}
- ]},
- {histogram, [
- {0, 0}
- ]},
- {n, 0}
- ]},
- {type, histogram},
- {desc, <<"length of time processing a mango query">>}
- ])
- )
- ].
-
-test_to_prom_output(Metric, Type, Val) ->
- Out = to_prom(Metric, Type, Val),
- lists:nth(2, Out).
-
-test_to_prom_sum_output(Metric, Info) ->
- Out = to_prom_summary(Metric, Info),
- lists:nth(3, Out).
diff --git a/src/couch_pse_tests/src/couch_pse_tests.app.src b/src/couch_pse_tests/src/couch_pse_tests.app.src
deleted file mode 100644
index 83f3875a0..000000000
--- a/src/couch_pse_tests/src/couch_pse_tests.app.src
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_pse_tests, [
- {description, "Apache CouchDB Pluggable Storage Engine Test Suite"},
- {vsn, git},
- {applications, [
- kernel,
- stdlib
- ]}
-]}.
diff --git a/src/couch_pse_tests/src/cpse_gather.erl b/src/couch_pse_tests/src/cpse_gather.erl
deleted file mode 100644
index 346eca29b..000000000
--- a/src/couch_pse_tests/src/cpse_gather.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_gather).
-
--export([
- module/1
-]).
-
-module(ModName) ->
- Exports = ModName:module_info(exports),
-
- SetupMod = get_setup_all(ModName, Exports),
- TeardownMod = get_teardown_all(ModName, Exports),
- SetupTest = get_fun(ModName, setup_each, 0, Exports),
- TeardownTest = get_fun(ModName, teardown_each, 1, Exports),
-
- RevTests = lists:foldl(
- fun({Fun, Arity}, Acc) ->
- case {atom_to_list(Fun), Arity} of
- {[$c, $p, $s, $e, $_ | _], Arity} when Arity == 0; Arity == 1 ->
- TestFun = make_test_fun(ModName, Fun, Arity),
- [TestFun | Acc];
- _ ->
- Acc
- end
- end,
- [],
- Exports
- ),
- Tests = lists:reverse(RevTests),
-
- {
- setup,
- spawn,
- SetupMod,
- TeardownMod,
- [
- {
- foreach,
- SetupTest,
- TeardownTest,
- Tests
- }
- ]
- }.
-
-get_setup_all(ModName, Exports) ->
- case lists:member({setup_all, 0}, Exports) of
- true -> fun ModName:setup_all/0;
- false -> fun cpse_util:setup_all/0
- end.
-
-get_teardown_all(ModName, Exports) ->
- case lists:member({teardown_all, 1}, Exports) of
- true -> fun ModName:teardown_all/1;
- false -> fun cpse_util:teardown_all/1
- end.
-
-get_fun(ModName, FunName, Arity, Exports) ->
- case lists:member({FunName, Arity}, Exports) of
- true -> fun ModName:FunName/Arity;
- false when Arity == 0 -> fun() -> ok end;
- false when Arity == 1 -> fun(_) -> ok end
- end.
-
-make_test_fun(Module, Fun, Arity) ->
- Name = atom_to_list(Fun),
- case Arity of
- 0 ->
- fun(_) ->
- {timeout, 60,
- {Name, fun() ->
- process_flag(trap_exit, true),
- Module:Fun()
- end}}
- end;
- 1 ->
- fun(Arg) ->
- {timeout, 60,
- {Name, fun() ->
- process_flag(trap_exit, true),
- Module:Fun(Arg)
- end}}
- end
- end.
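cpse_gather:module/1 turns any module following this convention into an EUnit fixture: every exported function whose name starts with cpse_ and has arity 0 or 1 is wrapped in a 60-second timeout, with setup_each/teardown_each (and setup_all/teardown_all, falling back to cpse_util) wired around it. A minimal module it would pick up might look like the sketch below; cpse_test_example is a hypothetical name, not part of this patch:

    -module(cpse_test_example).
    -compile(export_all).
    -compile(nowarn_export_all).

    -include_lib("eunit/include/eunit.hrl").

    setup_each() ->
        {ok, Db} = cpse_util:create_db(),
        Db.

    teardown_each(Db) ->
        ok = couch_server:delete(couch_db:name(Db), []).

    %% Discovered because of the cpse_ prefix and arity 1.
    cpse_empty_db(Db) ->
        ?assertEqual(0, couch_db_engine:count_changes_since(Db, 0)).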
diff --git a/src/couch_pse_tests/src/cpse_test_attachments.erl b/src/couch_pse_tests/src/cpse_test_attachments.erl
deleted file mode 100644
index 4447b8120..000000000
--- a/src/couch_pse_tests/src/cpse_test_attachments.erl
+++ /dev/null
@@ -1,98 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_attachments).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-cpse_write_attachment(Db1) ->
- AttBin = crypto:strong_rand_bytes(32768),
-
- try
- [Att0] = cpse_util:prep_atts(Db1, [
- {<<"ohai.txt">>, AttBin}
- ]),
-
- {stream, Stream} = couch_att:fetch(data, Att0),
- ?assertEqual(true, couch_db_engine:is_active_stream(Db1, Stream)),
-
- Actions = [{create, {<<"first">>, {[]}, [Att0]}}],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- [FDI] = couch_db_engine:open_docs(Db3, [<<"first">>]),
-
- #rev_info{
- rev = {RevPos, PrevRevId},
- deleted = Deleted,
- body_sp = DocPtr
- } = cpse_util:prev_rev(FDI),
-
- Doc0 = #doc{
- id = <<"foo">>,
- revs = {RevPos, [PrevRevId]},
- deleted = Deleted,
- body = DocPtr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Atts1 =
- if
- not is_binary(Doc1#doc.atts) -> Doc1#doc.atts;
- true -> couch_compress:decompress(Doc1#doc.atts)
- end,
-
- StreamSrc = fun(Sp) -> couch_db_engine:open_read_stream(Db3, Sp) end,
- [Att1] = [couch_att:from_disk_term(StreamSrc, T) || T <- Atts1],
- ReadBin = couch_att:to_binary(Att1),
- ?assertEqual(AttBin, ReadBin)
- catch
- throw:not_supported ->
- ok
- end.
-
-% N.B. This test may be overly specific for some theoretical
-% storage engines that don't re-initialize their
-% attachment streams when restarting (for instance, if
-% we ever have something that stores attachments in
-% an external object store)
-cpse_inactive_stream(Db1) ->
- AttBin = crypto:strong_rand_bytes(32768),
-
- try
- [Att0] = cpse_util:prep_atts(Db1, [
- {<<"ohai.txt">>, AttBin}
- ]),
-
- {stream, Stream} = couch_att:fetch(data, Att0),
- ?assertEqual(true, couch_db_engine:is_active_stream(Db1, Stream)),
-
- cpse_util:shutdown_db(Db1),
- {ok, Db2} = couch_db:reopen(Db1),
-
- ?assertEqual(false, couch_db_engine:is_active_stream(Db2, Stream))
- catch
- throw:not_supported ->
- ok
- end.
diff --git a/src/couch_pse_tests/src/cpse_test_compaction.erl b/src/couch_pse_tests/src/cpse_test_compaction.erl
deleted file mode 100644
index 3be95db60..000000000
--- a/src/couch_pse_tests/src/cpse_test_compaction.erl
+++ /dev/null
@@ -1,331 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_compaction).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-cpse_compact_empty(Db1) ->
- Term1 = cpse_util:db_as_term(Db1),
-
- cpse_util:compact(Db1),
-
- {ok, Db2} = couch_db:reopen(Db1),
- Term2 = cpse_util:db_as_term(Db2),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-cpse_compact_doc(Db1) ->
- Actions = [{create, {<<"foo">>, {[]}}}],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- Term1 = cpse_util:db_as_term(Db2),
-
- cpse_util:compact(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
- Term2 = cpse_util:db_as_term(Db3),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-cpse_compact_local_doc(Db1) ->
- Actions = [{create, {<<"_local/foo">>, {[]}}}],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- Term1 = cpse_util:db_as_term(Db2),
-
- cpse_util:compact(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
- Term2 = cpse_util:db_as_term(Db3),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-cpse_compact_with_everything(Db1) ->
- % Add a whole bunch of docs
- DocActions = lists:map(
- fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end,
- lists:seq(1, 1000)
- ),
-
- LocalActions = lists:map(
- fun(I) ->
- {create, {local_docid(I), {[{<<"int">>, I}]}}}
- end,
- lists:seq(1, 25)
- ),
-
- Actions1 = DocActions ++ LocalActions,
-
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions1),
- ok = couch_db:set_security(Db1, {[{<<"foo">>, <<"bar">>}]}),
- ok = couch_db:set_revs_limit(Db1, 500),
-
- Actions2 = [
- {create, {<<"foo">>, {[]}}},
- {create, {<<"bar">>, {[{<<"hooray">>, <<"purple">>}]}}},
- {conflict, {<<"bar">>, {[{<<"booo">>, false}]}}}
- ],
-
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions2),
-
- [FooFDI, BarFDI] = couch_db_engine:open_docs(Db3, [<<"foo">>, <<"bar">>]),
-
- FooRev = cpse_util:prev_rev(FooFDI),
- BarRev = cpse_util:prev_rev(BarFDI),
-
- Actions3 = [
- {purge, {<<"foo">>, FooRev#rev_info.rev}},
- {purge, {<<"bar">>, BarRev#rev_info.rev}}
- ],
-
- {ok, Db4} = cpse_util:apply_actions(Db3, Actions3),
-
- PurgedIdRevs = [
- {<<"bar">>, [BarRev#rev_info.rev]},
- {<<"foo">>, [FooRev#rev_info.rev]}
- ],
-
- {ok, PIdRevs4} = couch_db_engine:fold_purge_infos(
- Db4, 0, fun fold_fun/2, [], []
- ),
- ?assertEqual(PurgedIdRevs, PIdRevs4),
-
- {ok, Db5} =
- try
- [Att0, Att1, Att2, Att3, Att4] = cpse_util:prep_atts(Db4, [
- {<<"ohai.txt">>, crypto:strong_rand_bytes(2048)},
- {<<"stuff.py">>, crypto:strong_rand_bytes(32768)},
- {<<"a.erl">>, crypto:strong_rand_bytes(29)},
- {<<"a.hrl">>, crypto:strong_rand_bytes(5000)},
- {<<"a.app">>, crypto:strong_rand_bytes(400)}
- ]),
-
- Actions4 = [
- {create, {<<"small_att">>, {[]}, [Att0]}},
- {create, {<<"large_att">>, {[]}, [Att1]}},
- {create, {<<"multi_att">>, {[]}, [Att2, Att3, Att4]}}
- ],
- cpse_util:apply_actions(Db4, Actions4)
- catch
- throw:not_supported ->
- {ok, Db4}
- end,
- {ok, Db6} = couch_db:reopen(Db5),
-
- Term1 = cpse_util:db_as_term(Db6),
-
- Config = [
- {"database_compaction", "doc_buffer_size", "1024"},
- {"database_compaction", "checkpoint_after", "2048"}
- ],
-
- cpse_util:with_config(Config, fun() ->
- cpse_util:compact(Db6)
- end),
-
- {ok, Db7} = couch_db:reopen(Db6),
- Term2 = cpse_util:db_as_term(Db7),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-cpse_recompact_updates(Db1) ->
- Actions1 = lists:map(
- fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end,
- lists:seq(1, 1000)
- ),
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions1),
-
- {ok, Compactor} = couch_db:start_compact(Db2),
- catch erlang:suspend_process(Compactor),
-
- Actions2 = [
- {update, {<<"0001">>, {[{<<"updated">>, true}]}}},
- {create, {<<"boop">>, {[]}}}
- ],
-
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions2),
- Term1 = cpse_util:db_as_term(Db3),
-
- catch erlang:resume_process(Compactor),
- cpse_util:compact(Db3),
-
- {ok, Db4} = couch_db:reopen(Db3),
- Term2 = cpse_util:db_as_term(Db4),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-cpse_purge_during_compact(Db1) ->
- Actions1 = lists:map(
- fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end,
- lists:seq(1, 1000)
- ),
- Actions2 = [
- {create, {<<"foo">>, {[]}}},
- {create, {<<"bar">>, {[]}}},
- {create, {<<"baz">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions1 ++ Actions2),
- Actions3 = [
- {conflict, {<<"bar">>, {[{<<"vsn">>, 2}]}}}
- ],
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions3),
-
- {ok, Pid} = couch_db:start_compact(Db3),
- catch erlang:suspend_process(Pid),
-
- [BarFDI, BazFDI] = couch_db_engine:open_docs(Db3, [<<"bar">>, <<"baz">>]),
- BarRev = cpse_util:prev_rev(BarFDI),
- BazRev = cpse_util:prev_rev(BazFDI),
- Actions4 = [
- {purge, {<<"bar">>, BarRev#rev_info.rev}},
- {purge, {<<"baz">>, BazRev#rev_info.rev}}
- ],
-
- {ok, Db4} = cpse_util:apply_actions(Db3, Actions4),
- Term1 = cpse_util:db_as_term(Db4),
-
- catch erlang:resume_process(Pid),
- cpse_util:compact(Db4),
-
- {ok, Db5} = couch_db:reopen(Db4),
- Term2 = cpse_util:db_as_term(Db5),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-cpse_multiple_purge_during_compact(Db1) ->
- Actions1 = lists:map(
- fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end,
- lists:seq(1, 1000)
- ),
- Actions2 = [
- {create, {<<"foo">>, {[]}}},
- {create, {<<"bar">>, {[]}}},
- {create, {<<"baz">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions1 ++ Actions2),
-
- Actions3 = [
- {conflict, {<<"bar">>, {[{<<"vsn">>, 2}]}}}
- ],
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions3),
-
- {ok, Pid} = couch_db:start_compact(Db3),
- catch erlang:suspend_process(Pid),
-
- [BarFDI, BazFDI] = couch_db_engine:open_docs(Db3, [<<"bar">>, <<"baz">>]),
- BarRev = cpse_util:prev_rev(BarFDI),
- Actions4 = [
- {purge, {<<"bar">>, BarRev#rev_info.rev}}
- ],
- {ok, Db4} = cpse_util:apply_actions(Db3, Actions4),
-
- BazRev = cpse_util:prev_rev(BazFDI),
- Actions5 = [
- {purge, {<<"baz">>, BazRev#rev_info.rev}}
- ],
-
- {ok, Db5} = cpse_util:apply_actions(Db4, Actions5),
- Term1 = cpse_util:db_as_term(Db5),
-
- catch erlang:resume_process(Pid),
- cpse_util:compact(Db5),
-
- {ok, Db6} = couch_db:reopen(Db5),
- Term2 = cpse_util:db_as_term(Db6),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-cpse_compact_purged_docs_limit(Db1) ->
- NumDocs = 1200,
- {RActions, RIds} = lists:foldl(
- fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action | CActions], [Id1 | CIds]}
- end,
- {[], []},
- lists:seq(1, NumDocs)
- ),
- Ids = lists:reverse(RIds),
- {ok, Db2} = cpse_util:apply_batch(Db1, lists:reverse(RActions)),
-
- FDIs = couch_db_engine:open_docs(Db2, Ids),
- RActions2 = lists:foldl(
- fun(FDI, CActions) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- [{purge, {Id, Rev}} | CActions]
- end,
- [],
- FDIs
- ),
- {ok, Db3} = cpse_util:apply_batch(Db2, lists:reverse(RActions2)),
-
-    % check that, before compaction, all NumDocs purge requests
-    % are in the purge tree,
-    % even though NumDocs=1200 is greater than purged_docs_limit=1000
- {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []
- ),
- ?assertEqual(1, couch_db_engine:get_oldest_purge_seq(Db3)),
- ?assertEqual(NumDocs, length(PurgedIdRevs)),
-
- % compact db
- cpse_util:compact(Db3),
- {ok, Db4} = couch_db:reopen(Db3),
-
-    % check that, after compaction, only purged_docs_limit purge requests
-    % remain in the purge tree
- PurgedDocsLimit = couch_db_engine:get_purge_infos_limit(Db4),
- OldestPSeq = couch_db_engine:get_oldest_purge_seq(Db4),
- {ok, PurgedIdRevs2} = couch_db_engine:fold_purge_infos(
- Db4, OldestPSeq - 1, fun fold_fun/2, [], []
- ),
- ExpectedOldestPSeq = NumDocs - PurgedDocsLimit + 1,
- ?assertEqual(ExpectedOldestPSeq, OldestPSeq),
- ?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2)).
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
-
-local_docid(I) ->
- Str = io_lib:format("_local/~4..0b", [I]),
- iolist_to_binary(Str).
-
-fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
- {ok, [{Id, Revs} | Acc]}.
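The docid/1 and local_docid/1 helpers above zero-pad the sequence number to four digits so that lexicographic ordering of the generated ids matches their numeric order, e.g.:

    1> iolist_to_binary(io_lib:format("~4..0b", [7])).
    <<"0007">>
    2> iolist_to_binary(io_lib:format("_local/~4..0b", [42])).
    <<"_local/0042">>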
diff --git a/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl b/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
deleted file mode 100644
index a32f866b4..000000000
--- a/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_copy_purge_infos).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(NUM_DOCS, 100).
-
-setup_each() ->
- {ok, SrcDb} = cpse_util:create_db(),
- {ok, SrcDb2} = create_and_purge(SrcDb),
- {ok, TrgDb} = cpse_util:create_db(),
- {SrcDb2, TrgDb}.
-
-teardown_each({SrcDb, TrgDb}) ->
- ok = couch_server:delete(couch_db:name(SrcDb), []),
- ok = couch_server:delete(couch_db:name(TrgDb), []).
-
-cpse_copy_empty_purged_info({_, Db}) ->
- {ok, Db1} = couch_db_engine:copy_purge_infos(Db, []),
- ?assertEqual(ok, cpse_util:assert_each_prop(Db1, [{purge_infos, []}])).
-
-cpse_copy_purged_info({SrcDb, TrgDb}) ->
- {ok, RPIs} = couch_db_engine:fold_purge_infos(
- SrcDb,
- 0,
- fun(PI, Acc) ->
- {ok, [PI | Acc]}
- end,
- [],
- []
- ),
- PIs = lists:reverse(RPIs),
- AEPFold = fun({PSeq, UUID, Id, Revs}, {CPSeq, CPurges}) ->
- {max(PSeq, CPSeq), [{UUID, Id, Revs} | CPurges]}
- end,
- {PurgeSeq, RPurges} = lists:foldl(AEPFold, {0, []}, PIs),
- Purges = lists:reverse(RPurges),
- {ok, TrgDb2} = couch_db_engine:copy_purge_infos(TrgDb, PIs),
- AssertProps = [{purge_seq, PurgeSeq}, {purge_infos, Purges}],
- ?assertEqual(ok, cpse_util:assert_each_prop(TrgDb2, AssertProps)).
-
-create_and_purge(Db) ->
- {RActions, RIds} = lists:foldl(
- fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action | CActions], [Id1 | CIds]}
- end,
- {[], []},
- lists:seq(1, ?NUM_DOCS)
- ),
- Actions = lists:reverse(RActions),
- Ids = lists:reverse(RIds),
- {ok, Db1} = cpse_util:apply_batch(Db, Actions),
-
- FDIs = couch_db_engine:open_docs(Db1, Ids),
- RActions2 = lists:foldl(
- fun(FDI, CActions) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- [Action | CActions]
- end,
- [],
- FDIs
- ),
- Actions2 = lists:reverse(RActions2),
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions2),
- {ok, Db2}.
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_changes.erl b/src/couch_pse_tests/src/cpse_test_fold_changes.erl
deleted file mode 100644
index 91f7c63e9..000000000
--- a/src/couch_pse_tests/src/cpse_test_fold_changes.erl
+++ /dev/null
@@ -1,182 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_fold_changes).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(NUM_DOCS, 25).
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-cpse_empty_changes(Db) ->
- ?assertEqual(0, couch_db_engine:count_changes_since(Db, 0)),
- ?assertEqual(
- {ok, []},
- couch_db_engine:fold_changes(Db, 0, fun fold_fun/2, [], [])
- ).
-
-cpse_single_change(Db1) ->
- Actions = [{create, {<<"a">>, {[]}}}],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(1, couch_db_engine:count_changes_since(Db2, 0)),
- ?assertEqual(
- {ok, [{<<"a">>, 1}]},
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], [])
- ).
-
-cpse_two_changes(Db1) ->
- Actions = [
- {create, {<<"a">>, {[]}}},
- {create, {<<"b">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
-cpse_two_changes_batch(Db1) ->
- Actions = [
- {batch, [
- {create, {<<"a">>, {[]}}},
- {create, {<<"b">>, {[]}}}
- ]}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
-cpse_two_changes_batch_sorted(Db1) ->
- Actions = [
- {batch, [
- {create, {<<"b">>, {[]}}},
- {create, {<<"a">>, {[]}}}
- ]}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
-cpse_update_one(Db1) ->
- Actions = [
- {create, {<<"a">>, {[]}}},
- {update, {<<"a">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(1, couch_db_engine:count_changes_since(Db2, 0)),
- ?assertEqual(
- {ok, [{<<"a">>, 2}]},
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], [])
- ).
-
-cpse_update_first_of_two(Db1) ->
- Actions = [
- {create, {<<"a">>, {[]}}},
- {create, {<<"b">>, {[]}}},
- {update, {<<"a">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"b">>, 2}, {<<"a">>, 3}], lists:reverse(Changes)).
-
-cpse_update_second_of_two(Db1) ->
- Actions = [
- {create, {<<"a">>, {[]}}},
- {create, {<<"b">>, {[]}}},
- {update, {<<"b">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"a">>, 1}, {<<"b">>, 3}], lists:reverse(Changes)).
-
-cpse_check_mutation_ordering(Db1) ->
- Actions = shuffle(
- lists:map(
- fun(Seq) ->
- {create, {docid(Seq), {[]}}}
- end,
- lists:seq(1, ?NUM_DOCS)
- )
- ),
-
- DocIdOrder = [DocId || {_, {DocId, _}} <- Actions],
- DocSeqs = lists:zip(DocIdOrder, lists:seq(1, ?NUM_DOCS)),
-
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
-    % First, let's see that we can get the correct
-    % suffix/prefix starting at every update sequence
- lists:foreach(
- fun(Seq) ->
- {ok, Suffix} =
- couch_db_engine:fold_changes(Db2, Seq, fun fold_fun/2, [], []),
- ?assertEqual(lists:nthtail(Seq, DocSeqs), lists:reverse(Suffix)),
-
- {ok, Prefix} = couch_db_engine:fold_changes(
- Db2, Seq, fun fold_fun/2, [], [{dir, rev}]
- ),
- ?assertEqual(lists:sublist(DocSeqs, Seq + 1), Prefix)
- end,
- lists:seq(0, ?NUM_DOCS)
- ),
-
- ok = do_mutation_ordering(Db2, ?NUM_DOCS + 1, DocSeqs, []).
-
-do_mutation_ordering(Db, _Seq, [], FinalDocSeqs) ->
- {ok, RevOrder} = couch_db_engine:fold_changes(Db, 0, fun fold_fun/2, [], []),
- ?assertEqual(FinalDocSeqs, lists:reverse(RevOrder)),
- ok;
-do_mutation_ordering(Db, Seq, [{DocId, _OldSeq} | Rest], DocSeqAcc) ->
- Actions = [{update, {DocId, {[]}}}],
- {ok, NewDb} = cpse_util:apply_actions(Db, Actions),
- NewAcc = DocSeqAcc ++ [{DocId, Seq}],
- Expected = Rest ++ NewAcc,
- {ok, RevOrder} =
- couch_db_engine:fold_changes(NewDb, 0, fun fold_fun/2, [], []),
- ?assertEqual(Expected, lists:reverse(RevOrder)),
- do_mutation_ordering(NewDb, Seq + 1, Rest, NewAcc).
-
-shuffle(List) ->
- Paired = [{couch_rand:uniform(), I} || I <- List],
- Sorted = lists:sort(Paired),
- [I || {_, I} <- Sorted].
-
-fold_fun(#full_doc_info{id = Id, update_seq = Seq}, Acc) ->
- {ok, [{Id, Seq} | Acc]}.
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_docs.erl b/src/couch_pse_tests/src/cpse_test_fold_docs.erl
deleted file mode 100644
index 2d6eb7a9d..000000000
--- a/src/couch_pse_tests/src/cpse_test_fold_docs.erl
+++ /dev/null
@@ -1,414 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_fold_docs).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(NUM_DOCS, 100).
-
-setup_each() ->
- cpse_util:dbname().
-
-teardown_each(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-cpse_fold_all(DbName) ->
- fold_all(DbName, fold_docs, fun docid/1).
-
-cpse_fold_all_local(DbName) ->
- fold_all(DbName, fold_local_docs, fun local_docid/1).
-
-cpse_fold_start_key(DbName) ->
- fold_start_key(DbName, fold_docs, fun docid/1).
-
-cpse_fold_start_key_local(DbName) ->
- fold_start_key(DbName, fold_local_docs, fun local_docid/1).
-
-cpse_fold_end_key(DbName) ->
- fold_end_key(DbName, fold_docs, fun docid/1).
-
-cpse_fold_end_key_local(DbName) ->
- fold_end_key(DbName, fold_local_docs, fun local_docid/1).
-
-cpse_fold_end_key_gt(DbName) ->
- fold_end_key_gt(DbName, fold_docs, fun docid/1).
-
-cpse_fold_end_key_gt_local(DbName) ->
- fold_end_key_gt(DbName, fold_local_docs, fun local_docid/1).
-
-cpse_fold_range(DbName) ->
- fold_range(DbName, fold_docs, fun docid/1).
-
-cpse_fold_range_local(DbName) ->
- fold_range(DbName, fold_local_docs, fun local_docid/1).
-
-cpse_fold_stop(DbName) ->
- fold_user_fun_stop(DbName, fold_docs, fun docid/1).
-
-cpse_fold_stop_local(DbName) ->
- fold_user_fun_stop(DbName, fold_local_docs, fun local_docid/1).
-
-% This is a loose test, but we have to keep it until
-% I figure out what to do about the total_rows/offset
-% metadata included in _all_docs
-cpse_fold_include_reductions(DbName) ->
- {ok, Db} = init_db(DbName, fun docid/1),
- FoldFun = fun(_, _, nil) -> {ok, nil} end,
- Opts = [include_reductions],
- {ok, Count, nil} = couch_db_engine:fold_docs(Db, FoldFun, nil, Opts),
- ?assert(is_integer(Count)),
- ?assert(Count >= 0).
-
-fold_all(DbName, FoldFun, DocIdFun) ->
- DocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
- {ok, Db} = init_db(DbName, DocIdFun),
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], []),
- ?assertEqual(?NUM_DOCS, length(DocIdAccFwd)),
- ?assertEqual(DocIds, lists:reverse(DocIdAccFwd)),
-
- Opts = [{dir, rev}],
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], Opts),
- ?assertEqual(?NUM_DOCS, length(DocIdAccRev)),
- ?assertEqual(DocIds, DocIdAccRev).
-
-fold_start_key(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- StartKeyNum = ?NUM_DOCS div 4,
- StartKey = DocIdFun(StartKeyNum),
-
- AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
- DocIdsFwd = [DocIdFun(I) || I <- lists:seq(StartKeyNum, ?NUM_DOCS)],
- DocIdsRev = [DocIdFun(I) || I <- lists:seq(1, StartKeyNum)],
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<255>>}
- ])
- ),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<"">>}
- ])
- ),
-
- {ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<"">>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
- ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
-
- {ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<255>>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
- ?assertEqual(AllDocIds, AllDocIdAccRev),
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, StartKey}
- ]),
- ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
- ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
-
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, StartKey}
- ]),
- ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
- ?assertEqual(DocIdsRev, DocIdAccRev).
-
-fold_end_key(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- EndKeyNum = ?NUM_DOCS div 4,
- EndKey = DocIdFun(EndKeyNum),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key, <<"">>}
- ])
- ),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key, <<255>>}
- ])
- ),
-
- AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
-
- {ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key, <<255>>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
- ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
-
- {ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key, <<"">>}
- ]),
-    ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
- ?assertEqual(AllDocIds, AllDocIdAccRev),
-
- DocIdsFwd = [DocIdFun(I) || I <- lists:seq(1, EndKeyNum)],
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key, EndKey}
- ]),
- ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
- ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
-
- DocIdsRev = [DocIdFun(I) || I <- lists:seq(EndKeyNum, ?NUM_DOCS)],
-
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key, EndKey}
- ]),
- ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
- ?assertEqual(DocIdsRev, DocIdAccRev).
-
-fold_end_key_gt(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- EndKeyNum = ?NUM_DOCS div 4,
- EndKey = DocIdFun(EndKeyNum),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key_gt, <<"">>}
- ])
- ),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key_gt, <<255>>}
- ])
- ),
-
- AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
-
- {ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key_gt, <<255>>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
- ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
-
- {ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key_gt, <<"">>}
- ]),
-    ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
- ?assertEqual(AllDocIds, AllDocIdAccRev),
-
- DocIdsFwd = [DocIdFun(I) || I <- lists:seq(1, EndKeyNum - 1)],
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key_gt, EndKey}
- ]),
- ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
- ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
-
- DocIdsRev = [DocIdFun(I) || I <- lists:seq(EndKeyNum + 1, ?NUM_DOCS)],
-
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key_gt, EndKey}
- ]),
- ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
- ?assertEqual(DocIdsRev, DocIdAccRev).
-
-fold_range(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- StartKeyNum = ?NUM_DOCS div 4,
- EndKeyNum = StartKeyNum * 3,
-
- StartKey = DocIdFun(StartKeyNum),
- EndKey = DocIdFun(EndKeyNum),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<"">>},
- {end_key, <<"">>}
- ])
- ),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<"">>},
- {end_key, <<255>>}
- ])
- ),
-
- AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
-
- {ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<"">>},
- {end_key, <<255>>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
- ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
-
- {ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<255>>},
- {end_key_gt, <<"">>}
- ]),
-    ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
- ?assertEqual(AllDocIds, AllDocIdAccRev),
-
- DocIdsFwd = [DocIdFun(I) || I <- lists:seq(StartKeyNum, EndKeyNum)],
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, StartKey},
- {end_key, EndKey}
- ]),
- ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
- ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
-
- DocIdsRev = [DocIdFun(I) || I <- lists:seq(StartKeyNum, EndKeyNum)],
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, StartKey},
- {end_key, EndKey}
- ])
- ),
-
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, EndKey},
- {end_key, StartKey}
- ]),
- ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
- ?assertEqual(DocIdsRev, DocIdAccRev).
-
-fold_user_fun_stop(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- StartKeyNum = ?NUM_DOCS div 4,
- StartKey = DocIdFun(StartKeyNum),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {start_key, <<255>>}
- ])
- ),
-
- ?assertEqual(
- {ok, []},
- couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {dir, rev},
- {start_key, <<"">>}
- ])
- ),
-
- SuffixDocIds = [DocIdFun(I) || I <- lists:seq(?NUM_DOCS - 3, ?NUM_DOCS)],
-
- {ok, SuffixDocIdAcc} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {start_key, DocIdFun(?NUM_DOCS - 3)}
- ]),
- ?assertEqual(length(SuffixDocIds), length(SuffixDocIdAcc)),
- ?assertEqual(SuffixDocIds, lists:reverse(SuffixDocIdAcc)),
-
- PrefixDocIds = [DocIdFun(I) || I <- lists:seq(1, 3)],
-
- {ok, PrefixDocIdAcc} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {dir, rev},
- {start_key, DocIdFun(3)}
- ]),
- ?assertEqual(3, length(PrefixDocIdAcc)),
- ?assertEqual(PrefixDocIds, PrefixDocIdAcc),
-
- FiveDocIdsFwd = [
- DocIdFun(I)
- || I <- lists:seq(StartKeyNum, StartKeyNum + 5)
- ],
-
- {ok, FiveDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {start_key, StartKey}
- ]),
- ?assertEqual(length(FiveDocIdsFwd), length(FiveDocIdAccFwd)),
- ?assertEqual(FiveDocIdsFwd, lists:reverse(FiveDocIdAccFwd)),
-
- FiveDocIdsRev = [
- DocIdFun(I)
- || I <- lists:seq(StartKeyNum - 5, StartKeyNum)
- ],
-
- {ok, FiveDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {dir, rev},
- {start_key, StartKey}
- ]),
- ?assertEqual(length(FiveDocIdsRev), length(FiveDocIdAccRev)),
- ?assertEqual(FiveDocIdsRev, FiveDocIdAccRev).
-
-init_db(DbName, DocIdFun) ->
- {ok, Db1} = cpse_util:create_db(DbName),
- Actions = lists:map(
- fun(Id) ->
- {create, {DocIdFun(Id), {[{<<"int">>, Id}]}}}
- end,
- lists:seq(1, ?NUM_DOCS)
- ),
- cpse_util:apply_actions(Db1, [{batch, Actions}]).
-
-fold_fun(Doc, Acc) ->
- Id =
- case Doc of
- #doc{id = Id0} -> Id0;
- #full_doc_info{id = Id0} -> Id0
- end,
- {ok, [Id | Acc]}.
-
-fold_stop(Doc, Acc) ->
- Id =
- case Doc of
- #doc{id = Id0} -> Id0;
- #full_doc_info{id = Id0} -> Id0
- end,
- case length(Acc) of
- N when N =< 4 ->
- {ok, [Id | Acc]};
- _ ->
- {stop, [Id | Acc]}
- end.
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
-
-local_docid(I) ->
- Str = io_lib:format("_local/~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl b/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
deleted file mode 100644
index 6225cbdb0..000000000
--- a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
+++ /dev/null
@@ -1,179 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_fold_purge_infos).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(NUM_DOCS, 100).
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-cpse_empty_purged_docs(Db) ->
- ?assertEqual(
- {ok, []},
- couch_db_engine:fold_purge_infos(
- Db, 0, fun fold_fun/2, [], []
- )
- ).
-
-cpse_all_purged_docs(Db1) ->
- {RActions, RIds} = lists:foldl(
- fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action | CActions], [Id1 | CIds]}
- end,
- {[], []},
- lists:seq(1, ?NUM_DOCS)
- ),
- Actions = lists:reverse(RActions),
- Ids = lists:reverse(RIds),
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions),
-
- FDIs = couch_db_engine:open_docs(Db2, Ids),
- {RevActions2, RevIdRevs} = lists:foldl(
- fun(FDI, {CActions, CIdRevs}) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- {[Action | CActions], [{Id, [Rev]} | CIdRevs]}
- end,
- {[], []},
- FDIs
- ),
- {Actions2, IdsRevs} = {lists:reverse(RevActions2), lists:reverse(RevIdRevs)},
-
- {ok, Db3} = cpse_util:apply_batch(Db2, Actions2),
- {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []
- ),
- ?assertEqual(IdsRevs, lists:reverse(PurgedIdRevs)).
-
-cpse_start_seq(Db1) ->
- Actions1 = [
- {create, {docid(1), {[{<<"int">>, 1}]}}},
- {create, {docid(2), {[{<<"int">>, 2}]}}},
- {create, {docid(3), {[{<<"int">>, 3}]}}},
- {create, {docid(4), {[{<<"int">>, 4}]}}},
- {create, {docid(5), {[{<<"int">>, 5}]}}}
- ],
- Ids = [docid(1), docid(2), docid(3), docid(4), docid(5)],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions1),
-
- FDIs = couch_db_engine:open_docs(Db2, Ids),
- {RActions2, RIdRevs} = lists:foldl(
- fun(FDI, {CActions, CIdRevs}) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- {[Action | CActions], [{Id, [Rev]} | CIdRevs]}
- end,
- {[], []},
- FDIs
- ),
- {ok, Db3} = cpse_util:apply_actions(Db2, lists:reverse(RActions2)),
-
- StartSeq = 3,
- StartSeqIdRevs = lists:nthtail(StartSeq, lists:reverse(RIdRevs)),
- {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, StartSeq, fun fold_fun/2, [], []
- ),
- ?assertEqual(StartSeqIdRevs, lists:reverse(PurgedIdRevs)).
-
-cpse_id_rev_repeated(Db1) ->
- Actions1 = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {conflict, {<<"foo">>, {[{<<"vsn">>, 2}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions1),
-
- [FDI1] = couch_db_engine:open_docs(Db2, [<<"foo">>]),
- PrevRev1 = cpse_util:prev_rev(FDI1),
- Rev1 = PrevRev1#rev_info.rev,
- Actions2 = [
- {purge, {<<"foo">>, Rev1}}
- ],
-
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions2),
- {ok, PurgedIdRevs1} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []
- ),
- ExpectedPurgedIdRevs1 = [
- {<<"foo">>, [Rev1]}
- ],
-
- ?assertEqual(ExpectedPurgedIdRevs1, lists:reverse(PurgedIdRevs1)),
- ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)),
-
- % purge the same Id,Rev when the doc still exists
- {ok, Db4} = cpse_util:apply_actions(Db3, Actions2),
- {ok, PurgedIdRevs2} = couch_db_engine:fold_purge_infos(
- Db4, 0, fun fold_fun/2, [], []
- ),
- ExpectedPurgedIdRevs2 = [
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev1]}
- ],
- ?assertEqual(ExpectedPurgedIdRevs2, lists:reverse(PurgedIdRevs2)),
- ?assertEqual(2, couch_db_engine:get_purge_seq(Db4)),
-
- [FDI2] = couch_db_engine:open_docs(Db4, [<<"foo">>]),
- PrevRev2 = cpse_util:prev_rev(FDI2),
- Rev2 = PrevRev2#rev_info.rev,
- Actions3 = [
- {purge, {<<"foo">>, Rev2}}
- ],
- {ok, Db5} = cpse_util:apply_actions(Db4, Actions3),
-
- {ok, PurgedIdRevs3} = couch_db_engine:fold_purge_infos(
- Db5, 0, fun fold_fun/2, [], []
- ),
- ExpectedPurgedIdRevs3 = [
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev2]}
- ],
- ?assertEqual(ExpectedPurgedIdRevs3, lists:reverse(PurgedIdRevs3)),
- ?assertEqual(3, couch_db_engine:get_purge_seq(Db5)),
-
- % purge the same Id,Rev when the doc was completely purged
- {ok, Db6} = cpse_util:apply_actions(Db5, Actions3),
-
- {ok, PurgedIdRevs4} = couch_db_engine:fold_purge_infos(
- Db6, 0, fun fold_fun/2, [], []
- ),
- ExpectedPurgedIdRevs4 = [
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev2]},
- {<<"foo">>, [Rev2]}
- ],
- ?assertEqual(ExpectedPurgedIdRevs4, lists:reverse(PurgedIdRevs4)),
- ?assertEqual(4, couch_db_engine:get_purge_seq(Db6)).
-
-fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
- {ok, [{Id, Revs} | Acc]}.
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_get_set_props.erl b/src/couch_pse_tests/src/cpse_test_get_set_props.erl
deleted file mode 100644
index 773f1d0dc..000000000
--- a/src/couch_pse_tests/src/cpse_test_get_set_props.erl
+++ /dev/null
@@ -1,90 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_get_set_props).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
-
-setup_each() ->
- cpse_util:dbname().
-
-teardown_each(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-cpse_default_props(DbName) ->
- {ok, {_App, Engine, _Extension}} = application:get_env(couch, test_engine),
- {ok, Db} = cpse_util:create_db(DbName),
- Node = node(),
-
- ?assertEqual(Engine, couch_db_engine:get_engine(Db)),
- ?assertEqual(0, couch_db_engine:get_doc_count(Db)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db)),
- ?assertEqual(true, is_list(couch_db_engine:get_size_info(Db))),
- ?assertEqual(true, is_integer(couch_db_engine:get_disk_version(Db))),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db)),
- ?assertEqual(0, couch_db_engine:get_purge_seq(Db)),
- ?assertEqual(true, is_integer(couch_db_engine:get_purge_infos_limit(Db))),
- ?assertEqual(true, couch_db_engine:get_purge_infos_limit(Db) > 0),
- ?assertEqual([], couch_db_engine:get_security(Db)),
- ?assertEqual(1000, couch_db_engine:get_revs_limit(Db)),
- ?assertMatch(<<_:32/binary>>, couch_db_engine:get_uuid(Db)),
- ?assertEqual([{Node, 0}], couch_db_engine:get_epochs(Db)),
- ?assertEqual(0, couch_db_engine:get_compacted_seq(Db)).
-
--define(ADMIN_ONLY_SEC_PROPS,
- {[
- {<<"members">>,
- {[
- {<<"roles">>, [<<"_admin">>]}
- ]}},
- {<<"admins">>,
- {[
- {<<"roles">>, [<<"_admin">>]}
- ]}}
- ]}
-).
-
-cpse_admin_only_security(DbName) ->
- Config = [{"couchdb", "default_security", "admin_only"}],
- {ok, Db1} = cpse_util:with_config(Config, fun() ->
- cpse_util:create_db(DbName)
- end),
-
- ?assertEqual(?ADMIN_ONLY_SEC_PROPS, couch_db:get_security(Db1)),
- cpse_util:shutdown_db(Db1),
-
- {ok, Db2} = couch_db:reopen(Db1),
- couch_log:error("~n~n~n~n~s -> ~s~n~n", [couch_db:name(Db1), couch_db:name(Db2)]),
- ?assertEqual(?ADMIN_ONLY_SEC_PROPS, couch_db:get_security(Db2)).
-
-cpse_set_security(DbName) ->
- SecProps = {[{<<"foo">>, <<"bar">>}]},
- check_prop_set(DbName, get_security, set_security, {[]}, SecProps).
-
-cpse_set_revs_limit(DbName) ->
- check_prop_set(DbName, get_revs_limit, set_revs_limit, 1000, 50).
-
-check_prop_set(DbName, GetFun, SetFun, Default, Value) ->
- {ok, Db0} = cpse_util:create_db(DbName),
-
- ?assertEqual(Default, couch_db:GetFun(Db0)),
- ?assertMatch(ok, couch_db:SetFun(Db0, Value)),
-
- {ok, Db1} = couch_db:reopen(Db0),
- ?assertEqual(Value, couch_db:GetFun(Db1)),
-
- cpse_util:shutdown_db(Db1),
-
- {ok, Db2} = couch_db:reopen(Db1),
- ?assertEqual(Value, couch_db:GetFun(Db2)).
diff --git a/src/couch_pse_tests/src/cpse_test_open_close_delete.erl b/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
deleted file mode 100644
index c63a05bea..000000000
--- a/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_open_close_delete).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
-
-setup_each() ->
- cpse_util:dbname().
-
-teardown_each(DbName) ->
- case couch_server:exists(DbName) of
- true -> ok = couch_server:delete(DbName, []);
- false -> ok
- end.
-
-cpse_open_non_existent(DbName) ->
- % Try twice to check that a failed open doesn't create
- % the database for some reason.
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)).
-
-cpse_open_create(DbName) ->
- ?assertEqual(false, couch_server:exists(DbName)),
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
- ?assertMatch({ok, _}, cpse_util:create_db(DbName)),
- ?assertEqual(true, couch_server:exists(DbName)).
-
-cpse_open_when_exists(DbName) ->
- ?assertEqual(false, couch_server:exists(DbName)),
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
- ?assertMatch({ok, _}, cpse_util:create_db(DbName)),
- ?assertEqual(file_exists, cpse_util:create_db(DbName)).
-
-cpse_terminate(DbName) ->
- ?assertEqual(false, couch_server:exists(DbName)),
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
- ?assertEqual(ok, cycle_db(DbName, create_db)),
- ?assertEqual(true, couch_server:exists(DbName)).
-
-cpse_rapid_recycle(DbName) ->
- ?assertEqual(ok, cycle_db(DbName, create_db)),
- lists:foreach(
- fun(_) ->
- ?assertEqual(ok, cycle_db(DbName, open_db))
- end,
- lists:seq(1, 100)
- ).
-
-cpse_delete(DbName) ->
- ?assertEqual(false, couch_server:exists(DbName)),
- ?assertMatch(ok, cycle_db(DbName, create_db)),
- ?assertEqual(true, couch_server:exists(DbName)),
- ?assertEqual(ok, couch_server:delete(DbName, [])),
- ?assertEqual(false, couch_server:exists(DbName)).
-
-cycle_db(DbName, Type) ->
- {ok, Db} = cpse_util:Type(DbName),
- cpse_util:shutdown_db(Db).
diff --git a/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl b/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl
deleted file mode 100644
index bddbdb699..000000000
--- a/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl
+++ /dev/null
@@ -1,83 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_purge_bad_checkpoints).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup_each() ->
- {ok, Db1} = cpse_util:create_db(),
- {ok, Revs} = cpse_util:save_docs(couch_db:name(Db1), [
- {[{'_id', foo0}, {vsn, 0}]},
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]},
- {[{'_id', foo3}, {vsn, 3}]},
- {[{'_id', foo4}, {vsn, 4}]},
- {[{'_id', foo5}, {vsn, 5}]},
- {[{'_id', foo6}, {vsn, 6}]},
- {[{'_id', foo7}, {vsn, 7}]},
- {[{'_id', foo8}, {vsn, 8}]},
- {[{'_id', foo9}, {vsn, 9}]}
- ]),
- PInfos = lists:map(
- fun(Idx) ->
- DocId = iolist_to_binary(["foo", $0 + Idx]),
- Rev = lists:nth(Idx + 1, Revs),
- {cpse_util:uuid(), DocId, [Rev]}
- end,
- lists:seq(0, 9)
- ),
- {ok, _} = cpse_util:purge(couch_db:name(Db1), PInfos),
- {ok, Db2} = couch_db:reopen(Db1),
- Db2.
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-cpse_bad_purge_seq(Db1) ->
- Db2 = save_local_doc(Db1, <<"foo">>),
- ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)),
-
- ok = couch_db:set_purge_infos_limit(Db2, 5),
- {ok, Db3} = couch_db:reopen(Db2),
- ?assertEqual(1, couch_db:get_minimum_purge_seq(Db3)).
-
-cpse_verify_non_boolean(Db1) ->
- Db2 = save_local_doc(Db1, 2),
- ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)),
-
- ok = couch_db:set_purge_infos_limit(Db2, 5),
- {ok, Db3} = couch_db:reopen(Db2),
- ?assertEqual(5, couch_db:get_minimum_purge_seq(Db3)).
-
-save_local_doc(Db1, PurgeSeq) ->
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- Doc = couch_doc:from_json_obj(
- ?JSON_DECODE(
- ?JSON_ENCODE(
- {[
- {<<"_id">>, <<"_local/purge-test-stuff">>},
- {<<"purge_seq">>, PurgeSeq},
- {<<"timestamp_utc">>, NowSecs},
- {<<"verify_options">>, {[{<<"signature">>, <<"stuff">>}]}},
- {<<"type">>, <<"test">>}
- ]}
- )
- )
- ),
- {ok, _} = couch_db:update_doc(Db1, Doc, []),
- {ok, Db2} = couch_db:reopen(Db1),
- Db2.
diff --git a/src/couch_pse_tests/src/cpse_test_purge_docs.erl b/src/couch_pse_tests/src/cpse_test_purge_docs.erl
deleted file mode 100644
index f0ed3d747..000000000
--- a/src/couch_pse_tests/src/cpse_test_purge_docs.erl
+++ /dev/null
@@ -1,453 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_purge_docs).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(REV_DEPTH, 100).
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- couch_db:name(Db).
-
-teardown_each(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-cpse_purge_simple(DbName) ->
- {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_simple_info_check(DbName) ->
- {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev]}
- ],
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev], PRevs),
-
- {ok, AllInfos} = couch_util:with_db(DbName, fun(Db) ->
- couch_db_engine:fold_purge_infos(Db, 0, fun fold_all_infos/2, [], [])
- end),
-
- ?assertMatch([{1, <<_/binary>>, <<"foo1">>, [Rev]}], AllInfos).
-
-cpse_purge_empty_db(DbName) ->
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [{0, <<0>>}]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_single_docid(DbName) ->
- {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev1], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 1},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_multiple_docids(DbName) ->
- {ok, [Rev1, Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1.1}]},
- {[{'_id', foo2}, {vsn, 1.2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]},
- {cpse_util:uuid(), <<"foo2">>, [Rev2]}
- ],
-
- {ok, [{ok, PRevs1}, {ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos),
-
- ?assertEqual([Rev1], PRevs1),
- ?assertEqual([Rev2], PRevs2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 0},
- {purge_seq, 2},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_no_docids(DbName) ->
- {ok, [_Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- {ok, []} = cpse_util:purge(DbName, []),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]).
-
-cpse_purge_rev_path(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}),
- Update =
- {[
- {<<"_id">>, <<"foo">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev1)},
- {<<"_deleted">>, true},
- {<<"vsn">>, 2}
- ]},
- {ok, Rev2} = cpse_util:save_doc(DbName, Update),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 1},
- {update_seq, 2},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev2]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev2], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_deep_revision_path(DbName) ->
- {ok, InitRev} = cpse_util:save_doc(DbName, {[{'_id', bar}, {vsn, 0}]}),
- LastRev = lists:foldl(
- fun(Count, PrevRev) ->
- Update =
- {[
- {'_id', bar},
- {'_rev', couch_doc:rev_to_str(PrevRev)},
- {vsn, Count}
- ]},
- {ok, NewRev} = cpse_util:save_doc(DbName, Update),
- NewRev
- end,
- InitRev,
- lists:seq(1, ?REV_DEPTH)
- ),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"bar">>, [LastRev]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([LastRev], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, ?REV_DEPTH + 2},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_partial_revs(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, <<"1.1">>}]}),
- Update =
- {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
- {ok, [_Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev1]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev1], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 1},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_missing_docid(DbName) ->
- {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"baz">>, [Rev1]}
- ],
-
- {ok, [{ok, []}]} = cpse_util:purge(DbName, PurgeInfos),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 2},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_duplicate_docids(DbName) ->
- {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 0},
- {changes, 2},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]},
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
-
- {ok, Resp} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([{ok, [Rev1]}, {ok, []}], Resp),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {purge_seq, 2},
- {changes, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_internal_revision(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}),
- Update =
- {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str(Rev1)},
- {vsn, 2}
- ]},
- {ok, _Rev2} = cpse_util:save_doc(DbName, Update),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev1]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 1},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_missing_revision(DbName) ->
- {ok, [_Rev1, Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev2]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 2},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_repeated_revisions(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, <<"1.1">>}]}),
- Update =
- {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
- {ok, [Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo">>, [Rev1]},
- {cpse_util:uuid(), <<"foo">>, [Rev1, Rev2]}
- ],
-
- {ok, [{ok, PRevs1}, {ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
- ?assertEqual([Rev2], PRevs2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 0},
- {purge_seq, 2},
- {purge_infos, PurgeInfos1}
- ]).
-
-cpse_purge_repeated_uuid(DbName) ->
- {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs1}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev], PRevs1),
-
- % Attempting to purge a repeated UUID is an error
- ?assertThrow({badreq, _}, cpse_util:purge(DbName, PurgeInfos)),
-
- % Although we can replicate it in
- {ok, []} = cpse_util:purge(DbName, PurgeInfos, [replicated_changes]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-fold_all_infos(Info, Acc) ->
- {ok, [Info | Acc]}.
diff --git a/src/couch_pse_tests/src/cpse_test_purge_replication.erl b/src/couch_pse_tests/src/cpse_test_purge_replication.erl
deleted file mode 100644
index 5ec04b711..000000000
--- a/src/couch_pse_tests/src/cpse_test_purge_replication.erl
+++ /dev/null
@@ -1,206 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_purge_replication).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-setup_all() ->
- cpse_util:setup_all([mem3, fabric, couch_replicator]).
-
-setup_each() ->
- {ok, Src} = cpse_util:create_db(),
- {ok, Tgt} = cpse_util:create_db(),
- {couch_db:name(Src), couch_db:name(Tgt)}.
-
-teardown_each({SrcDb, TgtDb}) ->
- ok = couch_server:delete(SrcDb, []),
- ok = couch_server:delete(TgtDb, []).
-
-cpse_purge_http_replication({Source, Target}) ->
- {ok, Rev1} = cpse_util:save_doc(Source, {[{'_id', foo}, {vsn, 1}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Source, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- RepObject =
- {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)}
- ]},
-
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- {ok, Doc1} = cpse_util:open_doc(Target, foo),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Target, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev1]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(Source, PurgeInfos),
- ?assertEqual([Rev1], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Source, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]),
-
- % Show that a purge on the source is
- % not replicated to the target
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- {ok, Doc2} = cpse_util:open_doc(Target, foo),
- [Rev2] = Doc2#doc_info.revs,
- ?assertEqual(Rev1, Rev2#rev_info.rev),
- ?assertEqual(Doc1, Doc2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Target, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- % Show that replicating from the target
- % back to the source reintroduces the doc
- RepObject2 =
- {[
- {<<"source">>, db_url(Target)},
- {<<"target">>, db_url(Source)}
- ]},
-
- {ok, _} = couch_replicator:replicate(RepObject2, ?ADMIN_USER),
- {ok, Doc3} = cpse_util:open_doc(Source, foo),
- [Revs3] = Doc3#doc_info.revs,
- ?assertEqual(Rev1, Revs3#rev_info.rev),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Source, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 1},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-cpse_purge_internal_repl_disabled({Source, Target}) ->
- cpse_util:with_config([{"mem3", "replicate_purges", "false"}], fun() ->
- repl(Source, Target),
-
- {ok, [Rev1, Rev2]} = cpse_util:save_docs(Source, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- repl(Source, Target),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
- {ok, [{ok, PRevs1}]} = cpse_util:purge(Source, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
-
- PurgeInfos2 = [
- {cpse_util:uuid(), <<"foo2">>, [Rev2]}
- ],
- {ok, [{ok, PRevs2}]} = cpse_util:purge(Target, PurgeInfos2),
- ?assertEqual([Rev2], PRevs2),
-
- SrcShard = make_shard(Source),
- TgtShard = make_shard(Target),
- ?assertEqual({ok, 0}, mem3_rep:go(SrcShard, TgtShard)),
- ?assertEqual({ok, 0}, mem3_rep:go(TgtShard, SrcShard)),
-
- ?assertMatch({ok, #doc_info{}}, cpse_util:open_doc(Source, <<"foo2">>)),
- ?assertMatch({ok, #doc_info{}}, cpse_util:open_doc(Target, <<"foo1">>))
- end).
-
-cpse_purge_repl_simple_pull({Source, Target}) ->
- repl(Source, Target),
-
- {ok, Rev} = cpse_util:save_doc(Source, {[{'_id', foo}, {vsn, 1}]}),
- repl(Source, Target),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev]}
- ],
- {ok, [{ok, PRevs}]} = cpse_util:purge(Target, PurgeInfos),
- ?assertEqual([Rev], PRevs),
- repl(Source, Target).
-
-cpse_purge_repl_simple_push({Source, Target}) ->
- repl(Source, Target),
-
- {ok, Rev} = cpse_util:save_doc(Source, {[{'_id', foo}, {vsn, 1}]}),
- repl(Source, Target),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev]}
- ],
- {ok, [{ok, PRevs}]} = cpse_util:purge(Source, PurgeInfos),
- ?assertEqual([Rev], PRevs),
- repl(Source, Target).
-
-repl(Source, Target) ->
- SrcShard = make_shard(Source),
- TgtShard = make_shard(Target),
-
- ?assertEqual({ok, 0}, mem3_rep:go(SrcShard, TgtShard)),
-
- SrcTerm = cpse_util:db_as_term(Source, replication),
- TgtTerm = cpse_util:db_as_term(Target, replication),
-
- Diff = cpse_util:term_diff(SrcTerm, TgtTerm),
- ?assertEqual(nodiff, Diff).
-
-make_shard(DbName) ->
- #shard{
- name = DbName,
- node = node(),
- dbname = DbName,
- range = [0, 16#FFFFFFFF]
- }.
-
-db_url(DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- Url = ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])),
- test_util:wait(fun() ->
- case test_request:get(?b2l(Url)) of
- {ok, 200, _, _} -> ok;
- _ -> wait
- end
- end),
- Url.
diff --git a/src/couch_pse_tests/src/cpse_test_purge_seqs.erl b/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
deleted file mode 100644
index f9d87945e..000000000
--- a/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
+++ /dev/null
@@ -1,125 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_purge_seqs).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- couch_db:name(Db).
-
-teardown_each(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-cpse_increment_purge_seq_on_complete_purge(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
- {ok, Rev2} = cpse_util:save_doc(DbName, {[{'_id', foo2}, {vsn, 1.2}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
- {ok, [{ok, PRevs1}]} = cpse_util:purge(DbName, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {purge_seq, 1},
- {purge_infos, PurgeInfos1}
- ]),
-
- PurgeInfos2 = [
- {cpse_util:uuid(), <<"foo2">>, [Rev2]}
- ],
- {ok, [{ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos2),
- ?assertEqual([Rev2], PRevs2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 4},
- {purge_seq, 2},
- {purge_infos, PurgeInfos1 ++ PurgeInfos2}
- ]).
-
-cpse_increment_purge_multiple_times(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
- {ok, Rev2} = cpse_util:save_doc(DbName, {[{'_id', foo2}, {vsn, 1.2}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]},
- {cpse_util:uuid(), <<"foo2">>, [Rev2]}
- ],
- {ok, [{ok, PRevs1}, {ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
- ?assertEqual([Rev2], PRevs2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 3},
- {purge_seq, 2},
- {purge_infos, PurgeInfos1}
- ]).
-
-cpse_increment_purge_seq_on_partial_purge(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, <<"1.1">>}]}),
- Update =
- {[
- {'_id', foo1},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
- {ok, [_Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
- {ok, [{ok, PRevs1}]} = cpse_util:purge(DbName, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {purge_seq, 1},
- {purge_infos, PurgeInfos1}
- ]).
diff --git a/src/couch_pse_tests/src/cpse_test_read_write_docs.erl b/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
deleted file mode 100644
index f51e50aec..000000000
--- a/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
+++ /dev/null
@@ -1,303 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_read_write_docs).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-cpse_read_docs_from_empty_db(Db) ->
- ?assertEqual([not_found], couch_db_engine:open_docs(Db, [<<"foo">>])),
- ?assertEqual(
- [not_found, not_found],
- couch_db_engine:open_docs(Db, [<<"a">>, <<"b">>])
- ).
-
-cpse_read_empty_local_docs(Db) ->
- {LocalA, LocalB} = {<<"_local/a">>, <<"_local/b">>},
- ?assertEqual([not_found], couch_db_engine:open_local_docs(Db, [LocalA])),
- ?assertEqual(
- [not_found, not_found],
- couch_db_engine:open_local_docs(Db, [LocalA, LocalB])
- ).
-
-cpse_write_one_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- ?assertEqual(1, couch_db_engine:get_doc_count(Db2)),
-
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(1, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(1, couch_db_engine:get_update_seq(Db3)),
-
- [FDI] = couch_db_engine:open_docs(Db3, [<<"foo">>]),
- #rev_info{
- rev = {RevPos, PrevRevId},
- deleted = Deleted,
- body_sp = DocPtr
- } = cpse_util:prev_rev(FDI),
-
- Doc0 = #doc{
- id = <<"foo">>,
- revs = {RevPos, [PrevRevId]},
- deleted = Deleted,
- body = DocPtr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 =
- if
- not is_binary(Doc1#doc.body) -> Doc1#doc.body;
- true -> couch_compress:decompress(Doc1#doc.body)
- end,
- ?assertEqual({[{<<"vsn">>, 1}]}, Body1).
-
-cpse_write_two_docs(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {create, {<<"bar">>, {[{<<"stuff">>, true}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(2, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(2, couch_db_engine:get_update_seq(Db3)),
-
- Resps = couch_db_engine:open_docs(Db3, [<<"foo">>, <<"bar">>]),
- ?assertEqual(false, lists:member(not_found, Resps)).
-
-cpse_write_three_doc_batch(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {batch, [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {create, {<<"bar">>, {[{<<"stuff">>, true}]}}},
- {create, {<<"baz">>, {[]}}}
- ]}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(3, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(3, couch_db_engine:get_update_seq(Db3)),
-
- Resps = couch_db_engine:open_docs(Db3, [<<"foo">>, <<"bar">>, <<"baz">>]),
- ?assertEqual(false, lists:member(not_found, Resps)).
-
-cpse_update_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {update, {<<"foo">>, {[{<<"vsn">>, 2}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(1, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(2, couch_db_engine:get_update_seq(Db3)),
-
- [FDI] = couch_db_engine:open_docs(Db3, [<<"foo">>]),
-
- #rev_info{
- rev = {RevPos, PrevRevId},
- deleted = Deleted,
- body_sp = DocPtr
- } = cpse_util:prev_rev(FDI),
-
- Doc0 = #doc{
- id = <<"foo">>,
- revs = {RevPos, [PrevRevId]},
- deleted = Deleted,
- body = DocPtr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 =
- if
- not is_binary(Doc1#doc.body) -> Doc1#doc.body;
- true -> couch_compress:decompress(Doc1#doc.body)
- end,
-
- ?assertEqual({[{<<"vsn">>, 2}]}, Body1).
-
-cpse_delete_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {delete, {<<"foo">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
- ?assertEqual(0, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(1, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(2, couch_db_engine:get_update_seq(Db3)),
-
- [FDI] = couch_db_engine:open_docs(Db3, [<<"foo">>]),
-
- #rev_info{
- rev = {RevPos, PrevRevId},
- deleted = Deleted,
- body_sp = DocPtr
- } = cpse_util:prev_rev(FDI),
-
- Doc0 = #doc{
- id = <<"foo">>,
- revs = {RevPos, [PrevRevId]},
- deleted = Deleted,
- body = DocPtr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 =
- if
- not is_binary(Doc1#doc.body) -> Doc1#doc.body;
- true -> couch_compress:decompress(Doc1#doc.body)
- end,
-
- ?assertEqual({[]}, Body1).
-
-cpse_write_local_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"_local/foo">>, {[{<<"yay">>, false}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(0, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db3)),
-
- [not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
- [#doc{} = Doc] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]),
- ?assertEqual({[{<<"yay">>, false}]}, Doc#doc.body).
-
-cpse_write_mixed_batch(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {batch, [
- {create, {<<"bar">>, {[]}}},
- {create, {<<"_local/foo">>, {[{<<"yay">>, false}]}}}
- ]}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(1, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(1, couch_db_engine:get_update_seq(Db3)),
-
- [#full_doc_info{}] = couch_db_engine:open_docs(Db3, [<<"bar">>]),
- [not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
-
- [not_found] = couch_db_engine:open_local_docs(Db3, [<<"bar">>]),
- [#doc{}] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]).
-
-cpse_update_local_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"_local/foo">>, {[]}}},
- {update, {<<"_local/foo">>, {[{<<"stuff">>, null}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(0, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db3)),
-
- [not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
- [#doc{} = Doc] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]),
- ?assertEqual({[{<<"stuff">>, null}]}, Doc#doc.body).
-
-cpse_delete_local_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"_local/foo">>, []}},
- {delete, {<<"_local/foo">>, []}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(0, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db3)),
-
- [not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
- ?assertEqual(
- [not_found],
- couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>])
- ).
diff --git a/src/couch_pse_tests/src/cpse_test_ref_counting.erl b/src/couch_pse_tests/src/cpse_test_ref_counting.erl
deleted file mode 100644
index a0123d1ca..000000000
--- a/src/couch_pse_tests/src/cpse_test_ref_counting.erl
+++ /dev/null
@@ -1,105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_ref_counting).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(NUM_CLIENTS, 1000).
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- {Db, self()}.
-
-teardown_each({Db, _}) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-cpse_empty_monitors({Db, Pid}) ->
- Pids = couch_db_engine:monitored_by(Db),
- ?assert(is_list(Pids)),
- Expected = [
- Pid,
- couch_db:get_pid(Db),
- whereis(couch_stats_process_tracker)
- ],
- ?assertEqual([], Pids -- Expected).
-
-cpse_incref_decref({Db, _}) ->
- {Pid, _} = Client = start_client(Db),
- wait_client(Client),
-
- Pids1 = couch_db_engine:monitored_by(Db),
- ?assert(lists:member(Pid, Pids1)),
-
- close_client(Client),
-
- Pids2 = couch_db_engine:monitored_by(Db),
- ?assert(not lists:member(Pid, Pids2)).
-
-cpse_incref_decref_many({Db, _}) ->
- Clients = lists:map(
- fun(_) ->
- start_client(Db)
- end,
- lists:seq(1, ?NUM_CLIENTS)
- ),
-
- lists:foreach(fun(C) -> wait_client(C) end, Clients),
-
- Pids1 = couch_db_engine:monitored_by(Db),
- % +3 for self, db pid, and process tracker
- ?assertEqual(?NUM_CLIENTS + 3, length(Pids1)),
-
- lists:foreach(fun(C) -> close_client(C) end, Clients),
-
- Pids2 = couch_db_engine:monitored_by(Db),
- ?assertEqual(3, length(Pids2)).
-
-start_client(Db0) ->
- spawn_monitor(fun() ->
- {ok, Db1} = couch_db:open_int(couch_db:name(Db0), []),
-
- receive
- {waiting, Pid} ->
- Pid ! go
- after 1000 ->
- erlang:error(timeout)
- end,
-
- receive
- close ->
- couch_db:close(Db1),
- ok
- after 1000 ->
- erlang:error(timeout)
- end
- end).
-
-wait_client({Pid, _Ref}) ->
- Pid ! {waiting, self()},
- receive
- go -> ok
- after 1000 ->
- erlang:error(timeout)
- end.
-
-close_client({Pid, Ref}) ->
- Pid ! close,
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- after 1000 ->
- erlang:error(timeout)
- end.
diff --git a/src/couch_pse_tests/src/cpse_util.erl b/src/couch_pse_tests/src/cpse_util.erl
deleted file mode 100644
index bcbea4487..000000000
--- a/src/couch_pse_tests/src/cpse_util.erl
+++ /dev/null
@@ -1,667 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_util).
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TEST_MODULES, [
- cpse_test_open_close_delete,
- cpse_test_get_set_props,
- cpse_test_read_write_docs,
- cpse_test_attachments,
- cpse_test_fold_docs,
- cpse_test_fold_changes,
- cpse_test_fold_purge_infos,
- cpse_test_copy_purge_infos,
- cpse_test_purge_docs,
- cpse_test_purge_replication,
- cpse_test_purge_bad_checkpoints,
- cpse_test_compaction,
- cpse_test_ref_counting,
- cpse_test_purge_seqs
-]).
-
--define(SHUTDOWN_TIMEOUT, 5000).
--define(COMPACTOR_TIMEOUT, 50000).
--define(ATTACHMENT_WRITE_TIMEOUT, 10000).
--define(MAKE_DOC_SUMMARY_TIMEOUT, 5000).
-
-create_tests(EngineApp, Extension) ->
- create_tests(EngineApp, EngineApp, Extension).
-
-create_tests(EngineApp, EngineModule, Extension) ->
- TestEngine = {EngineApp, EngineModule, Extension},
- application:set_env(couch, test_engine, TestEngine),
- lists:map(
- fun(TestMod) ->
- {atom_to_list(TestMod), cpse_gather:module(TestMod)}
- end,
- ?TEST_MODULES
- ).
-
-setup_all() ->
- setup_all([]).
-
-setup_all(ExtraApps) ->
- Ctx = test_util:start_couch(ExtraApps),
- {ok, {_, EngineMod, Extension}} = application:get_env(couch, test_engine),
- EngineModStr = atom_to_list(EngineMod),
- config:set("couchdb_engines", Extension, EngineModStr, false),
- config:set("log", "include_sasl", "false", false),
- config:set("mem3", "replicate_purges", "true", false),
- Ctx.
-
-teardown_all(Ctx) ->
- test_util:stop_couch(Ctx).
-
-rootdir() ->
- config:get("couchdb", "database_dir", ".").
-
-dbname() ->
- UUID = couch_uuids:random(),
- <<"db-", UUID/binary>>.
-
-get_engine() ->
- case application:get_env(couch, test_engine) of
- {ok, {_App, _Mod, Extension}} ->
- list_to_binary(Extension);
- _ ->
- <<"couch">>
- end.
-
-create_db() ->
- create_db(dbname()).
-
-create_db(DbName) ->
- Engine = get_engine(),
- couch_db:create(DbName, [{engine, Engine}, ?ADMIN_CTX]).
-
-open_db(DbName) ->
- Engine = get_engine(),
- couch_db:open_int(DbName, [{engine, Engine}, ?ADMIN_CTX]).
-
-shutdown_db(Db) ->
- Pid = couch_db:get_pid(Db),
- Ref = erlang:monitor(process, Pid),
- exit(Pid, kill),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- after ?SHUTDOWN_TIMEOUT ->
- erlang:error(database_shutdown_timeout)
- end,
- test_util:wait(fun() ->
- case
- ets:member(
- couch_server:couch_dbs(couch_db:name(Db)),
- couch_db:name(Db)
- )
- of
- true -> wait;
- false -> ok
- end
- end).
-
-save_doc(DbName, Json) ->
- {ok, [Rev]} = save_docs(DbName, [Json], []),
- {ok, Rev}.
-
-save_docs(DbName, JsonDocs) ->
- save_docs(DbName, JsonDocs, []).
-
-save_docs(DbName, JsonDocs, Options) ->
- Docs = lists:map(
- fun(JDoc) ->
- couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc)))
- end,
- JsonDocs
- ),
- Opts = [full_commit | Options],
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- case lists:member(replicated_changes, Options) of
- true ->
- {ok, []} = couch_db:update_docs(
- Db, Docs, Opts, replicated_changes
- ),
- {ok,
- lists:map(
- fun(Doc) ->
- {Pos, [RevId | _]} = Doc#doc.revs,
- {Pos, RevId}
- end,
- Docs
- )};
- false ->
- {ok, Resp} = couch_db:update_docs(Db, Docs, Opts),
- {ok, [Rev || {ok, Rev} <- Resp]}
- end
- after
- couch_db:close(Db)
- end.
-
-open_doc(DbName, DocId0) ->
- DocId = ?JSON_DECODE(?JSON_ENCODE(DocId0)),
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- couch_db:get_doc_info(Db, DocId)
- after
- couch_db:close(Db)
- end.
-
-purge(DbName, PurgeInfos) ->
- purge(DbName, PurgeInfos, []).
-
-purge(DbName, PurgeInfos0, Options) when is_list(PurgeInfos0) ->
- PurgeInfos = lists:map(
- fun({UUID, DocIdJson, Revs}) ->
- {UUID, ?JSON_DECODE(?JSON_ENCODE(DocIdJson)), Revs}
- end,
- PurgeInfos0
- ),
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- couch_db:purge_docs(Db, PurgeInfos, Options)
- after
- couch_db:close(Db)
- end.
-
-uuid() ->
- couch_uuids:random().
-
-assert_db_props(Module, Line, DbName, Props) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- assert_db_props(Module, Line, Db, Props)
- catch
- error:{assertEqual, Props} ->
- {_, Rest} = proplists:split(Props, [module, line]),
- erlang:error({assertEqual, [{module, Module}, {line, Line} | Rest]})
- after
- couch_db:close(Db)
- end;
-assert_db_props(Module, Line, Db, Props) ->
- try
- assert_each_prop(Db, Props)
- catch
- error:{assertEqual, Props} ->
- {_, Rest} = proplists:split(Props, [module, line]),
- erlang:error({assertEqual, [{module, Module}, {line, Line} | Rest]})
- end.
-
-assert_each_prop(_Db, []) ->
- ok;
-assert_each_prop(Db, [{doc_count, Expect} | Rest]) ->
- {ok, DocCount} = couch_db:get_doc_count(Db),
- ?assertEqual(Expect, DocCount),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{del_doc_count, Expect} | Rest]) ->
- {ok, DelDocCount} = couch_db:get_del_doc_count(Db),
- ?assertEqual(Expect, DelDocCount),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{update_seq, Expect} | Rest]) ->
- UpdateSeq = couch_db:get_update_seq(Db),
- ?assertEqual(Expect, UpdateSeq),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{changes, Expect} | Rest]) ->
- {ok, NumChanges} = couch_db:fold_changes(Db, 0, fun aep_changes/2, 0, []),
- ?assertEqual(Expect, NumChanges),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{purge_seq, Expect} | Rest]) ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- ?assertEqual(Expect, PurgeSeq),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{purge_infos, Expect} | Rest]) ->
- {ok, PurgeInfos} = couch_db:fold_purge_infos(Db, 0, fun aep_fold/2, [], []),
- ?assertEqual(Expect, lists:reverse(PurgeInfos)),
- assert_each_prop(Db, Rest).
-
-aep_changes(_A, Acc) ->
- {ok, Acc + 1}.
-
-aep_fold({_PSeq, UUID, Id, Revs}, Acc) ->
- {ok, [{UUID, Id, Revs} | Acc]}.
-
-apply_actions(DbName, Actions) when is_binary(DbName) ->
- {ok, Db0} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, Db1} = apply_actions(Db0, Actions),
- couch_db:close(Db1),
- ok;
-apply_actions(Db, []) ->
- {ok, Db};
-apply_actions(Db, [Action | Rest]) ->
- {ok, NewDb} = apply_action(Db, Action),
- apply_actions(NewDb, Rest).
-
-apply_action(Db, {batch, BatchActions}) ->
- apply_batch(Db, BatchActions);
-apply_action(Db, Action) ->
- apply_batch(Db, [Action]).
-
-apply_batch(Db, Actions) ->
- AccIn = {[], [], [], []},
- AccOut = lists:foldl(
- fun(Action, Acc) ->
- {DocAcc, ConfAcc, LDocAcc, PurgeAcc} = Acc,
- case gen_write(Db, Action) of
- {update, Doc} ->
- {[Doc | DocAcc], ConfAcc, LDocAcc, PurgeAcc};
- {conflict, Doc} ->
- {DocAcc, [Doc | ConfAcc], LDocAcc, PurgeAcc};
- {local, Doc} ->
- {DocAcc, ConfAcc, [Doc | LDocAcc], PurgeAcc};
- {purge, PurgeInfo} ->
- {DocAcc, ConfAcc, LDocAcc, [PurgeInfo | PurgeAcc]}
- end
- end,
- AccIn,
- Actions
- ),
-
- {Docs0, Conflicts0, LDocs0, PurgeInfos0} = AccOut,
- Docs = lists:reverse(Docs0),
- Conflicts = lists:reverse(Conflicts0),
- LDocs = lists:reverse(LDocs0),
- PurgeInfos = lists:reverse(PurgeInfos0),
-
- {ok, Resp} = couch_db:update_docs(Db, Docs ++ LDocs),
- false = lists:member(conflict, Resp),
- {ok, Db1} = couch_db:reopen(Db),
-
- {ok, []} = couch_db:update_docs(Db, Conflicts, [], replicated_changes),
- {ok, Db2} = couch_db:reopen(Db1),
-
- if
- PurgeInfos == [] -> ok;
- true -> {ok, _} = couch_db:purge_docs(Db2, PurgeInfos)
- end,
- couch_db:reopen(Db2).
-
-gen_write(Db, {Action, {<<"_local/", _/binary>> = DocId, Body}}) ->
- PrevRev =
- case couch_db:open_doc(Db, DocId) of
- {not_found, _} ->
- 0;
- {ok, #doc{revs = {0, []}}} ->
- 0;
- {ok, #doc{revs = {0, [RevStr | _]}}} ->
- binary_to_integer(RevStr)
- end,
- {RevId, Deleted} =
- case Action of
- Action when Action == create; Action == update ->
- {PrevRev + 1, false};
- delete ->
- {0, true}
- end,
- {local, #doc{
- id = DocId,
- revs = {0, [list_to_binary(integer_to_list(RevId))]},
- body = Body,
- deleted = Deleted
- }};
-gen_write(Db, {Action, {DocId, Body}}) ->
- gen_write(Db, {Action, {DocId, Body, []}});
-gen_write(Db, {create, {DocId, Body, Atts}}) ->
- {not_found, _} = couch_db:open_doc(Db, DocId),
- {update, #doc{
- id = DocId,
- revs = {0, []},
- deleted = false,
- body = Body,
- atts = Atts
- }};
-gen_write(_Db, {purge, {DocId, PrevRevs0, _}}) ->
- PrevRevs =
- if
- is_list(PrevRevs0) -> PrevRevs0;
- true -> [PrevRevs0]
- end,
- {purge, {couch_uuids:random(), DocId, PrevRevs}};
-gen_write(Db, {Action, {DocId, Body, Atts}}) ->
- #full_doc_info{} = PrevFDI = couch_db:get_full_doc_info(Db, DocId),
-
- #full_doc_info{
- id = DocId
- } = PrevFDI,
-
- #rev_info{
- rev = PrevRev
- } = prev_rev(PrevFDI),
-
- NewRev = gen_rev(Action, DocId, PrevRev, Body, Atts),
-
- Deleted =
- case Action of
- update -> false;
- conflict -> false;
- delete -> true
- end,
-
- Type =
- case Action of
- conflict -> conflict;
- _ -> update
- end,
-
- {Type, #doc{
- id = DocId,
- revs = NewRev,
- deleted = Deleted,
- body = Body,
- atts = Atts
- }}.
-
-gen_rev(A, DocId, {Pos, Rev}, Body, Atts) when A == update; A == delete ->
- NewRev = couch_hash:md5_hash(term_to_binary({DocId, Rev, Body, Atts})),
- {Pos + 1, [NewRev, Rev]};
-gen_rev(conflict, DocId, _, Body, Atts) ->
- UUID = couch_uuids:random(),
- NewRev = couch_hash:md5_hash(term_to_binary({DocId, UUID, Body, Atts})),
- {1, [NewRev]}.
-
-prep_atts(_Db, []) ->
- [];
-prep_atts(Db, [{FileName, Data} | Rest]) ->
- {_, Ref} = spawn_monitor(fun() ->
- {ok, Stream} = couch_db:open_write_stream(Db, []),
- exit(write_att(Stream, FileName, Data, Data))
- end),
- Att =
- receive
- {'DOWN', Ref, _, _, {{no_catch, not_supported}, _}} ->
- throw(not_supported);
- {'DOWN', Ref, _, _, Resp} ->
- Resp
- after ?ATTACHMENT_WRITE_TIMEOUT ->
- erlang:error(attachment_write_timeout)
- end,
- [Att | prep_atts(Db, Rest)].
-
-write_att(Stream, FileName, OrigData, <<>>) ->
- {StreamEngine, Len, Len, Md5, Md5} = couch_stream:close(Stream),
- couch_util:check_md5(Md5, couch_hash:md5_hash(OrigData)),
- Len = size(OrigData),
- couch_att:new([
- {name, FileName},
- {type, <<"application/octet-stream">>},
- {data, {stream, StreamEngine}},
- {att_len, Len},
- {disk_len, Len},
- {md5, Md5},
- {encoding, identity}
- ]);
-write_att(Stream, FileName, OrigData, Data) ->
- {Chunk, Rest} =
- case size(Data) > 4096 of
- true ->
- <<Head:4096/binary, Tail/binary>> = Data,
- {Head, Tail};
- false ->
- {Data, <<>>}
- end,
- ok = couch_stream:write(Stream, Chunk),
- write_att(Stream, FileName, OrigData, Rest).
-
-prev_rev(#full_doc_info{} = FDI) ->
- #doc_info{
- revs = [#rev_info{} = PrevRev | _]
- } = couch_doc:to_doc_info(FDI),
- PrevRev.
-
-db_as_term(Db) ->
- db_as_term(Db, compact).
-
-db_as_term(DbName, Type) when is_binary(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- db_as_term(Db, Type)
- end);
-db_as_term(Db, Type) ->
- [
- {props, db_props_as_term(Db, Type)},
- {docs, db_docs_as_term(Db)},
- {local_docs, db_local_docs_as_term(Db, Type)},
- {changes, db_changes_as_term(Db)},
- {purged_docs, db_purged_docs_as_term(Db)}
- ].
-
-db_props_as_term(Db, Type) ->
- Props0 = [
- get_doc_count,
- get_del_doc_count,
- get_disk_version,
- get_update_seq,
- get_purge_seq,
- get_purge_infos_limit,
- get_security,
- get_revs_limit,
- get_uuid,
- get_epochs
- ],
- Props =
- if
- Type /= replication -> Props0;
- true -> Props0 -- [get_uuid]
- end,
- lists:map(
- fun(Fun) ->
- {Fun, couch_db_engine:Fun(Db)}
- end,
- Props
- ).
-
-db_docs_as_term(Db) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- {ok, FDIs} = couch_db:fold_docs(Db, FoldFun, [], []),
- lists:reverse(
- lists:map(
- fun(FDI) ->
- fdi_to_term(Db, FDI)
- end,
- FDIs
- )
- ).
-
-db_local_docs_as_term(Db, Type) ->
- FoldFun = fun(Doc, Acc) ->
- case Doc#doc.id of
- <<?LOCAL_DOC_PREFIX, "purge-mem3", _/binary>> when
- Type == replication
- ->
- {ok, Acc};
- _ ->
- {ok, [Doc | Acc]}
- end
- end,
- {ok, LDocs} = couch_db:fold_local_docs(Db, FoldFun, [], []),
- lists:reverse(LDocs).
-
-db_changes_as_term(Db) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- {ok, Changes} = couch_db:fold_changes(Db, 0, FoldFun, [], []),
- lists:reverse(
- lists:map(
- fun(FDI) ->
- fdi_to_term(Db, FDI)
- end,
- Changes
- )
- ).
-
-db_purged_docs_as_term(Db) ->
- InitPSeq = couch_db_engine:get_oldest_purge_seq(Db) - 1,
- FoldFun = fun({PSeq, UUID, Id, Revs}, Acc) ->
- {ok, [{PSeq, UUID, Id, Revs} | Acc]}
- end,
- {ok, PDocs} = couch_db_engine:fold_purge_infos(
- Db, InitPSeq, FoldFun, [], []
- ),
- lists:reverse(PDocs).
-
-fdi_to_term(Db, FDI) ->
- #full_doc_info{
- id = DocId,
- rev_tree = OldTree
- } = FDI,
- {NewRevTree, _} = couch_key_tree:mapfold(
- fun(Rev, Node, Type, Acc) ->
- tree_to_term(Rev, Node, Type, Acc, DocId)
- end,
- Db,
- OldTree
- ),
- FDI#full_doc_info{
- rev_tree = NewRevTree,
- % Blank out sizes because we allow storage
- % engines to handle this with their own
- % definition until further notice.
- sizes = #size_info{
- active = -1,
- external = -1
- }
- }.
-
-tree_to_term(_Rev, _Leaf, branch, Acc, _DocId) ->
- {?REV_MISSING, Acc};
-tree_to_term({Pos, RevId}, #leaf{} = Leaf, leaf, Db, DocId) ->
- #leaf{
- deleted = Deleted,
- ptr = Ptr
- } = Leaf,
-
- Doc0 = #doc{
- id = DocId,
- revs = {Pos, [RevId]},
- deleted = Deleted,
- body = Ptr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db, Doc0),
-
- Body =
- if
- not is_binary(Doc1#doc.body) -> Doc1#doc.body;
- true -> couch_compress:decompress(Doc1#doc.body)
- end,
-
- Atts1 =
- if
- not is_binary(Doc1#doc.atts) -> Doc1#doc.atts;
- true -> couch_compress:decompress(Doc1#doc.atts)
- end,
-
- StreamSrc = fun(Sp) -> couch_db:open_read_stream(Db, Sp) end,
- Atts2 = [couch_att:from_disk_term(StreamSrc, Att) || Att <- Atts1],
- Atts = [att_to_term(Att) || Att <- Atts2],
-
- NewLeaf = Leaf#leaf{
- ptr = Body,
- sizes = #size_info{active = -1, external = -1},
- atts = Atts
- },
- {NewLeaf, Db}.
-
-att_to_term(Att) ->
- Bin = couch_att:to_binary(Att),
- couch_att:store(data, Bin, Att).
-
-term_diff(T1, T2) when is_tuple(T1), is_tuple(T2) ->
- tuple_diff(tuple_to_list(T1), tuple_to_list(T2));
-term_diff(L1, L2) when is_list(L1), is_list(L2) ->
- list_diff(L1, L2);
-term_diff(V1, V2) when V1 == V2 ->
- nodiff;
-term_diff(V1, V2) ->
- {V1, V2}.
-
-tuple_diff([], []) ->
- nodiff;
-tuple_diff([T1 | _], []) ->
- {longer, T1};
-tuple_diff([], [T2 | _]) ->
- {shorter, T2};
-tuple_diff([T1 | R1], [T2 | R2]) ->
- case term_diff(T1, T2) of
- nodiff ->
- tuple_diff(R1, R2);
- Else ->
- {T1, Else}
- end.
-
-list_diff([], []) ->
- nodiff;
-list_diff([T1 | _], []) ->
- {longer, T1};
-list_diff([], [T2 | _]) ->
- {shorter, T2};
-list_diff([T1 | R1], [T2 | R2]) ->
- case term_diff(T1, T2) of
- nodiff ->
- list_diff(R1, R2);
- Else ->
- {T1, Else}
- end.
-
-compact(Db) ->
- {ok, Pid} = couch_db:start_compact(Db),
- Ref = erlang:monitor(process, Pid),
-
- % Ideally I'd assert that Pid is linked to us
- % at this point but it's technically possible
- % that it could have finished compacting by
- % the time we check... Quite the quandary.
-
- receive
- {'DOWN', Ref, _, _, normal} ->
- ok;
- {'DOWN', Ref, _, _, noproc} ->
- ok;
- {'DOWN', Ref, _, _, Reason} ->
- erlang:error({compactor_died, Reason})
- after ?COMPACTOR_TIMEOUT ->
- erlang:error(compactor_timed_out)
- end,
-
- test_util:wait(fun() ->
- {ok, Db2} = couch_db:open_int(couch_db:name(Db), []),
- try
- CPid = couch_db:get_compactor_pid(Db2),
- case is_pid(CPid) of
- true -> wait;
- false -> ok
- end
- after
- couch_db:close(Db2)
- end
- end).
-
-with_config(Config, Fun) ->
- OldConfig = apply_config(Config),
- try
- Fun()
- after
- apply_config(OldConfig)
- end.
-
-apply_config([]) ->
- [];
-apply_config([{Section, Key, Value} | Rest]) ->
- Orig = config:get(Section, Key),
- case Value of
- undefined -> config:delete(Section, Key, false);
- _ -> config:set(Section, Key, Value, false)
- end,
- [{Section, Key, Orig} | apply_config(Rest)].
diff --git a/src/couch_replicator/.gitignore b/src/couch_replicator/.gitignore
deleted file mode 100644
index b3099f518..000000000
--- a/src/couch_replicator/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.beam
-.eunit
-ebin/replicator.app
-.DS_Store \ No newline at end of file
diff --git a/src/couch_replicator/LICENSE b/src/couch_replicator/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_replicator/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_replicator/README.md b/src/couch_replicator/README.md
deleted file mode 100644
index 6a2a5cfdd..000000000
--- a/src/couch_replicator/README.md
+++ /dev/null
@@ -1,285 +0,0 @@
-Developer Oriented Replicator Description
-=========================================
-
- This description of the scheduling replicator's functionality is mainly geared to
- CouchDB developers. It dives a bit into the internals and explains how
-everything is connected together.
-
-A natural place to start is the top application supervisor:
-`couch_replicator_sup`. It's a `rest_for_one` restart strategy supervisor,
-so if a child process terminates, the rest of the children in the hierarchy
-following it are also terminated. This structure implies a useful constraint --
-children lower in the list can safely call their siblings which are higher in
-the list.
-
-A description of each child:
-
- * `couch_replication_event`: Starts a gen_event publication bus to handle some
- replication related events. It is used, for example, to publish cluster
- membership changes by the `couch_replicator_clustering` process, and is
- also used in replication tests to monitor for replication events.
- Notification is performed via the `couch_replicator_notifier:notify/1`
- function. It's the first (left-most) child because
- `couch_replicator_clustering` uses it.
-
- * `couch_replicator_clustering`: This module maintains cluster membership
- information for the replication application and provides functions to check
- ownership of replication jobs. A cluster membership change is published via
- the `gen_event` event server named `couch_replication_event` as previously
- covered. Published events are `{cluster, stable}` when cluster membership
- has stabilized, that is, no node membership changes in a given period, and
- `{cluster, unstable}` which indicates there was a recent change to the
- cluster membership and now it's considered unstable. Listeners for cluster
- membership change include `couch_replicator_doc_processor` and
- `couch_replicator_db_changes`. When doc processor gets an `{cluster,
- stable}` event it will remove all the replication jobs not belonging to the
- current node. When `couch_replicator_db_changes` gets a `{cluster,
- stable}` event, it will restart the `couch_multidb_changes` process it
- controls, which will launch a new scan of all the replicator databases.
-
- * `couch_replicator_connection`: Maintains a global replication connection
- pool. It allows reusing connections across replication tasks. The main
- interface is `acquire/1` and `release/1`. The general idea is that once a
- connection is established, it is kept around for
- `replicator.connection_close_interval` milliseconds in case another
- replication task wants to re-use it. It is worth pointing out how linking
- and monitoring is handled: workers are linked to the connection pool when
- they are created. If they crash, the connection pool will receive an 'EXIT'
- event and clean up after the worker. The connection pool also monitors
- owners (by monitoring the `Pid` from the `From` argument in the call to
- `acquire/1`) and cleans up if the owner dies and the pool receives a 'DOWN'
- message. Another interesting thing is that connection establishment
- (creation) happens in the owner process so the pool is not blocked on it.
-
- * `couch_replicator_rate_limiter`: Implements a rate limiter to handle
- connection throttling from sources or targets where requests return 429
- error codes. Uses the Additive Increase / Multiplicative Decrease feedback
- control algorithm to converge on the channel capacity. Implemented using a
- 16-way sharded ETS table to maintain connection state. The table sharding
- code is split out to `couch_replicator_rate_limiter_tables` module. The
- purpose of the module is to maintain and continually estimate sleep
- intervals for each connection, represented as a `{Method, Url}` pair. The
- interval is updated accordingly on each call to `failure/1` or `success/1`.
- For a successful request, a client should call `success/1`. Whenever
- a 429 response is received the client should call `failure/1`. When no
- failures are happening the code ensures the ETS tables are empty in
- order to have a lower impact on a running system.
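-
- As a rough sketch of the AIMD idea above (the constants are illustrative
- assumptions, not the module's actual values), a success shrinks the sleep
- interval additively while a 429 failure grows it multiplicatively:
-
- ```
- %% Hedged sketch only; the real module keeps these per {Method, Url} in ETS.
- -define(DECREMENT, 25).        % ms removed from the interval per success
- -define(FACTOR, 2).            % multiplier applied per 429 failure
- -define(MAX_INTERVAL, 25000).  % ms ceiling for the sleep interval
-
- success_interval(Interval) ->
-     max(0, Interval - ?DECREMENT).
-
- failure_interval(Interval) ->
-     min(?MAX_INTERVAL, max(1, Interval) * ?FACTOR).
- ```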
-
- * `couch_replicator_scheduler`: This is the core component of the scheduling
- replicator. Its main task is to switch between replication jobs by
- stopping some and starting others to ensure all of them make progress.
- Replication jobs which fail are penalized using an exponential backoff.
- That is, each consecutive failure will double the time penalty. This frees
- up system resources for more useful work than just continuously trying to
- run the same subset of failing jobs.
-
- The main API function is `add_job/1`. Its argument is an instance of the
- `#rep{}` record, which could be the result of a document update from a
- `_replicator` db or the result of a POST to `_replicate` endpoint.
-
- Each job internally is represented by the `#job{}` record. It contains the
- original `#rep{}` but also maintains an event history. The history is a
- sequence of past events for each job. These are timestamped and ordered
- such that the most recent event is at the head. History length is limited
- based on the `replicator.max_history` configuration value. The default is
- 20 entries. History event types are:
-
- * `added` : job was just added to the scheduler. This is the first event.
- * `started` : job was started. This was an attempt to run the job.
- * `stopped` : job was stopped by the scheduler.
- * `crashed` : job has crashed (instead of stopping cleanly).
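-
- A hedged sketch of how such a capped, newest-first history could be kept
- (the `{Type, Timestamp}` shape mirrors the description above; the function
- name is an assumption for illustration):
-
- ```
- add_history_sketch(Type, History, MaxHistory) ->
-     %% newest event at the head, length capped at replicator.max_history
-     lists:sublist([{Type, os:timestamp()} | History], MaxHistory).
- ```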
-
- The core of the scheduling algorithm is the `reschedule/1` function. This
- function is called every `replicator.interval` milliseconds (default is
- 60000 i.e. a minute). During each call the scheduler will try to stop some
- jobs, start some new ones and will also try to keep the maximum number of
- jobs running less than `replicator.max_jobs` (default 500). So the
- function does these operations (actual code paste):
-
- ```
- Running = running_job_count(),
- Pending = pending_job_count(),
- stop_excess_jobs(State, Running),
- start_pending_jobs(State, Running, Pending),
- rotate_jobs(State, Running, Pending),
- update_running_jobs_stats(State#state.stats_pid)
- ```
-
- `Running` is the total number of currently running jobs. `Pending` is the
- total number of jobs waiting to be run. `stop_excess_jobs` will stop any
- exceeding the `replicator.max_jobs` configured limit. This code takes
- effect if the user reduces the `max_jobs` configuration value.
- `start_pending_jobs` will start any jobs if there is more room available.
- This will take effect on startup or when the user increases the `max_jobs`
- configuration value. `rotate_jobs` is where all the action happens. The
- scheduler picks `replicator.max_churn` running jobs to stop and then picks
- the same number of pending jobs to start. The default value of `max_churn`
- is 20. So by default every minute, 20 running jobs are stopped, and 20 new
- pending jobs are started.
-
- Before moving on it is worth pointing out that the scheduler treats continuous
- and non-continuous replications differently. Normal (non-continuous)
- replications once started will be allowed to run to completion. That
- behavior is to preserve their semantics of replicating a snapshot of the
- source database to the target. For example if new documents are added to
- the source after the replication is started, those updates should not show
- up on the target database. Stopping and restarting a normal replication
- would violate that constraint. The only exception to the rule is when the
- user explicitly reduces the `replicator.max_jobs` configuration value. Even
- then the scheduler will first attempt to stop as many continuous jobs as possible
- and only if it has no choice left will it stop normal jobs.
-
- Keeping that in mind and going back to the scheduling algorithm, the next
- interesting part is how the scheduler picks which jobs to stop and which
- ones to start:
-
- * Stopping: When picking jobs to stop the scheduler will pick the longest
- running continuous jobs first. The sorting callback function to get the
- longest running jobs is unsurprisingly called `longest_running/2`. To
- pick the longest running jobs it looks at the most recent `started`
- event. After it gets a sorted list by longest running, it simply picks
- first few depending on the value of `max_churn` using `lists:sublist/2`.
- Then those jobs are stopped.
-
- * Starting: When starting, the scheduler will pick the jobs which have been
- waiting the longest. Surprisingly, in this case it also looks at the
- `started` timestamp and picks the jobs which have the oldest `started`
- timestamp. If there are 3 jobs, A[started=10], B[started=7],
- C[started=9], then B will be picked first, then C then A. This ensures
- that jobs are not starved, which is a classic scheduling pitfall.
-
- In the code, the list of pending jobs is picked slightly differently than
- how the list of running jobs is picked. `pending_jobs/1` uses `ets:foldl`
- to iterate over all the pending jobs. As it iterates it tries to keep only
- up to `max_churn` oldest items in the accumulator. The reason this is done
- is that there could be a very large number of pending jobs and loading them
- all in a list (making a copy from ETS) and then sorting it can be quite
- expensive performance-wise. The tricky part of the iteration happens
- in `pending_maybe_replace/2`. A `gb_sets` ordered set is used to keep top-N
- longest waiting jobs so far. The code has a comment with a helpful example
- on how this algorithm works.
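-
- A minimal sketch of that accumulator idea (the `{Started, Job}` entry shape
- and the function name are assumptions for illustration, not the actual
- record layout):
-
- ```
- %% Keep only the N oldest-started jobs while folding over the ETS table.
- pending_maybe_replace_sketch({Started, _} = Entry, {Set, N}) ->
-     case gb_sets:size(Set) < N of
-         true ->
-             {gb_sets:add(Entry, Set), N};
-         false ->
-             {NewestStarted, _} = Newest = gb_sets:largest(Set),
-             case Started < NewestStarted of
-                 true -> {gb_sets:add(Entry, gb_sets:delete(Newest, Set)), N};
-                 false -> {Set, N}
-             end
-     end.
- ```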
-
- The last part is how the scheduler treats jobs which keep crashing. If a
- job is started but then crashes, that job is considered unhealthy. The
- main idea is to penalize such jobs such that they are forced to wait an
- exponentially larger amount of time with each consecutive crash. A central
- part to this algorithm is determining what forms a sequence of consecutive
- crashes. If a job starts then quickly crashes, and after its next start it
- crashes again, then that would become a sequence of 2 consecutive crashes.
- The penalty then would be calculated by the `backoff_micros/1` function where
- the consecutive crash count would end up as the exponent. However, for
- practical reasons there is also a maximum penalty, equivalent to 10
- consecutive crashes. Timewise it ends up being about 8
- hours. That means even a job which keeps crashing will still get a chance to
- retry once in 8 hours.
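-
- A hedged sketch of that backoff (the 30 second base is an assumption chosen
- only so the cap lands near 8 hours; it is not the module's actual constant):
-
- ```
- backoff_micros_sketch(ConsecutiveCrashes) ->
-     Exponent = min(ConsecutiveCrashes, 10),
-     BaseMicros = 30 * 1000 * 1000,
-     %% each crash doubles the wait; 2^10 * 30s is roughly 8.5 hours
-     BaseMicros * (1 bsl Exponent).
- ```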
-
- There is a subtlety when calculating consecutive crashes and that is deciding
- when the sequence stops. That is, figuring out when a job becomes healthy
- again. The scheduler considers a job healthy again if it started and hasn't
- crashed in a while. The "in a while" part is a configuration parameter
- `replicator.health_threshold`, defaulting to 2 minutes. This means if a job
- has been crashing, for example 5 times in a row, but then on the 6th
- attempt it started and ran for more than 2 minutes, it is considered
- healthy again. The next time it crashes its sequence of consecutive crashes
- will restart at 1.
-
- * `couch_replicator_scheduler_sup`: This module is a supervisor for running
- replication tasks. The most interesting thing about it is perhaps that it is
- not used to restart children; the scheduler itself handles restarts and
- error backoffs.
-
- * `couch_replicator_doc_processor`: The doc processor component is in charge
- of processing replication document updates, turning them into replication
- jobs and adding those jobs to the scheduler. Unfortunately the only reason
- there is even a `couch_replicator_doc_processor` gen_server, instead of
- replication documents being turned to jobs and inserted into the scheduler
- directly, is because of one corner case -- filtered replications using
- custom (JavaScript mostly) filters. More about this later. It is better to
- start with how updates flow through the doc processor:
-
- Document updates come via the `db_change/3` callback from
- `couch_multidb_changes`, then go to the `process_change/2` function.
-
- In `process_change/2` a few decisions are made regarding how to proceed. The
- first is an "ownership" check, that is, whether the replication document
- belongs on the current node. If not, then it is ignored. In a cluster, in
- general there would be N copies of a document change and we only want to run
- the replication once. Another check is to see if the update has arrived
- during a time when the cluster is considered "unstable". If so, it is
- ignored, because soon enough a rescan will be launched and all the documents
- will be reprocessed anyway. Another noteworthy thing in `process_change/2`
- is handling of upgrades from the previous version of the replicator when
- transient states were written to the documents. Two such states were
- `triggered` and `error`. Both of those states are removed from the document
- and then the update proceeds in the regular fashion. `failed` documents are
- also ignored here. `failed` is a terminal state which indicates the document
- was somehow unsuitable to become a replication job (it was malformed or a
- duplicate). Otherwise the state update proceeds to `process_updated/2`.
-
- `process_updated/2` is where replication document updates are parsed and
- translated to `#rep{}` records. The interesting part here is that the
- replication ID isn't calculated yet. Unsurprisingly the parsing function
- used is called `parse_rep_doc_without_id/1`. Also note that up until now
- everything is still running in the context of the `db_change/3` callback.
- After the replication filter type is determined, the update gets passed to the
- `couch_replicator_doc_processor` gen_server.
-
- The `couch_replicator_doc_processor` gen_server's main role is to try to
- calculate replication IDs for each `#rep{}` record passed to it, then add
- that as a scheduler job. As noted before, `#rep{}` records parsed up until
- this point lack a replication ID. The reason is replication ID calculation
- includes a hash of the filter code. And because user defined replication
- filters live in the source DB, which most likely involves a remote network
- fetch there is a possibility of blocking and a need to handle various
- network failures and retries. Because of that `replication_doc_processor`
- dispatches all of that blocking and retrying to a separate `worker` process
- (`couch_replicator_doc_processor_worker` module).
-
- `couch_replicator_doc_processor_worker` is where replication IDs are
- calculated for each individual doc update. There are two separate modules
- which contain utilities related to replication ID calculation:
- `couch_replicator_ids` and `couch_replicator_filters`. The first one
- contains ID calculation algorithms and the second one knows how to parse and
- fetch user filters from a remote source DB. One interesting thing about the
- worker is that it is time-bounded and is guaranteed to not be stuck forever.
- That's why it spawns an extra process with `spawn_monitor`, just so it can
- use an `after` clause in `receive` and bound the maximum time this worker will
- take.
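-
- The shape of that pattern, as a hedged sketch (the function name and timeout
- handling are assumptions, not the worker module's actual API):
-
- ```
- bounded_call_sketch(Fun, TimeoutMsec) ->
-     {_Pid, Ref} = spawn_monitor(fun() -> exit(Fun()) end),
-     receive
-         {'DOWN', Ref, process, _, Result} ->
-             Result
-     after TimeoutMsec ->
-         erlang:demonitor(Ref, [flush]),
-         {error, timeout}
-     end.
- ```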
-
- A doc processor worker will either succeed or fail but never block for too
- long. Success and failure are returned as exit values. Those are handled in
- the `worker_returned/3` doc processor clauses. The most common pattern is
- that a worker is spawned to add a replication job, it does so and returns an
- `{ok, ReplicationID}` value in `worker_returned`.
-
- In case of a filtered replication with custom user code there are two cases to
- consider:
-
- 1. Filter fetching code has failed. In that case the worker returns an error.
- But because the error could be a transient network error, another
- worker is started to try again. It could fail and return an error
- again, then another one is started and so on. However each consecutive
- worker will do an exponential backoff, not unlike the scheduler code.
- `error_backoff/1` is where the backoff period is calculated.
- Consecutive errors are held in the `errcnt` field in the ETS table.
-
- 2. Fetching filter code succeeds, the replication ID is calculated and the
- job is added to the scheduler. However, because this is a filtered
- replication the source database could get an updated filter, which means
- the replication ID could change again. So a worker is spawned to
- periodically check the filter and see if it changed. In other words the doc
- processor will do the work of checking for filtered replications, get
- an updated filter and will then refresh the replication job (remove the
- old one and add a new one with a different ID). The filter checking
- interval is determined by the `filter_backoff` function. An unusual
- thing about that function is that it calculates the period based on the size
- of the ETS table. The idea is that with only a few replications in a
- cluster, it's ok to check filter changes often. But when there are lots
- of replications running, having each one check its filter often is
- not a good idea.
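-
- A rough sketch of such a size-based period (the constants are illustrative
- assumptions, not the actual `filter_backoff` values):
-
- ```
- filter_backoff_sketch(EtsTable) ->
-     Size = ets:info(EtsTable, size),
-     BaseMsec = 30 * 1000,
-     %% more entries -> longer check period, clamped to a day
-     min(BaseMsec * max(1, Size), 24 * 60 * 60 * 1000).
- ```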
-
- * `couch_replicator_db_changes`: This process specializes and configures
- `couch_multidb_changes` so that it looks for `_replicator` suffixed shards
- and makes sure to restart it when node membership changes.
-
-
diff --git a/src/couch_replicator/include/couch_replicator_api_wrap.hrl b/src/couch_replicator/include/couch_replicator_api_wrap.hrl
deleted file mode 100644
index 0f8213c51..000000000
--- a/src/couch_replicator/include/couch_replicator_api_wrap.hrl
+++ /dev/null
@@ -1,31 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-
--record(httpdb, {
- url,
- auth_props = [],
- headers = [
- {"Accept", "application/json"},
- {"User-Agent", "CouchDB-Replicator/" ++ couch_server:get_version()}
- ],
- timeout, % milliseconds
- ibrowse_options = [],
- retries = 5,
- wait = 250, % milliseconds
- httpc_pool = nil,
- http_connections,
- first_error_timestamp = nil,
- proxy_url,
- auth_context = nil
-}).
diff --git a/src/couch_replicator/priv/stats_descriptions.cfg b/src/couch_replicator/priv/stats_descriptions.cfg
deleted file mode 100644
index d9efb91dc..000000000
--- a/src/couch_replicator/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,152 +0,0 @@
-{[couch_replicator, changes_read_failures], [
- {type, counter},
- {desc, <<"number of replicator changes read failures">>}
-]}.
-{[couch_replicator, changes_reader_deaths], [
- {type, counter},
- {desc, <<"number of failed replicator changes readers">>}
-]}.
-{[couch_replicator, changes_manager_deaths], [
- {type, counter},
- {desc, <<"number of failed replicator changes managers">>}
-]}.
-{[couch_replicator, changes_queue_deaths], [
- {type, counter},
- {desc, <<"number of failed replicator changes work queues">>}
-]}.
-{[couch_replicator, checkpoints, success], [
- {type, counter},
- {desc, <<"number of checkpoints successfully saved">>}
-]}.
-{[couch_replicator, checkpoints, failure], [
- {type, counter},
- {desc, <<"number of failed checkpoint saves">>}
-]}.
-{[couch_replicator, failed_starts], [
- {type, counter},
- {desc, <<"number of replications that have failed to start">>}
-]}.
-{[couch_replicator, requests], [
- {type, counter},
- {desc, <<"number of HTTP requests made by the replicator">>}
-]}.
-{[couch_replicator, responses, failure], [
- {type, counter},
- {desc, <<"number of failed HTTP responses received by the replicator">>}
-]}.
-{[couch_replicator, responses, success], [
- {type, counter},
- {desc, <<"number of successful HTTP responses received by the replicator">>}
-]}.
-{[couch_replicator, stream_responses, failure], [
- {type, counter},
- {desc, <<"number of failed streaming HTTP responses received by the replicator">>}
-]}.
-{[couch_replicator, stream_responses, success], [
- {type, counter},
- {desc, <<"number of successful streaming HTTP responses received by the replicator">>}
-]}.
-{[couch_replicator, worker_deaths], [
- {type, counter},
- {desc, <<"number of failed replicator workers">>}
-]}.
-{[couch_replicator, workers_started], [
- {type, counter},
- {desc, <<"number of replicator workers started">>}
-]}.
-{[couch_replicator, cluster_is_stable], [
- {type, gauge},
- {desc, <<"1 if cluster is stable, 0 if unstable">>}
-]}.
-{[couch_replicator, db_scans], [
- {type, counter},
- {desc, <<"number of times replicator db scans have been started">>}
-]}.
-{[couch_replicator, docs, dbs_created], [
- {type, counter},
- {desc, <<"number of db shard creations seen by replicator doc processor">>}
-]}.
-{[couch_replicator, docs, dbs_deleted], [
- {type, counter},
- {desc, <<"number of db shard deletions seen by replicator doc processor">>}
-]}.
-{[couch_replicator, docs, dbs_found], [
- {type, counter},
- {desc, <<"number of db shards found by replicator doc processor">>}
-]}.
-{[couch_replicator, docs, db_changes], [
- {type, counter},
- {desc, <<"number of db changes processed by replicator doc processor">>}
-]}.
-{[couch_replicator, docs, failed_state_updates], [
- {type, counter},
- {desc, <<"number of 'failed' state document updates">>}
-]}.
-{[couch_replicator, docs, completed_state_updates], [
- {type, counter},
- {desc, <<"number of 'completed' state document updates">>}
-]}.
-{[couch_replicator, jobs, adds], [
- {type, counter},
- {desc, <<"number of jobs added to replicator scheduler">>}
-]}.
-{[couch_replicator, jobs, duplicate_adds], [
- {type, counter},
- {desc, <<"number of duplicate jobs added to replicator scheduler">>}
-]}.
-{[couch_replicator, jobs, removes], [
- {type, counter},
- {desc, <<"number of jobs removed from replicator scheduler">>}
-]}.
-{[couch_replicator, jobs, starts], [
- {type, counter},
- {desc, <<"number of jobs started by replicator scheduler">>}
-]}.
-{[couch_replicator, jobs, stops], [
- {type, counter},
- {desc, <<"number of jobs stopped by replicator scheduler">>}
-]}.
-{[couch_replicator, jobs, crashes], [
- {type, counter},
- {desc, <<"number of job crashes noticed by replicator scheduler">>}
-]}.
-{[couch_replicator, jobs, running], [
- {type, gauge},
- {desc, <<"replicator scheduler running jobs">>}
-]}.
-{[couch_replicator, jobs, pending], [
- {type, gauge},
- {desc, <<"replicator scheduler pending jobs">>}
-]}.
-{[couch_replicator, jobs, crashed], [
- {type, gauge},
- {desc, <<"replicator scheduler crashed jobs">>}
-]}.
-{[couch_replicator, jobs, total], [
- {type, gauge},
- {desc, <<"total number of replicator scheduler jobs">>}
-]}.
-{[couch_replicator, connection, acquires], [
- {type, counter},
- {desc, <<"number of times connections are shared">>}
-]}.
-{[couch_replicator, connection, creates], [
- {type, counter},
- {desc, <<"number of connections created">>}
-]}.
-{[couch_replicator, connection, releases], [
- {type, counter},
- {desc, <<"number of times ownership of a connection is released">>}
-]}.
-{[couch_replicator, connection, owner_crashes], [
- {type, counter},
- {desc, <<"number of times a connection owner crashes while owning at least one connection">>}
-]}.
-{[couch_replicator, connection, worker_crashes], [
- {type, counter},
- {desc, <<"number of times a worker unexpectedly terminates">>}
-]}.
-{[couch_replicator, connection, closes], [
- {type, counter},
- {desc, <<"number of times a worker is gracefully shut down">>}
-]}.
diff --git a/src/couch_replicator/src/couch_replicator.app.src b/src/couch_replicator/src/couch_replicator.app.src
deleted file mode 100644
index 2e0e191d3..000000000
--- a/src/couch_replicator/src/couch_replicator.app.src
+++ /dev/null
@@ -1,37 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_replicator, [
- {description, "CouchDB replicator"},
- {vsn, git},
- {mod, {couch_replicator_app, []}},
- {registered, [
- couch_replicator_sup,
- couch_replicator_rate_limiter,
- couch_replicator_connection,
- couch_replication, % couch_replication_event gen_event
- couch_replicator_clustering,
- couch_replicator_scheduler,
- couch_replicator_scheduler_sup,
- couch_replicator_doc_processor
- ]},
- {applications, [
- kernel,
- stdlib,
- couch_log,
- mem3,
- config,
- couch,
- couch_event,
- couch_stats
- ]}
-]}.
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
deleted file mode 100644
index 39b3903ea..000000000
--- a/src/couch_replicator/src/couch_replicator.erl
+++ /dev/null
@@ -1,419 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator).
-
--export([
- replicate/2,
- replication_states/0,
- job/1,
- doc/3,
- active_doc/2,
- info_from_doc/2,
- restart_job/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_replicator.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(DESIGN_DOC_CREATION_DELAY_MSEC, 1000).
--define(REPLICATION_STATES, [
- % Just added to scheduler
- initializing,
- % Could not be turned into a replication job
- error,
- % Scheduled and running
- running,
- % Scheduled and waiting to run
- pending,
- % Scheduled but crashing, backed off by the scheduler
- crashing,
- % Non-continuous (normal) completed replication
- completed,
- % Terminal failure, will not be retried anymore
- failed
-]).
-
--import(couch_util, [
- get_value/2,
- get_value/3
-]).
-
--spec replicate({[_]}, any()) ->
- {ok, {continuous, binary()}}
- | {ok, {[_]}}
- | {ok, {cancelled, binary()}}
- | {error, any()}
- | no_return().
-replicate(PostBody, Ctx) ->
- {ok, Rep0} = couch_replicator_utils:parse_rep_doc(PostBody, Ctx),
- Rep = Rep0#rep{start_time = os:timestamp()},
- #rep{id = RepId, options = Options, user_ctx = UserCtx} = Rep,
- case get_value(cancel, Options, false) of
- true ->
- CancelRepId =
- case get_value(id, Options, nil) of
- nil ->
- RepId;
- RepId2 ->
- RepId2
- end,
- case check_authorization(CancelRepId, UserCtx) of
- ok ->
- cancel_replication(CancelRepId);
- not_found ->
- {error, not_found}
- end;
- false ->
- check_authorization(RepId, UserCtx),
- {ok, Listener} = rep_result_listener(RepId),
- Result = do_replication_loop(Rep),
- couch_replicator_notifier:stop(Listener),
- Result
- end.
-
--spec do_replication_loop(#rep{}) ->
- {ok, {continuous, binary()}} | {ok, tuple()} | {error, any()}.
-do_replication_loop(#rep{id = {BaseId, Ext} = Id, options = Options} = Rep) ->
- ok = couch_replicator_scheduler:add_job(Rep),
- case get_value(continuous, Options, false) of
- true ->
- {ok, {continuous, ?l2b(BaseId ++ Ext)}};
- false ->
- wait_for_result(Id)
- end.
-
--spec rep_result_listener(rep_id()) -> {ok, pid()}.
-rep_result_listener(RepId) ->
- ReplyTo = self(),
- {ok, _Listener} = couch_replicator_notifier:start_link(
- fun
- ({_, RepId2, _} = Ev) when RepId2 =:= RepId ->
- ReplyTo ! Ev;
- (_) ->
- ok
- end
- ).
-
--spec wait_for_result(rep_id()) ->
- {ok, {[_]}} | {error, any()}.
-wait_for_result(RepId) ->
- receive
- {finished, RepId, RepResult} ->
- {ok, RepResult};
- {error, RepId, Reason} ->
- {error, Reason}
- end.
-
--spec cancel_replication(rep_id()) ->
- {ok, {cancelled, binary()}} | {error, not_found}.
-cancel_replication({BasedId, Extension} = RepId) ->
- FullRepId = BasedId ++ Extension,
- couch_log:notice("Canceling replication '~s' ...", [FullRepId]),
- case couch_replicator_scheduler:rep_state(RepId) of
- #rep{} ->
- ok = couch_replicator_scheduler:remove_job(RepId),
- couch_log:notice("Replication '~s' cancelled", [FullRepId]),
- {ok, {cancelled, ?l2b(FullRepId)}};
- nil ->
- couch_log:notice("Replication '~s' not found", [FullRepId]),
- {error, not_found}
- end.
-
--spec replication_states() -> [atom()].
-replication_states() ->
- ?REPLICATION_STATES.
-
--spec strip_url_creds(binary() | {[_]}) -> binary().
-strip_url_creds(Endpoint) ->
- try couch_replicator_docs:parse_rep_db(Endpoint, [], []) of
- #httpdb{url = Url} ->
- iolist_to_binary(couch_util:url_strip_password(Url))
- catch
- throw:{error, local_endpoints_not_supported} ->
- Endpoint;
- error:_ ->
- % Avoid exposing any part of the URL in case there is a password in
- % the malformed endpoint URL
- null
- end.
-
--spec job(binary()) -> {ok, {[_]}} | {error, not_found}.
-job(JobId0) when is_binary(JobId0) ->
- JobId = couch_replicator_ids:convert(JobId0),
- {Res, _Bad} = rpc:multicall(couch_replicator_scheduler, job, [JobId]),
- case [JobInfo || {ok, JobInfo} <- Res] of
- [JobInfo | _] ->
- {ok, JobInfo};
- [] ->
- {error, not_found}
- end.
-
--spec restart_job(binary() | list() | rep_id()) ->
- {ok, {[_]}} | {error, not_found}.
-restart_job(JobId0) ->
- JobId = couch_replicator_ids:convert(JobId0),
- {Res, _} = rpc:multicall(couch_replicator_scheduler, restart_job, [JobId]),
- case [JobInfo || {ok, JobInfo} <- Res] of
- [JobInfo | _] ->
- {ok, JobInfo};
- [] ->
- {error, not_found}
- end.
-
--spec active_doc(binary(), binary()) -> {ok, {[_]}} | {error, not_found}.
-active_doc(DbName, DocId) ->
- try
- Shards = mem3:shards(DbName),
- Live = [node() | nodes()],
- Nodes = lists:usort([
- N
- || #shard{node = N} <- Shards,
- lists:member(N, Live)
- ]),
- Owner = mem3:owner(DbName, DocId, Nodes),
- case active_doc_rpc(DbName, DocId, [Owner]) of
- {ok, DocInfo} ->
- {ok, DocInfo};
- {error, not_found} ->
- active_doc_rpc(DbName, DocId, Nodes -- [Owner])
- end
- catch
- % Might be a local database
- error:database_does_not_exist ->
- active_doc_rpc(DbName, DocId, [node()])
- end.
-
--spec active_doc_rpc(binary(), binary(), [node()]) ->
- {ok, {[_]}} | {error, not_found}.
-active_doc_rpc(_DbName, _DocId, []) ->
- {error, not_found};
-active_doc_rpc(DbName, DocId, [Node]) when Node =:= node() ->
- couch_replicator_doc_processor:doc(DbName, DocId);
-active_doc_rpc(DbName, DocId, Nodes) ->
- {Res, _Bad} = rpc:multicall(
- Nodes,
- couch_replicator_doc_processor,
- doc,
- [DbName, DocId]
- ),
- case [DocInfo || {ok, DocInfo} <- Res] of
- [DocInfo | _] ->
- {ok, DocInfo};
- [] ->
- {error, not_found}
- end.
-
--spec doc(binary(), binary(), any()) -> {ok, {[_]}} | {error, not_found}.
-doc(RepDb, DocId, UserCtx) ->
- case active_doc(RepDb, DocId) of
- {ok, DocInfo} ->
- {ok, DocInfo};
- {error, not_found} ->
- doc_from_db(RepDb, DocId, UserCtx)
- end.
-
--spec doc_from_db(binary(), binary(), any()) -> {ok, {[_]}} | {error, not_found}.
-doc_from_db(RepDb, DocId, UserCtx) ->
- case fabric:open_doc(RepDb, DocId, [UserCtx, ejson_body]) of
- {ok, Doc} ->
- {ok, info_from_doc(RepDb, couch_doc:to_json_obj(Doc, []))};
- {not_found, _Reason} ->
- {error, not_found}
- end.
-
--spec info_from_doc(binary(), {[_]}) -> {[_]}.
-info_from_doc(RepDb, {Props}) ->
- DocId = get_value(<<"_id">>, Props),
- Source = get_value(<<"source">>, Props),
- Target = get_value(<<"target">>, Props),
- State0 = state_atom(get_value(<<"_replication_state">>, Props, null)),
- StateTime = get_value(<<"_replication_state_time">>, Props, null),
- {State1, StateInfo, ErrorCount, StartTime} =
- case State0 of
- completed ->
- {InfoP} = get_value(<<"_replication_stats">>, Props, {[]}),
- case lists:keytake(<<"start_time">>, 1, InfoP) of
- {value, {_, Time}, InfoP1} ->
- {State0, {InfoP1}, 0, Time};
- false ->
- case lists:keytake(start_time, 1, InfoP) of
- {value, {_, Time}, InfoP1} ->
- {State0, {InfoP1}, 0, Time};
- false ->
- {State0, {InfoP}, 0, null}
- end
- end;
- failed ->
- Info = get_value(<<"_replication_state_reason">>, Props, nil),
- EJsonInfo = couch_replicator_utils:ejson_state_info(Info),
- {State0, EJsonInfo, 1, StateTime};
- _OtherState ->
- {null, null, 0, null}
- end,
- {[
- {doc_id, DocId},
- {database, RepDb},
- {id, null},
- {source, strip_url_creds(Source)},
- {target, strip_url_creds(Target)},
- {state, State1},
- {error_count, ErrorCount},
- {info, StateInfo},
- {start_time, StartTime},
- {last_updated, StateTime}
- ]}.
-
-state_atom(<<"triggered">>) ->
- % This handles a legacy case where the document wasn't converted yet
- triggered;
-state_atom(State) when is_binary(State) ->
- erlang:binary_to_existing_atom(State, utf8);
-state_atom(State) when is_atom(State) ->
- State.
-
--spec check_authorization(rep_id(), #user_ctx{}) -> ok | not_found.
-check_authorization(RepId, #user_ctx{name = Name} = Ctx) ->
- case couch_replicator_scheduler:rep_state(RepId) of
- #rep{user_ctx = #user_ctx{name = Name}} ->
- ok;
- #rep{} ->
- couch_httpd:verify_is_server_admin(Ctx);
- nil ->
- not_found
- end.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-authorization_test_() ->
- {
- foreach,
- fun() -> ok end,
- fun(_) -> meck:unload() end,
- [
- t_admin_is_always_authorized(),
- t_username_must_match(),
- t_replication_not_found()
- ]
- }.
-
-t_admin_is_always_authorized() ->
- ?_test(begin
- expect_rep_user_ctx(<<"someuser">>, <<"_admin">>),
- UserCtx = #user_ctx{name = <<"adm">>, roles = [<<"_admin">>]},
- ?assertEqual(ok, check_authorization(<<"RepId">>, UserCtx))
- end).
-
-t_username_must_match() ->
- ?_test(begin
- expect_rep_user_ctx(<<"user">>, <<"somerole">>),
- UserCtx1 = #user_ctx{name = <<"user">>, roles = [<<"somerole">>]},
- ?assertEqual(ok, check_authorization(<<"RepId">>, UserCtx1)),
- UserCtx2 = #user_ctx{name = <<"other">>, roles = [<<"somerole">>]},
- ?assertThrow(
- {unauthorized, _},
- check_authorization(
- <<"RepId">>,
- UserCtx2
- )
- )
- end).
-
-t_replication_not_found() ->
- ?_test(begin
- meck:expect(couch_replicator_scheduler, rep_state, 1, nil),
- UserCtx1 = #user_ctx{name = <<"user">>, roles = [<<"somerole">>]},
- ?assertEqual(not_found, check_authorization(<<"RepId">>, UserCtx1)),
- UserCtx2 = #user_ctx{name = <<"adm">>, roles = [<<"_admin">>]},
- ?assertEqual(not_found, check_authorization(<<"RepId">>, UserCtx2))
- end).
-
-expect_rep_user_ctx(Name, Role) ->
- meck:expect(
- couch_replicator_scheduler,
- rep_state,
- fun(_Id) ->
- UserCtx = #user_ctx{name = Name, roles = [Role]},
- #rep{user_ctx = UserCtx}
- end
- ).
-
-strip_url_creds_test_() ->
- {
- setup,
- fun() ->
- meck:expect(config, get, fun(_, _, Default) -> Default end)
- end,
- fun(_) ->
- meck:unload()
- end,
- [
- t_strip_http_basic_creds(),
- t_strip_http_props_creds(),
- t_strip_local_db_creds(),
- t_strip_url_creds_errors()
- ]
- }.
-
-t_strip_local_db_creds() ->
- ?_test(?assertEqual(<<"localdb">>, strip_url_creds(<<"localdb">>))).
-
-t_strip_http_basic_creds() ->
- ?_test(begin
- Url1 = <<"http://adm:pass@host/db">>,
- ?assertEqual(<<"http://host/db/">>, strip_url_creds(Url1)),
- Url2 = <<"https://adm:pass@host/db">>,
- ?assertEqual(<<"https://host/db/">>, strip_url_creds(Url2)),
- Url3 = <<"http://adm:pass@host:80/db">>,
- ?assertEqual(<<"http://host:80/db/">>, strip_url_creds(Url3)),
- Url4 = <<"http://adm:pass@host/db?a=b&c=d">>,
- ?assertEqual(
- <<"http://host/db?a=b&c=d">>,
- strip_url_creds(Url4)
- )
- end).
-
-t_strip_http_props_creds() ->
- ?_test(begin
- Props1 = {[{<<"url">>, <<"http://adm:pass@host/db">>}]},
- ?assertEqual(<<"http://host/db/">>, strip_url_creds(Props1)),
- Props2 =
- {[
- {<<"url">>, <<"http://host/db">>},
- {<<"headers">>, {[{<<"Authorization">>, <<"Basic pa55">>}]}}
- ]},
- ?assertEqual(<<"http://host/db/">>, strip_url_creds(Props2))
- end).
-
-t_strip_url_creds_errors() ->
- ?_test(begin
- Bad1 = {[{<<"url">>, <<"http://adm:pass/bad">>}]},
- ?assertEqual(null, strip_url_creds(Bad1)),
- Bad2 = {[{<<"garbage">>, <<"more garbage">>}]},
- ?assertEqual(null, strip_url_creds(Bad2)),
- Bad3 = <<"http://a:b:c">>,
- ?assertEqual(null, strip_url_creds(Bad3)),
- Bad4 = <<"http://adm:pass:pass/bad">>,
- ?assertEqual(null, strip_url_creds(Bad4)),
- ?assertEqual(null, strip_url_creds(null)),
- ?assertEqual(null, strip_url_creds(42)),
- ?assertEqual(null, strip_url_creds([<<"a">>, <<"b">>])),
- Bad5 = {[{<<"source_proxy">>, <<"http://adm:pass/bad">>}]},
- ?assertEqual(null, strip_url_creds(Bad5))
- end).
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator.hrl b/src/couch_replicator/src/couch_replicator.hrl
deleted file mode 100644
index 7c39c7c95..000000000
--- a/src/couch_replicator/src/couch_replicator.hrl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(REP_ID_VERSION, 4).
-
--record(rep, {
- id :: rep_id() | '_' | 'undefined',
- source :: any() | '_',
- target :: any() | '_',
- options :: [_] | '_',
- user_ctx :: any() | '_',
- type = db :: atom() | '_',
- view = nil :: any() | '_',
- doc_id :: any() | '_',
- db_name = null :: null | binary() | '_',
- start_time = {0, 0, 0} :: erlang:timestamp() | '_',
- stats = couch_replicator_stats:new() :: orddict:orddict() | '_'
-}).
-
--type rep_id() :: {string(), string()}.
--type db_doc_id() :: {binary(), binary() | '_'}.
--type seconds() :: non_neg_integer().
--type rep_start_result() ::
- {ok, rep_id()} |
- ignore |
- {temporary_error, binary()} |
- {permanent_failure, binary()}.
-
-
--record(doc_worker_result, {
- id :: db_doc_id(),
- wref :: reference(),
- result :: rep_start_result()
-}).
-
-
--type job_id() :: term().
--type job_args() :: term().
--type event_type() :: added | started | stopped | {crashed, any()}.
- -type event() :: {Type :: event_type(), When :: erlang:timestamp()}.
--type history() :: nonempty_list(event()).
-
-
--record(job, {
- id :: job_id() | '$1' | '_',
- rep :: #rep{} | '_',
- pid :: undefined | pid() | '$1' | '_',
- monitor :: undefined | reference() | '_',
- history :: history() | '_'
-}).
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
deleted file mode 100644
index 193f8dad4..000000000
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ /dev/null
@@ -1,1057 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_api_wrap).
-
-% This module wraps the native Erlang API, and allows for performing
-% operations on remote vs. local databases via the same API.
-%
-% Notes:
-% Many options and APIs aren't supported here yet; they are added as needed.
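A rough usage sketch of this wrapper (editorial, not part of the original file), assuming a running replicator node where ibrowse and the HTTP connection pool machinery are available; the URL is illustrative and the endpoint record is otherwise left at its defaults:

%% Sketch only: open a remote endpoint, read its info, and close it again.
example_db_info() ->
    {ok, Db} = db_open(#httpdb{url = "http://127.0.0.1:5984/db/"}),
    try
        {ok, Info} = get_db_info(Db),
        couch_util:get_value(<<"doc_count">>, Info)
    after
        db_close(Db)
    end.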
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include("couch_replicator_api_wrap.hrl").
-
--export([
- db_open/1,
- db_open/3,
- db_close/1,
- get_db_info/1,
- get_pending_count/2,
- get_view_info/3,
- update_doc/3,
- update_doc/4,
- update_docs/3,
- update_docs/4,
- ensure_full_commit/1,
- get_missing_revs/2,
- open_doc/3,
- open_doc_revs/6,
- changes_since/5,
- db_uri/1,
- normalize_db/1
-]).
-
--import(couch_replicator_httpc, [
- send_req/3
-]).
-
--import(couch_util, [
- encode_doc_id/1,
- get_value/2,
- get_value/3
-]).
-
--define(MAX_WAIT, 5 * 60 * 1000).
-
--define(MAX_URL_LEN, 7000).
--define(MIN_URL_LEN, 200).
-
-db_uri(#httpdb{url = Url}) ->
- couch_util:url_strip_password(Url);
-db_uri(DbName) when is_binary(DbName) ->
- ?b2l(DbName);
-db_uri(Db) ->
- db_uri(couch_db:name(Db)).
-
-db_open(Db) ->
- db_open(Db, false, []).
-
-db_open(#httpdb{} = Db1, Create, CreateParams) ->
- {ok, Db} = couch_replicator_httpc:setup(Db1),
- try
- case Create of
- false ->
- ok;
- true ->
- Db2 = maybe_append_create_query_params(Db, CreateParams),
- send_req(
- Db2,
- [{method, put}],
- fun
- (401, _, _) ->
- throw({unauthorized, ?l2b(db_uri(Db2))});
- (403, _, _) ->
- throw({forbidden, ?l2b(db_uri(Db2))});
- (_, _, _) ->
- ok
- end
- )
- end,
- send_req(
- Db,
- [{method, get}],
- fun
- (200, _, {Props}) ->
- UpdateSeq = get_value(<<"update_seq">>, Props),
- InstanceStart = get_value(<<"instance_start_time">>, Props),
- case {UpdateSeq, InstanceStart} of
- {undefined, _} ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- {_, undefined} ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- _ ->
- {ok, Db}
- end;
- (200, _, _Body) ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- (401, _, _) ->
- throw({unauthorized, ?l2b(db_uri(Db))});
- (403, _, _) ->
- throw({forbidden, ?l2b(db_uri(Db))});
- (_, _, _) ->
- throw({db_not_found, ?l2b(db_uri(Db))})
- end
- )
- catch
- throw:Error ->
- db_close(Db),
- throw(Error);
- error:Error ->
- db_close(Db),
- erlang:error(Error);
- exit:Error ->
- db_close(Db),
- erlang:exit(Error)
- end.
-
-db_close(#httpdb{httpc_pool = Pool} = HttpDb) ->
- couch_replicator_auth:cleanup(HttpDb),
- unlink(Pool),
- ok = couch_replicator_httpc_pool:stop(Pool).
-
-get_db_info(#httpdb{} = Db) ->
- send_req(
- Db,
- [],
- fun(200, _, {Props}) ->
- {ok, Props}
- end
- ).
-
-get_pending_count(#httpdb{} = Db, Seq) when is_number(Seq) ->
-    % Source looks like Apache CouchDB and not Cloudant, so we fall back to
-    % using update sequence differences (see the sketch after this function).
- send_req(Db, [], fun(200, _, {Props}) ->
- case get_value(<<"update_seq">>, Props) of
- UpdateSeq when is_number(UpdateSeq) ->
- {ok, UpdateSeq - Seq};
- _ ->
- {ok, null}
- end
- end);
-get_pending_count(#httpdb{} = Db, Seq) ->
- Options = [{path, "_changes"}, {qs, [{"since", ?JSON_ENCODE(Seq)}, {"limit", "0"}]}],
- send_req(Db, Options, fun(200, _, {Props}) ->
- {ok, couch_util:get_value(<<"pending">>, Props, null)}
- end).
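A minimal sketch (editorial, not from the original module) of the arithmetic the two clauses above reduce to once the HTTP responses are in hand:

%% When the source reports a numeric update_seq, pending work is approximated
%% as the distance from our checkpoint; otherwise the "pending" field from a
%% limit=0 _changes response is used as-is (or null when unavailable).
pending_from_update_seq(UpdateSeq, Since) when is_number(UpdateSeq), is_number(Since) ->
    {ok, UpdateSeq - Since};
pending_from_update_seq(_, _) ->
    {ok, null}.

%% Example: a source at update_seq 120 with a checkpoint at 100 reports ~20 pending changes.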
-
-get_view_info(#httpdb{} = Db, DDocId, ViewName) ->
- Path = io_lib:format("~s/_view/~s/_info", [DDocId, ViewName]),
- send_req(
- Db,
- [{path, Path}],
- fun(200, _, {Props}) ->
- {VInfo} = couch_util:get_value(<<"view_index">>, Props, {[]}),
- {ok, VInfo}
- end
- ).
-
-ensure_full_commit(#httpdb{} = Db) ->
- send_req(
- Db,
- [
- {method, post},
- {path, "_ensure_full_commit"},
- {headers, [{"Content-Type", "application/json"}]}
- ],
- fun
- (201, _, {Props}) ->
- {ok, get_value(<<"instance_start_time">>, Props)};
- (_, _, {Props}) ->
- {error, get_value(<<"error">>, Props)}
- end
- ).
-
-get_missing_revs(#httpdb{} = Db, IdRevs) ->
- JsonBody = {[{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- IdRevs]},
- send_req(
- Db,
- [
- {method, post},
- {path, "_revs_diff"},
- {body, ?JSON_ENCODE(JsonBody)},
- {headers, [{"Content-Type", "application/json"}]}
- ],
- fun
- (200, _, {Props}) ->
- ConvertToNativeFun = fun({Id, {Result}}) ->
- MissingRevs = couch_doc:parse_revs(
- get_value(<<"missing">>, Result)
- ),
- PossibleAncestors = couch_doc:parse_revs(
- get_value(<<"possible_ancestors">>, Result, [])
- ),
- {Id, MissingRevs, PossibleAncestors}
- end,
- {ok, lists:map(ConvertToNativeFun, Props)};
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
- {error, {revs_diff_failed, ErrCode, ErrMsg}}
- end
- ).
-
-open_doc_revs(#httpdb{retries = 0} = HttpDb, Id, Revs, Options, _Fun, _Acc) ->
- Path = encode_doc_id(Id),
- QS = options_to_query_args(HttpDb, Path, [revs, {open_revs, Revs} | Options]),
- Url = couch_util:url_strip_password(
- couch_replicator_httpc:full_url(HttpDb, [{path, Path}, {qs, QS}])
- ),
- couch_log:error("Replication crashing because GET ~s failed", [Url]),
- exit(kaboom);
-open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
- Path = encode_doc_id(Id),
- QS = options_to_query_args(HttpDb, Path, [revs, {open_revs, Revs} | Options]),
- {Pid, Ref} = spawn_monitor(fun() ->
- Self = self(),
- Callback = fun
- (200, Headers, StreamDataFun) ->
- remote_open_doc_revs_streamer_start(Self),
- {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
- header_value("Content-Type", Headers),
- StreamDataFun,
- fun mp_parse_mixed/1
- );
- (414, _, _) ->
- exit(request_uri_too_long)
- end,
- Streamer = spawn_link(fun() ->
- Params = [
- {path, Path},
- {qs, QS},
- {ibrowse_options, [{stream_to, {self(), once}}]},
- {headers, [{"Accept", "multipart/mixed"}]}
- ],
- % We're setting retries to 0 here to avoid the case where the
- % Streamer retries the request and ends up jumbling together two
- % different response bodies. Retries are handled explicitly by
- % open_doc_revs itself.
- send_req(HttpDb#httpdb{retries = 0}, Params, Callback)
- end),
-        % If this process dies normally, it could leave
-        % the Streamer process hanging around and keeping an
-        % HTTP connection open. This is a bit of a
-        % hammer approach to making sure it releases
-        % that connection back to the pool.
- spawn(fun() ->
- Ref = erlang:monitor(process, Self),
- receive
- {'DOWN', Ref, process, Self, normal} ->
- exit(Streamer, {streamer_parent_died, Self});
- {'DOWN', Ref, process, Self, _} ->
- ok
- end
- end),
- receive
- {started_open_doc_revs, Ref} ->
- Ret = receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc),
- exit({exit_ok, Ret})
- end
- end),
- receive
- {'DOWN', Ref, process, Pid, {exit_ok, Ret}} ->
- Ret;
- {'DOWN', Ref, process, Pid, {{nocatch, missing_doc}, _}} ->
- throw(missing_doc);
- {'DOWN', Ref, process, Pid, {{nocatch, {missing_stub, _} = Stub}, _}} ->
- throw(Stub);
- {'DOWN', Ref, process, Pid, {http_request_failed, _, _, max_backoff}} ->
- exit(max_backoff);
- {'DOWN', Ref, process, Pid, request_uri_too_long} ->
- NewMaxLen = get_value(max_url_len, Options, ?MAX_URL_LEN) div 2,
- case NewMaxLen < ?MIN_URL_LEN of
- true ->
- throw(request_uri_too_long);
- false ->
- couch_log:info(
- "Reducing url length to ~B because of"
- " 414 response",
- [NewMaxLen]
- ),
- Options1 = lists:keystore(
- max_url_len,
- 1,
- Options,
- {max_url_len, NewMaxLen}
- ),
- open_doc_revs(HttpDb, Id, Revs, Options1, Fun, Acc)
- end;
- {'DOWN', Ref, process, Pid, Else} ->
- Url = couch_util:url_strip_password(
- couch_replicator_httpc:full_url(HttpDb, [{path, Path}, {qs, QS}])
- ),
- #httpdb{retries = Retries, wait = Wait0} = HttpDb,
- Wait = 2 * erlang:min(Wait0 * 2, ?MAX_WAIT),
- couch_log:notice(
- "Retrying GET to ~s in ~p seconds due to error ~w",
- [Url, Wait / 1000, error_reason(Else)]
- ),
- ok = timer:sleep(Wait),
- RetryDb = HttpDb#httpdb{
- retries = Retries - 1,
- wait = Wait
- },
- open_doc_revs(RetryDb, Id, Revs, Options, Fun, Acc)
- end.
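A small sketch (editorial, not from the original module) of the retry delay progression used by the retry clause above; the 250 ms starting value is assumed from the #httpdb{} record default, and ?MAX_WAIT is five minutes, so delays are effectively capped at ten minutes:

%% next_wait(250) -> 1000 (1s), then 4s, 16s, 64s, ~4.3m, and finally 10m for
%% every retry after that.
next_wait(Wait0) ->
    2 * erlang:min(Wait0 * 2, ?MAX_WAIT).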
-
-error_reason({http_request_failed, "GET", _Url, {error, timeout}}) ->
- timeout;
-error_reason({http_request_failed, "GET", _Url, {error, {_, req_timedout}}}) ->
- req_timedout;
-error_reason({http_request_failed, "GET", _Url, Error}) ->
- Error;
-error_reason(Else) ->
- Else.
-
-open_doc(#httpdb{} = Db, Id, Options) ->
- send_req(
- Db,
- [{path, encode_doc_id(Id)}, {qs, options_to_query_args(Options, [])}],
- fun
- (200, _, Body) ->
- {ok, couch_doc:from_json_obj(Body)};
- (_, _, {Props}) ->
- {error, get_value(<<"error">>, Props)}
- end
- ).
-
-update_doc(Db, Doc, Options) ->
- update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(#httpdb{} = HttpDb, #doc{id = DocId} = Doc, Options, Type) ->
- QArgs =
- case Type of
- replicated_changes ->
- [{"new_edits", "false"}];
- _ ->
- []
- end ++ options_to_query_args(Options, []),
- Boundary = couch_uuids:random(),
- JsonBytes = ?JSON_ENCODE(
- couch_doc:to_json_obj(
- Doc, [revs, attachments, follows, att_encoding_info | Options]
- )
- ),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
- Boundary,
- JsonBytes,
- Doc#doc.atts,
- true
- ),
- Headers =
- case lists:member(delay_commit, Options) of
- true ->
- [{"X-Couch-Full-Commit", "false"}];
- false ->
- []
- end ++ [{"Content-Type", ?b2l(ContentType)}, {"Content-Length", Len}],
- Body = {fun stream_doc/1, {JsonBytes, Doc#doc.atts, Boundary, Len}},
- send_req(
- % A crash here bubbles all the way back up to run_user_fun inside
- % open_doc_revs, which will retry the whole thing. That's the
- % appropriate course of action, since we've already started streaming
- % the response body from the GET request.
- HttpDb#httpdb{retries = 0},
- [
- {method, put},
- {path, encode_doc_id(DocId)},
- {qs, QArgs},
- {headers, Headers},
- {body, Body}
- ],
- fun
- (Code, _, {Props}) when Code =:= 200 orelse Code =:= 201 orelse Code =:= 202 ->
- {ok, couch_doc:parse_rev(get_value(<<"rev">>, Props))};
- (409, _, _) ->
- throw(conflict);
- (Code, _, {Props}) ->
- case {Code, get_value(<<"error">>, Props)} of
- {401, <<"unauthorized">>} ->
- throw({unauthorized, get_value(<<"reason">>, Props)});
- {403, <<"forbidden">>} ->
- throw({forbidden, get_value(<<"reason">>, Props)});
- {412, <<"missing_stub">>} ->
- throw({missing_stub, get_value(<<"reason">>, Props)});
- {413, _} ->
- {error, request_body_too_large};
- {_, Error} ->
- {error, Error}
- end
- end
- ).
-
-update_docs(Db, DocList, Options) ->
- update_docs(Db, DocList, Options, interactive_edit).
-
-update_docs(_Db, [], _Options, _UpdateType) ->
- {ok, []};
-update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) ->
- FullCommit = atom_to_list(not lists:member(delay_commit, Options)),
- Prefix =
- case UpdateType of
- replicated_changes ->
- <<"{\"new_edits\":false,\"docs\":[">>;
- interactive_edit ->
- <<"{\"docs\":[">>
- end,
- Suffix = <<"]}">>,
-    % Note: nginx and other servers don't like PUT/POST requests without a
-    % Content-Length header, so we can't use chunked transfer encoding and
-    % JSON-encode each doc only right before sending it through the socket
-    % (see the length accounting sketch after this function).
- {Docs, Len} = lists:mapfoldl(
- fun
- (#doc{} = Doc, Acc) ->
- Json = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
- {Json, Acc + iolist_size(Json)};
- (Doc, Acc) ->
- {Doc, Acc + iolist_size(Doc)}
- end,
- byte_size(Prefix) + byte_size(Suffix) + length(DocList) - 1,
- DocList
- ),
- BodyFun = fun
- (eof) ->
- eof;
- ([]) ->
- {ok, Suffix, eof};
- ([prefix | Rest]) ->
- {ok, Prefix, Rest};
- ([Doc]) ->
- {ok, Doc, []};
- ([Doc | RestDocs]) ->
- {ok, [Doc, ","], RestDocs}
- end,
- Headers = [
- {"Content-Length", Len},
- {"Content-Type", "application/json"},
- {"X-Couch-Full-Commit", FullCommit}
- ],
- send_req(
- HttpDb,
- [
- {method, post},
- {path, "_bulk_docs"},
- {body, {BodyFun, [prefix | Docs]}},
- {headers, Headers}
- ],
- fun
- (201, _, Results) when is_list(Results) ->
- {ok, bulk_results_to_errors(DocList, Results, remote)};
- (413, _, _) ->
- {error, request_body_too_large};
- (417, _, Results) when is_list(Results) ->
- {ok, bulk_results_to_errors(DocList, Results, remote)};
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
- {error, {bulk_docs_failed, ErrCode, ErrMsg}}
- end
- ).
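A minimal sketch (editorial) of the Content-Length accounting done above: the static prefix and suffix, one comma per separator, and the size of each already-encoded doc. The empty-list case is handled by a separate clause above, so the sketch just clamps the comma count at zero; the example values are illustrative:

bulk_body_length(Prefix, Suffix, EncodedDocs) ->
    Commas = max(length(EncodedDocs) - 1, 0),
    lists:foldl(
        fun(Doc, Acc) -> Acc + iolist_size(Doc) end,
        byte_size(Prefix) + byte_size(Suffix) + Commas,
        EncodedDocs
    ).

%% bulk_body_length(<<"{\"docs\":[">>, <<"]}">>, [<<"{}">>, <<"{}">>]) =:= 16.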
-
-changes_since(
- #httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb,
- Style,
- StartSeq,
- UserFun,
- Options
-) ->
- Timeout = erlang:max(1000, InactiveTimeout div 3),
- BaseQArgs =
- case get_value(continuous, Options, false) of
- false ->
- [{"feed", "normal"}];
- true ->
- [{"feed", "continuous"}]
- end ++
- [
- {"style", atom_to_list(Style)},
- {"since", ?JSON_ENCODE(StartSeq)},
- {"timeout", integer_to_list(Timeout)}
- ],
- DocIds = get_value(doc_ids, Options),
- Selector = get_value(selector, Options),
- {QArgs, Method, Body, Headers} =
- case {DocIds, Selector} of
- {undefined, undefined} ->
- QArgs1 = maybe_add_changes_filter_q_args(BaseQArgs, Options),
- {QArgs1, get, [], Headers1};
- {undefined, _} when is_tuple(Selector) ->
- Headers2 = [{"Content-Type", "application/json"} | Headers1],
- JsonSelector = ?JSON_ENCODE({[{<<"selector">>, Selector}]}),
- {[{"filter", "_selector"} | BaseQArgs], post, JsonSelector, Headers2};
- {_, undefined} when is_list(DocIds) ->
- Headers2 = [{"Content-Type", "application/json"} | Headers1],
- JsonDocIds = ?JSON_ENCODE({[{<<"doc_ids">>, DocIds}]}),
- {[{"filter", "_doc_ids"} | BaseQArgs], post, JsonDocIds, Headers2}
- end,
- try
- send_req(
- HttpDb,
- [
- {method, Method},
- {path, "_changes"},
- {qs, QArgs},
- {headers, Headers},
- {body, Body},
- {ibrowse_options, [{stream_to, {self(), once}}]}
- ],
- fun
- (200, _, DataStreamFun) ->
- parse_changes_feed(Options, UserFun, DataStreamFun);
- (405, _, _) when is_list(DocIds) ->
- % CouchDB versions < 1.1.0 don't have the builtin
-            % _changes feed filter "_doc_ids", nor do they support POST
- send_req(
- HttpDb,
- [
- {method, get},
- {path, "_changes"},
- {qs, BaseQArgs},
- {headers, Headers1},
- {ibrowse_options, [{stream_to, {self(), once}}]}
- ],
- fun(200, _, DataStreamFun2) ->
- UserFun2 = fun
- (#doc_info{id = Id} = DocInfo) ->
- case lists:member(Id, DocIds) of
- true ->
- UserFun(DocInfo);
- false ->
- ok
- end;
- (LastSeq) ->
- UserFun(LastSeq)
- end,
- parse_changes_feed(
- Options,
- UserFun2,
- DataStreamFun2
- )
- end
- );
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
- throw({retry_limit, {changes_req_failed, ErrCode, ErrMsg}})
- end
- )
- catch
- exit:{http_request_failed, _, _, max_backoff} ->
- exit(max_backoff);
- exit:{http_request_failed, _, _, {error, {connection_closed, mid_stream}}} ->
- throw(retry_no_limit);
- exit:{http_request_failed, _, _, _} = Error ->
- throw({retry_limit, Error})
- end.
-
-% internal functions
-
-maybe_add_changes_filter_q_args(BaseQS, Options) ->
- case get_value(filter, Options) of
- undefined ->
- BaseQS;
- FilterName ->
- %% get list of view attributes
- ViewFields0 = [atom_to_list(F) || F <- record_info(fields, mrargs)],
- ViewFields = ["key" | ViewFields0],
-
- {Params} = get_value(query_params, Options, {[]}),
- [
- {"filter", ?b2l(FilterName)}
- | lists:foldl(
- fun({K, V}, QSAcc) ->
- Ks = couch_util:to_list(K),
- case lists:keymember(Ks, 1, QSAcc) of
- true ->
- QSAcc;
- false when FilterName =:= <<"_view">> ->
- V1 =
- case lists:member(Ks, ViewFields) of
- true -> ?JSON_ENCODE(V);
- false -> couch_util:to_list(V)
- end,
- [{Ks, V1} | QSAcc];
- false ->
- [{Ks, couch_util:to_list(V)} | QSAcc]
- end
- end,
- BaseQS,
- Params
- )
- ]
- end.
-
-parse_changes_feed(Options, UserFun, DataStreamFun) ->
- case get_value(continuous, Options, false) of
- true ->
- continuous_changes(DataStreamFun, UserFun);
- false ->
- EventFun = fun(Ev) ->
- changes_ev1(Ev, fun(DocInfo, _) -> UserFun(DocInfo) end, [])
- end,
- json_stream_parse:events(DataStreamFun, EventFun)
- end.
-
-options_to_query_args(HttpDb, Path, Options0) ->
- case lists:keytake(max_url_len, 1, Options0) of
- false ->
- MaxLen = ?MAX_URL_LEN,
- Options = Options0;
- {value, {max_url_len, MaxLen}, Options} ->
- ok
- end,
- case lists:keytake(atts_since, 1, Options) of
- false ->
- options_to_query_args(Options, []);
- {value, {atts_since, []}, Options2} ->
- options_to_query_args(Options2, []);
- {value, {atts_since, PAs}, Options2} ->
- QueryArgs1 = options_to_query_args(Options2, []),
- FullUrl = couch_replicator_httpc:full_url(
- HttpDb, [{path, Path}, {qs, QueryArgs1}]
- ),
- RevList = atts_since_arg(
- length("GET " ++ FullUrl ++ " HTTP/1.1\r\n") +
- % +6 = % encoded [ and ]
- length("&atts_since=") + 6,
- PAs,
- MaxLen,
- []
- ),
- [{"atts_since", ?b2l(iolist_to_binary(?JSON_ENCODE(RevList)))} | QueryArgs1]
- end.
-
-options_to_query_args([], Acc) ->
- lists:reverse(Acc);
-options_to_query_args([ejson_body | Rest], Acc) ->
- options_to_query_args(Rest, Acc);
-options_to_query_args([delay_commit | Rest], Acc) ->
- options_to_query_args(Rest, Acc);
-options_to_query_args([revs | Rest], Acc) ->
- options_to_query_args(Rest, [{"revs", "true"} | Acc]);
-options_to_query_args([{open_revs, all} | Rest], Acc) ->
- options_to_query_args(Rest, [{"open_revs", "all"} | Acc]);
-options_to_query_args([latest | Rest], Acc) ->
- options_to_query_args(Rest, [{"latest", "true"} | Acc]);
-options_to_query_args([{open_revs, Revs} | Rest], Acc) ->
- JsonRevs = ?b2l(iolist_to_binary(?JSON_ENCODE(couch_doc:revs_to_strs(Revs)))),
- options_to_query_args(Rest, [{"open_revs", JsonRevs} | Acc]).
-
-atts_since_arg(_UrlLen, [], _MaxLen, Acc) ->
- lists:reverse(Acc);
-atts_since_arg(UrlLen, [PA | Rest], MaxLen, Acc) ->
- RevStr = couch_doc:rev_to_str(PA),
- NewUrlLen =
- case Rest of
- [] ->
- % plus 2 double quotes (% encoded)
- UrlLen + size(RevStr) + 6;
- _ ->
- % plus 2 double quotes and a comma (% encoded)
- UrlLen + size(RevStr) + 9
- end,
- case NewUrlLen >= MaxLen of
- true ->
- lists:reverse(Acc);
- false ->
- atts_since_arg(NewUrlLen, Rest, MaxLen, [RevStr | Acc])
- end.
-
-% TODO: A less verbose, more elegant and automatic restart strategy for
-% the exported open_doc_revs/6 function. The restart should be
-% transparent to the caller like any other Couch API function exported
-% by this module.
-receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc) ->
- try
- % Left only for debugging purposes via an interactive or remote shell
- erlang:put(open_doc_revs, {Id, Revs, Ref, Streamer}),
- receive_docs(Streamer, Fun, Ref, Acc)
- catch
- error:{restart_open_doc_revs, NewRef} ->
- receive_docs_loop(Streamer, Fun, Id, Revs, NewRef, Acc)
- end.
-
-receive_docs(Streamer, UserFun, Ref, UserAcc) ->
- Streamer ! {get_headers, Ref, self()},
- receive
- {started_open_doc_revs, NewRef} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {headers, Ref, Headers} ->
- case header_value("content-type", Headers) of
- {"multipart/related", _} = ContentType ->
- % Skip document body and attachment size limits validation here
- % since these should be validated by the replication target
- case
- couch_doc:doc_from_multi_part_stream(
- ContentType,
- fun() -> receive_doc_data(Streamer, Ref) end,
- Ref,
- _ValidateDocLimits = false
- )
- of
- {ok, Doc, WaitFun, Parser} ->
- case run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref) of
- {ok, UserAcc2} ->
- ok;
- {skip, UserAcc2} ->
- couch_httpd_multipart:abort_multipart_stream(Parser)
- end,
- WaitFun(),
- receive_docs(Streamer, UserFun, Ref, UserAcc2)
- end;
- {"application/json", []} ->
- Doc = couch_doc:from_json_obj(
- ?JSON_DECODE(receive_all(Streamer, Ref, []))
- ),
- {_, UserAcc2} = run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref),
- receive_docs(Streamer, UserFun, Ref, UserAcc2);
- {"application/json", [{"error", "true"}]} ->
- {ErrorProps} = ?JSON_DECODE(receive_all(Streamer, Ref, [])),
- Rev = get_value(<<"missing">>, ErrorProps),
- Result = {{not_found, missing}, couch_doc:parse_rev(Rev)},
- {_, UserAcc2} = run_user_fun(UserFun, Result, UserAcc, Ref),
- receive_docs(Streamer, UserFun, Ref, UserAcc2)
- end;
- {done, Ref} ->
- {ok, UserAcc}
- end.
-
-run_user_fun(UserFun, Arg, UserAcc, OldRef) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- try UserFun(Arg, UserAcc) of
- Resp ->
- exit({exit_ok, Resp})
- catch
- throw:Reason ->
- exit({exit_throw, Reason});
- error:Reason ->
- exit({exit_error, Reason});
- exit:Reason ->
- exit({exit_exit, Reason})
- end
- end),
- receive
- {started_open_doc_revs, NewRef} ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- restart_remote_open_doc_revs(OldRef, NewRef);
- {'DOWN', Ref, process, Pid, {exit_ok, Ret}} ->
- Ret;
- {'DOWN', Ref, process, Pid, {exit_throw, Reason}} ->
- throw(Reason);
- {'DOWN', Ref, process, Pid, {exit_error, Reason}} ->
- erlang:error(Reason);
- {'DOWN', Ref, process, Pid, {exit_exit, Reason}} ->
- erlang:exit(Reason)
- end.
-
-restart_remote_open_doc_revs(Ref, NewRef) ->
- receive
- {body_bytes, Ref, _} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {body_done, Ref} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {done, Ref} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {headers, Ref, _} ->
- restart_remote_open_doc_revs(Ref, NewRef)
- after 0 ->
- erlang:error({restart_open_doc_revs, NewRef})
- end.
-
-remote_open_doc_revs_streamer_start(Parent) ->
- receive
- {get_headers, _Ref, Parent} ->
- remote_open_doc_revs_streamer_start(Parent);
- {next_bytes, _Ref, Parent} ->
- remote_open_doc_revs_streamer_start(Parent)
- after 0 ->
- Parent ! {started_open_doc_revs, make_ref()}
- end.
-
-receive_all(Streamer, Ref, Acc) ->
- Streamer ! {next_bytes, Ref, self()},
- receive
- {started_open_doc_revs, NewRef} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {body_bytes, Ref, Bytes} ->
- receive_all(Streamer, Ref, [Bytes | Acc]);
- {body_done, Ref} ->
- lists:reverse(Acc)
- end.
-
-mp_parse_mixed(eof) ->
- receive
- {get_headers, Ref, From} ->
- From ! {done, Ref}
- end;
-mp_parse_mixed({headers, H}) ->
- receive
- {get_headers, Ref, From} ->
- From ! {headers, Ref, H}
- end,
- fun mp_parse_mixed/1;
-mp_parse_mixed({body, Bytes}) ->
- receive
- {next_bytes, Ref, From} ->
- From ! {body_bytes, Ref, Bytes}
- end,
- fun mp_parse_mixed/1;
-mp_parse_mixed(body_end) ->
- receive
- {next_bytes, Ref, From} ->
- From ! {body_done, Ref};
- {get_headers, Ref, From} ->
- self() ! {get_headers, Ref, From}
- end,
- fun mp_parse_mixed/1.
-
-receive_doc_data(Streamer, Ref) ->
- Streamer ! {next_bytes, Ref, self()},
- receive
- {body_bytes, Ref, Bytes} ->
- {Bytes, fun() -> receive_doc_data(Streamer, Ref) end};
- {body_done, Ref} ->
- {<<>>, fun() -> receive_doc_data(Streamer, Ref) end}
- end.
-
-changes_ev1(object_start, UserFun, UserAcc) ->
- fun(Ev) -> changes_ev2(Ev, UserFun, UserAcc) end.
-
-changes_ev2({key, <<"results">>}, UserFun, UserAcc) ->
- fun(Ev) -> changes_ev3(Ev, UserFun, UserAcc) end;
-changes_ev2(_, UserFun, UserAcc) ->
- fun(Ev) -> changes_ev2(Ev, UserFun, UserAcc) end.
-
-changes_ev3(array_start, UserFun, UserAcc) ->
- fun(Ev) -> changes_ev_loop(Ev, UserFun, UserAcc) end.
-
-changes_ev_loop(object_start, UserFun, UserAcc) ->
- fun(Ev) ->
- json_stream_parse:collect_object(
- Ev,
- fun(Obj) ->
- UserAcc2 = UserFun(json_to_doc_info(Obj), UserAcc),
- fun(Ev2) -> changes_ev_loop(Ev2, UserFun, UserAcc2) end
- end
- )
- end;
-changes_ev_loop(array_end, _UserFun, _UserAcc) ->
- fun(_Ev) -> changes_ev_done() end.
-
-changes_ev_done() ->
- fun(_Ev) -> changes_ev_done() end.
-
-continuous_changes(DataFun, UserFun) ->
- {DataFun2, _, Rest} = json_stream_parse:events(
- DataFun,
- fun(Ev) -> parse_changes_line(Ev, UserFun) end
- ),
- continuous_changes(fun() -> {Rest, DataFun2} end, UserFun).
-
-parse_changes_line(object_start, UserFun) ->
- fun(Ev) ->
- json_stream_parse:collect_object(
- Ev,
- fun(Obj) -> UserFun(json_to_doc_info(Obj)) end
- )
- end.
-
-json_to_doc_info({Props}) ->
- case get_value(<<"changes">>, Props) of
- undefined ->
- {last_seq, get_value(<<"last_seq">>, Props)};
- Changes ->
- RevsInfo0 = lists:map(
- fun({Change}) ->
- Rev = couch_doc:parse_rev(get_value(<<"rev">>, Change)),
- Del = couch_replicator_utils:is_deleted(Change),
- #rev_info{rev = Rev, deleted = Del}
- end,
- Changes
- ),
-
- RevsInfo =
- case get_value(<<"removed">>, Props) of
- true ->
- [_ | RevsInfo1] = RevsInfo0,
- RevsInfo1;
- _ ->
- RevsInfo0
- end,
-
- #doc_info{
- id = get_value(<<"id">>, Props),
- high_seq = get_value(<<"seq">>, Props),
- revs = RevsInfo
- }
- end.
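For orientation, an editorial EUnit-style example of the mapping json_to_doc_info/1 performs on a decoded _changes row; it assumes placement in this module's existing TEST section, and the rev value is illustrative:

json_to_doc_info_example_test() ->
    Row =
        {[
            {<<"seq">>, 42},
            {<<"id">>, <<"doc1">>},
            {<<"changes">>, [{[{<<"rev">>, <<"1-abc">>}]}]}
        ]},
    %% Unspecified record fields are left unmatched on purpose.
    #doc_info{
        id = <<"doc1">>,
        high_seq = 42,
        revs = [#rev_info{rev = {1, <<"abc">>}, deleted = false}]
    } = json_to_doc_info(Row).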
-
-bulk_results_to_errors(Docs, {ok, Results}, interactive_edit) ->
- lists:reverse(
- lists:foldl(
- fun
- ({_, {ok, _}}, Acc) ->
- Acc;
- ({#doc{id = Id, revs = {Pos, [RevId | _]}}, Error}, Acc) ->
- {_, Error, Reason} = couch_httpd:error_info(Error),
- [
- {[
- {id, Id},
- {rev, rev_to_str({Pos, RevId})},
- {error, Error},
- {reason, Reason}
- ]}
- | Acc
- ]
- end,
- [],
- lists:zip(Docs, Results)
- )
- );
-bulk_results_to_errors(Docs, {ok, Results}, replicated_changes) ->
- bulk_results_to_errors(Docs, {aborted, Results}, interactive_edit);
-bulk_results_to_errors(_Docs, {aborted, Results}, interactive_edit) ->
- lists:map(
- fun({{Id, Rev}, Err}) ->
- {_, Error, Reason} = couch_httpd:error_info(Err),
- {[{id, Id}, {rev, rev_to_str(Rev)}, {error, Error}, {reason, Reason}]}
- end,
- Results
- );
-bulk_results_to_errors(_Docs, Results, remote) ->
- lists:reverse(
- lists:foldl(
- fun({Props}, Acc) ->
- case get_value(<<"error">>, Props, get_value(error, Props)) of
- undefined ->
- Acc;
- Error ->
- Id = get_value(<<"id">>, Props, get_value(id, Props)),
- Rev = get_value(<<"rev">>, Props, get_value(rev, Props)),
- Reason = get_value(<<"reason">>, Props, get_value(reason, Props)),
- [
- {[
- {id, Id},
- {rev, rev_to_str(Rev)},
- {error, Error},
- {reason, Reason}
- ]}
- | Acc
- ]
- end
- end,
- [],
- Results
- )
- ).
-
-rev_to_str({_Pos, _Id} = Rev) ->
- couch_doc:rev_to_str(Rev);
-rev_to_str(Rev) ->
- Rev.
-
-write_fun() ->
- fun(Data) ->
- receive
- {get_data, Ref, From} ->
- From ! {data, Ref, Data}
- end
- end.
-
-stream_doc({JsonBytes, Atts, Boundary, Len}) ->
- case erlang:erase({doc_streamer, Boundary}) of
- Pid when is_pid(Pid) ->
- unlink(Pid),
- exit(Pid, kill);
- _ ->
- ok
- end,
- DocStreamer = spawn_link(
- couch_doc,
- doc_to_multi_part_stream,
- [Boundary, JsonBytes, Atts, write_fun(), true]
- ),
- erlang:put({doc_streamer, Boundary}, DocStreamer),
- {ok, <<>>, {Len, Boundary}};
-stream_doc({0, Id}) ->
- erlang:erase({doc_streamer, Id}),
- eof;
-stream_doc({LenLeft, Id}) when LenLeft > 0 ->
- Ref = make_ref(),
- erlang:get({doc_streamer, Id}) ! {get_data, Ref, self()},
- receive
- {data, Ref, Data} ->
- {ok, Data, {LenLeft - iolist_size(Data), Id}}
- end.
-
-header_value(Key, Headers) ->
- header_value(Key, Headers, undefined).
-
-header_value(Key, Headers, Default) ->
- Headers1 = [{string:to_lower(K), V} || {K, V} <- Headers],
- case lists:keyfind(string:to_lower(Key), 1, Headers1) of
- {_, Value} ->
- Value;
- _ ->
- Default
- end.
-
-% Normalize an #httpdb{} or #db{} record such that it can be used for
-% comparisons. This means removing things like pids and sorting options / props.
-normalize_db(#httpdb{} = HttpDb) ->
- #httpdb{
- url = HttpDb#httpdb.url,
- auth_props = lists:sort(HttpDb#httpdb.auth_props),
- headers = lists:keysort(1, HttpDb#httpdb.headers),
- timeout = HttpDb#httpdb.timeout,
- ibrowse_options = lists:keysort(1, HttpDb#httpdb.ibrowse_options),
- retries = HttpDb#httpdb.retries,
- http_connections = HttpDb#httpdb.http_connections
- };
-normalize_db(<<DbName/binary>>) ->
- DbName.
-
-maybe_append_create_query_params(Db, []) ->
- Db;
-maybe_append_create_query_params(Db, CreateParams) ->
- NewUrl = Db#httpdb.url ++ "?" ++ mochiweb_util:urlencode(CreateParams),
- Db#httpdb{url = NewUrl}.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-normalize_http_db_test() ->
- HttpDb = #httpdb{
- url = "http://host/db",
- auth_props = [{"key", "val"}],
- headers = [{"k2", "v2"}, {"k1", "v1"}],
- timeout = 30000,
- ibrowse_options = [{k2, v2}, {k1, v1}],
- retries = 10,
- http_connections = 20
- },
- Expected = HttpDb#httpdb{
- headers = [{"k1", "v1"}, {"k2", "v2"}],
- ibrowse_options = [{k1, v1}, {k2, v2}]
- },
- ?assertEqual(Expected, normalize_db(HttpDb)),
- ?assertEqual(<<"local">>, normalize_db(<<"local">>)).
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_app.erl b/src/couch_replicator/src/couch_replicator_app.erl
deleted file mode 100644
index e4dc63e1d..000000000
--- a/src/couch_replicator/src/couch_replicator_app.erl
+++ /dev/null
@@ -1,17 +0,0 @@
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, []) ->
- couch_replicator_sup:start_link().
-
-stop([]) ->
- ok.
diff --git a/src/couch_replicator/src/couch_replicator_auth.erl b/src/couch_replicator/src/couch_replicator_auth.erl
deleted file mode 100644
index e5c024f7e..000000000
--- a/src/couch_replicator/src/couch_replicator_auth.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_auth).
-
--export([
- initialize/1,
- update_headers/2,
- handle_response/3,
- cleanup/1
-]).
-
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
--type headers() :: [{string(), string()}].
--type code() :: non_neg_integer().
-
--define(DEFAULT_PLUGINS, "couch_replicator_auth_session,couch_replicator_auth_noop").
-
-% Behavior API
-
-% Note for plugin developers: consider using the "auth" field in the source and
-% target objects to store credentials. In that case non-owner and non-admin
-% users will have those credentials stripped when they read the replication
-% document, which mimics the behavior for "headers" and the user and pass
-% fields in endpoint URLs.
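A small illustration of the note above (editorial; the nested "basic" shape matches what the session plugin's tests exercise, and the credential values are made up):

%% Credentials kept under the endpoint's "auth" object, rather than in the URL
%% userinfo or an Authorization header, are stripped for non-owner/non-admin readers.
example_auth_props() ->
    #httpdb{
        url = "http://host/db",
        auth_props = [
            {<<"basic">>,
                {[
                    {<<"username">>, <<"repl_user">>},
                    {<<"password">>, <<"secret">>}
                ]}}
        ]
    }.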
-
--callback initialize(#httpdb{}) ->
- {ok, #httpdb{}, term()} | {error, term()} | ignore.
-
--callback update_headers(term(), headers()) -> {headers(), term()}.
-
--callback handle_response(term(), code(), headers()) ->
- {continue | retry, term()}.
-
--callback cleanup(term()) -> ok.
-
-% Main API
-
--spec initialize(#httpdb{}) -> {ok, #httpdb{}} | {error, term()}.
-initialize(#httpdb{auth_context = nil} = HttpDb) ->
- case try_initialize(get_plugin_modules(), HttpDb) of
- {ok, Mod, HttpDb1, Context} ->
- {ok, HttpDb1#httpdb{auth_context = {Mod, Context}}};
- {error, Error} ->
- {error, Error}
- end.
-
--spec update_headers(#httpdb{}, headers()) -> {headers(), #httpdb{}}.
-update_headers(#httpdb{auth_context = {Mod, Context}} = HttpDb, Headers) ->
- {Headers1, Context1} = Mod:update_headers(Context, Headers),
- {Headers1, HttpDb#httpdb{auth_context = {Mod, Context1}}}.
-
--spec handle_response(#httpdb{}, code(), headers()) ->
- {continue | retry, term()}.
-handle_response(#httpdb{} = HttpDb, Code, Headers) ->
- {Mod, Context} = HttpDb#httpdb.auth_context,
- {Res, Context1} = Mod:handle_response(Context, Code, Headers),
- {Res, HttpDb#httpdb{auth_context = {Mod, Context1}}}.
-
--spec cleanup(#httpdb{}) -> #httpdb{}.
-cleanup(#httpdb{auth_context = {Module, Context}} = HttpDb) ->
- ok = Module:cleanup(Context),
- HttpDb#httpdb{auth_context = nil}.
-
-% Private helper functions
-
--spec get_plugin_modules() -> [atom()].
-get_plugin_modules() ->
- Plugins1 = config:get("replicator", "auth_plugins", ?DEFAULT_PLUGINS),
- [list_to_atom(Plugin) || Plugin <- string:tokens(Plugins1, ",")].
-
-try_initialize([], _HttpDb) ->
- {error, no_more_auth_plugins_left_to_try};
-try_initialize([Mod | Modules], HttpDb) ->
- try Mod:initialize(HttpDb) of
- {ok, HttpDb1, Context} ->
- {ok, Mod, HttpDb1, Context};
- ignore ->
- try_initialize(Modules, HttpDb);
- {error, Error} ->
- {error, Error}
- catch
- error:undef ->
- {error, {could_not_load_plugin_module, Mod}}
- end.
diff --git a/src/couch_replicator/src/couch_replicator_auth_noop.erl b/src/couch_replicator/src/couch_replicator_auth_noop.erl
deleted file mode 100644
index e2a7ee839..000000000
--- a/src/couch_replicator/src/couch_replicator_auth_noop.erl
+++ /dev/null
@@ -1,44 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_auth_noop).
-
--behavior(couch_replicator_auth).
-
--export([
- initialize/1,
- update_headers/2,
- handle_response/3,
- cleanup/1
-]).
-
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
--type headers() :: [{string(), string()}].
--type code() :: non_neg_integer().
-
--spec initialize(#httpdb{}) -> {ok, #httpdb{}, term()} | ignore.
-initialize(#httpdb{} = HttpDb) ->
- {ok, HttpDb, nil}.
-
--spec update_headers(term(), headers()) -> {headers(), term()}.
-update_headers(Context, Headers) ->
- {Headers, Context}.
-
--spec handle_response(term(), code(), headers()) ->
- {continue | retry, term()}.
-handle_response(Context, _Code, _Headers) ->
- {continue, Context}.
-
--spec cleanup(term()) -> ok.
-cleanup(_Context) ->
- ok.
diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl
deleted file mode 100644
index d29600706..000000000
--- a/src/couch_replicator/src/couch_replicator_auth_session.erl
+++ /dev/null
@@ -1,719 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% This is the replicator session auth plugin. It implements session-based
-% authentication for the replicator. The only public API is the set of
-% functions from the couch_replicator_auth behaviour. Most of the logic and
-% state is in the gen_server. An instance of the gen_server may be spawned for
-% the source and target endpoints of each replication job.
-%
-% The workflow is roughly this:
-%
-% * On initialization, try to get a cookie in `refresh/1`. If an error occurs,
-%   then crash. If the `_session` endpoint fails with a 404 (not found), return
-%   `ignore`, assuming session authentication is not supported or we simply hit
-%   a non-CouchDB server.
-%
-% * Before each request, the auth framework calls the `update_headers` API
-%   function. Before updating the headers and returning, check whether the
-%   cookie needs to be refreshed. The check looks at the `next_refresh` time:
-%   if that time is set (not `infinity`) and has expired, obtain a new cookie,
-%   then update the headers and return.
-%
-% * After each request, the auth framework calls the `handle_response` function.
-%   If the request was successful, check whether the server sent a new cookie in
-%   the `Set-Cookie` header. If it did, that cookie becomes the current cookie.
-%
-% * If the last request had an auth failure, check whether the request used a
-%   stale cookie. In that case nothing is done, and the client is told to retry.
-%   The next time it updates its headers before a request, it should pick up the
-%   latest cookie.
-%
-% * If the last request failed and the cookie was the latest known cookie,
-%   schedule a refresh and tell the client to retry. However, if the cookie was
-%   just updated, tell the client to continue so that it handles the auth
-%   failure on its own via a set of retries with exponential backoffs. This is
-%   to ensure that if something goes wrong and one of the endpoints issues
-%   invalid cookies, the replicator won't be stuck in a busy loop refreshing
-%   them.
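As a rough illustration of the refresh step described above (editorial sketch; credentials and host are made up, and the request assembly mirrors what refresh/1 below does with mochiweb_util):

%% The plugin POSTs form-encoded credentials to <endpoint>/_session and, on a
%% 200 response, keeps the AuthSession value from the "Set-Cookie" header,
%% e.g. "AuthSession=abc123; Max-Age=600", bumping its epoch counter.
example_session_request() ->
    Url = "http://127.0.0.1:5984/_session",
    Body = mochiweb_util:urlencode([{name, "repl_user"}, {password, "secret"}]),
    Headers = [{"Content-Type", "application/x-www-form-urlencoded"}],
    {post, Url, Headers, Body}.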
-
--module(couch_replicator_auth_session).
-
--behaviour(couch_replicator_auth).
--behaviour(gen_server).
-
--export([
- initialize/1,
- update_headers/2,
- handle_response/3,
- cleanup/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- format_status/2
-]).
-
--include_lib("ibrowse/include/ibrowse.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
--type headers() :: [{string(), string()}].
--type code() :: non_neg_integer().
--type time_sec() :: non_neg_integer().
--type age() :: time_sec() | undefined.
-
--define(MIN_UPDATE_INTERVAL_SEC, 5).
--define(DEFAULT_REFRESH_INTERVAL_SEC, 550).
-
--record(state, {
- epoch = 0 :: non_neg_integer(),
- cookie :: string() | undefined,
- user :: string() | undefined,
- pass :: string() | undefined,
- httpdb_timeout :: integer(),
- httpdb_pool :: pid(),
- httpdb_ibrowse_options = [] :: list(),
- session_url :: string(),
- next_refresh = infinity :: infinity | non_neg_integer(),
- refresh_tstamp = 0 :: non_neg_integer(),
- require_valid_user = false :: boolean()
-}).
-
-% Behavior API callbacks
-
--spec initialize(#httpdb{}) ->
- {ok, #httpdb{}, term()} | {error, term()} | ignore.
-initialize(#httpdb{} = HttpDb) ->
- case init_state(HttpDb) of
- {ok, HttpDb1, State} ->
- {ok, Pid} = gen_server:start_link(?MODULE, [State], []),
- Epoch = State#state.epoch,
- Timeout = State#state.httpdb_timeout,
- {ok, HttpDb1, {Pid, Epoch, Timeout}};
- {error, Error} ->
- {error, Error};
- ignore ->
- ignore
- end.
-
--spec update_headers(term(), headers()) -> {headers(), term()}.
-update_headers({Pid, Epoch, Timeout}, Headers) ->
- Args = {update_headers, Headers, Epoch},
- {Headers1, Epoch1} = gen_server:call(Pid, Args, Timeout * 10),
- {Headers1, {Pid, Epoch1, Timeout}}.
-
--spec handle_response(term(), code(), headers()) ->
- {continue | retry, term()}.
-handle_response({Pid, Epoch, Timeout}, Code, Headers) ->
- Args = {handle_response, Code, Headers, Epoch},
- {Retry, Epoch1} = gen_server:call(Pid, Args, Timeout * 10),
- {Retry, {Pid, Epoch1, Timeout}}.
-
--spec cleanup(term()) -> ok.
-cleanup({Pid, _Epoch, Timeout}) ->
- gen_server:call(Pid, stop, Timeout * 10).
-
-%% gen_server functions
-
-init([#state{} = State]) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call({update_headers, Headers, _Epoch}, _From, State) ->
- case maybe_refresh(State) of
- {ok, State1} ->
- Cookie = "AuthSession=" ++ State1#state.cookie,
- Headers1 = [{"Cookie", Cookie} | Headers],
- {reply, {Headers1, State1#state.epoch}, State1};
- {error, Error} ->
- LogMsg = "~p: Stopping session auth plugin because of error ~p",
- couch_log:error(LogMsg, [?MODULE, Error]),
- {stop, Error, State}
- end;
-handle_call({handle_response, Code, Headers, Epoch}, _From, State) ->
- {Retry, State1} = process_response(Code, Headers, Epoch, State),
- {reply, {Retry, State1#state.epoch}, State1};
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State}.
-
-handle_cast(Msg, State) ->
-    couch_log:error("~p: Received unexpected cast ~p", [?MODULE, Msg]),
- {noreply, State}.
-
-handle_info(Msg, State) ->
-    couch_log:error("~p : Received unexpected message ~p", [?MODULE, Msg]),
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-format_status(_Opt, [_PDict, State]) ->
- [
- {epoch, State#state.epoch},
- {user, State#state.user},
- {session_url, State#state.session_url},
- {refresh_tstamp, State#state.refresh_tstamp}
- ].
-
-%% Private helper functions
-
--spec init_state(#httpdb{}) ->
- {ok, #httpdb{}, #state{}} | {error, term()} | ignore.
-init_state(#httpdb{} = HttpDb) ->
- case extract_creds(HttpDb) of
- {ok, User, Pass, HttpDb1} ->
- State = #state{
- user = User,
- pass = Pass,
- session_url = get_session_url(HttpDb1#httpdb.url),
- httpdb_pool = HttpDb1#httpdb.httpc_pool,
- httpdb_timeout = HttpDb1#httpdb.timeout,
- httpdb_ibrowse_options = HttpDb1#httpdb.ibrowse_options
- },
- case refresh(State) of
- {ok, State1} ->
- {ok, HttpDb1, State1};
- {error, {session_not_supported, _, _}} ->
- ignore;
- {error, {session_requires_valid_user, _, _}} ->
-                % If the endpoint requires basic auth for _session, then try
-                % to refresh again with basic auth creds, and remember
-                % this fact in the state for all subsequent requests to
-                % the _session endpoint
- case refresh(State#state{require_valid_user = true}) of
- {ok, State1} ->
- {ok, HttpDb1, State1};
- {error, {session_not_supported, _, _}} ->
- ignore;
- {error, Error} ->
- {error, Error}
- end;
- {error, Error} ->
- {error, Error}
- end;
- {error, missing_credentials} ->
- ignore;
- {error, Error} ->
- {error, Error}
- end.
-
--spec extract_creds(#httpdb{}) ->
- {ok, string(), string(), #httpdb{}} | {error, term()}.
-extract_creds(#httpdb{} = HttpDb) ->
- case couch_replicator_utils:get_basic_auth_creds(HttpDb) of
- {undefined, undefined} ->
- % Return error. Session plugin should ignore this replication
- % endpoint as there are no valid creds which can be used
- {error, missing_credentials};
- {User, Pass} when is_list(User), is_list(Pass) ->
- HttpDb1 = couch_replicator_utils:remove_basic_auth_creds(HttpDb),
- {ok, User, Pass, HttpDb1}
- end.
-
--spec process_response(
- non_neg_integer(),
- headers(),
- non_neg_integer(),
- #state{}
-) -> {retry | continue, #state{}}.
-process_response(403, _Headers, Epoch, State) ->
- process_auth_failure(Epoch, State);
-process_response(401, _Headers, Epoch, State) ->
- process_auth_failure(Epoch, State);
-process_response(Code, Headers, _Epoch, State) when Code >= 200, Code < 300 ->
-    % If the server notices the cookie is about to time out, it can send a new
-    % cookie in the response headers. Take advantage of that and refresh the
-    % cookie.
- State1 =
- case maybe_update_cookie(Headers, State) of
- {ok, UpdatedState} ->
- UpdatedState;
- {error, cookie_not_found} ->
- State;
- {error, Other} ->
- LogMsg = "~p : Could not parse cookie from response headers ~p",
- couch_log:error(LogMsg, [?MODULE, Other]),
- State
- end,
- {continue, State1};
-process_response(_Code, _Headers, _Epoch, State) ->
- {continue, State}.
-
--spec process_auth_failure(non_neg_integer(), #state{}) ->
- {retry | continue, #state{}}.
-process_auth_failure(Epoch, #state{epoch = StateEpoch} = State) when
- StateEpoch > Epoch
-->
-    % This request used an outdated cookie; tell it to retry immediately.
-    % It will pick up the current cookie when its headers are updated.
- {retry, State};
-process_auth_failure(Epoch, #state{epoch = Epoch} = State) ->
- MinInterval = min_update_interval(),
- case cookie_age_sec(State, now_sec()) of
- AgeSec when AgeSec < MinInterval ->
- % A recently acquired cookie failed. Schedule a refresh and
- % return `continue` to let httpc's retry apply a backoff
- {continue, schedule_refresh(now_sec() + MinInterval, State)};
- _AgeSec ->
- % Current cookie failed auth. Schedule refresh and ask
- % httpc to retry the request.
- {retry, schedule_refresh(now_sec(), State)}
- end.
-
--spec get_session_url(string()) -> string().
-get_session_url(Url) ->
- #url{
- protocol = Proto,
- host = Host,
- port = Port
- } = ibrowse_lib:parse_url(Url),
- WithPort = lists:concat([Proto, "://", Host, ":", Port]),
- case lists:prefix(WithPort, Url) of
- true ->
- % Explicit port specified in the original url
- WithPort ++ "/_session";
- false ->
- % Implicit proto default port was used
- lists:concat([Proto, "://", Host, "/_session"])
- end.
-
--spec schedule_refresh(non_neg_integer(), #state{}) -> #state{}.
-schedule_refresh(T, #state{next_refresh = Tc} = State) when T < Tc ->
- State#state{next_refresh = T};
-schedule_refresh(_, #state{} = State) ->
- State.
-
--spec maybe_refresh(#state{}) -> {ok, #state{}} | {error, term()}.
-maybe_refresh(#state{next_refresh = T} = State) ->
- case now_sec() >= T of
- true ->
- refresh(State#state{next_refresh = infinity});
- false ->
- {ok, State}
- end.
-
--spec refresh(#state{}) -> {ok, #state{}} | {error, term()}.
-refresh(#state{session_url = Url, user = User, pass = Pass} = State) ->
- Body = mochiweb_util:urlencode([{name, User}, {password, Pass}]),
- Headers0 = [{"Content-Type", "application/x-www-form-urlencoded"}],
- Headers =
- case State#state.require_valid_user of
- true ->
- Headers0 ++ [{"Authorization", "Basic " ++ b64creds(User, Pass)}];
- false ->
- Headers0
- end,
- Result = http_request(State, Url, Headers, post, Body),
- http_response(Result, State).
-
--spec http_request(#state{}, string(), headers(), atom(), iolist()) ->
- {ok, string(), headers(), binary()} | {error, term()}.
-http_request(#state{httpdb_pool = Pool} = State, Url, Headers, Method, Body) ->
- Timeout = State#state.httpdb_timeout,
- Opts = [
- {response_format, binary},
- {inactivity_timeout, Timeout}
- | State#state.httpdb_ibrowse_options
- ],
- {ok, Wrk} = couch_replicator_httpc_pool:get_worker(Pool),
- try
- Result = ibrowse:send_req_direct(
- Wrk,
- Url,
- Headers,
- Method,
- Body,
- Opts,
- Timeout
- ),
- case Result of
- {ok, _, ResultHeaders, _} ->
- stop_worker_if_server_requested(ResultHeaders, Wrk);
- _Other ->
- ok
- end,
- Result
- after
- ok = couch_replicator_httpc_pool:release_worker_sync(Pool, Wrk)
- end.
-
--spec stop_worker_if_server_requested(headers(), pid()) -> ok.
-stop_worker_if_server_requested(ResultHeaders0, Worker) ->
- ResultHeaders = mochiweb_headers:make(ResultHeaders0),
- case mochiweb_headers:get_value("Connection", ResultHeaders) of
- "close" ->
- Ref = erlang:monitor(process, Worker),
- ibrowse_http_client:stop(Worker),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- end;
- _Other ->
- ok
- end.
-
--spec http_response(
- {ok, string(), headers(), binary()} | {error, term()},
- #state{}
-) -> {ok, #state{}} | {error, term()}.
-http_response({ok, "200", Headers, _}, State) ->
- maybe_update_cookie(Headers, State);
-http_response({ok, "401", Headers0, _}, #state{
- session_url = Url,
- user = User
-}) ->
- Headers = mochiweb_headers:make(Headers0),
- case mochiweb_headers:get_value("WWW-Authenticate", Headers) of
- undefined ->
- {error, {session_request_unauthorized, Url, User}};
- _SomeValue ->
- {error, {session_requires_valid_user, Url, User}}
- end;
-http_response({ok, "403", _, _}, #state{session_url = Url, user = User}) ->
- {error, {session_request_forbidden, Url, User}};
-http_response({ok, "404", _, _}, #state{session_url = Url, user = User}) ->
- {error, {session_not_supported, Url, User}};
-http_response({ok, Code, _, _}, #state{session_url = Url, user = User}) ->
- {error, {session_unexpected_result, Code, Url, User}};
-http_response({error, Error}, #state{session_url = Url, user = User}) ->
- {error, {session_request_failed, Url, User, Error}}.
-
--spec parse_cookie(list()) -> {ok, age(), string()} | {error, term()}.
-parse_cookie(Headers0) ->
- Headers = mochiweb_headers:make(Headers0),
- case mochiweb_headers:get_value("Set-Cookie", Headers) of
- undefined ->
- {error, cookie_not_found};
- CookieHeader ->
- CookieKVs = mochiweb_cookies:parse_cookie(CookieHeader),
- CaseInsKVs = mochiweb_headers:make(CookieKVs),
- case mochiweb_headers:get_value("AuthSession", CaseInsKVs) of
- undefined ->
- {error, cookie_format_invalid};
- Cookie ->
- MaxAge = parse_max_age(CaseInsKVs),
- {ok, MaxAge, Cookie}
- end
- end.
-
--spec parse_max_age(list()) -> age().
-parse_max_age(CaseInsKVs) ->
- case mochiweb_headers:get_value("Max-Age", CaseInsKVs) of
- String when is_list(String) ->
- try list_to_integer(String) of
- MaxAge when MaxAge >= 0 ->
- MaxAge;
- _ ->
- undefined
- catch
- error:badarg ->
- undefined
- end;
- _ ->
- undefined
- end.
-
--spec maybe_update_cookie(headers(), #state{}) ->
- {ok, string()} | {error, term()}.
-maybe_update_cookie(ResponseHeaders, State) ->
- case parse_cookie(ResponseHeaders) of
- {ok, MaxAge, Cookie} ->
- {ok, update_cookie(State, Cookie, now_sec(), MaxAge)};
- {error, Error} ->
- {error, Error}
- end.
-
--spec update_cookie(#state{}, string(), time_sec(), age()) -> #state{}.
-update_cookie(#state{cookie = Cookie} = State, Cookie, _, _) ->
- State;
-update_cookie(#state{epoch = Epoch} = State, Cookie, NowSec, MaxAge) ->
- NextRefresh = next_refresh(NowSec, MaxAge, refresh_interval()),
- NewState = State#state{
- epoch = Epoch + 1,
- cookie = Cookie,
- refresh_tstamp = NowSec
- },
- schedule_refresh(NextRefresh, NewState).
-
--spec next_refresh(time_sec(), age(), time_sec()) -> time_sec().
-next_refresh(NowSec, undefined, RefreshInterval) ->
- NowSec + RefreshInterval;
-next_refresh(NowSec, MaxAge, _) when is_integer(MaxAge) ->
-    % Apply a fudge factor to account for delays in receiving the cookie
- % and / or time adjustments happening over a longer period of time
- NowSec + trunc(MaxAge * 0.9).
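A worked example of the fudge factor above (editorial; it could sit in this module's existing TEST section, and the third argument is ignored when a Max-Age is present):

next_refresh_fudge_example_test() ->
    Now = now_sec(),
    %% With a 600 second Max-Age the cookie is refreshed ~10% early, at Now + 540.
    ?assertEqual(Now + 540, next_refresh(Now, 600, ignored)),
    %% Without a Max-Age the configured refresh interval is used as-is.
    ?assertEqual(Now + 550, next_refresh(Now, undefined, 550)).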
-
--spec cookie_age_sec(#state{}, time_sec()) -> time_sec().
-cookie_age_sec(#state{refresh_tstamp = RefreshTs}, Now) ->
- max(0, Now - RefreshTs).
-
--spec now_sec() -> time_sec().
-now_sec() ->
- {Mega, Sec, _Micro} = os:timestamp(),
- Mega * 1000000 + Sec.
-
--spec min_update_interval() -> time_sec().
-min_update_interval() ->
- config:get_integer(
- "replicator",
- "session_min_update_interval",
- ?MIN_UPDATE_INTERVAL_SEC
- ).
-
--spec refresh_interval() -> integer().
-refresh_interval() ->
- config:get_integer(
- "replicator",
- "session_refresh_interval_sec",
- ?DEFAULT_REFRESH_INTERVAL_SEC
- ).
-
--spec b64creds(string(), string()) -> string().
-b64creds(User, Pass) ->
- base64:encode_to_string(User ++ ":" ++ Pass).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-get_session_url_test_() ->
- [
- ?_assertEqual(SessionUrl, get_session_url(Url))
- || {Url, SessionUrl} <- [
- {"http://host/db", "http://host/_session"},
- {"http://127.0.0.1/db", "http://127.0.0.1/_session"},
- {"http://host/x/y/z", "http://host/_session"},
- {"http://host:5984/db", "http://host:5984/_session"},
- {"https://host/db?q=1", "https://host/_session"}
- ]
- ].
-
-extract_creds_success_test() ->
- HttpDb = #httpdb{
- auth_props = [
- {<<"basic">>,
- {[
- {<<"username">>, <<"u2">>},
- {<<"password">>, <<"p2">>}
- ]}}
- ]
- },
- ?assertEqual({ok, "u2", "p2", #httpdb{}}, extract_creds(HttpDb)),
- ?assertEqual({error, missing_credentials}, extract_creds(#httpdb{})).
-
-cookie_update_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_do_refresh_without_max_age(),
- t_do_refresh_with_max_age(),
- t_dont_refresh(),
- t_process_auth_failure(),
- t_process_auth_failure_stale_epoch(),
- t_process_auth_failure_too_frequent(),
- t_process_ok_update_cookie(),
- t_process_ok_no_cookie(),
- t_init_state_fails_on_401(),
- t_init_state_401_with_require_valid_user(),
- t_init_state_404(),
- t_init_state_no_creds(),
- t_init_state_http_error()
- ]
- }
- }.
-
-t_do_refresh_without_max_age() ->
- ?_test(begin
- State = #state{next_refresh = 0},
- {ok, State1} = maybe_refresh(State),
- ?assertMatch(#state{epoch = 1, cookie = "Abc"}, State1),
- #state{next_refresh = NextRefresh} = State1,
- RefreshInterval = NextRefresh - now_sec(),
- ?assert(540 < RefreshInterval andalso RefreshInterval =< 550)
- end).
-
-t_do_refresh_with_max_age() ->
- ?_test(begin
- State = #state{next_refresh = 0},
- mock_http_cookie_response_with_age("Zig", "100"),
- {ok, State1} = maybe_refresh(State),
- ?assertMatch(#state{epoch = 1, cookie = "Zig"}, State1),
- #state{next_refresh = NextRefresh} = State1,
- RefreshInterval = NextRefresh - now_sec(),
- ?assert(80 < RefreshInterval andalso RefreshInterval =< 90)
- end).
-
-t_dont_refresh() ->
- ?_test(begin
- State = #state{
- next_refresh = now_sec() + 100,
- refresh_tstamp = now_sec()
- },
- {ok, State1} = maybe_refresh(State),
- ?assertMatch(State, State1),
- State2 = #state{
- next_refresh = infinity,
- refresh_tstamp = now_sec()
- },
- {ok, State3} = maybe_refresh(State2),
- ?assertMatch(State2, State3)
- end).
-
-t_process_auth_failure() ->
- ?_test(begin
- State = #state{epoch = 1, refresh_tstamp = 0},
- {retry, State1} = process_auth_failure(1, State),
- NextRefresh = State1#state.next_refresh,
- ?assert(NextRefresh =< now_sec())
- end).
-
-t_process_auth_failure_stale_epoch() ->
- ?_test(begin
- State = #state{epoch = 3},
- ?assertMatch({retry, State}, process_auth_failure(2, State))
- end).
-
-t_process_auth_failure_too_frequent() ->
- ?_test(begin
- State = #state{epoch = 4, refresh_tstamp = now_sec()},
- ?assertMatch({continue, _}, process_auth_failure(4, State))
- end).
-
-t_process_ok_update_cookie() ->
- ?_test(begin
- Headers = [{"set-CookiE", "AuthSession=xyz; Path=/;"}, {"X", "y"}],
- Res = process_response(200, Headers, 1, #state{}),
- ?assertMatch({continue, #state{cookie = "xyz", epoch = 1}}, Res),
- State = #state{cookie = "xyz", refresh_tstamp = 42, epoch = 2},
- Res2 = process_response(200, Headers, 1, State),
- ?assertMatch({continue, #state{cookie = "xyz", epoch = 2}}, Res2)
- end).
-
-t_process_ok_no_cookie() ->
- ?_test(begin
- Headers = [{"X", "y"}],
- State = #state{cookie = "old", epoch = 3, refresh_tstamp = 42},
- Res = process_response(200, Headers, 1, State),
- ?assertMatch({continue, State}, Res)
- end).
-
-t_init_state_fails_on_401() ->
- ?_test(begin
- mock_http_401_response(),
- {error, Error} = init_state(httpdb("http://u:p@h")),
- SessionUrl = "http://h/_session",
- ?assertEqual({session_request_unauthorized, SessionUrl, "u"}, Error)
- end).
-
-t_init_state_401_with_require_valid_user() ->
- ?_test(begin
- mock_http_401_response_with_require_valid_user(),
- ?assertMatch(
- {ok, #httpdb{}, #state{cookie = "Cookie"}},
- init_state(httpdb("http://u:p@h"))
- )
- end).
-
-t_init_state_404() ->
- ?_test(begin
- mock_http_404_response(),
- ?assertEqual(ignore, init_state(httpdb("http://u:p@h")))
- end).
-
-t_init_state_no_creds() ->
- ?_test(begin
- ?_assertEqual(ignore, init_state(httpdb("http://h")))
- end).
-
-t_init_state_http_error() ->
- ?_test(begin
- mock_http_error_response(),
- {error, Error} = init_state(httpdb("http://u:p@h")),
- SessionUrl = "http://h/_session",
- ?assertEqual({session_request_failed, SessionUrl, "u", x}, Error)
- end).
-
-httpdb(Url) ->
- couch_replicator_utils:normalize_basic_auth(#httpdb{url = Url}).
-
-setup_all() ->
- meck:expect(couch_replicator_httpc_pool, get_worker, 1, {ok, worker}),
- meck:expect(couch_replicator_httpc_pool, release_worker_sync, 2, ok),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- mock_http_cookie_response("Abc"),
- ok.
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([
- config,
- couch_replicator_httpc_pool,
- ibrowse
- ]).
-
-teardown(_) ->
- ok.
-
-mock_http_cookie_response(Cookie) ->
- Resp = {ok, "200", [{"Set-Cookie", "AuthSession=" ++ Cookie}], []},
- meck:expect(ibrowse, send_req_direct, 7, Resp).
-
-mock_http_cookie_response_with_age(Cookie, Age) ->
- AgeKV = "Max-Age=" ++ Age,
- CookieKV = "AuthSession=" ++ Cookie,
- Resp = {ok, "200", [{"Set-Cookie", CookieKV ++ ";" ++ AgeKV}], []},
- meck:expect(ibrowse, send_req_direct, 7, Resp).
-
-mock_http_401_response() ->
- meck:expect(ibrowse, send_req_direct, 7, {ok, "401", [], []}).
-
-mock_http_401_response_with_require_valid_user() ->
- Resp1 = {ok, "401", [{"WWW-Authenticate", "Basic realm=\"server\""}], []},
- Resp2 = {ok, "200", [{"Set-Cookie", "AuthSession=Cookie"}], []},
- meck:expect(ibrowse, send_req_direct, 7, meck:seq([Resp1, Resp2])).
-
-mock_http_404_response() ->
- meck:expect(ibrowse, send_req_direct, 7, {ok, "404", [], []}).
-
-mock_http_error_response() ->
- meck:expect(ibrowse, send_req_direct, 7, {error, x}).
-
-parse_max_age_test_() ->
- [
- ?_assertEqual(R, parse_max_age(mochiweb_headers:make([{"Max-Age", A}])))
- || {A, R} <- [
- {"-10", undefined},
- {"\ufeff", undefined},
- {"*", undefined},
- {"\n1", undefined},
- {"1", 1},
- {"1 1", undefined},
- {"2", 2},
- {"100", 100},
- {"1234567890", 1234567890}
- ]
- ].
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_changes_reader.erl b/src/couch_replicator/src/couch_replicator_changes_reader.erl
deleted file mode 100644
index 83080b6fb..000000000
--- a/src/couch_replicator/src/couch_replicator_changes_reader.erl
+++ /dev/null
@@ -1,158 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
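-% Spawn a process that reads the _changes feed of the replication source and
-% enqueues each change into the couch_work_queue passed in as ChangesQueue.
-%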
--module(couch_replicator_changes_reader).
-
-% Public API
--export([start_link/4]).
-
-% Exported for code reloading
--export([read_changes/5]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
--include("couch_replicator.hrl").
-
--import(couch_util, [
- get_value/2
-]).
-
-start_link(StartSeq, #httpdb{} = Db, ChangesQueue, Options) ->
- Parent = self(),
- {ok,
- spawn_link(fun() ->
- put(last_seq, StartSeq),
- put(retries_left, Db#httpdb.retries),
- ?MODULE:read_changes(
- Parent,
- StartSeq,
- Db#httpdb{retries = 0},
- ChangesQueue,
- Options
- )
- end)};
-start_link(StartSeq, Db, ChangesQueue, Options) ->
- Parent = self(),
- {ok,
- spawn_link(fun() ->
- ?MODULE:read_changes(Parent, StartSeq, Db, ChangesQueue, Options)
- end)}.
-
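-% Read the source's _changes feed and queue each change on ChangesQueue. On a
-% `recurse`, `retry_no_limit` or `{retry_limit, Error}` throw, restart reading
-% from the last queued sequence; `retry_limit` also counts down the retries
-% kept in the process dictionary and gives up once they are exhausted.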
-read_changes(Parent, StartSeq, Db, ChangesQueue, Options) ->
- Continuous = couch_util:get_value(continuous, Options),
- try
- couch_replicator_api_wrap:changes_since(
- Db,
- all_docs,
- StartSeq,
- fun(Item) ->
- process_change(Item, {Parent, Db, ChangesQueue, Continuous})
- end,
- Options
- ),
- couch_work_queue:close(ChangesQueue)
- catch
- throw:recurse ->
- LS = get(last_seq),
- read_changes(Parent, LS, Db, ChangesQueue, Options);
- throw:retry_no_limit ->
- LS = get(last_seq),
- read_changes(Parent, LS, Db, ChangesQueue, Options);
- throw:{retry_limit, Error} ->
- couch_stats:increment_counter(
- [couch_replicator, changes_read_failures]
- ),
- case get(retries_left) of
- N when N > 0 ->
- put(retries_left, N - 1),
- LastSeq = get(last_seq),
- Db2 =
- case LastSeq of
- StartSeq ->
- couch_log:notice(
- "Retrying _changes request to source database ~s"
- " with since=~p in ~p seconds",
- [
- couch_replicator_api_wrap:db_uri(Db),
- LastSeq,
- Db#httpdb.wait / 1000
- ]
- ),
- ok = timer:sleep(Db#httpdb.wait),
- Db#httpdb{wait = 2 * Db#httpdb.wait};
- _ ->
- couch_log:notice(
- "Retrying _changes request to source database ~s"
- " with since=~p",
- [couch_replicator_api_wrap:db_uri(Db), LastSeq]
- ),
- Db
- end,
- read_changes(Parent, LastSeq, Db2, ChangesQueue, Options);
- _ ->
- exit(Error)
- end
- end.
-
-process_change(#doc_info{id = <<>>} = DocInfo, {_, Db, _, _}) ->
- % Previous CouchDB releases had a bug which allowed a doc with an empty ID
- % to be inserted into databases. Such a doc is impossible to GET.
- couch_log:error(
- "Replicator: ignoring document with empty ID in "
- "source database `~s` (_changes sequence ~p)",
- [couch_replicator_api_wrap:db_uri(Db), DocInfo#doc_info.high_seq]
- );
-process_change(#doc_info{id = Id} = DocInfo, {Parent, Db, ChangesQueue, _}) ->
- case is_doc_id_too_long(byte_size(Id)) of
- true ->
- SourceDb = couch_replicator_api_wrap:db_uri(Db),
- couch_log:error(
- "Replicator: document id `~s...` from source db "
- " `~64s` is too long, ignoring.",
- [Id, SourceDb]
- ),
- Stats = couch_replicator_stats:new([{doc_write_failures, 1}]),
- ok = gen_server:call(Parent, {add_stats, Stats}, infinity);
- false ->
- ok = couch_work_queue:queue(ChangesQueue, DocInfo),
- put(last_seq, DocInfo#doc_info.high_seq)
- end;
-process_change({last_seq, LS}, {_Parent, _, ChangesQueue, true = _Continuous}) ->
- % LS should never be undefined, but it doesn't hurt to be defensive inside
- % the replicator.
- Seq =
- case LS of
- undefined -> get(last_seq);
- _ -> LS
- end,
- OldSeq = get(last_seq),
- if
- Seq == OldSeq -> ok;
- true -> ok = couch_work_queue:queue(ChangesQueue, {last_seq, Seq})
- end,
- put(last_seq, Seq),
- throw(recurse);
-process_change({last_seq, _}, _) ->
- % This clause is unreachable today, but let's plan ahead for the future
- % where we checkpoint against last_seq instead of the sequence of the last
- % change. The two can differ substantially in the case of a restrictive
- % filter.
- ok.
-
-is_doc_id_too_long(IdLength) ->
- case config:get("replicator", "max_document_id_length", "infinity") of
- "infinity" ->
- false;
- ConfigMaxStr ->
- ConfigMax = list_to_integer(ConfigMaxStr),
- ConfigMax > 0 andalso IdLength > ConfigMax
- end.
diff --git a/src/couch_replicator/src/couch_replicator_clustering.erl b/src/couch_replicator/src/couch_replicator_clustering.erl
deleted file mode 100644
index 8db320433..000000000
--- a/src/couch_replicator/src/couch_replicator_clustering.erl
+++ /dev/null
@@ -1,269 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Maintain cluster membership and stability notifications for replications.
-% On changes to cluster membership, broadcast events to `replication` gen_event.
-% Listeners will get `{cluster, stable}` or `{cluster, unstable}` events.
-%
-% Cluster stability is defined as "there have been no nodes added or removed in
-% the last `QuietPeriod` seconds". The QuietPeriod value is configurable. To
-% ensure a speedier startup, during initialization a shorter StartupPeriod is
-% in effect (also configurable).
-%
-% This module is also in charge of calculating ownership of replications based
-% on where their _replicator db document shards live.
-
--module(couch_replicator_clustering).
-
--behaviour(gen_server).
--behaviour(config_listener).
--behaviour(mem3_cluster).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
-]).
-
--export([
- owner/2,
- is_stable/0,
- link_cluster_event_listener/3
-]).
-
-% config_listener callbacks
--export([
- handle_config_change/5,
- handle_config_terminate/3
-]).
-
-% mem3_cluster callbacks
--export([
- cluster_stable/1,
- cluster_unstable/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-% seconds
--define(DEFAULT_QUIET_PERIOD, 60).
-% seconds
--define(DEFAULT_START_PERIOD, 5).
--define(RELISTEN_DELAY, 5000).
-
--record(state, {
- mem3_cluster_pid :: pid(),
- cluster_stable :: boolean()
-}).
-
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-% The owner/2 function computes ownership for a {DbName, DocId} tuple. It
-% returns `unstable` if the cluster is considered unstable, i.e. it has changed
-% recently, otherwise it returns the owner node().
-%
--spec owner(Dbname :: binary(), DocId :: binary()) -> node() | unstable.
-owner(<<"shards/", _/binary>> = DbName, DocId) ->
- case is_stable() of
- false ->
- unstable;
- true ->
- owner_int(DbName, DocId)
- end;
-owner(_DbName, _DocId) ->
- node().
-
--spec is_stable() -> true | false.
-is_stable() ->
- gen_server:call(?MODULE, is_stable).
-
--spec link_cluster_event_listener(atom(), atom(), list()) -> pid().
-link_cluster_event_listener(Mod, Fun, Args) when
- is_atom(Mod), is_atom(Fun), is_list(Args)
-->
- CallbackFun =
- fun
- (Event = {cluster, _}) -> erlang:apply(Mod, Fun, Args ++ [Event]);
- (_) -> ok
- end,
- {ok, Pid} = couch_replicator_notifier:start_link(CallbackFun),
- Pid.
-
-% Mem3 cluster callbacks
-
-cluster_unstable(Server) ->
- ok = gen_server:call(Server, set_unstable),
- couch_replicator_notifier:notify({cluster, unstable}),
- couch_stats:update_gauge([couch_replicator, cluster_is_stable], 0),
- couch_log:notice("~s : cluster unstable", [?MODULE]),
- Server.
-
-cluster_stable(Server) ->
- ok = gen_server:call(Server, set_stable),
- couch_replicator_notifier:notify({cluster, stable}),
- couch_stats:update_gauge([couch_replicator, cluster_is_stable], 1),
- couch_log:notice("~s : cluster stable", [?MODULE]),
- Server.
-
-% gen_server callbacks
-
-init([]) ->
- ok = config:listen_for_changes(?MODULE, nil),
- Period = abs(
- config:get_integer(
- "replicator",
- "cluster_quiet_period",
- ?DEFAULT_QUIET_PERIOD
- )
- ),
- StartPeriod = abs(
- config:get_integer(
- "replicator",
- "cluster_start_period",
- ?DEFAULT_START_PERIOD
- )
- ),
- couch_stats:update_gauge([couch_replicator, cluster_is_stable], 0),
- {ok, Mem3Cluster} = mem3_cluster:start_link(
- ?MODULE,
- self(),
- StartPeriod,
- Period
- ),
- {ok, #state{mem3_cluster_pid = Mem3Cluster, cluster_stable = false}}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(is_stable, _From, #state{cluster_stable = IsStable} = State) ->
- {reply, IsStable, State};
-handle_call(set_stable, _From, State) ->
- {reply, ok, State#state{cluster_stable = true}};
-handle_call(set_unstable, _From, State) ->
- {reply, ok, State#state{cluster_stable = false}}.
-
-handle_cast({set_period, Period}, #state{mem3_cluster_pid = Pid} = State) ->
- ok = mem3_cluster:set_period(Pid, Period),
- {noreply, State}.
-
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%% Internal functions
-
-handle_config_change("replicator", "cluster_quiet_period", V, _, S) ->
- ok = gen_server:cast(?MODULE, {set_period, list_to_integer(V)}),
- {ok, S};
-handle_config_change(_, _, _, _, S) ->
- {ok, S}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_S, _R, _St) ->
- Pid = whereis(?MODULE),
- erlang:send_after(?RELISTEN_DELAY, Pid, restart_config_listener).
-
--spec owner_int(binary(), binary()) -> node().
-owner_int(ShardName, DocId) ->
- DbName = mem3:dbname(ShardName),
- Live = [node() | nodes()],
- Shards = mem3:shards(DbName, DocId),
- Nodes = [N || #shard{node = N} <- Shards, lists:member(N, Live)],
- mem3:owner(DbName, DocId, Nodes).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-replicator_clustering_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_stable_callback(),
- t_unstable_callback()
- ]
- }
- }.
-
-t_stable_callback() ->
- ?_test(begin
- ?assertEqual(false, is_stable()),
- cluster_stable(whereis(?MODULE)),
- ?assertEqual(true, is_stable())
- end).
-
-t_unstable_callback() ->
- ?_test(begin
- cluster_stable(whereis(?MODULE)),
- ?assertEqual(true, is_stable()),
- cluster_unstable(whereis(?MODULE)),
- ?assertEqual(false, is_stable())
- end).
-
-setup_all() ->
- meck:expect(couch_log, notice, 2, ok),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- meck:expect(config, listen_for_changes, 2, ok),
- meck:expect(couch_stats, update_gauge, 2, ok),
- meck:expect(couch_replicator_notifier, notify, 1, ok).
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([
- config,
- couch_log,
- couch_stats,
- couch_replicator_notifier
- ]),
- stop_clustering_process(),
- {ok, Pid} = start_link(),
- Pid.
-
-teardown(Pid) ->
- stop_clustering_process(Pid).
-
-stop_clustering_process() ->
- stop_clustering_process(whereis(?MODULE)).
-
-stop_clustering_process(undefined) ->
- ok;
-stop_clustering_process(Pid) when is_pid(Pid) ->
- Ref = erlang:monitor(process, Pid),
- unlink(Pid),
- exit(Pid, kill),
- receive
- {'DOWN', Ref, _, _, _} -> ok
- end.
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_connection.erl b/src/couch_replicator/src/couch_replicator_connection.erl
deleted file mode 100644
index a158d2609..000000000
--- a/src/couch_replicator/src/couch_replicator_connection.erl
+++ /dev/null
@@ -1,281 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
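-% Maintain a shared pool of ibrowse connection workers keyed by host, port and
-% proxy in a public ETS table. Callers check workers out with acquire/1,2 and
-% return them with release/1; idle (unassigned) workers are closed on a timer
-% driven by the configurable "connection_close_interval" setting.
-%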
--module(couch_replicator_connection).
-
--behavior(gen_server).
--behavior(config_listener).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
-]).
-
--export([
- acquire/1,
- acquire/2,
- release/1
-]).
-
--export([
- handle_config_change/5,
- handle_config_terminate/3
-]).
-
--include_lib("ibrowse/include/ibrowse.hrl").
-
--define(DEFAULT_CLOSE_INTERVAL, 90000).
--define(RELISTEN_DELAY, 5000).
-
--record(state, {
- close_interval,
- timer
-}).
-
--record(connection, {
- worker,
- host,
- port,
- proxy_host,
- proxy_port,
- mref
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
- process_flag(trap_exit, true),
- ?MODULE = ets:new(?MODULE, [
- named_table,
- public,
- {keypos, #connection.worker}
- ]),
- ok = config:listen_for_changes(?MODULE, nil),
- Interval = config:get_integer(
- "replicator",
- "connection_close_interval",
- ?DEFAULT_CLOSE_INTERVAL
- ),
- Timer = erlang:send_after(Interval, self(), close_idle_connections),
- ibrowse:add_config([
- {inactivity_timeout, Interval},
- {worker_trap_exits, false}
- ]),
- {ok, #state{close_interval = Interval, timer = Timer}}.
-
-acquire(Url) ->
- acquire(Url, undefined).
-
-acquire(Url, ProxyUrl) when is_binary(Url) ->
- acquire(binary_to_list(Url), ProxyUrl);
-acquire(Url, ProxyUrl) when is_binary(ProxyUrl) ->
- acquire(Url, binary_to_list(ProxyUrl));
-acquire(Url0, ProxyUrl0) ->
- Url = couch_util:url_strip_password(Url0),
- ProxyUrl =
- case ProxyUrl0 of
- undefined -> undefined;
- _ -> couch_util:url_strip_password(ProxyUrl0)
- end,
- case gen_server:call(?MODULE, {acquire, Url, ProxyUrl}) of
- {ok, Worker} ->
- link(Worker),
- {ok, Worker};
- {error, all_allocated} ->
- {ok, Pid} = ibrowse:spawn_link_worker_process(Url),
- ok = gen_server:call(?MODULE, {create, Url, ProxyUrl, Pid}),
- {ok, Pid};
- {error, Reason} ->
- {error, Reason}
- end.
-
-release(Worker) ->
- unlink(Worker),
- gen_server:cast(?MODULE, {release, Worker}).
-
-handle_call({acquire, Url, ProxyUrl}, From, State) ->
- {Pid, _Ref} = From,
- case {ibrowse_lib:parse_url(Url), parse_proxy_url(ProxyUrl)} of
- {#url{host = Host, port = Port}, #url{host = ProxyHost, port = ProxyPort}} ->
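- % Look for an existing worker for the same host/port (and proxy) that is
- % not currently checked out; `mref = undefined` means unassigned.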
- Pat = #connection{
- host = Host,
- port = Port,
- proxy_host = ProxyHost,
- proxy_port = ProxyPort,
- mref = undefined,
- _ = '_'
- },
- case ets:match_object(?MODULE, Pat, 1) of
- '$end_of_table' ->
- {reply, {error, all_allocated}, State};
- {[Worker], _Cont} ->
- couch_stats:increment_counter([
- couch_replicator,
- connection,
- acquires
- ]),
- ets:insert(?MODULE, Worker#connection{
- mref = monitor(
- process,
- Pid
- )
- }),
- {reply, {ok, Worker#connection.worker}, State}
- end;
- {{error, invalid_uri}, _} ->
- {reply, {error, invalid_uri}, State};
- {_, {error, invalid_uri}} ->
- {reply, {error, invalid_uri}, State}
- end;
-handle_call({create, Url, ProxyUrl, Worker}, From, State) ->
- {Pid, _Ref} = From,
- case {ibrowse_lib:parse_url(Url), parse_proxy_url(ProxyUrl)} of
- {#url{host = Host, port = Port}, #url{host = ProxyHost, port = ProxyPort}} ->
- link(Worker),
- couch_stats:increment_counter([
- couch_replicator,
- connection,
- creates
- ]),
- true = ets:insert_new(
- ?MODULE,
- #connection{
- host = Host,
- port = Port,
- proxy_host = ProxyHost,
- proxy_port = ProxyPort,
- worker = Worker,
- mref = monitor(process, Pid)
- }
- ),
- {reply, ok, State}
- end.
-
-handle_cast({release, WorkerPid}, State) ->
- couch_stats:increment_counter([couch_replicator, connection, releases]),
- case ets:lookup(?MODULE, WorkerPid) of
- [Worker] ->
- case Worker#connection.mref of
- MRef when is_reference(MRef) -> demonitor(MRef, [flush]);
- undefined -> ok
- end,
- ets:insert(?MODULE, Worker#connection{mref = undefined});
- [] ->
- ok
- end,
- {noreply, State};
-handle_cast({connection_close_interval, V}, State) ->
- erlang:cancel_timer(State#state.timer),
- NewTimer = erlang:send_after(V, self(), close_idle_connections),
- ibrowse:add_config([{inactivity_timeout, V}]),
- {noreply, State#state{close_interval = V, timer = NewTimer}}.
-
-% owner crashed
-handle_info({'DOWN', Ref, process, _Pid, _Reason}, State) ->
- couch_stats:increment_counter([
- couch_replicator,
- connection,
- owner_crashes
- ]),
- Conns = ets:match_object(?MODULE, #connection{mref = Ref, _ = '_'}),
- lists:foreach(
- fun(Conn) ->
- couch_stats:increment_counter([couch_replicator, connection, closes]),
- delete_worker(Conn)
- end,
- Conns
- ),
- {noreply, State};
-% worker crashed
-handle_info({'EXIT', Pid, Reason}, State) ->
- couch_stats:increment_counter([
- couch_replicator,
- connection,
- worker_crashes
- ]),
- case ets:lookup(?MODULE, Pid) of
- [] ->
- ok;
- [Worker] ->
- #connection{host = Host, port = Port} = Worker,
- maybe_log_worker_death(Host, Port, Reason),
- case Worker#connection.mref of
- MRef when is_reference(MRef) -> demonitor(MRef, [flush]);
- undefined -> ok
- end,
- ets:delete(?MODULE, Pid)
- end,
- {noreply, State};
-handle_info(close_idle_connections, State) ->
- #state{
- close_interval = Interval,
- timer = Timer
- } = State,
- Conns = ets:match_object(?MODULE, #connection{mref = undefined, _ = '_'}),
- lists:foreach(
- fun(Conn) ->
- couch_stats:increment_counter([couch_replicator, connection, closes]),
- delete_worker(Conn)
- end,
- Conns
- ),
- erlang:cancel_timer(Timer),
- NewTimer = erlang:send_after(Interval, self(), close_idle_connections),
- {noreply, State#state{timer = NewTimer}};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-maybe_log_worker_death(_Host, _Port, normal) ->
- ok;
-maybe_log_worker_death(Host, Port, Reason) ->
- ErrMsg = "Replication connection to: ~p:~p died with reason ~p",
- couch_log:info(ErrMsg, [Host, Port, Reason]).
-
--spec delete_worker(#connection{}) -> ok.
-delete_worker(Worker) ->
- ets:delete(?MODULE, Worker#connection.worker),
- unlink(Worker#connection.worker),
- spawn(fun() -> ibrowse_http_client:stop(Worker#connection.worker) end),
- ok.
-
-handle_config_change("replicator", "connection_close_interval", V, _, S) ->
- ok = gen_server:cast(?MODULE, {connection_close_interval, list_to_integer(V)}),
- {ok, S};
-handle_config_change(_, _, _, _, S) ->
- {ok, S}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_, _, _) ->
- Pid = whereis(?MODULE),
- erlang:send_after(?RELISTEN_DELAY, Pid, restart_config_listener).
-
-parse_proxy_url(undefined) ->
- #url{host = undefined, port = undefined};
-parse_proxy_url(ProxyUrl) ->
- ibrowse_lib:parse_url(ProxyUrl).
diff --git a/src/couch_replicator/src/couch_replicator_db_changes.erl b/src/couch_replicator/src/couch_replicator_db_changes.erl
deleted file mode 100644
index 947af51b4..000000000
--- a/src/couch_replicator/src/couch_replicator_db_changes.erl
+++ /dev/null
@@ -1,103 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
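-% Start or stop the couch_multidb_changes listener on `_replicator` shards in
-% response to `{cluster, stable}` / `{cluster, unstable}` events, feeding
-% document changes to couch_replicator_doc_processor.
-%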
--module(couch_replicator_db_changes).
-
--behaviour(gen_server).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
-]).
-
--export([
- notify_cluster_event/2
-]).
-
--record(state, {
- event_listener :: pid(),
- mdb_changes :: pid() | nil
-}).
-
--spec notify_cluster_event(pid(), {cluster, any()}) -> ok.
-notify_cluster_event(Server, {cluster, _} = Event) ->
- gen_server:cast(Server, Event).
-
--spec start_link() ->
- {ok, pid()} | ignore | {error, any()}.
-start_link() ->
- gen_server:start_link(?MODULE, [], []).
-
-init([]) ->
- EvtPid = couch_replicator_clustering:link_cluster_event_listener(
- ?MODULE,
- notify_cluster_event,
- [self()]
- ),
- State = #state{event_listener = EvtPid, mdb_changes = nil},
- case couch_replicator_clustering:is_stable() of
- true ->
- {ok, restart_mdb_changes(State)};
- false ->
- {ok, State}
- end.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(_Msg, _From, State) ->
- {reply, {error, invalid_call}, State}.
-
-handle_cast({cluster, unstable}, State) ->
- {noreply, stop_mdb_changes(State)};
-handle_cast({cluster, stable}, State) ->
- {noreply, restart_mdb_changes(State)}.
-
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
--spec restart_mdb_changes(#state{}) -> #state{}.
-restart_mdb_changes(#state{mdb_changes = nil} = State) ->
- Suffix = <<"_replicator">>,
- CallbackMod = couch_replicator_doc_processor,
- Options = [skip_ddocs],
- {ok, Pid} = couch_multidb_changes:start_link(
- Suffix,
- CallbackMod,
- nil,
- Options
- ),
- couch_stats:increment_counter([couch_replicator, db_scans]),
- couch_log:notice("Started replicator db changes listener ~p", [Pid]),
- State#state{mdb_changes = Pid};
-restart_mdb_changes(#state{mdb_changes = _Pid} = State) ->
- restart_mdb_changes(stop_mdb_changes(State)).
-
--spec stop_mdb_changes(#state{}) -> #state{}.
-stop_mdb_changes(#state{mdb_changes = nil} = State) ->
- State;
-stop_mdb_changes(#state{mdb_changes = Pid} = State) ->
- couch_log:notice("Stopping replicator db changes listener ~p", [Pid]),
- unlink(Pid),
- exit(Pid, kill),
- State#state{mdb_changes = nil}.
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl
deleted file mode 100644
index 436d7c44d..000000000
--- a/src/couch_replicator/src/couch_replicator_doc_processor.erl
+++ /dev/null
@@ -1,934 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
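-% Track replication documents in an ETS table of #rdoc{} records. Changes come
-% in through the couch_multidb_changes callbacks below; for each document owned
-% by this node a worker is spawned to calculate the replication id and add the
-% job to couch_replicator_scheduler, with error backoff and periodic re-checks
-% for replications that use user-defined filters.
-%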
--module(couch_replicator_doc_processor).
-
--behaviour(gen_server).
--behaviour(couch_multidb_changes).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
-]).
-
--export([
- db_created/2,
- db_deleted/2,
- db_found/2,
- db_change/3
-]).
-
--export([
- docs/1,
- doc/2,
- doc_lookup/3,
- update_docs/0,
- get_worker_ref/1,
- notify_cluster_event/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_replicator.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--import(couch_replicator_utils, [
- get_json_value/2,
- get_json_value/3
-]).
-
--define(DEFAULT_UPDATE_DOCS, false).
-% ~ 1 day on average
--define(ERROR_MAX_BACKOFF_EXPONENT, 12).
--define(TS_DAY_SEC, 86400).
--define(INITIAL_BACKOFF_EXPONENT, 64).
--define(MIN_FILTER_DELAY_SEC, 60).
-
--type filter_type() :: nil | view | user | docids | mango.
--type repstate() :: initializing | error | scheduled.
-
--record(rdoc, {
- id :: db_doc_id() | '_' | {any(), '_'},
- state :: repstate() | '_',
- rep :: #rep{} | nil | '_',
- rid :: rep_id() | nil | '_',
- filter :: filter_type() | '_',
- info :: binary() | nil | '_',
- errcnt :: non_neg_integer() | '_',
- worker :: reference() | nil | '_',
- last_updated :: erlang:timestamp() | '_'
-}).
-
-% couch_multidb_changes API callbacks
-
-db_created(DbName, Server) ->
- couch_stats:increment_counter([couch_replicator, docs, dbs_created]),
- couch_replicator_docs:ensure_rep_ddoc_exists(DbName),
- Server.
-
-db_deleted(DbName, Server) ->
- couch_stats:increment_counter([couch_replicator, docs, dbs_deleted]),
- ok = gen_server:call(?MODULE, {clean_up_replications, DbName}, infinity),
- Server.
-
-db_found(DbName, Server) ->
- couch_stats:increment_counter([couch_replicator, docs, dbs_found]),
- couch_replicator_docs:ensure_rep_ddoc_exists(DbName),
- Server.
-
-db_change(DbName, {ChangeProps} = Change, Server) ->
- couch_stats:increment_counter([couch_replicator, docs, db_changes]),
- try
- ok = process_change(DbName, Change)
- catch
- exit:{Error, {gen_server, call, [?MODULE, _, _]}} ->
- ErrMsg = "~p exited ~p while processing change from db ~p",
- couch_log:error(ErrMsg, [?MODULE, Error, DbName]);
- _Tag:Error ->
- {RepProps} = get_json_value(doc, ChangeProps),
- DocId = get_json_value(<<"_id">>, RepProps),
- couch_replicator_docs:update_failed(DbName, DocId, Error)
- end,
- Server.
-
--spec get_worker_ref(db_doc_id()) -> reference() | nil.
-get_worker_ref({DbName, DocId}) when is_binary(DbName), is_binary(DocId) ->
- case ets:lookup(?MODULE, {DbName, DocId}) of
- [#rdoc{worker = WRef}] when is_reference(WRef) ->
- WRef;
- [#rdoc{worker = nil}] ->
- nil;
- [] ->
- nil
- end.
-
-% Cluster membership change notification callback
--spec notify_cluster_event(pid(), {cluster, any()}) -> ok.
-notify_cluster_event(Server, {cluster, _} = Event) ->
- gen_server:cast(Server, Event).
-
-process_change(DbName, {Change}) ->
- {RepProps} = JsonRepDoc = get_json_value(doc, Change),
- DocId = get_json_value(<<"_id">>, RepProps),
- Owner = couch_replicator_clustering:owner(DbName, DocId),
- Id = {DbName, DocId},
- case {Owner, get_json_value(deleted, Change, false)} of
- {_, true} ->
- ok = gen_server:call(?MODULE, {removed, Id}, infinity);
- {unstable, false} ->
- couch_log:notice("Not starting '~s' as cluster is unstable", [DocId]);
- {ThisNode, false} when ThisNode =:= node() ->
- case get_json_value(<<"_replication_state">>, RepProps) of
- undefined ->
- ok = process_updated(Id, JsonRepDoc);
- <<"triggered">> ->
- maybe_remove_state_fields(DbName, DocId),
- ok = process_updated(Id, JsonRepDoc);
- <<"completed">> ->
- ok = gen_server:call(?MODULE, {completed, Id}, infinity);
- <<"error">> ->
- % Handle replications started from older versions of replicator
- % which wrote transient errors to replication docs
- maybe_remove_state_fields(DbName, DocId),
- ok = process_updated(Id, JsonRepDoc);
- <<"failed">> ->
- ok
- end;
- {Owner, false} ->
- ok
- end,
- ok.
-
-maybe_remove_state_fields(DbName, DocId) ->
- case update_docs() of
- true ->
- ok;
- false ->
- couch_replicator_docs:remove_state_fields(DbName, DocId)
- end.
-
-process_updated({DbName, _DocId} = Id, JsonRepDoc) ->
- % Parsing the replication doc (but not calculating the id) could throw an
- % exception which would indicate this document is malformed. This exception
- % should propagate to the db_change function and will be recorded as a
- % permanent failure in the document. The user will have to update the
- % document to fix the problem.
- Rep0 = couch_replicator_docs:parse_rep_doc_without_id(JsonRepDoc),
- Rep = Rep0#rep{db_name = DbName, start_time = os:timestamp()},
- Filter =
- case couch_replicator_filters:parse(Rep#rep.options) of
- {ok, nil} ->
- nil;
- {ok, {user, _FName, _QP}} ->
- user;
- {ok, {view, _FName, _QP}} ->
- view;
- {ok, {docids, _DocIds}} ->
- docids;
- {ok, {mango, _Selector}} ->
- mango;
- {error, FilterError} ->
- throw(FilterError)
- end,
- gen_server:call(?MODULE, {updated, Id, Rep, Filter}, infinity).
-
-% Doc processor gen_server API and callbacks
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
- ?MODULE = ets:new(?MODULE, [
- named_table,
- {keypos, #rdoc.id},
- {read_concurrency, true},
- {write_concurrency, true}
- ]),
- couch_replicator_clustering:link_cluster_event_listener(
- ?MODULE,
- notify_cluster_event,
- [self()]
- ),
- {ok, nil}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call({updated, Id, Rep, Filter}, _From, State) ->
- ok = updated_doc(Id, Rep, Filter),
- {reply, ok, State};
-handle_call({removed, Id}, _From, State) ->
- ok = removed_doc(Id),
- {reply, ok, State};
-handle_call({completed, Id}, _From, State) ->
- true = ets:delete(?MODULE, Id),
- {reply, ok, State};
-handle_call({clean_up_replications, DbName}, _From, State) ->
- ok = removed_db(DbName),
- {reply, ok, State}.
-
-handle_cast({cluster, unstable}, State) ->
- % Ignoring unstable state transition
- {noreply, State};
-handle_cast({cluster, stable}, State) ->
- % Membership changed; recheck ownership of all the replication documents
- nil = ets:foldl(fun cluster_membership_foldl/2, nil, ?MODULE),
- {noreply, State};
-handle_cast(Msg, State) ->
- {stop, {error, unexpected_message, Msg}, State}.
-
-handle_info(
- {'DOWN', _, _, _, #doc_worker_result{
- id = Id,
- wref = Ref,
- result = Res
- }},
- State
-) ->
- ok = worker_returned(Ref, Id, Res),
- {noreply, State};
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-% Doc processor gen_server private helper functions
-
-% Handle doc update -- add to ets, then start a worker to try to turn it into
-% a replication job. In most cases it will succeed quickly, but for filtered
-% replications or if there are duplicates, it could take longer
-% (theoretically indefinitely) until a replication can be started. Before
-% adding a replication job, make sure to delete all old jobs associated with
-% the same document.
--spec updated_doc(db_doc_id(), #rep{}, filter_type()) -> ok.
-updated_doc(Id, Rep, Filter) ->
- NormCurRep = couch_replicator_utils:normalize_rep(current_rep(Id)),
- NormNewRep = couch_replicator_utils:normalize_rep(Rep),
- case NormCurRep == NormNewRep of
- false ->
- removed_doc(Id),
- Row = #rdoc{
- id = Id,
- state = initializing,
- rep = Rep,
- rid = nil,
- filter = Filter,
- info = nil,
- errcnt = 0,
- worker = nil,
- last_updated = os:timestamp()
- },
- true = ets:insert(?MODULE, Row),
- ok = maybe_start_worker(Id);
- true ->
- ok
- end.
-
-% Return the current #rep{} record, if any. If the replication hasn't been
-% submitted to the scheduler yet, the #rep{} record will be in the document
-% processor's ETS table; otherwise query the scheduler for it.
--spec current_rep({binary(), binary()}) -> #rep{} | nil.
-current_rep({DbName, DocId}) when is_binary(DbName), is_binary(DocId) ->
- case ets:lookup(?MODULE, {DbName, DocId}) of
- [] ->
- nil;
- [#rdoc{state = scheduled, rep = nil, rid = JobId}] ->
- % When a replication is scheduled, the #rep{} record, which can be quite
- % large compared to other bits in #rdoc, is removed in order to avoid
- % keeping two copies of it, so it has to be fetched from the
- % scheduler.
- couch_replicator_scheduler:rep_state(JobId);
- [#rdoc{rep = Rep}] ->
- Rep
- end.
-
--spec worker_returned(reference(), db_doc_id(), rep_start_result()) -> ok.
-worker_returned(Ref, Id, {ok, RepId}) ->
- case ets:lookup(?MODULE, Id) of
- [#rdoc{worker = Ref} = Row] ->
- Row0 = Row#rdoc{
- state = scheduled,
- errcnt = 0,
- worker = nil,
- last_updated = os:timestamp()
- },
- NewRow =
- case Row0 of
- #rdoc{rid = RepId, filter = user} ->
- % Filtered replication id didn't change.
- Row0;
- #rdoc{rid = nil, filter = user} ->
- % Calculated new replication id for a filtered replication. Make
- % sure to schedule another check as filter code could change.
- % Replication starts could have been failing, so also clear
- % error count.
- Row0#rdoc{rid = RepId};
- #rdoc{rid = OldRepId, filter = user} ->
- % Replication id of existing replication job with filter has
- % changed. Remove old replication job from scheduler and
- % schedule check to check for future changes.
- ok = couch_replicator_scheduler:remove_job(OldRepId),
- Msg = io_lib:format("Replication id changed: ~p -> ~p", [
- OldRepId, RepId
- ]),
- Row0#rdoc{rid = RepId, info = couch_util:to_binary(Msg)};
- #rdoc{rid = nil} ->
- % Calculated new replication id for non-filtered replication.
- % Remove replication doc body, after this we won't need it
- % anymore.
- Row0#rdoc{rep = nil, rid = RepId, info = nil}
- end,
- true = ets:insert(?MODULE, NewRow),
- ok = maybe_update_doc_triggered(Row#rdoc.rep, RepId),
- ok = maybe_start_worker(Id);
- _ ->
- % doc could have been deleted, ignore
- ok
- end,
- ok;
-worker_returned(_Ref, _Id, ignore) ->
- ok;
-worker_returned(Ref, Id, {temporary_error, Reason}) ->
- case ets:lookup(?MODULE, Id) of
- [#rdoc{worker = Ref, errcnt = ErrCnt} = Row] ->
- NewRow = Row#rdoc{
- rid = nil,
- state = error,
- info = Reason,
- errcnt = ErrCnt + 1,
- worker = nil,
- last_updated = os:timestamp()
- },
- true = ets:insert(?MODULE, NewRow),
- ok = maybe_update_doc_error(NewRow#rdoc.rep, Reason),
- ok = maybe_start_worker(Id);
- _ ->
- % doc could have been deleted, ignore
- ok
- end,
- ok;
-worker_returned(Ref, Id, {permanent_failure, _Reason}) ->
- case ets:lookup(?MODULE, Id) of
- [#rdoc{worker = Ref}] ->
- true = ets:delete(?MODULE, Id);
- _ ->
- % doc could have been deleted, ignore
- ok
- end,
- ok.
-
--spec maybe_update_doc_error(#rep{}, any()) -> ok.
-maybe_update_doc_error(Rep, Reason) ->
- case update_docs() of
- true ->
- couch_replicator_docs:update_error(Rep, Reason);
- false ->
- ok
- end.
-
--spec maybe_update_doc_triggered(#rep{}, rep_id()) -> ok.
-maybe_update_doc_triggered(Rep, RepId) ->
- case update_docs() of
- true ->
- couch_replicator_docs:update_triggered(Rep, RepId);
- false ->
- ok
- end.
-
--spec error_backoff(non_neg_integer()) -> seconds().
-error_backoff(ErrCnt) ->
- Exp = min(ErrCnt, ?ERROR_MAX_BACKOFF_EXPONENT),
- % ErrCnt is the exponent here. 64 is used so the initial range maxes out
- % at 64 seconds (about a minute); the first backoff is then about 30 sec
- % on average, the next about a minute, and so on.
- couch_rand:uniform(?INITIAL_BACKOFF_EXPONENT bsl Exp).
-
--spec filter_backoff() -> seconds().
-filter_backoff() ->
- Total = ets:info(?MODULE, size),
- % This value is scaled by the number of replications. If there are a lot of
- % them the wait is longer, but not more than a day (?TS_DAY_SEC). If there are
- % just a few, the wait is shorter, starting at about 30 seconds. `2 *` is used
- % since the expected wait would then be 0.5 * Range, which makes it easier to
- % see the average wait. `1 +` is used because couch_rand:uniform only
- % accepts values >= 1 and crashes otherwise.
- Range = 1 + min(2 * (Total / 10), ?TS_DAY_SEC),
- ?MIN_FILTER_DELAY_SEC + couch_rand:uniform(round(Range)).
-
-% Document removed from db -- clear ets table and remove all scheduled jobs
--spec removed_doc(db_doc_id()) -> ok.
-removed_doc({DbName, DocId} = Id) ->
- ets:delete(?MODULE, Id),
- RepIds = couch_replicator_scheduler:find_jobs_by_doc(DbName, DocId),
- lists:foreach(fun couch_replicator_scheduler:remove_job/1, RepIds).
-
-% Whole db shard is gone -- remove all its ets rows and stop jobs
--spec removed_db(binary()) -> ok.
-removed_db(DbName) ->
- EtsPat = #rdoc{id = {DbName, '_'}, _ = '_'},
- ets:match_delete(?MODULE, EtsPat),
- RepIds = couch_replicator_scheduler:find_jobs_by_dbname(DbName),
- lists:foreach(fun couch_replicator_scheduler:remove_job/1, RepIds).
-
-% Spawn a worker process which will attempt to calculate a replication id, then
-% start a replication. The worker reference is stored in the #rdoc ETS row and
-% the worker is guaranteed to exit with the rep_start_result() type only.
--spec maybe_start_worker(db_doc_id()) -> ok.
-maybe_start_worker(Id) ->
- case ets:lookup(?MODULE, Id) of
- [] ->
- ok;
- [#rdoc{state = scheduled, filter = Filter}] when Filter =/= user ->
- ok;
- [#rdoc{rep = Rep} = Doc] ->
- % For any replication with a user-created filter function, periodically
- % (every `filter_backoff/0` seconds) spawn a worker to check whether the
- % user filter has changed. When the worker returns, check if the
- % replication ID has changed. If it hasn't, keep checking (spawn another
- % worker and so on). If it has, stop the job with the old ID and continue
- % checking.
- Wait = get_worker_wait(Doc),
- Ref = make_ref(),
- true = ets:insert(?MODULE, Doc#rdoc{worker = Ref}),
- couch_replicator_doc_processor_worker:spawn_worker(Id, Rep, Wait, Ref),
- ok
- end.
-
--spec get_worker_wait(#rdoc{}) -> seconds().
-get_worker_wait(#rdoc{state = scheduled, filter = user}) ->
- filter_backoff();
-get_worker_wait(#rdoc{state = error, errcnt = ErrCnt}) ->
- error_backoff(ErrCnt);
-get_worker_wait(#rdoc{state = initializing}) ->
- 0.
-
--spec update_docs() -> boolean().
-update_docs() ->
- config:get_boolean("replicator", "update_docs", ?DEFAULT_UPDATE_DOCS).
-
-% _scheduler/docs HTTP endpoint helpers
-
--spec docs([atom()]) -> [{[_]}] | [].
-docs(States) ->
- HealthThreshold = couch_replicator_scheduler:health_threshold(),
- ets:foldl(
- fun(RDoc, Acc) ->
- case ejson_doc(RDoc, HealthThreshold) of
- nil ->
- % Could have been deleted if job just completed
- Acc;
- {Props} = EJson ->
- {state, DocState} = lists:keyfind(state, 1, Props),
- case ejson_doc_state_filter(DocState, States) of
- true ->
- [EJson | Acc];
- false ->
- Acc
- end
- end
- end,
- [],
- ?MODULE
- ).
-
--spec doc(binary(), binary()) -> {ok, {[_]}} | {error, not_found}.
-doc(Db, DocId) ->
- HealthThreshold = couch_replicator_scheduler:health_threshold(),
- Res =
- (catch ets:foldl(
- fun(RDoc, nil) ->
- {Shard, RDocId} = RDoc#rdoc.id,
- case {mem3:dbname(Shard), RDocId} of
- {Db, DocId} ->
- throw({found, ejson_doc(RDoc, HealthThreshold)});
- {_OtherDb, _OtherDocId} ->
- nil
- end
- end,
- nil,
- ?MODULE
- )),
- case Res of
- {found, DocInfo} ->
- {ok, DocInfo};
- nil ->
- {error, not_found}
- end.
-
--spec doc_lookup(binary(), binary(), integer()) ->
- {ok, {[_]}} | {error, not_found}.
-doc_lookup(Db, DocId, HealthThreshold) ->
- case ets:lookup(?MODULE, {Db, DocId}) of
- [#rdoc{} = RDoc] ->
- {ok, ejson_doc(RDoc, HealthThreshold)};
- [] ->
- {error, not_found}
- end.
-
--spec ejson_rep_id(rep_id() | nil) -> binary() | null.
-ejson_rep_id(nil) ->
- null;
-ejson_rep_id({BaseId, Ext}) ->
- iolist_to_binary([BaseId, Ext]).
-
--spec ejson_doc(#rdoc{}, non_neg_integer()) -> {[_]} | nil.
-ejson_doc(#rdoc{state = scheduled} = RDoc, HealthThreshold) ->
- #rdoc{id = {DbName, DocId}, rid = RepId} = RDoc,
- JobProps = couch_replicator_scheduler:job_summary(RepId, HealthThreshold),
- case JobProps of
- nil ->
- nil;
- [{_, _} | _] ->
- {[
- {doc_id, DocId},
- {database, DbName},
- {id, ejson_rep_id(RepId)},
- {node, node()}
- | JobProps
- ]}
- end;
-ejson_doc(#rdoc{state = RepState} = RDoc, _HealthThreshold) ->
- #rdoc{
- id = {DbName, DocId},
- info = StateInfo,
- rid = RepId,
- errcnt = ErrorCount,
- last_updated = StateTime,
- rep = Rep
- } = RDoc,
- {[
- {doc_id, DocId},
- {database, DbName},
- {id, ejson_rep_id(RepId)},
- {state, RepState},
- {info, couch_replicator_utils:ejson_state_info(StateInfo)},
- {error_count, ErrorCount},
- {node, node()},
- {last_updated, couch_replicator_utils:iso8601(StateTime)},
- {start_time, couch_replicator_utils:iso8601(Rep#rep.start_time)}
- ]}.
-
--spec ejson_doc_state_filter(atom(), [atom()]) -> boolean().
-ejson_doc_state_filter(_DocState, []) ->
- true;
-ejson_doc_state_filter(State, States) when is_list(States), is_atom(State) ->
- lists:member(State, States).
-
--spec cluster_membership_foldl(#rdoc{}, nil) -> nil.
-cluster_membership_foldl(#rdoc{id = {DbName, DocId} = Id, rid = RepId}, nil) ->
- case couch_replicator_clustering:owner(DbName, DocId) of
- unstable ->
- nil;
- ThisNode when ThisNode =:= node() ->
- nil;
- OtherNode ->
- Msg = "Replication doc ~p:~p with id ~p usurped by node ~p",
- couch_log:notice(Msg, [DbName, DocId, RepId, OtherNode]),
- removed_doc(Id),
- nil
- end.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(DB, <<"db">>).
--define(EXIT_DB, <<"exit_db">>).
--define(DOC1, <<"doc1">>).
--define(DOC2, <<"doc2">>).
--define(R1, {"1", ""}).
--define(R2, {"2", ""}).
-
-doc_processor_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_bad_change(),
- t_regular_change(),
- t_change_with_doc_processor_crash(),
- t_change_with_existing_job(),
- t_deleted_change(),
- t_triggered_change(),
- t_completed_change(),
- t_active_replication_completed(),
- t_error_change(),
- t_failed_change(),
- t_change_for_different_node(),
- t_change_when_cluster_unstable(),
- t_ejson_docs(),
- t_cluster_membership_foldl()
- ]
- }
- }.
-
-% Can't parse replication doc, so should write failure state to document.
-t_bad_change() ->
- ?_test(begin
- ?assertEqual(acc, db_change(?DB, bad_change(), acc)),
- ?assert(updated_doc_with_failed_state())
- end).
-
-% Regular change, parse to a #rep{} and then add job.
-t_regular_change() ->
- ?_test(begin
- mock_existing_jobs_lookup([]),
- ?assertEqual(ok, process_change(?DB, change())),
- ?assert(ets:member(?MODULE, {?DB, ?DOC1})),
- ?assert(started_worker({?DB, ?DOC1}))
- end).
-
-% Handle cases where doc processor exits or crashes while processing a change
-t_change_with_doc_processor_crash() ->
- ?_test(begin
- mock_existing_jobs_lookup([]),
- ?assertEqual(acc, db_change(?EXIT_DB, change(), acc)),
- ?assert(failed_state_not_updated())
- end).
-
-% Regular change, parse to a #rep{} and then add a job, but there is already
-% a running job with the same Id.
-t_change_with_existing_job() ->
- ?_test(begin
- mock_existing_jobs_lookup([test_rep(?R2)]),
- ?assertEqual(ok, process_change(?DB, change())),
- ?assert(ets:member(?MODULE, {?DB, ?DOC1})),
- ?assert(started_worker({?DB, ?DOC1}))
- end).
-
-% Change is a deletion, and job is running, so remove job.
-t_deleted_change() ->
- ?_test(begin
- mock_existing_jobs_lookup([test_rep(?R2)]),
- ?assertEqual(ok, process_change(?DB, deleted_change())),
- ?assert(removed_job(?R2))
- end).
-
-% Change is in `triggered` state. Remove legacy state and add job.
-t_triggered_change() ->
- ?_test(begin
- mock_existing_jobs_lookup([]),
- ?assertEqual(ok, process_change(?DB, change(<<"triggered">>))),
- ?assert(removed_state_fields()),
- ?assert(ets:member(?MODULE, {?DB, ?DOC1})),
- ?assert(started_worker({?DB, ?DOC1}))
- end).
-
-% Change is in `completed` state, so skip over it.
-t_completed_change() ->
- ?_test(begin
- ?assertEqual(ok, process_change(?DB, change(<<"completed">>))),
- ?assert(did_not_remove_state_fields()),
- ?assertNot(ets:member(?MODULE, {?DB, ?DOC1})),
- ?assert(did_not_spawn_worker())
- end).
-
-% A completed change arrives for what used to be an active job. In this case
-% remove the entry from the doc processor's ets table (because there is no
-% linkage or callback mechanism for the scheduler to tell the doc_processor a
-% replication just completed).
-t_active_replication_completed() ->
- ?_test(begin
- mock_existing_jobs_lookup([]),
- ?assertEqual(ok, process_change(?DB, change())),
- ?assert(ets:member(?MODULE, {?DB, ?DOC1})),
- ?assertEqual(ok, process_change(?DB, change(<<"completed">>))),
- ?assert(did_not_remove_state_fields()),
- ?assertNot(ets:member(?MODULE, {?DB, ?DOC1}))
- end).
-
-% Change is in `error` state. Remove legacy state and retry
-% running the job. This state was used for transient errors which are not
-% written to the document anymore.
-t_error_change() ->
- ?_test(begin
- mock_existing_jobs_lookup([]),
- ?assertEqual(ok, process_change(?DB, change(<<"error">>))),
- ?assert(removed_state_fields()),
- ?assert(ets:member(?MODULE, {?DB, ?DOC1})),
- ?assert(started_worker({?DB, ?DOC1}))
- end).
-
-% Change is in `failed` state. This is a terminal state and it will not
-% be tried again, so skip over it.
-t_failed_change() ->
- ?_test(begin
- ?assertEqual(ok, process_change(?DB, change(<<"failed">>))),
- ?assert(did_not_remove_state_fields()),
- ?assertNot(ets:member(?MODULE, {?DB, ?DOC1})),
- ?assert(did_not_spawn_worker())
- end).
-
-% Normal change, but according to cluster ownership algorithm, replication
-% belongs to a different node, so this node should skip it.
-t_change_for_different_node() ->
- ?_test(begin
- meck:expect(couch_replicator_clustering, owner, 2, different_node),
- ?assertEqual(ok, process_change(?DB, change())),
- ?assert(did_not_spawn_worker())
- end).
-
-% Change handled when cluster is unstable (nodes are added or removed), so
-% job is not added. A rescan will be triggered soon and change will be
-% evaluated again.
-t_change_when_cluster_unstable() ->
- ?_test(begin
- meck:expect(couch_replicator_clustering, owner, 2, unstable),
- ?assertEqual(ok, process_change(?DB, change())),
- ?assert(did_not_spawn_worker())
- end).
-
-% Check if docs/0 function produces expected ejson after adding a job
-t_ejson_docs() ->
- ?_test(begin
- mock_existing_jobs_lookup([]),
- ?assertEqual(ok, process_change(?DB, change())),
- ?assert(ets:member(?MODULE, {?DB, ?DOC1})),
- EJsonDocs = docs([]),
- ?assertMatch([{[_ | _]}], EJsonDocs),
- [{DocProps}] = EJsonDocs,
- {value, StateTime, DocProps1} = lists:keytake(
- last_updated,
- 1,
- DocProps
- ),
- ?assertMatch(
- {last_updated, BinVal1} when is_binary(BinVal1),
- StateTime
- ),
- {value, StartTime, DocProps2} = lists:keytake(start_time, 1, DocProps1),
- ?assertMatch({start_time, BinVal2} when is_binary(BinVal2), StartTime),
- ExpectedProps = [
- {database, ?DB},
- {doc_id, ?DOC1},
- {error_count, 0},
- {id, null},
- {info, null},
- {node, node()},
- {state, initializing}
- ],
- ?assertEqual(ExpectedProps, lists:usort(DocProps2))
- end).
-
-% Check that when cluster membership changes, records from the doc processor
-% and job scheduler get removed
-t_cluster_membership_foldl() ->
- ?_test(begin
- mock_existing_jobs_lookup([test_rep(?R1)]),
- ?assertEqual(ok, process_change(?DB, change())),
- meck:expect(couch_replicator_clustering, owner, 2, different_node),
- ?assert(ets:member(?MODULE, {?DB, ?DOC1})),
- gen_server:cast(?MODULE, {cluster, stable}),
- meck:wait(2, couch_replicator_scheduler, find_jobs_by_doc, 2, 5000),
- ?assertNot(ets:member(?MODULE, {?DB, ?DOC1})),
- ?assert(removed_job(?R1))
- end).
-
-get_worker_ref_test_() ->
- {
- setup,
- fun() ->
- ets:new(?MODULE, [named_table, public, {keypos, #rdoc.id}])
- end,
- fun(_) -> ets:delete(?MODULE) end,
- ?_test(begin
- Id = {<<"db">>, <<"doc">>},
- ?assertEqual(nil, get_worker_ref(Id)),
- ets:insert(?MODULE, #rdoc{id = Id, worker = nil}),
- ?assertEqual(nil, get_worker_ref(Id)),
- Ref = make_ref(),
- ets:insert(?MODULE, #rdoc{id = Id, worker = Ref}),
- ?assertEqual(Ref, get_worker_ref(Id))
- end)
- }.
-
-% Test helper functions
-
-setup_all() ->
- meck:expect(couch_log, info, 2, ok),
- meck:expect(couch_log, notice, 2, ok),
- meck:expect(couch_log, warning, 2, ok),
- meck:expect(couch_log, error, 2, ok),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- meck:expect(config, listen_for_changes, 2, ok),
- meck:expect(couch_replicator_clustering, owner, 2, node()),
- meck:expect(
- couch_replicator_clustering,
- link_cluster_event_listener,
- 3,
- ok
- ),
- meck:expect(couch_replicator_doc_processor_worker, spawn_worker, fun
- ({?EXIT_DB, _}, _, _, _) -> exit(kapow);
- (_, _, _, _) -> pid
- end),
- meck:expect(couch_replicator_scheduler, remove_job, 1, ok),
- meck:expect(couch_replicator_docs, remove_state_fields, 2, ok),
- meck:expect(couch_replicator_docs, update_failed, 3, ok).
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([
- config,
- couch_log,
- couch_replicator_clustering,
- couch_replicator_doc_processor_worker,
- couch_replicator_docs,
- couch_replicator_scheduler
- ]),
- % Set this expectation back to the default for
- % each test since some tests change it
- meck:expect(couch_replicator_clustering, owner, 2, node()),
- {ok, Pid} = start_link(),
- unlink(Pid),
- Pid.
-
-teardown(Pid) ->
- % 1s wait should suffice
- test_util:stop_sync(Pid, kill, 1000).
-
-removed_state_fields() ->
- meck:called(couch_replicator_docs, remove_state_fields, [?DB, ?DOC1]).
-
-started_worker(_Id) ->
- 1 == meck:num_calls(couch_replicator_doc_processor_worker, spawn_worker, 4).
-
-removed_job(Id) ->
- meck:called(couch_replicator_scheduler, remove_job, [test_rep(Id)]).
-
-did_not_remove_state_fields() ->
- 0 == meck:num_calls(couch_replicator_docs, remove_state_fields, '_').
-
-did_not_spawn_worker() ->
- 0 ==
- meck:num_calls(
- couch_replicator_doc_processor_worker,
- spawn_worker,
- '_'
- ).
-
-updated_doc_with_failed_state() ->
- 1 == meck:num_calls(couch_replicator_docs, update_failed, '_').
-
-failed_state_not_updated() ->
- 0 == meck:num_calls(couch_replicator_docs, update_failed, '_').
-
-mock_existing_jobs_lookup(ExistingJobs) ->
- meck:expect(couch_replicator_scheduler, find_jobs_by_doc, fun
- (?EXIT_DB, ?DOC1) -> [];
- (?DB, ?DOC1) -> ExistingJobs
- end).
-
-test_rep(Id) ->
- #rep{id = Id, start_time = {0, 0, 0}}.
-
-change() ->
- {[
- {<<"id">>, ?DOC1},
- {doc,
- {[
- {<<"_id">>, ?DOC1},
- {<<"source">>, <<"http://srchost.local/src">>},
- {<<"target">>, <<"http://tgthost.local/tgt">>}
- ]}}
- ]}.
-
-change(State) ->
- {[
- {<<"id">>, ?DOC1},
- {doc,
- {[
- {<<"_id">>, ?DOC1},
- {<<"source">>, <<"http://srchost.local/src">>},
- {<<"target">>, <<"http://tgthost.local/tgt">>},
- {<<"_replication_state">>, State}
- ]}}
- ]}.
-
-deleted_change() ->
- {[
- {<<"id">>, ?DOC1},
- {<<"deleted">>, true},
- {doc,
- {[
- {<<"_id">>, ?DOC1},
- {<<"source">>, <<"http://srchost.local/src">>},
- {<<"target">>, <<"http://tgthost.local/tgt">>}
- ]}}
- ]}.
-
-bad_change() ->
- {[
- {<<"id">>, ?DOC2},
- {doc,
- {[
- {<<"_id">>, ?DOC2},
- {<<"source">>, <<"src">>}
- ]}}
- ]}.
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
deleted file mode 100644
index 5d971151b..000000000
--- a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
+++ /dev/null
@@ -1,295 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
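-% Worker spawned by couch_replicator_doc_processor for a single replication
-% document. It calculates the replication id (possibly fetching a filter from
-% the source) and tries to add the job to the scheduler, then exits with a
-% #doc_worker_result{} record.
-%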
--module(couch_replicator_doc_processor_worker).
-
--export([
- spawn_worker/4
-]).
-
--include("couch_replicator.hrl").
-
--import(couch_replicator_utils, [
- pp_rep_id/1
-]).
-
-% 61 seconds here because requests usually have 10, 15, 30 second
-% timeouts set. We'd want the worker to get a chance to make a few
-% requests (maybe one failing one and a retry) and then fail with its
-% own error (timeout, network error), which would be more specific and
-% informative, before it simply gets killed because of the timeout
-% here. That is, if all else fails and the worker is actually blocked, then
-% 61 sec is a safety net to brutally kill the worker so it doesn't end up
-% hung forever.
--define(WORKER_TIMEOUT_MSEC, 61000).
-
-% Spawn a worker which attempts to calculate the replication id and then add a
-% replication job to the scheduler. This function creates a monitor on the
-% worker; the worker will then exit with a #doc_worker_result{} record within
-% the ?WORKER_TIMEOUT_MSEC timeout period. A timeout is considered a
-% `temporary_error`. The result will be sent as the `Reason` in the
-% {'DOWN',...} message.
--spec spawn_worker(db_doc_id(), #rep{}, seconds(), reference()) -> pid().
-spawn_worker(Id, Rep, WaitSec, WRef) ->
- {Pid, _Ref} = spawn_monitor(fun() ->
- worker_fun(Id, Rep, WaitSec, WRef)
- end),
- Pid.
-
-% Private functions
-
--spec worker_fun(db_doc_id(), #rep{}, seconds(), reference()) -> no_return().
-worker_fun(Id, Rep, WaitSec, WRef) ->
- timer:sleep(WaitSec * 1000),
- Fun = fun() ->
- try maybe_start_replication(Id, Rep, WRef) of
- Res ->
- exit(Res)
- catch
- throw:{filter_fetch_error, Reason} ->
- exit({temporary_error, Reason});
- _Tag:Reason ->
- exit({temporary_error, Reason})
- end
- end,
- {Pid, Ref} = spawn_monitor(Fun),
- receive
- {'DOWN', Ref, _, Pid, Result} ->
- exit(#doc_worker_result{id = Id, wref = WRef, result = Result})
- after ?WORKER_TIMEOUT_MSEC ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- {DbName, DocId} = Id,
- TimeoutSec = round(?WORKER_TIMEOUT_MSEC / 1000),
- Msg = io_lib:format(
- "Replication for db ~p doc ~p failed to start due "
- "to timeout after ~B seconds",
- [DbName, DocId, TimeoutSec]
- ),
- Result = {temporary_error, couch_util:to_binary(Msg)},
- exit(#doc_worker_result{id = Id, wref = WRef, result = Result})
- end.
-
-% Try to start a replication. Used by a worker. This function should return
-% rep_start_result(); it also throws {filter_fetch_error, Reason} if it cannot
-% fetch the filter. It can also block for an indeterminate amount of time
-% while fetching the filter.
-maybe_start_replication(Id, RepWithoutId, WRef) ->
- Rep = couch_replicator_docs:update_rep_id(RepWithoutId),
- case maybe_add_job_to_scheduler(Id, Rep, WRef) of
- ignore ->
- ignore;
- {ok, RepId} ->
- {ok, RepId};
- {temporary_error, Reason} ->
- {temporary_error, Reason};
- {permanent_failure, Reason} ->
- {DbName, DocId} = Id,
- couch_replicator_docs:update_failed(DbName, DocId, Reason),
- {permanent_failure, Reason}
- end.
-
--spec maybe_add_job_to_scheduler(db_doc_id(), #rep{}, reference()) ->
- rep_start_result().
-maybe_add_job_to_scheduler({DbName, DocId}, Rep, WRef) ->
- RepId = Rep#rep.id,
- case couch_replicator_scheduler:rep_state(RepId) of
- nil ->
- % Before adding a job check that this worker is still the current
- % worker. This is to handle a race condition where a worker which was
- % sleeping and then checking a replication filter may inadvertently
- % re-add a replication which was already deleted.
- case couch_replicator_doc_processor:get_worker_ref({DbName, DocId}) of
- WRef ->
- ok = couch_replicator_scheduler:add_job(Rep),
- {ok, RepId};
- _NilOrOtherWRef ->
- ignore
- end;
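- % The same replication is already running and was started from this same
- % document, so treat it as already started.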
- #rep{doc_id = DocId} ->
- {ok, RepId};
- #rep{doc_id = null} ->
- Msg = io_lib:format(
- "Replication `~s` specified by document `~s`"
- " already running as a transient replication, started via"
- " `_replicate` API endpoint",
- [pp_rep_id(RepId), DocId]
- ),
- {temporary_error, couch_util:to_binary(Msg)};
- #rep{db_name = OtherDb, doc_id = OtherDocId} ->
- Msg = io_lib:format(
- "Replication `~s` specified by document `~s`"
- " already started, triggered by document `~s` from db `~s`",
- [pp_rep_id(RepId), DocId, OtherDocId, mem3:dbname(OtherDb)]
- ),
- {permanent_failure, couch_util:to_binary(Msg)}
- end.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(DB, <<"db">>).
--define(DOC1, <<"doc1">>).
--define(R1, {"ad08e05057046eabe898a2572bbfb573", ""}).
-
-doc_processor_worker_test_() ->
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_should_add_job(),
- t_already_running_same_docid(),
- t_already_running_transient(),
- t_already_running_other_db_other_doc(),
- t_spawn_worker(),
- t_ignore_if_doc_deleted(),
- t_ignore_if_worker_ref_does_not_match()
- ]
- }.
-
-% A regular change for which no job exists yet, so a new job should be added.
-t_should_add_job() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- ?assertEqual({ok, ?R1}, maybe_start_replication(Id, Rep, nil)),
- ?assert(added_job())
- end).
-
-% Replication is already running, with same doc id. Ignore change.
-t_already_running_same_docid() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- mock_already_running(?DB, ?DOC1),
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- ?assertEqual({ok, ?R1}, maybe_start_replication(Id, Rep, nil)),
- ?assert(did_not_add_job())
- end).
-
-% There is a transient replication with same replication id running. Ignore.
-t_already_running_transient() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- mock_already_running(null, null),
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- ?assertMatch(
- {temporary_error, _},
- maybe_start_replication(
- Id,
- Rep,
- nil
- )
- ),
- ?assert(did_not_add_job())
- end).
-
-% There is a duplicate replication potentially from a different db and doc.
-% Write permanent failure to doc.
-t_already_running_other_db_other_doc() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- mock_already_running(<<"otherdb">>, <<"otherdoc">>),
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- ?assertMatch(
- {permanent_failure, _},
- maybe_start_replication(
- Id,
- Rep,
- nil
- )
- ),
- ?assert(did_not_add_job()),
- ?assertEqual(1, meck:num_calls(couch_replicator_docs, update_failed, '_'))
- end).
-
-% Should spawn worker
-t_spawn_worker() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- WRef = make_ref(),
- meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, WRef),
- Pid = spawn_worker(Id, Rep, 0, WRef),
- Res =
- receive
- {'DOWN', _Ref, process, Pid, Reason} -> Reason
- after 1000 -> timeout
- end,
- Expect = #doc_worker_result{id = Id, wref = WRef, result = {ok, ?R1}},
- ?assertEqual(Expect, Res),
- ?assert(added_job())
- end).
-
-% Should not add a job if, by the time the worker got to fetching the filter
-% and getting a replication id, the replication doc was deleted
-t_ignore_if_doc_deleted() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, nil),
- ?assertEqual(ignore, maybe_start_replication(Id, Rep, make_ref())),
- ?assertNot(added_job())
- end).
-
-% Should not add a job if, by the time the worker got to fetching the filter
-% and building a replication id, another worker was spawned.
-t_ignore_if_worker_ref_does_not_match() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- meck:expect(
- couch_replicator_doc_processor,
- get_worker_ref,
- 1,
- make_ref()
- ),
- ?assertEqual(ignore, maybe_start_replication(Id, Rep, make_ref())),
- ?assertNot(added_job())
- end).
-
-% Test helper functions
-
-setup() ->
- meck:expect(couch_replicator_scheduler, add_job, 1, ok),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- meck:expect(couch_server, get_uuid, 0, this_is_snek),
- meck:expect(couch_replicator_docs, update_failed, 3, ok),
- meck:expect(couch_replicator_scheduler, rep_state, 1, nil),
- meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, nil),
- ok.
-
-teardown(_) ->
- meck:unload().
-
-mock_already_running(DbName, DocId) ->
- meck:expect(
- couch_replicator_scheduler,
- rep_state,
- fun(RepId) -> #rep{id = RepId, doc_id = DocId, db_name = DbName} end
- ).
-
-added_job() ->
- 1 == meck:num_calls(couch_replicator_scheduler, add_job, '_').
-
-did_not_add_job() ->
- 0 == meck:num_calls(couch_replicator_scheduler, add_job, '_').
-
-change() ->
- {[
- {<<"_id">>, ?DOC1},
- {<<"source">>, <<"http://srchost.local/src">>},
- {<<"target">>, <<"http://tgthost.local/tgt">>}
- ]}.
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
deleted file mode 100644
index bcab46747..000000000
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ /dev/null
@@ -1,952 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_docs).
-
--export([
- parse_rep_doc/1,
- parse_rep_doc/2,
- parse_rep_db/3,
- parse_rep_doc_without_id/1,
- parse_rep_doc_without_id/2,
- before_doc_update/3,
- after_doc_read/2,
- ensure_rep_ddoc_exists/1,
- ensure_cluster_rep_ddoc_exists/1,
- remove_state_fields/2,
- update_doc_completed/3,
- update_failed/3,
- update_rep_id/1,
- update_triggered/2,
- update_error/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("ibrowse/include/ibrowse.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
--include("couch_replicator.hrl").
--include("couch_replicator_js_functions.hrl").
-
--import(couch_util, [
- get_value/2,
- get_value/3,
- to_binary/1
-]).
-
--import(couch_replicator_utils, [
- get_json_value/2,
- get_json_value/3
-]).
-
--define(REP_DB_NAME, <<"_replicator">>).
--define(REP_DESIGN_DOC, <<"_design/_replicator">>).
--define(OWNER, <<"owner">>).
--define(CTX, {user_ctx, #user_ctx{roles = [<<"_admin">>, <<"_replicator">>]}}).
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
-remove_state_fields(DbName, DocId) ->
- update_rep_doc(DbName, DocId, [
- {<<"_replication_state">>, undefined},
- {<<"_replication_state_time">>, undefined},
- {<<"_replication_state_reason">>, undefined},
- {<<"_replication_id">>, undefined},
- {<<"_replication_stats">>, undefined}
- ]).
-
--spec update_doc_completed(binary(), binary(), [_]) -> any().
-update_doc_completed(DbName, DocId, Stats) ->
- update_rep_doc(DbName, DocId, [
- {<<"_replication_state">>, <<"completed">>},
- {<<"_replication_state_reason">>, undefined},
- {<<"_replication_stats">>, {Stats}}
- ]),
- couch_stats:increment_counter([
- couch_replicator,
- docs,
- completed_state_updates
- ]).
-
--spec update_failed(binary(), binary(), any()) -> any().
-update_failed(DbName, DocId, Error) ->
- Reason = error_reason(Error),
- couch_log:error(
- "Error processing replication doc `~s` from `~s`: ~s",
- [DocId, DbName, Reason]
- ),
- update_rep_doc(DbName, DocId, [
- {<<"_replication_state">>, <<"failed">>},
- {<<"_replication_stats">>, undefined},
- {<<"_replication_state_reason">>, Reason}
- ]),
- couch_stats:increment_counter([
- couch_replicator,
- docs,
- failed_state_updates
- ]).
-
--spec update_triggered(#rep{}, rep_id()) -> ok.
-update_triggered(Rep, {Base, Ext}) ->
- #rep{
- db_name = DbName,
- doc_id = DocId
- } = Rep,
- update_rep_doc(DbName, DocId, [
- {<<"_replication_state">>, <<"triggered">>},
- {<<"_replication_state_reason">>, undefined},
- {<<"_replication_id">>, iolist_to_binary([Base, Ext])},
- {<<"_replication_stats">>, undefined}
- ]),
- ok.
-
--spec update_error(#rep{}, any()) -> ok.
-update_error(#rep{db_name = DbName, doc_id = DocId, id = RepId}, Error) ->
- Reason = error_reason(Error),
- BinRepId =
- case RepId of
- {Base, Ext} ->
- iolist_to_binary([Base, Ext]);
- _Other ->
- null
- end,
- update_rep_doc(DbName, DocId, [
- {<<"_replication_state">>, <<"error">>},
- {<<"_replication_state_reason">>, Reason},
- {<<"_replication_stats">>, undefined},
- {<<"_replication_id">>, BinRepId}
- ]),
- ok.
-
--spec ensure_rep_ddoc_exists(binary()) -> ok.
-ensure_rep_ddoc_exists(RepDb) ->
- case mem3:belongs(RepDb, ?REP_DESIGN_DOC) of
- true ->
- ensure_rep_ddoc_exists(RepDb, ?REP_DESIGN_DOC);
- false ->
- ok
- end.
-
--spec ensure_rep_ddoc_exists(binary(), binary()) -> ok.
-ensure_rep_ddoc_exists(RepDb, DDocId) ->
- case open_rep_doc(RepDb, DDocId) of
- {not_found, no_db_file} ->
- %% database was deleted.
- ok;
- {not_found, _Reason} ->
- DocProps = replication_design_doc_props(DDocId),
- DDoc = couch_doc:from_json_obj({DocProps}),
- couch_log:notice("creating replicator ddoc ~p", [RepDb]),
- {ok, _Rev} = save_rep_doc(RepDb, DDoc);
- {ok, Doc} ->
- Latest = replication_design_doc_props(DDocId),
- {Props0} = couch_doc:to_json_obj(Doc, []),
- {value, {_, Rev}, Props} = lists:keytake(<<"_rev">>, 1, Props0),
- case compare_ejson({Props}, {Latest}) of
- true ->
- ok;
- false ->
- LatestWithRev = [{<<"_rev">>, Rev} | Latest],
- DDoc = couch_doc:from_json_obj({LatestWithRev}),
- couch_log:notice("updating replicator ddoc ~p", [RepDb]),
- try
- {ok, _} = save_rep_doc(RepDb, DDoc)
- catch
- throw:conflict ->
- %% ignore, we'll retry next time
- ok
- end
- end
- end,
- ok.
-
--spec ensure_cluster_rep_ddoc_exists(binary()) -> ok.
-ensure_cluster_rep_ddoc_exists(RepDb) ->
- DDocId = ?REP_DESIGN_DOC,
- [#shard{name = DbShard} | _] = mem3:shards(RepDb, DDocId),
- ensure_rep_ddoc_exists(DbShard, DDocId).
-
--spec compare_ejson({[_]}, {[_]}) -> boolean().
-compare_ejson(EJson1, EJson2) ->
- EjsonSorted1 = couch_replicator_filters:ejsort(EJson1),
- EjsonSorted2 = couch_replicator_filters:ejsort(EJson2),
- EjsonSorted1 == EjsonSorted2.
-
--spec replication_design_doc_props(binary()) -> [_].
-replication_design_doc_props(DDocId) ->
- [
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"validate_doc_update">>, ?REP_DB_DOC_VALIDATE_FUN}
- ].
-
-% Note: parse_rep_doc can handle filtered replications. During parsing of the
-% replication doc it may make remote http requests to the source database.
-% If fetching or parsing of filter docs fails, parse_rep_doc throws a
-% {filter_fetch_error, Error} exception. This exception should be considered
-% transient with respect to the contents of the document itself, since it
-% depends on network availability of the source db and other factors.
--spec parse_rep_doc({[_]}) -> #rep{}.
-parse_rep_doc(RepDoc) ->
- {ok, Rep} =
- try
- parse_rep_doc(RepDoc, rep_user_ctx(RepDoc))
- catch
- throw:{error, Reason} ->
- throw({bad_rep_doc, Reason});
- throw:{filter_fetch_error, Reason} ->
- throw({filter_fetch_error, Reason});
- Tag:Err ->
- throw({bad_rep_doc, to_binary({Tag, Err})})
- end,
- Rep.
-
--spec parse_rep_doc_without_id({[_]}) -> #rep{}.
-parse_rep_doc_without_id(RepDoc) ->
- {ok, Rep} =
- try
- parse_rep_doc_without_id(RepDoc, rep_user_ctx(RepDoc))
- catch
- throw:{error, Reason} ->
- throw({bad_rep_doc, Reason});
- Tag:Err ->
- throw({bad_rep_doc, to_binary({Tag, Err})})
- end,
- Rep.
-
--spec parse_rep_doc({[_]}, #user_ctx{}) -> {ok, #rep{}}.
-parse_rep_doc(Doc, UserCtx) ->
- {ok, Rep} = parse_rep_doc_without_id(Doc, UserCtx),
- Cancel = get_value(cancel, Rep#rep.options, false),
- Id = get_value(id, Rep#rep.options, nil),
- case {Cancel, Id} of
- {true, nil} ->
- % Cancel request with no id, must parse id out of body contents
- {ok, update_rep_id(Rep)};
- {true, Id} ->
- % Cancel request with an id specified, so do not parse id from body
- {ok, Rep};
- {false, _Id} ->
- % Not a cancel request, regular replication doc
- {ok, update_rep_id(Rep)}
- end.
-
--spec parse_rep_doc_without_id({[_]}, #user_ctx{}) -> {ok, #rep{}}.
-parse_rep_doc_without_id({Props}, UserCtx) ->
- {SrcProxy, TgtProxy} = parse_proxy_settings(Props),
- Opts = make_options(Props),
- case
- get_value(cancel, Opts, false) andalso
- (get_value(id, Opts, nil) =/= nil)
- of
- true ->
- {ok, #rep{options = Opts, user_ctx = UserCtx}};
- false ->
- Source = parse_rep_db(get_value(<<"source">>, Props), SrcProxy, Opts),
- Target = parse_rep_db(get_value(<<"target">>, Props), TgtProxy, Opts),
- {Type, View} =
- case couch_replicator_filters:view_type(Props, Opts) of
- {error, Error} ->
- throw({bad_request, Error});
- Result ->
- Result
- end,
- Rep = #rep{
- source = Source,
- target = Target,
- options = Opts,
- user_ctx = UserCtx,
- type = Type,
- view = View,
- doc_id = get_value(<<"_id">>, Props, null)
- },
- % Check that the filter code can be parsed; if not, throw an exception
- case couch_replicator_filters:parse(Opts) of
- {error, FilterError} ->
- throw({error, FilterError});
- {ok, _Filter} ->
- ok
- end,
- {ok, Rep}
- end.
-
-parse_proxy_settings(Props) when is_list(Props) ->
- Proxy = get_value(<<"proxy">>, Props, <<>>),
- SrcProxy = get_value(<<"source_proxy">>, Props, <<>>),
- TgtProxy = get_value(<<"target_proxy">>, Props, <<>>),
-
- case Proxy =/= <<>> of
- true when SrcProxy =/= <<>> ->
- Error = "`proxy` is mutually exclusive with `source_proxy`",
- throw({bad_request, Error});
- true when TgtProxy =/= <<>> ->
- Error = "`proxy` is mutually exclusive with `target_proxy`",
- throw({bad_request, Error});
- true ->
- {Proxy, Proxy};
- false ->
- {SrcProxy, TgtProxy}
- end.
-
-% Update a #rep{} record with a replication_id. Calculating the id might involve
-% fetching a filter from the source db, and so it could fail intermittently.
-% In case of a failure to fetch the filter this function will throw a
-% `{filter_fetch_error, Reason}` exception.
-update_rep_id(Rep) ->
- RepId = couch_replicator_ids:replication_id(Rep),
- Rep#rep{id = RepId}.
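-
-% A minimal sketch of how a caller might guard against the transient filter
-% fetch failure described above (the wrapper shape is illustrative, not part
-% of this module):
-%
-%   try update_rep_id(Rep) of
-%       Rep1 -> {ok, Rep1}
-%   catch
-%       throw:{filter_fetch_error, Reason} -> {temporary_error, Reason}
-%   end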
-
-update_rep_doc(RepDbName, RepDocId, KVs) ->
- update_rep_doc(RepDbName, RepDocId, KVs, 1).
-
-update_rep_doc(RepDbName, RepDocId, KVs, Wait) when is_binary(RepDocId) ->
- try
- case open_rep_doc(RepDbName, RepDocId) of
- {ok, LastRepDoc} ->
- update_rep_doc(RepDbName, LastRepDoc, KVs, Wait * 2);
- _ ->
- ok
- end
- catch
- throw:conflict ->
- Msg = "Conflict when updating replication doc `~s`. Retrying.",
- couch_log:error(Msg, [RepDocId]),
- ok = timer:sleep(couch_rand:uniform(erlang:min(128, Wait)) * 100),
- update_rep_doc(RepDbName, RepDocId, KVs, Wait * 2)
- end;
-update_rep_doc(RepDbName, #doc{body = {RepDocBody}} = RepDoc, KVs, _Try) ->
- NewRepDocBody = lists:foldl(
- fun
- ({K, undefined}, Body) ->
- lists:keydelete(K, 1, Body);
- ({<<"_replication_state">> = K, State} = KV, Body) ->
- case get_json_value(K, Body) of
- State ->
- Body;
- _ ->
- Body1 = lists:keystore(K, 1, Body, KV),
- Timestamp = couch_replicator_utils:iso8601(os:timestamp()),
- lists:keystore(
- <<"_replication_state_time">>,
- 1,
- Body1,
- {<<"_replication_state_time">>, Timestamp}
- )
- end;
- ({K, _V} = KV, Body) ->
- lists:keystore(K, 1, Body, KV)
- end,
- RepDocBody,
- KVs
- ),
- case NewRepDocBody of
- RepDocBody ->
- ok;
- _ ->
- % Might not succeed if the replication doc is deleted right
- % before this update (not an error, ignore).
- save_rep_doc(RepDbName, RepDoc#doc{body = {NewRepDocBody}})
- end.
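-
-% A rough sketch of the conflict backoff above, assuming the doc keeps
-% conflicting: each sleep is couch_rand:uniform(min(128, Wait)) * 100 ms, so
-% the upper bound on successive sleeps grows as 100 ms, 200 ms, 400 ms, ...
-% and is capped at 12,800 ms once Wait reaches 128.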
-
-open_rep_doc(DbName, DocId) ->
- case couch_db:open_int(DbName, [?CTX, sys_db]) of
- {ok, Db} ->
- try
- couch_db:open_doc(Db, DocId, [ejson_body])
- after
- couch_db:close(Db)
- end;
- Else ->
- Else
- end.
-
-save_rep_doc(DbName, Doc) ->
- {ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]),
- try
- couch_db:update_doc(Db, Doc, [])
- catch
- % User can accidentally write a VDU which prevents _replicator from
- % updating replication documents. Avoid crashing the replicator and thus
- % preventing all other replication jobs on the node from running.
- throw:{forbidden, Reason} ->
- Msg = "~p VDU function preventing doc update to ~s ~s ~p",
- couch_log:error(Msg, [?MODULE, DbName, Doc#doc.id, Reason]),
- {ok, forbidden}
- after
- couch_db:close(Db)
- end.
-
--spec rep_user_ctx({[_]}) -> #user_ctx{}.
-rep_user_ctx({RepDoc}) ->
- case get_json_value(<<"user_ctx">>, RepDoc) of
- undefined ->
- #user_ctx{};
- {UserCtx} ->
- #user_ctx{
- name = get_json_value(<<"name">>, UserCtx, null),
- roles = get_json_value(<<"roles">>, UserCtx, [])
- }
- end.
-
--spec parse_rep_db({[_]} | binary(), binary(), [_]) -> #httpd{} | binary().
-parse_rep_db({Props}, Proxy, Options) ->
- ProxyParams = parse_proxy_params(Proxy),
- ProxyURL =
- case ProxyParams of
- [] -> undefined;
- _ -> binary_to_list(Proxy)
- end,
- Url = maybe_add_trailing_slash(get_value(<<"url">>, Props)),
- {AuthProps} = get_value(<<"auth">>, Props, {[]}),
- {BinHeaders} = get_value(<<"headers">>, Props, {[]}),
- Headers = lists:ukeysort(1, [{?b2l(K), ?b2l(V)} || {K, V} <- BinHeaders]),
- DefaultHeaders = (#httpdb{})#httpdb.headers,
- HttpDb = #httpdb{
- url = Url,
- auth_props = AuthProps,
- headers = lists:ukeymerge(1, Headers, DefaultHeaders),
- ibrowse_options = lists:keysort(
- 1,
- [
- {socket_options, get_value(socket_options, Options)}
- | ProxyParams ++ ssl_params(Url)
- ]
- ),
- timeout = get_value(connection_timeout, Options),
- http_connections = get_value(http_connections, Options),
- retries = get_value(retries, Options),
- proxy_url = ProxyURL
- },
- couch_replicator_utils:normalize_basic_auth(HttpDb);
-parse_rep_db(<<"http://", _/binary>> = Url, Proxy, Options) ->
- parse_rep_db({[{<<"url">>, Url}]}, Proxy, Options);
-parse_rep_db(<<"https://", _/binary>> = Url, Proxy, Options) ->
- parse_rep_db({[{<<"url">>, Url}]}, Proxy, Options);
-parse_rep_db(<<_/binary>>, _Proxy, _Options) ->
- throw({error, local_endpoints_not_supported});
-parse_rep_db(undefined, _Proxy, _Options) ->
- throw({error, <<"Missing replicator database">>}).
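-
-% For illustration, given the clauses above: parse_rep_db(<<"https://h/db">>,
-% Proxy, Options) is handled like
-% parse_rep_db({[{<<"url">>, <<"https://h/db">>}]}, Proxy, Options), while a
-% bare local database name such as <<"localdb">> throws
-% {error, local_endpoints_not_supported}.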
-
--spec maybe_add_trailing_slash(binary() | list()) -> list().
-maybe_add_trailing_slash(Url) when is_binary(Url) ->
- maybe_add_trailing_slash(?b2l(Url));
-maybe_add_trailing_slash(Url) ->
- case lists:member($?, Url) of
- true ->
- % skip if there are query params
- Url;
- false ->
- case lists:last(Url) of
- $/ ->
- Url;
- _ ->
- Url ++ "/"
- end
- end.
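-
-% For illustration, assuming the clauses above:
-%   maybe_add_trailing_slash(<<"http://h/db">>)  -> "http://h/db/"
-%   maybe_add_trailing_slash("http://h/db/")     -> "http://h/db/"
-%   maybe_add_trailing_slash("http://h/db?a=b")  -> "http://h/db?a=b"  (query string, left as is)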
-
--spec make_options([_]) -> [_].
-make_options(Props) ->
- Options0 = lists:ukeysort(1, convert_options(Props)),
- Options = check_options(Options0),
- DefWorkers = config:get_integer("replicator", "worker_processes", 4),
- DefBatchSize = config:get_integer("replicator", "worker_batch_size", 500),
- DefConns = config:get_integer("replicator", "http_connections", 20),
- DefTimeout = config:get_integer("replicator", "connection_timeout", 30000),
- DefRetries = config:get_integer("replicator", "retries_per_request", 5),
- UseCheckpoints = config:get_boolean("replicator", "use_checkpoints", true),
- DefCheckpointInterval = config:get_integer(
- "replicator",
- "checkpoint_interval",
- 30000
- ),
- {ok, DefSocketOptions} = couch_util:parse_term(
- config:get(
- "replicator",
- "socket_options",
- "[{keepalive, true}, {nodelay, false}]"
- )
- ),
- lists:ukeymerge(
- 1,
- Options,
- lists:keysort(1, [
- {connection_timeout, DefTimeout},
- {retries, DefRetries},
- {http_connections, DefConns},
- {socket_options, DefSocketOptions},
- {worker_batch_size, DefBatchSize},
- {worker_processes, DefWorkers},
- {use_checkpoints, UseCheckpoints},
- {checkpoint_interval, DefCheckpointInterval}
- ])
- ).
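-
-% Sketch of the defaults merge above: user-supplied options take precedence
-% because lists:ukeymerge/3 keeps the element from the first list on a key
-% collision. For example, passing {<<"worker_processes">>, 2} in the doc
-% yields {worker_processes, 2}, while untouched keys fall back to the
-% "replicator" config section defaults.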
-
--spec convert_options([_]) -> [_].
-convert_options([]) ->
- [];
-convert_options([{<<"cancel">>, V} | _R]) when not is_boolean(V) ->
- throw({bad_request, <<"parameter `cancel` must be a boolean">>});
-convert_options([{<<"cancel">>, V} | R]) ->
- [{cancel, V} | convert_options(R)];
-convert_options([{IdOpt, V} | R]) when
- IdOpt =:= <<"_local_id">>;
- IdOpt =:= <<"replication_id">>;
- IdOpt =:= <<"id">>
-->
- [{id, couch_replicator_ids:convert(V)} | convert_options(R)];
-convert_options([{<<"create_target">>, V} | _R]) when not is_boolean(V) ->
- throw({bad_request, <<"parameter `create_target` must be a boolean">>});
-convert_options([{<<"create_target">>, V} | R]) ->
- [{create_target, V} | convert_options(R)];
-convert_options([{<<"create_target_params">>, V} | _R]) when not is_tuple(V) ->
- throw({bad_request, <<"parameter `create_target_params` must be a JSON object">>});
-convert_options([{<<"create_target_params">>, V} | R]) ->
- [{create_target_params, V} | convert_options(R)];
-convert_options([{<<"continuous">>, V} | _R]) when not is_boolean(V) ->
- throw({bad_request, <<"parameter `continuous` must be a boolean">>});
-convert_options([{<<"continuous">>, V} | R]) ->
- [{continuous, V} | convert_options(R)];
-convert_options([{<<"filter">>, V} | R]) ->
- [{filter, V} | convert_options(R)];
-convert_options([{<<"query_params">>, V} | R]) ->
- [{query_params, V} | convert_options(R)];
-convert_options([{<<"doc_ids">>, null} | R]) ->
- convert_options(R);
-convert_options([{<<"doc_ids">>, V} | _R]) when not is_list(V) ->
- throw({bad_request, <<"parameter `doc_ids` must be an array">>});
-convert_options([{<<"doc_ids">>, V} | R]) ->
- % Ensure same behaviour as old replicator: accept a list of percent
- % encoded doc IDs.
- DocIds = lists:usort([?l2b(couch_httpd:unquote(Id)) || Id <- V]),
- [{doc_ids, DocIds} | convert_options(R)];
-convert_options([{<<"selector">>, V} | _R]) when not is_tuple(V) ->
- throw({bad_request, <<"parameter `selector` must be a JSON object">>});
-convert_options([{<<"selector">>, V} | R]) ->
- [{selector, V} | convert_options(R)];
-convert_options([{<<"worker_processes">>, V} | R]) ->
- [{worker_processes, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"worker_batch_size">>, V} | R]) ->
- [{worker_batch_size, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"http_connections">>, V} | R]) ->
- [{http_connections, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"connection_timeout">>, V} | R]) ->
- [{connection_timeout, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"retries_per_request">>, V} | R]) ->
- [{retries, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"socket_options">>, V} | R]) ->
- {ok, SocketOptions} = couch_util:parse_term(V),
- [{socket_options, SocketOptions} | convert_options(R)];
-convert_options([{<<"since_seq">>, V} | R]) ->
- [{since_seq, V} | convert_options(R)];
-convert_options([{<<"use_checkpoints">>, V} | R]) ->
- [{use_checkpoints, V} | convert_options(R)];
-convert_options([{<<"checkpoint_interval">>, V} | R]) ->
- [{checkpoint_interval, couch_util:to_integer(V)} | convert_options(R)];
-% skip unknown option
-convert_options([_ | R]) ->
- convert_options(R).
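-
-% A minimal sketch of the conversion above (values are made up):
-%   convert_options([{<<"continuous">>, true},
-%                    {<<"doc_ids">>, [<<"a%2Fb">>, <<"c">>, <<"c">>]},
-%                    {<<"unknown">>, 1}])
-%     -> [{continuous, true}, {doc_ids, [<<"a/b">>, <<"c">>]}]
-% Percent-encoded doc ids are unquoted, duplicates are dropped by
-% lists:usort/1, and unknown options are skipped.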
-
--spec check_options([_]) -> [_].
-check_options(Options) ->
- DocIds = lists:keyfind(doc_ids, 1, Options),
- Filter = lists:keyfind(filter, 1, Options),
- Selector = lists:keyfind(selector, 1, Options),
- case {DocIds, Filter, Selector} of
- {false, false, false} -> Options;
- {false, false, _} -> Options;
- {false, _, false} -> Options;
- {_, false, false} -> Options;
- _ -> throw({bad_request, "`doc_ids`,`filter`,`selector` are mutually exclusive"})
- end.
-
--spec parse_proxy_params(binary() | [_]) -> [_].
-parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl) ->
- parse_proxy_params(?b2l(ProxyUrl));
-parse_proxy_params([]) ->
- [];
-parse_proxy_params(ProxyUrl) ->
- #url{
- host = Host,
- port = Port,
- username = User,
- password = Passwd,
- protocol = Protocol
- } = ibrowse_lib:parse_url(ProxyUrl),
- Params =
- [
- {proxy_host, Host},
- {proxy_port, Port}
- ] ++
- case is_list(User) andalso is_list(Passwd) of
- false ->
- [];
- true ->
- [{proxy_user, User}, {proxy_password, Passwd}]
- end,
- case Protocol of
- socks5 ->
- [proxy_to_socks5(Param) || Param <- Params];
- _ ->
- Params
- end.
-
--spec proxy_to_socks5({atom(), string()}) -> {atom(), string()}.
-proxy_to_socks5({proxy_host, Val}) ->
- {socks5_host, Val};
-proxy_to_socks5({proxy_port, Val}) ->
- {socks5_port, Val};
-proxy_to_socks5({proxy_user, Val}) ->
- {socks5_user, Val};
-proxy_to_socks5({proxy_password, Val}) ->
- {socks5_password, Val}.
-
--spec ssl_params([_]) -> [_].
-ssl_params(Url) ->
- case ibrowse_lib:parse_url(Url) of
- #url{protocol = https} ->
- Depth = config:get_integer(
- "replicator",
- "ssl_certificate_max_depth",
- 3
- ),
- VerifyCerts = config:get_boolean(
- "replicator",
- "verify_ssl_certificates",
- false
- ),
- CertFile = config:get("replicator", "cert_file", undefined),
- KeyFile = config:get("replicator", "key_file", undefined),
- Password = config:get("replicator", "password", undefined),
- SslOpts = [{depth, Depth} | ssl_verify_options(VerifyCerts)],
- SslOpts1 =
- case CertFile /= undefined andalso KeyFile /= undefined of
- true ->
- case Password of
- undefined ->
- [{certfile, CertFile}, {keyfile, KeyFile}] ++ SslOpts;
- _ ->
- [
- {certfile, CertFile},
- {keyfile, KeyFile},
- {password, Password}
- ] ++ SslOpts
- end;
- false ->
- SslOpts
- end,
- [{is_ssl, true}, {ssl_options, SslOpts1}];
- #url{protocol = http} ->
- []
- end.
-
--spec ssl_verify_options(true | false) -> [_].
-ssl_verify_options(true) ->
- CAFile = config:get("replicator", "ssl_trusted_certificates_file"),
- [{verify, verify_peer}, {cacertfile, CAFile}];
-ssl_verify_options(false) ->
- [{verify, verify_none}].
-
--spec before_doc_update(#doc{}, Db :: any(), couch_db:update_type()) -> #doc{}.
-before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db, _UpdateType) ->
- Doc;
-before_doc_update(#doc{body = {Body}} = Doc, Db, _UpdateType) ->
- #user_ctx{
- roles = Roles,
- name = Name
- } = couch_db:get_user_ctx(Db),
- case lists:member(<<"_replicator">>, Roles) of
- true ->
- Doc;
- false ->
- case couch_util:get_value(?OWNER, Body) of
- undefined ->
- Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
- Name ->
- Doc;
- Other ->
- case (catch couch_db:check_is_admin(Db)) of
- ok when Other =:= null ->
- Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
- ok ->
- Doc;
- _ ->
- throw(
- {forbidden,
- <<"Can't update replication documents", " from other users.">>}
- )
- end
- end
- end.
-
--spec after_doc_read(#doc{}, Db :: any()) -> #doc{}.
-after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db) ->
- Doc;
-after_doc_read(#doc{body = {Body}} = Doc, Db) ->
- #user_ctx{name = Name} = couch_db:get_user_ctx(Db),
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- Doc;
- _ ->
- case couch_util:get_value(?OWNER, Body) of
- Name ->
- Doc;
- _Other ->
- Source = strip_credentials(
- couch_util:get_value(
- <<"source">>,
- Body
- )
- ),
- Target = strip_credentials(
- couch_util:get_value(
- <<"target">>,
- Body
- )
- ),
- NewBody0 = ?replace(Body, <<"source">>, Source),
- NewBody = ?replace(NewBody0, <<"target">>, Target),
- #doc{revs = {Pos, [_ | Revs]}} = Doc,
- NewDoc = Doc#doc{body = {NewBody}, revs = {Pos - 1, Revs}},
- NewRevId = couch_db:new_revid(NewDoc),
- NewDoc#doc{revs = {Pos, [NewRevId | Revs]}}
- end
- end.
-
--spec strip_credentials
- (undefined) -> undefined;
- (binary()) -> binary();
- ({[_]}) -> {[_]}.
-strip_credentials(undefined) ->
- undefined;
-strip_credentials(Url) when is_binary(Url) ->
- re:replace(
- Url,
- "http(s)?://(?:[^:]+):[^@]+@(.*)$",
- "http\\1://\\2",
- [{return, binary}]
- );
-strip_credentials({Props0}) ->
- Props1 = lists:keydelete(<<"headers">>, 1, Props0),
- % Strip "auth" just like headers; for replication plugins it can be a place
- % to stash credentials that are not necessarily in headers
- Props2 = lists:keydelete(<<"auth">>, 1, Props1),
- {Props2}.
-
-error_reason({shutdown, Error}) ->
- error_reason(Error);
-error_reason({bad_rep_doc, Reason}) ->
- to_binary(Reason);
-error_reason({error, {Error, Reason}}) when
- is_atom(Error), is_binary(Reason)
-->
- to_binary(io_lib:format("~s: ~s", [Error, Reason]));
-error_reason({error, Reason}) ->
- to_binary(Reason);
-error_reason(Reason) ->
- to_binary(Reason).
-
--ifdef(TEST).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-check_options_pass_values_test() ->
- ?assertEqual(check_options([]), []),
- ?assertEqual(check_options([baz, {other, fiz}]), [baz, {other, fiz}]),
- ?assertEqual(check_options([{doc_ids, x}]), [{doc_ids, x}]),
- ?assertEqual(check_options([{filter, x}]), [{filter, x}]),
- ?assertEqual(check_options([{selector, x}]), [{selector, x}]).
-
-check_options_fail_values_test() ->
- ?assertThrow(
- {bad_request, _},
- check_options([{doc_ids, x}, {filter, y}])
- ),
- ?assertThrow(
- {bad_request, _},
- check_options([{doc_ids, x}, {selector, y}])
- ),
- ?assertThrow(
- {bad_request, _},
- check_options([{filter, x}, {selector, y}])
- ),
- ?assertThrow(
- {bad_request, _},
- check_options([{doc_ids, x}, {selector, y}, {filter, z}])
- ).
-
-check_convert_options_pass_test() ->
- ?assertEqual([], convert_options([])),
- ?assertEqual([], convert_options([{<<"random">>, 42}])),
- ?assertEqual(
- [{cancel, true}],
- convert_options([{<<"cancel">>, true}])
- ),
- ?assertEqual(
- [{create_target, true}],
- convert_options([{<<"create_target">>, true}])
- ),
- ?assertEqual(
- [{continuous, true}],
- convert_options([{<<"continuous">>, true}])
- ),
- ?assertEqual(
- [{doc_ids, [<<"id">>]}],
- convert_options([{<<"doc_ids">>, [<<"id">>]}])
- ),
- ?assertEqual(
- [{selector, {key, value}}],
- convert_options([{<<"selector">>, {key, value}}])
- ).
-
-check_convert_options_fail_test() ->
- ?assertThrow(
- {bad_request, _},
- convert_options([{<<"cancel">>, <<"true">>}])
- ),
- ?assertThrow(
- {bad_request, _},
- convert_options([{<<"create_target">>, <<"true">>}])
- ),
- ?assertThrow(
- {bad_request, _},
- convert_options([{<<"continuous">>, <<"true">>}])
- ),
- ?assertThrow(
- {bad_request, _},
- convert_options([{<<"doc_ids">>, not_a_list}])
- ),
- ?assertThrow(
- {bad_request, _},
- convert_options([{<<"selector">>, [{key, value}]}])
- ).
-
-check_strip_credentials_test() ->
- [
- ?assertEqual(Expected, strip_credentials(Body))
- || {Expected, Body} <- [
- {
- undefined,
- undefined
- },
- {
- <<"https://remote_server/database">>,
- <<"https://foo:bar@remote_server/database">>
- },
- {
- {[{<<"_id">>, <<"foo">>}]},
- {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"bar">>}]}
- },
- {
- {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]},
- {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]}
- },
- {
- {[{<<"_id">>, <<"foo">>}]},
- {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"baz">>}]}
- },
- {
- {[{<<"_id">>, <<"foo">>}]},
- {[{<<"_id">>, <<"foo">>}, {<<"auth">>, <<"pluginsecret">>}]}
- }
- ]
- ].
-
-parse_proxy_params_test() ->
- ?assertEqual(
- [
- {proxy_host, "foo.com"},
- {proxy_port, 443},
- {proxy_user, "u"},
- {proxy_password, "p"}
- ],
- parse_proxy_params("https://u:p@foo.com")
- ),
- ?assertEqual(
- [
- {socks5_host, "foo.com"},
- {socks5_port, 1080},
- {socks5_user, "u"},
- {socks5_password, "p"}
- ],
- parse_proxy_params("socks5://u:p@foo.com")
- ).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_vdu(DbName),
- DbName.
-
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-create_vdu(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- VduFun = <<"function(newdoc, olddoc, userctx) {throw({'forbidden':'fail'})}">>,
- Doc = #doc{
- id = <<"_design/vdu">>,
- body = {[{<<"validate_doc_update">>, VduFun}]}
- },
- {ok, _} = couch_db:update_docs(Db, [Doc])
- end).
-
-update_replicator_doc_with_bad_vdu_test_() ->
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun t_vdu_does_not_crash_on_save/1
- ]
- }
- }.
-
-t_vdu_does_not_crash_on_save(DbName) ->
- ?_test(begin
- Doc = #doc{id = <<"some_id">>, body = {[{<<"foo">>, 42}]}},
- ?assertEqual({ok, forbidden}, save_rep_doc(DbName, Doc))
- end).
-
-local_replication_endpoint_error_test_() ->
- {
- foreach,
- fun() ->
- meck:expect(
- config,
- get,
- fun(_, _, Default) -> Default end
- )
- end,
- fun(_) -> meck:unload() end,
- [
- t_error_on_local_endpoint()
- ]
- }.
-
-t_error_on_local_endpoint() ->
- ?_test(begin
- RepDoc =
- {[
- {<<"_id">>, <<"someid">>},
- {<<"source">>, <<"localdb">>},
- {<<"target">>, <<"http://somehost.local/tgt">>}
- ]},
- Expect = local_endpoints_not_supported,
- ?assertThrow({bad_rep_doc, Expect}, parse_rep_doc_without_id(RepDoc))
- end).
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_fabric.erl b/src/couch_replicator/src/couch_replicator_fabric.erl
deleted file mode 100644
index 6e5ebfc25..000000000
--- a/src/couch_replicator/src/couch_replicator_fabric.erl
+++ /dev/null
@@ -1,158 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_fabric).
-
--export([
- docs/5
-]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-docs(DbName, Options, QueryArgs, Callback, Acc) ->
- Shards = mem3:shards(DbName),
- Workers0 = fabric_util:submit_jobs(
- Shards, couch_replicator_fabric_rpc, docs, [Options, QueryArgs]
- ),
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case fabric_streams:start(Workers0, #shard.ref) of
- {ok, Workers} ->
- try
- docs_int(DbName, Workers, QueryArgs, Callback, Acc)
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers, waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "replicator docs"
- ),
- Callback({error, timeout}, Acc);
- {error, Error} ->
- Callback({error, Error}, Acc)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-docs_int(DbName, Workers, QueryArgs, Callback, Acc0) ->
- #mrargs{limit = Limit, skip = Skip} = QueryArgs,
- State = #collector{
- db_name = DbName,
- query_args = QueryArgs,
- callback = Callback,
- counters = fabric_dict:init(Workers, 0),
- skip = Skip,
- limit = Limit,
- user_acc = Acc0,
- update_seq = nil
- },
- case
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- State,
- infinity,
- 5000
- )
- of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-handle_message({meta, Meta0}, {Worker, From}, State) ->
- Tot = couch_util:get_value(total, Meta0, 0),
- Off = couch_util:get_value(offset, Meta0, 0),
- #collector{
- callback = Callback,
- counters = Counters0,
- total_rows = Total0,
- offset = Offset0,
- user_acc = AccIn
- } = State,
- % Assert that we don't have other messages from this
- % worker when the total_and_offset message arrives.
- 0 = fabric_dict:lookup_element(Worker, Counters0),
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- Total = Total0 + Tot,
- Offset = Offset0 + Off,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- total_rows = Total,
- offset = Offset
- }};
- false ->
- FinalOffset = erlang:min(Total, Offset + State#collector.skip),
- Meta = [{total, Total}, {offset, FinalOffset}],
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc
- }}
- end;
-handle_message(#view_row{id = Id, doc = Doc} = Row0, {Worker, From}, State) ->
- #collector{query_args = Args, counters = Counters0, rows = Rows0} = State,
- case maybe_fetch_and_filter_doc(Id, Doc, State) of
- {[_ | _]} = NewDoc ->
- Row = Row0#view_row{doc = NewDoc},
- Dir = Args#mrargs.direction,
- Rows = merge_row(Dir, Row#view_row{worker = {Worker, From}}, Rows0),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows = Rows, counters = Counters1},
- fabric_view:maybe_send_row(State1);
- skip ->
- rexi:stream_ack(From),
- {ok, State}
- end;
-handle_message(complete, Worker, State) ->
- Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
- fabric_view:maybe_send_row(State#collector{counters = Counters}).
-
-merge_row(fwd, Row, Rows) ->
- lists:keymerge(#view_row.id, [Row], Rows);
-merge_row(rev, Row, Rows) ->
- lists:rkeymerge(#view_row.id, [Row], Rows).
-
-maybe_fetch_and_filter_doc(Id, undecided, State) ->
- #collector{db_name = DbName, query_args = #mrargs{extra = Extra}} = State,
- FilterStates = proplists:get_value(filter_states, Extra),
- case couch_replicator:active_doc(DbName, Id) of
- {ok, {Props} = DocInfo} ->
- DocState = couch_util:get_value(state, Props),
- couch_replicator_utils:filter_state(DocState, FilterStates, DocInfo);
- {error, not_found} ->
- % could have been deleted
- skip
- end;
-maybe_fetch_and_filter_doc(_Id, Doc, _State) ->
- Doc.
diff --git a/src/couch_replicator/src/couch_replicator_fabric_rpc.erl b/src/couch_replicator/src/couch_replicator_fabric_rpc.erl
deleted file mode 100644
index daeb86e60..000000000
--- a/src/couch_replicator/src/couch_replicator_fabric_rpc.erl
+++ /dev/null
@@ -1,97 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_fabric_rpc).
-
--export([
- docs/3
-]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-docs(DbName, Options, Args0) ->
- set_io_priority(DbName, Options),
- #mrargs{skip = Skip, limit = Limit, extra = Extra} = Args0,
- FilterStates = proplists:get_value(filter_states, Extra),
- Args = Args0#mrargs{skip = 0, limit = Skip + Limit},
- HealthThreshold = couch_replicator_scheduler:health_threshold(),
- {ok, Db} = couch_db:open_int(DbName, Options),
- Acc = {DbName, FilterStates, HealthThreshold},
- couch_mrview:query_all_docs(Db, Args, fun docs_cb/2, Acc).
-
-docs_cb({meta, Meta}, Acc) ->
- ok = rexi:stream2({meta, Meta}),
- {ok, Acc};
-docs_cb({row, Row}, {DbName, States, HealthThreshold} = Acc) ->
- Id = couch_util:get_value(id, Row),
- Doc = couch_util:get_value(doc, Row),
- ViewRow = #view_row{
- id = Id,
- key = couch_util:get_value(key, Row),
- value = couch_util:get_value(value, Row)
- },
- case rep_doc_state(DbName, Id, Doc, States, HealthThreshold) of
- skip ->
- ok;
- Other ->
- ok = rexi:stream2(ViewRow#view_row{doc = Other})
- end,
- {ok, Acc};
-docs_cb(complete, Acc) ->
- ok = rexi:stream_last(complete),
- {ok, Acc}.
-
-set_io_priority(DbName, Options) ->
- case lists:keyfind(io_priority, 1, Options) of
- {io_priority, Pri} ->
- erlang:put(io_priority, Pri);
- false ->
- erlang:put(io_priority, {interactive, DbName})
- end.
-
-%% Get the state of the replication document. If it is found and has a terminal
-%% state then it can be filtered and either included in the results or skipped.
-%% If it is not in a terminal state, look it up in the local doc processor ETS
-%% table. If it is there then filter by state. If it is not found there either
-%% then mark it as `undecided` and let the coordinator try to fetch it. The
-%% idea is to do as much work as possible locally and leave the minimum
-%% amount of work for the coordinator.
-rep_doc_state(_Shard, <<"_design/", _/binary>>, _, _, _) ->
- skip;
-rep_doc_state(Shard, Id, {[_ | _]} = Doc, States, HealthThreshold) ->
- DbName = mem3:dbname(Shard),
- DocInfo = couch_replicator:info_from_doc(DbName, Doc),
- case get_doc_state(DocInfo) of
- null ->
- % Fetch from local doc processor. If there, filter by state.
- % If not there, mark as undecided. Let coordinator figure it out.
- case
- couch_replicator_doc_processor:doc_lookup(
- Shard,
- Id,
- HealthThreshold
- )
- of
- {ok, EtsInfo} ->
- State = get_doc_state(EtsInfo),
- couch_replicator_utils:filter_state(State, States, EtsInfo);
- {error, not_found} ->
- undecided
- end;
- OtherState when is_atom(OtherState) ->
- couch_replicator_utils:filter_state(OtherState, States, DocInfo)
- end.
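-
-%% For illustration: a doc whose own state is already terminal (e.g. completed)
-%% is filtered locally against States, a doc with state null that is present in
-%% the local doc processor ETS table is filtered by that entry's state, and a
-%% doc missing from both is returned as `undecided` so the coordinator can
-%% fetch and filter it.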
-
-get_doc_state({Props}) ->
- couch_util:get_value(state, Props).
diff --git a/src/couch_replicator/src/couch_replicator_filters.erl b/src/couch_replicator/src/couch_replicator_filters.erl
deleted file mode 100644
index aab8e80b3..000000000
--- a/src/couch_replicator/src/couch_replicator_filters.erl
+++ /dev/null
@@ -1,220 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_filters).
-
--export([
- parse/1,
- fetch/3,
- view_type/2,
- ejsort/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% Parse the filter from the replication options proplist.
-% Return {ok, {FilterType,...}} | {error, ParseError}.
-% For a `user` filter, i.e. a filter specified as user code
-% in the source database, this code doesn't fetch the filter
-% code, but only returns the name of the filter.
--spec parse([_]) ->
- {ok, nil}
- | {ok, {view, binary(), {[_]}}}
- | {ok, {user, {binary(), binary()}, {[_]}}}
- | {ok, {docids, [_]}}
- | {ok, {mango, {[_]}}}
- | {error, binary()}.
-parse(Options) ->
- Filter = couch_util:get_value(filter, Options),
- DocIds = couch_util:get_value(doc_ids, Options),
- Selector = couch_util:get_value(selector, Options),
- case {Filter, DocIds, Selector} of
- {undefined, undefined, undefined} ->
- {ok, nil};
- {<<"_", _/binary>>, undefined, undefined} ->
- {ok, {view, Filter, query_params(Options)}};
- {_, undefined, undefined} ->
- case parse_user_filter(Filter) of
- {ok, {Doc, FilterName}} ->
- {ok, {user, {Doc, FilterName}, query_params(Options)}};
- {error, Error} ->
- {error, Error}
- end;
- {undefined, _, undefined} ->
- {ok, {docids, DocIds}};
- {undefined, undefined, _} ->
- {ok, {mango, ejsort(mango_selector:normalize(Selector))}};
- _ ->
- Err = "`selector`, `filter` and `doc_ids` are mutually exclusive",
- {error, list_to_binary(Err)}
- end.
-
-% Fetches body of filter function from source database. Guaranteed to either
-% return {ok, Body} or an {error, Reason}. Also assume this function might
-% block due to network / socket issues for an indeterminate amount of time.
--spec fetch(binary(), binary(), binary()) ->
- {ok, {[_]}} | {error, binary()}.
-fetch(DDocName, FilterName, Source) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- try fetch_internal(DDocName, FilterName, Source) of
- Resp ->
- exit({exit_ok, Resp})
- catch
- throw:{fetch_error, Reason} ->
- exit({exit_fetch_error, Reason});
- _OtherTag:Reason ->
- exit({exit_other_error, Reason})
- end
- end),
- receive
- {'DOWN', Ref, process, Pid, {exit_ok, Resp}} ->
- {ok, Resp};
- {'DOWN', Ref, process, Pid, {exit_fetch_error, Reason}} ->
- {error, Reason};
- {'DOWN', Ref, process, Pid, {exit_other_error, Reason}} ->
- {error, couch_util:to_binary(Reason)}
- end.
-
-% Get replication type and view (if any) from replication document props
--spec view_type([_], [_]) ->
- {view, {binary(), binary()}} | {db, nil} | {error, binary()}.
-view_type(Props, Options) ->
- case couch_util:get_value(<<"filter">>, Props) of
- <<"_view">> ->
- {QP} = couch_util:get_value(query_params, Options, {[]}),
- ViewParam = couch_util:get_value(<<"view">>, QP),
- case re:split(ViewParam, <<"/">>) of
- [DName, ViewName] ->
- {view, {<<"_design/", DName/binary>>, ViewName}};
- _ ->
- {error, <<"Invalid `view` parameter.">>}
- end;
- _ ->
- {db, nil}
- end.
-
-% Private functions
-
-fetch_internal(DDocName, FilterName, Source) ->
- Db =
- case (catch couch_replicator_api_wrap:db_open(Source)) of
- {ok, Db0} ->
- Db0;
- DbError ->
- DbErrorMsg = io_lib:format(
- "Could not open source database `~s`: ~s",
- [
- couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(DbError)
- ]
- ),
- throw({fetch_error, iolist_to_binary(DbErrorMsg)})
- end,
- try
- Body =
- case
- (catch couch_replicator_api_wrap:open_doc(
- Db, <<"_design/", DDocName/binary>>, [ejson_body]
- ))
- of
- {ok, #doc{body = Body0}} ->
- Body0;
- DocError ->
- DocErrorMsg = io_lib:format(
- "Couldn't open document `_design/~s` from source "
- "database `~s`: ~s",
- [
- DDocName,
- couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(DocError)
- ]
- ),
- throw({fetch_error, iolist_to_binary(DocErrorMsg)})
- end,
- try
- Code = couch_util:get_nested_json_value(
- Body, [<<"filters">>, FilterName]
- ),
- re:replace(Code, [$^, "\s*(.*?)\s*", $$], "\\1", [{return, binary}])
- catch
- _Tag:CodeError ->
- CodeErrorMsg = io_lib:format(
- "Couldn't parse filter code from document ~s on `~s` "
- " Error: ~s",
- [
- DDocName,
- couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(CodeError)
- ]
- ),
- throw({fetch_error, CodeErrorMsg})
- end
- after
- couch_replicator_api_wrap:db_close(Db)
- end.
-
--spec query_params([_]) -> {[_]}.
-query_params(Options) ->
- couch_util:get_value(query_params, Options, {[]}).
-
-parse_user_filter(Filter) ->
- case re:run(Filter, "(.*?)/(.*)", [{capture, [1, 2], binary}]) of
- {match, [DDocName0, FilterName0]} ->
- {ok, {DDocName0, FilterName0}};
- _ ->
- {error, <<"Invalid filter. Must match `ddocname/filtername`.">>}
- end.
-
-% Sort an EJSON object's properties to attempt
-% to generate a unique representation. This is used
-% to reduce the chance of getting different
-% replication checkpoints for the same Mango selector
-ejsort({V}) ->
- ejsort_props(V, []);
-ejsort(V) when is_list(V) ->
- ejsort_array(V, []);
-ejsort(V) ->
- V.
-
-ejsort_props([], Acc) ->
- {lists:keysort(1, Acc)};
-ejsort_props([{K, V} | R], Acc) ->
- ejsort_props(R, [{K, ejsort(V)} | Acc]).
-
-ejsort_array([], Acc) ->
- lists:reverse(Acc);
-ejsort_array([V | R], Acc) ->
- ejsort_array(R, [ejsort(V) | Acc]).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-ejsort_basic_values_test() ->
- ?assertEqual(ejsort(0), 0),
- ?assertEqual(ejsort(<<"a">>), <<"a">>),
- ?assertEqual(ejsort(true), true),
- ?assertEqual(ejsort([]), []),
- ?assertEqual(ejsort({[]}), {[]}).
-
-ejsort_compound_values_test() ->
- ?assertEqual(ejsort([2, 1, 3, <<"a">>]), [2, 1, 3, <<"a">>]),
- Ej1 = {[{<<"a">>, 0}, {<<"c">>, 0}, {<<"b">>, 0}]},
- Ej1s = {[{<<"a">>, 0}, {<<"b">>, 0}, {<<"c">>, 0}]},
- ?assertEqual(ejsort(Ej1), Ej1s),
- Ej2 = {[{<<"x">>, Ej1}, {<<"z">>, Ej1}, {<<"y">>, [Ej1, Ej1]}]},
- ?assertEqual(
- ejsort(Ej2),
- {[{<<"x">>, Ej1s}, {<<"y">>, [Ej1s, Ej1s]}, {<<"z">>, Ej1s}]}
- ).
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
deleted file mode 100644
index 67e3f8474..000000000
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ /dev/null
@@ -1,538 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_httpc).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("ibrowse/include/ibrowse.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
--export([setup/1]).
--export([send_req/3]).
--export([full_url/2]).
-
--import(couch_util, [
- get_value/2,
- get_value/3
-]).
-
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
--define(MAX_WAIT, 5 * 60 * 1000).
--define(STREAM_STATUS, ibrowse_stream_status).
--define(STOP_HTTP_WORKER, stop_http_worker).
-
-% This limit is for the number of messages we're willing to discard
-% from an HTTP stream in clean_mailbox/1 before killing the worker
-% and returning. The original intent for clean_mailbox was to remove
-% a single message or two if the changes feed returned before fully
-% consuming the request. This threshold gives us confidence we'll
-% continue to properly close changes feeds while avoiding any case
-% where we may end up processing an unbounded number of messages.
--define(MAX_DISCARDED_MESSAGES, 16).
-
-setup(Db) ->
- #httpdb{
- httpc_pool = nil,
- url = Url,
- http_connections = MaxConns,
- proxy_url = ProxyUrl
- } = Db,
- {ok, Pid} = couch_replicator_httpc_pool:start_link(
- Url,
- ProxyUrl,
- [{max_connections, MaxConns}]
- ),
- case couch_replicator_auth:initialize(Db#httpdb{httpc_pool = Pid}) of
- {ok, Db1} ->
- {ok, Db1};
- {error, Error} ->
- LogMsg = "~p: auth plugin initialization failed ~p ~p",
- LogUrl = couch_util:url_strip_password(Url),
- couch_log:error(LogMsg, [?MODULE, LogUrl, Error]),
- throw({replication_auth_error, Error})
- end.
-
-send_req(HttpDb, Params1, Callback) ->
- put(?STREAM_STATUS, init),
- couch_stats:increment_counter([couch_replicator, requests]),
- Params2 = ?replace(
- Params1,
- qs,
- [{K, ?b2l(iolist_to_binary(V))} || {K, V} <- get_value(qs, Params1, [])]
- ),
- Params = ?replace(
- Params2,
- ibrowse_options,
- lists:keysort(1, get_value(ibrowse_options, Params2, []))
- ),
- {Worker, Response, HttpDb1} = send_ibrowse_req(HttpDb, Params),
- Ret =
- try
- process_response(Response, Worker, HttpDb1, Params, Callback)
- catch
- throw:{retry, NewHttpDb0, NewParams0} ->
- {retry, NewHttpDb0, NewParams0}
- after
- Pool = HttpDb1#httpdb.httpc_pool,
- case get(?STOP_HTTP_WORKER) of
- stop ->
- ok = stop_and_release_worker(Pool, Worker),
- erase(?STOP_HTTP_WORKER);
- undefined ->
- ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
- end,
- clean_mailbox(Response)
- end,
- % This is necessary to keep this tail-recursive. Calling
- % send_req in the catch clause would turn it into a body
- % recursive call accidentally.
- case Ret of
- {retry, #httpdb{} = NewHttpDb, NewParams} ->
- send_req(NewHttpDb, NewParams, Callback);
- _ ->
- Ret
- end.
-
-send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb0, Params) ->
- Method = get_value(method, Params, get),
- UserHeaders = get_value(headers, Params, []),
- Headers1 = merge_headers(BaseHeaders, UserHeaders),
- {Headers2, HttpDb} = couch_replicator_auth:update_headers(HttpDb0, Headers1),
- Url = full_url(HttpDb, Params),
- Body = get_value(body, Params, []),
- case get_value(path, Params) == "_changes" of
- true ->
- Timeout = infinity;
- false ->
- Timeout =
- case config:get("replicator", "request_timeout", "infinity") of
- "infinity" -> infinity;
- Milliseconds -> list_to_integer(Milliseconds)
- end
- end,
- {ok, Worker} = couch_replicator_httpc_pool:get_worker(HttpDb#httpdb.httpc_pool),
- BasicAuthOpts =
- case couch_replicator_utils:get_basic_auth_creds(HttpDb) of
- {undefined, undefined} ->
- [];
- {User, Pass} when is_list(User), is_list(Pass) ->
- [{basic_auth, {User, Pass}}]
- end,
- IbrowseOptions =
- BasicAuthOpts ++
- [
- {response_format, binary},
- {inactivity_timeout, HttpDb#httpdb.timeout}
- | lists:ukeymerge(
- 1,
- get_value(ibrowse_options, Params, []),
- HttpDb#httpdb.ibrowse_options
- )
- ],
- backoff_before_request(Worker, HttpDb, Params),
- Response = ibrowse:send_req_direct(
- Worker, Url, Headers2, Method, Body, IbrowseOptions, Timeout
- ),
- {Worker, Response, HttpDb}.
-
-%% Stop the worker, wait for it to die, then release it. Make sure it is dead
-%% before releasing it to the pool, so there is no race that recycles it again.
-%% Recycling a dying worker could end up with that worker returning an
-%% {error, req_timedout} error, which in reality is not a timeout, just
-%% a race condition.
-stop_and_release_worker(Pool, Worker) ->
- Ref = erlang:monitor(process, Worker),
- ibrowse_http_client:stop(Worker),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- end,
- ok = couch_replicator_httpc_pool:release_worker_sync(Pool, Worker).
-
-process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
- put(?STOP_HTTP_WORKER, stop),
- maybe_retry(sel_conn_closed, Worker, HttpDb, Params);
-%% This clause handles unexpected connection closing during pipelined requests.
-%% For example, if the server responds to a request, sets the Connection: close
-%% header and closes the socket, ibrowse will detect that error when it sends
-%% the next request.
-process_response({error, connection_closing}, Worker, HttpDb, Params, _Cb) ->
- put(?STOP_HTTP_WORKER, stop),
- maybe_retry({error, connection_closing}, Worker, HttpDb, Params);
-process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
- process_stream_response(ReqId, Worker, HttpDb, Params, Callback);
-process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
- case list_to_integer(Code) of
- R when R =:= 301; R =:= 302; R =:= 303 ->
- backoff_success(HttpDb, Params),
- do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
- 429 ->
- backoff(HttpDb, Params);
- Error when Error =:= 408; Error >= 500 ->
- couch_stats:increment_counter([couch_replicator, responses, failure]),
- maybe_retry({code, Error}, Worker, HttpDb, Params);
- Ok when Ok >= 200, Ok < 500 ->
- backoff_success(HttpDb, Params),
- couch_stats:increment_counter([couch_replicator, responses, success]),
- EJson =
- case Body of
- <<>> ->
- null;
- Json ->
- ?JSON_DECODE(Json)
- end,
- process_auth_response(HttpDb, Ok, Headers, Params),
- if
- Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop);
- true -> ok
- end,
- Callback(Ok, Headers, EJson)
- end;
-process_response(Error, Worker, HttpDb, Params, _Callback) ->
- maybe_retry(Error, Worker, HttpDb, Params).
-
-process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
- receive
- {ibrowse_async_headers, ReqId, Code, Headers} ->
- case list_to_integer(Code) of
- R when R =:= 301; R =:= 302; R =:= 303 ->
- backoff_success(HttpDb, Params),
- do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
- 429 ->
- Timeout = couch_replicator_rate_limiter:max_interval(),
- backoff(HttpDb#httpdb{timeout = Timeout}, Params);
- Error when Error =:= 408; Error >= 500 ->
- couch_stats:increment_counter(
- [couch_replicator, stream_responses, failure]
- ),
- report_error(Worker, HttpDb, Params, {code, Error});
- Ok when Ok >= 200, Ok < 500 ->
- backoff_success(HttpDb, Params),
- HttpDb1 = process_auth_response(HttpDb, Ok, Headers, Params),
- StreamDataFun = fun() ->
- stream_data_self(HttpDb1, Params, Worker, ReqId, Callback)
- end,
- put(?STREAM_STATUS, {streaming, Worker}),
- if
- Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop);
- true -> ok
- end,
- ibrowse:stream_next(ReqId),
- try
- Ret = Callback(Ok, Headers, StreamDataFun),
- Ret
- catch
- throw:{maybe_retry_req, connection_closed} ->
- maybe_retry(
- {connection_closed, mid_stream},
- Worker,
- HttpDb1,
- Params
- );
- throw:{maybe_retry_req, Err} ->
- maybe_retry(Err, Worker, HttpDb1, Params)
- end
- end;
- {ibrowse_async_response, ReqId, {error, _} = Error} ->
- couch_stats:increment_counter(
- [couch_replicator, stream_responses, failure]
- ),
- maybe_retry(Error, Worker, HttpDb, Params)
- after HttpDb#httpdb.timeout + 500 ->
- % Note: ibrowse should always reply with timeouts, but this doesn't
- % seem to be always true when there's a very high rate of requests
- % and many open connections.
- maybe_retry(timeout, Worker, HttpDb, Params)
- end.
-
-process_auth_response(HttpDb, Code, Headers, Params) ->
- case couch_replicator_auth:handle_response(HttpDb, Code, Headers) of
- {continue, HttpDb1} ->
- HttpDb1;
- {retry, HttpDb1} ->
- log_retry_error(Params, HttpDb1, 0, Code),
- throw({retry, HttpDb1, Params})
- end.
-
-% Only streaming HTTP requests send messages back from
-% the ibrowse worker process. We can detect that based
-% on the ibrowse_req_id format. This just drops all
-% messages for the given ReqId on the floor since we're
-% no longer in the HTTP request.
-
-clean_mailbox(ReqId) ->
- clean_mailbox(ReqId, ?MAX_DISCARDED_MESSAGES).
-
-clean_mailbox(_ReqId, 0) ->
- case get(?STREAM_STATUS) of
- {streaming, Worker} ->
- % We kill workers that continue to stream us
- % messages after we give up but do *not* exit
- % ourselves. This is because we may be running
- % as an exception unwinds and we don't want to
- % change any of that subtle logic.
- exit(Worker, {timeout, ibrowse_stream_cleanup});
- _ ->
- ok
- end,
- ok;
-clean_mailbox({ibrowse_req_id, ReqId}, Count) when Count > 0 ->
- case get(?STREAM_STATUS) of
- {streaming, Worker} ->
- case is_process_alive(Worker) of
- true ->
- discard_message(ReqId, Worker, Count);
- false ->
- put(?STREAM_STATUS, ended),
- ok
- end;
- Status when Status == init; Status == ended ->
- receive
- {ibrowse_async_response, ReqId, _} ->
- clean_mailbox({ibrowse_req_id, ReqId}, Count - 1);
- {ibrowse_async_response_end, ReqId} ->
- put(?STREAM_STATUS, ended),
- ok
- after 0 ->
- ok
- end
- end;
-clean_mailbox(_, Count) when Count > 0 ->
- ok.
-
-discard_message(ReqId, Worker, Count) ->
- ibrowse:stream_next(ReqId),
- receive
- {ibrowse_async_response, ReqId, _} ->
- clean_mailbox({ibrowse_req_id, ReqId}, Count - 1);
- {ibrowse_async_response_end, ReqId} ->
- put(?STREAM_STATUS, ended),
- ok
- after 30000 ->
- exit(Worker, {timeout, ibrowse_stream_cleanup}),
- exit({timeout, ibrowse_stream_cleanup})
- end.
-
-maybe_retry(Error, Worker, #httpdb{retries = 0} = HttpDb, Params) ->
- report_error(Worker, HttpDb, Params, {error, Error});
-maybe_retry(
- Error,
- Worker,
- #httpdb{retries = Retries, wait = Wait} = HttpDb,
- Params
-) ->
- case total_error_time_exceeded(HttpDb) of
- true ->
- report_error(Worker, HttpDb, Params, {error, Error});
- false ->
- ok = timer:sleep(Wait),
- log_retry_error(Params, HttpDb, Wait, Error),
- Wait2 = erlang:min(Wait * 2, ?MAX_WAIT),
- HttpDb1 = HttpDb#httpdb{retries = Retries - 1, wait = Wait2},
- HttpDb2 = update_first_error_timestamp(HttpDb1),
- throw({retry, HttpDb2, Params})
- end.
-
-% When retrying, check to make sure the total time spent retrying a request is
-% below the current scheduler health threshold. The goal is to not exceed the
-% threshold, otherwise a job which keeps retrying for too long would still be
-% considered healthy.
-total_error_time_exceeded(#httpdb{first_error_timestamp = nil}) ->
- false;
-total_error_time_exceeded(#httpdb{first_error_timestamp = ErrorTimestamp}) ->
- HealthThresholdSec = couch_replicator_scheduler:health_threshold(),
- % The threshold value is halved because the next step in the calling code
- % is a doubling. Not halving here could mean sleeping too long and
- % exceeding the health threshold.
- ThresholdUSec = (HealthThresholdSec / 2) * 1000000,
- timer:now_diff(os:timestamp(), ErrorTimestamp) > ThresholdUSec.
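-
-% Editorial worked example (not part of the original module), assuming the
-% default health threshold of 120 seconds (?DEFAULT_HEALTH_THRESHOLD_SEC in
-% couch_replicator_scheduler): retries stop once more than 60 seconds have
-% passed since the first error. Because the caller doubles the wait before
-% the next retry, the following sleep is at most about as long as the time
-% already spent, so the total retry time stays roughly within the threshold.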
-
-% Remember the first time an error occurs. This value is used later to check
-% the total time spent retrying a request. Because retrying is recursive, on a
-% successful result the #httpdb{} record is reset back to its original value.
-update_first_error_timestamp(#httpdb{first_error_timestamp = nil} = HttpDb) ->
- HttpDb#httpdb{first_error_timestamp = os:timestamp()};
-update_first_error_timestamp(HttpDb) ->
- HttpDb.
-
-log_retry_error(Params, HttpDb, Wait, Error) ->
- Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
- Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
- couch_log:notice(
- "Retrying ~s request to ~s in ~p seconds due to error ~s",
- [Method, Url, Wait / 1000, error_cause(Error)]
- ).
-
-report_error(_Worker, HttpDb, Params, Error) ->
- Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
- Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
- do_report_error(Url, Method, Error),
- exit({http_request_failed, Method, Url, Error}).
-
-do_report_error(Url, Method, {code, Code}) ->
- couch_log:error(
- "Replicator, request ~s to ~p failed. The received "
- "HTTP error code is ~p",
- [Method, Url, Code]
- );
-do_report_error(FullUrl, Method, Error) ->
- couch_log:error(
- "Replicator, request ~s to ~p failed due to error ~s",
- [Method, FullUrl, error_cause(Error)]
- ).
-
-error_cause({error, Cause}) ->
- lists:flatten(io_lib:format("~p", [Cause]));
-error_cause(Cause) ->
- lists:flatten(io_lib:format("~p", [Cause])).
-
-stream_data_self(#httpdb{timeout = T} = HttpDb, Params, Worker, ReqId, Cb) ->
- case accumulate_messages(ReqId, [], T + 500) of
- {Data, ibrowse_async_response} ->
- ibrowse:stream_next(ReqId),
- {Data, fun() -> stream_data_self(HttpDb, Params, Worker, ReqId, Cb) end};
- {Data, ibrowse_async_response_end} ->
- put(?STREAM_STATUS, ended),
- {Data, fun() -> throw({maybe_retry_req, more_data_expected}) end}
- end.
-
-accumulate_messages(ReqId, Acc, Timeout) ->
- receive
- {ibrowse_async_response, ReqId, {error, Error}} ->
- throw({maybe_retry_req, Error});
- {ibrowse_async_response, ReqId, <<>>} ->
- accumulate_messages(ReqId, Acc, Timeout);
- {ibrowse_async_response, ReqId, Data} ->
- accumulate_messages(ReqId, [Data | Acc], 0);
- {ibrowse_async_response_end, ReqId} ->
- {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response_end}
- after Timeout ->
- % Note: ibrowse should always reply with timeouts, but this doesn't
- % seem to be always true when there's a very high rate of requests
- % and many open connections.
- if
- Acc =:= [] ->
- throw({maybe_retry_req, timeout});
- true ->
- {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response}
- end
- end.
-
-full_url(#httpdb{url = BaseUrl}, Params) ->
- Path = get_value(path, Params, []),
- QueryArgs = get_value(qs, Params, []),
- BaseUrl ++ Path ++ query_args_to_string(QueryArgs, []).
-
-query_args_to_string([], []) ->
- "";
-query_args_to_string([], Acc) ->
- "?" ++ string:join(lists:reverse(Acc), "&");
-query_args_to_string([{K, V} | Rest], Acc) ->
- query_args_to_string(Rest, [K ++ "=" ++ couch_httpd:quote(V) | Acc]).
-
-do_redirect(_Worker, Code, Headers, #httpdb{url = Url} = HttpDb, Params, _Cb) ->
- RedirectUrl = redirect_url(Headers, Url),
- {HttpDb2, Params2} = after_redirect(RedirectUrl, Code, HttpDb, Params),
- throw({retry, HttpDb2, Params2}).
-
-redirect_url(RespHeaders, OrigUrl) ->
- MochiHeaders = mochiweb_headers:make(RespHeaders),
- RedUrl = mochiweb_headers:get_value("Location", MochiHeaders),
- #url{
- host = Host,
- host_type = HostType,
- port = Port,
- % includes query string
- path = Path,
- protocol = Proto
- } = ibrowse_lib:parse_url(RedUrl),
- #url{
- username = User,
- password = Passwd
- } = ibrowse_lib:parse_url(OrigUrl),
- Creds =
- case is_list(User) andalso is_list(Passwd) of
- true ->
- User ++ ":" ++ Passwd ++ "@";
- false ->
- []
- end,
- HostPart =
- case HostType of
- ipv6_address ->
- "[" ++ Host ++ "]";
- _ ->
- Host
- end,
- atom_to_list(Proto) ++ "://" ++ Creds ++ HostPart ++ ":" ++
- integer_to_list(Port) ++ Path.
-
-after_redirect(RedirectUrl, 303, HttpDb, Params) ->
- after_redirect(RedirectUrl, HttpDb, ?replace(Params, method, get));
-after_redirect(RedirectUrl, _Code, HttpDb, Params) ->
- after_redirect(RedirectUrl, HttpDb, Params).
-
-after_redirect(RedirectUrl, HttpDb, Params) ->
- Params2 = lists:keydelete(path, 1, lists:keydelete(qs, 1, Params)),
- {HttpDb#httpdb{url = RedirectUrl}, Params2}.
-
-backoff_key(HttpDb, Params) ->
- Method = get_value(method, Params, get),
- Url = HttpDb#httpdb.url,
- {Url, Method}.
-
-backoff(HttpDb, Params) ->
- Key = backoff_key(HttpDb, Params),
- couch_replicator_rate_limiter:failure(Key),
- throw({retry, HttpDb, Params}).
-
-backoff_success(HttpDb, Params) ->
- Key = backoff_key(HttpDb, Params),
- couch_replicator_rate_limiter:success(Key).
-
-backoff_before_request(Worker, HttpDb, Params) ->
- Key = backoff_key(HttpDb, Params),
- Limit = couch_replicator_rate_limiter:max_interval(),
- case couch_replicator_rate_limiter:interval(Key) of
- Sleep when Sleep >= Limit ->
- report_error(Worker, HttpDb, Params, max_backoff);
- Sleep when Sleep >= 1 ->
- timer:sleep(Sleep);
- Sleep when Sleep == 0 ->
- ok
- end.
-
-merge_headers(Headers1, Headers2) when is_list(Headers1), is_list(Headers2) ->
- Empty = mochiweb_headers:empty(),
- Merged = mochiweb_headers:enter_from_list(Headers1 ++ Headers2, Empty),
- mochiweb_headers:to_list(Merged).
-
--ifdef(TEST).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-merge_headers_test() ->
- ?assertEqual([], merge_headers([], [])),
- ?assertEqual([{"a", "x"}], merge_headers([], [{"a", "x"}])),
- ?assertEqual([{"a", "x"}], merge_headers([{"a", "x"}], [])),
- ?assertEqual([{"a", "y"}], merge_headers([{"A", "x"}], [{"a", "y"}])),
- ?assertEqual(
- [{"a", "y"}, {"B", "x"}],
- merge_headers(
- [{"B", "x"}],
- [{"a", "y"}]
- )
- ),
- ?assertEqual([{"a", "y"}], merge_headers([{"A", "z"}, {"a", "y"}], [])),
- ?assertEqual([{"a", "y"}], merge_headers([], [{"A", "z"}, {"a", "y"}])).
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_httpc_pool.erl b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
deleted file mode 100644
index ff070f922..000000000
--- a/src/couch_replicator/src/couch_replicator_httpc_pool.erl
+++ /dev/null
@@ -1,216 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_httpc_pool).
--behaviour(gen_server).
--vsn(1).
-
-% public API
--export([start_link/2, start_link/3, stop/1]).
--export([get_worker/1, release_worker/2, release_worker_sync/2]).
-
-% gen_server API
--export([init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2, format_status/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_util, [
- get_value/2
-]).
-
--record(state, {
- url,
- proxy_url,
- % max # of workers allowed
- limit,
- workers = [],
- % blocked clients waiting for a worker
- waiting = queue:new(),
- % clients who've been given a worker
- callers = []
-}).
-
-start_link(Url, Options) ->
- start_link(Url, undefined, Options).
-
-start_link(Url, ProxyUrl, Options) ->
- gen_server:start_link(?MODULE, {Url, ProxyUrl, Options}, []).
-
-stop(Pool) ->
- ok = gen_server:call(Pool, stop, infinity).
-
-get_worker(Pool) ->
- {ok, _Worker} = gen_server:call(Pool, get_worker, infinity).
-
-release_worker(Pool, Worker) ->
- ok = gen_server:cast(Pool, {release_worker, Worker}).
-
-release_worker_sync(Pool, Worker) ->
- ok = gen_server:call(Pool, {release_worker_sync, Worker}).
-
-init({Url, ProxyUrl, Options}) ->
- process_flag(trap_exit, true),
- State = #state{
- url = Url,
- proxy_url = ProxyUrl,
- limit = get_value(max_connections, Options)
- },
- {ok, State}.
-
-handle_call(get_worker, From, State) ->
- #state{
- waiting = Waiting,
- callers = Callers,
- url = Url,
- proxy_url = ProxyUrl,
- limit = Limit,
- workers = Workers
- } = State,
- case length(Workers) >= Limit of
- true ->
- {noreply, State#state{waiting = queue:in(From, Waiting)}};
- false ->
- % If the call to acquire fails, the worker pool will crash with a
- % badmatch.
- {ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
- NewState = State#state{
- workers = [Worker | Workers],
- callers = monitor_client(Callers, Worker, From)
- },
- {reply, {ok, Worker}, NewState}
- end;
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State};
-handle_call({release_worker_sync, Worker}, _From, State) ->
- {reply, ok, release_worker_internal(Worker, State)}.
-
-handle_cast({release_worker, Worker}, State) ->
- {noreply, release_worker_internal(Worker, State)}.
-
-handle_info({'EXIT', Pid, _Reason}, State) ->
- #state{
- url = Url,
- proxy_url = ProxyUrl,
- workers = Workers,
- waiting = Waiting,
- callers = Callers
- } = State,
- NewCallers0 = demonitor_client(Callers, Pid),
- case Workers -- [Pid] of
- Workers ->
- {noreply, State#state{callers = NewCallers0}};
- Workers2 ->
- case queue:out(Waiting) of
- {empty, _} ->
- {noreply, State#state{
- workers = Workers2,
- callers = NewCallers0
- }};
- {{value, From}, Waiting2} ->
- {ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
- NewCallers1 = monitor_client(NewCallers0, Worker, From),
- gen_server:reply(From, {ok, Worker}),
- NewState = State#state{
- workers = [Worker | Workers2],
- waiting = Waiting2,
- callers = NewCallers1
- },
- {noreply, NewState}
- end
- end;
-handle_info({'DOWN', Ref, process, _, _}, #state{callers = Callers} = State) ->
- case lists:keysearch(Ref, 2, Callers) of
- {value, {Worker, Ref}} ->
- handle_cast({release_worker, Worker}, State);
- false ->
- {noreply, State}
- end.
-
-code_change(_OldVsn, #state{} = State, _Extra) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-format_status(_Opt, [_PDict, State]) ->
- #state{
- url = Url,
- proxy_url = ProxyUrl
- } = State,
- [
- {data, [
- {"State", State#state{
- url = couch_util:url_strip_password(Url),
- proxy_url = couch_util:url_strip_password(ProxyUrl)
- }}
- ]}
- ].
-
-monitor_client(Callers, Worker, {ClientPid, _}) ->
- [{Worker, erlang:monitor(process, ClientPid)} | Callers].
-
-demonitor_client(Callers, Worker) ->
- case lists:keysearch(Worker, 1, Callers) of
- {value, {Worker, MonRef}} ->
- erlang:demonitor(MonRef, [flush]),
- lists:keydelete(Worker, 1, Callers);
- false ->
- Callers
- end.
-
-release_worker_internal(Worker, State) ->
- #state{waiting = Waiting, callers = Callers} = State,
- NewCallers0 = demonitor_client(Callers, Worker),
- case
- is_process_alive(Worker) andalso
- lists:member(Worker, State#state.workers)
- of
- true ->
- Workers =
- case queue:out(Waiting) of
- {empty, Waiting2} ->
- NewCallers1 = NewCallers0,
- couch_replicator_connection:release(Worker),
- State#state.workers -- [Worker];
- {{value, From}, Waiting2} ->
- NewCallers1 = monitor_client(NewCallers0, Worker, From),
- gen_server:reply(From, {ok, Worker}),
- State#state.workers
- end,
- NewState = State#state{
- workers = Workers,
- waiting = Waiting2,
- callers = NewCallers1
- },
- NewState;
- false ->
- State#state{callers = NewCallers0}
- end.
-
--ifdef(TEST).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-format_status_test_() ->
- ?_test(begin
- State = #state{
- url = "https://username1:password1@$ACCOUNT2.cloudant.com/db",
- proxy_url = "https://username2:password2@proxy.thing.com:8080/"
- },
- [{data, [{"State", ScrubbedN}]}] = format_status(normal, [[], State]),
- ?assertEqual("https://username1:*****@$ACCOUNT2.cloudant.com/db", ScrubbedN#state.url),
- ?assertEqual("https://username2:*****@proxy.thing.com:8080/", ScrubbedN#state.proxy_url),
- ok
- end).
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_httpd.erl b/src/couch_replicator/src/couch_replicator_httpd.erl
deleted file mode 100644
index 77c78efe2..000000000
--- a/src/couch_replicator/src/couch_replicator_httpd.erl
+++ /dev/null
@@ -1,190 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_httpd).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([
- handle_req/1,
- handle_scheduler_req/1
-]).
-
--import(chttpd, [
- send_json/2,
- send_json/3,
- send_method_not_allowed/2
-]).
-
--import(couch_util, [
- to_binary/1
-]).
-
--define(DEFAULT_TASK_LIMIT, 100).
--define(REPDB, <<"_replicator">>).
-% This is a macro so it can be used as a guard
--define(ISREPDB(X),
- X =:= ?REPDB orelse
- binary_part(X, {byte_size(X), -12}) =:=
- <<"/_replicator">>
-).
-
-handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"jobs">>]} = Req) ->
- Limit = couch_replicator_httpd_util:parse_int_param(
- Req,
- "limit",
- ?DEFAULT_TASK_LIMIT,
- 0,
- infinity
- ),
- Skip = couch_replicator_httpd_util:parse_int_param(
- Req,
- "skip",
- 0,
- 0,
- infinity
- ),
- {Replies, _BadNodes} = rpc:multicall(couch_replicator_scheduler, jobs, []),
- Flatlist = lists:concat(Replies),
- % couch_replicator_scheduler:job_ejson/1 guarantees {id, Id} to be
- % the first item in the list
- Sorted = lists:sort(fun({[{id, A} | _]}, {[{id, B} | _]}) -> A =< B end, Flatlist),
- Total = length(Sorted),
- Offset = min(Skip, Total),
- Sublist = lists:sublist(Sorted, Offset + 1, Limit),
- Sublist1 = [
- couch_replicator_httpd_util:update_db_name(Task)
- || Task <- Sublist
- ],
- send_json(Req, {[{total_rows, Total}, {offset, Offset}, {jobs, Sublist1}]});
-handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"jobs">>, JobId]} = Req) ->
- case couch_replicator:job(JobId) of
- {ok, JobInfo} ->
- send_json(Req, couch_replicator_httpd_util:update_db_name(JobInfo));
- {error, not_found} ->
- throw(not_found)
- end;
-handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"docs">>]} = Req) ->
- handle_scheduler_docs(?REPDB, Req);
-handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"docs">>, Db]} = Req) when
- ?ISREPDB(Db)
-->
- handle_scheduler_docs(Db, Req);
-handle_scheduler_req(
- #httpd{method = 'GET', path_parts = [_, <<"docs">>, Db, DocId]} =
- Req
-) when ?ISREPDB(Db) ->
- handle_scheduler_doc(Db, DocId, Req);
-% Allow users to pass in unencoded _replicator database names (/ are not
-% escaped). This is possible here because _replicator is not a valid document
-% ID, so we can disambiguate between an element of a db path and the document ID.
-handle_scheduler_req(
- #httpd{method = 'GET', path_parts = [_, <<"docs">> | Unquoted]} =
- Req
-) ->
- case parse_unquoted_docs_path(Unquoted) of
- {db_only, Db} ->
- handle_scheduler_docs(Db, Req);
- {db_and_doc, Db, DocId} ->
- handle_scheduler_doc(Db, DocId, Req);
- {error, invalid} ->
- throw(bad_request)
- end;
-handle_scheduler_req(#httpd{method = 'GET'} = Req) ->
- send_json(Req, 404, {[{error, <<"not found">>}]});
-handle_scheduler_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- RepDoc = {Props} = couch_httpd:json_body_obj(Req),
- couch_replicator_httpd_util:validate_rep_props(Props),
- case couch_replicator:replicate(RepDoc, UserCtx) of
- {error, {Error, Reason}} ->
- send_json(
- Req,
- 500,
- {[{error, to_binary(Error)}, {reason, to_binary(Reason)}]}
- );
- {error, not_found} ->
- % Tried to cancel a replication that didn't exist.
- send_json(Req, 404, {[{error, <<"not found">>}]});
- {error, Reason} ->
- send_json(Req, 500, {[{error, to_binary(Reason)}]});
- {ok, {cancelled, RepId}} ->
- send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
- {ok, {continuous, RepId}} ->
- send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
- {ok, {HistoryResults}} ->
- send_json(Req, {[{ok, true} | HistoryResults]})
- end;
-handle_req(Req) ->
- send_method_not_allowed(Req, "POST").
-
-handle_scheduler_docs(Db, Req) when is_binary(Db) ->
- VArgs0 = couch_mrview_http:parse_params(Req, undefined),
- StatesQs = chttpd:qs_value(Req, "states"),
- States = couch_replicator_httpd_util:parse_replication_state_filter(StatesQs),
- VArgs1 = VArgs0#mrargs{
- view_type = map,
- include_docs = true,
- reduce = false,
- extra = [{filter_states, States}]
- },
- VArgs2 = couch_mrview_util:validate_args(VArgs1),
- Opts = [{user_ctx, Req#httpd.user_ctx}],
- Max = chttpd:chunked_response_buffer_size(),
- Acc = couch_replicator_httpd_util:docs_acc_new(Req, Db, Max),
- Cb = fun couch_replicator_httpd_util:docs_cb/2,
- {ok, RAcc} = couch_replicator_fabric:docs(Db, Opts, VArgs2, Cb, Acc),
- {ok, couch_replicator_httpd_util:docs_acc_response(RAcc)}.
-
-handle_scheduler_doc(Db, DocId, Req) when is_binary(Db), is_binary(DocId) ->
- UserCtx = Req#httpd.user_ctx,
- case couch_replicator:doc(Db, DocId, UserCtx#user_ctx.roles) of
- {ok, DocInfo} ->
- send_json(Req, couch_replicator_httpd_util:update_db_name(DocInfo));
- {error, not_found} ->
- throw(not_found)
- end.
-
-parse_unquoted_docs_path([_, _ | _] = Unquoted) ->
- DbAndAfter = lists:dropwhile(fun(E) -> E =/= ?REPDB end, Unquoted),
- BeforeRDb = lists:takewhile(fun(E) -> E =/= ?REPDB end, Unquoted),
- case DbAndAfter of
- [] ->
- {error, invalid};
- [?REPDB] ->
- {db_only, filename:join(BeforeRDb ++ [?REPDB])};
- [?REPDB, DocId] ->
- {db_and_doc, filename:join(BeforeRDb ++ [?REPDB]), DocId}
- end.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-unquoted_scheduler_docs_path_test_() ->
- [
- ?_assertEqual(Res, parse_unquoted_docs_path(Path))
- || {Res, Path} <- [
- {{error, invalid}, [<<"a">>, <<"b">>]},
- {{db_only, <<"a/_replicator">>}, [<<"a">>, ?REPDB]},
- {{db_only, <<"a/b/_replicator">>}, [<<"a">>, <<"b">>, ?REPDB]},
- {{db_and_doc, <<"_replicator">>, <<"x">>}, [?REPDB, <<"x">>]},
- {{db_and_doc, <<"a/_replicator">>, <<"x">>}, [<<"a">>, ?REPDB, <<"x">>]},
- {{error, invalid}, [<<"a/_replicator">>, <<"x">>]}
- ]
- ].
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_httpd_util.erl b/src/couch_replicator/src/couch_replicator_httpd_util.erl
deleted file mode 100644
index ddcc179d4..000000000
--- a/src/couch_replicator/src/couch_replicator_httpd_util.erl
+++ /dev/null
@@ -1,201 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_httpd_util).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([
- validate_rep_props/1,
- parse_int_param/5,
- parse_replication_state_filter/1,
- update_db_name/1,
- docs_acc_new/3,
- docs_acc_response/1,
- docs_cb/2
-]).
-
--import(couch_httpd, [
- send_json/2,
- send_json/3,
- send_method_not_allowed/2
-]).
-
--import(couch_util, [
- to_binary/1
-]).
-
-parse_replication_state_filter(undefined) ->
- % This is the default (wildcard) filter
- [];
-parse_replication_state_filter(States) when is_list(States) ->
- AllStates = couch_replicator:replication_states(),
- StrStates = [string:to_lower(S) || S <- string:tokens(States, ",")],
- AtomStates =
- try
- [list_to_existing_atom(S) || S <- StrStates]
- catch
- error:badarg ->
- Msg1 = io_lib:format("States must be one or more of ~w", [AllStates]),
- throw({query_parse_error, ?l2b(Msg1)})
- end,
- AllSet = sets:from_list(AllStates),
- StatesSet = sets:from_list(AtomStates),
- Diff = sets:to_list(sets:subtract(StatesSet, AllSet)),
- case Diff of
- [] ->
- AtomStates;
- _ ->
- Args = [Diff, AllStates],
- Msg2 = io_lib:format("Unknown states ~w. Choose from: ~w", Args),
- throw({query_parse_error, ?l2b(Msg2)})
- end.
-
-parse_int_param(Req, Param, Default, Min, Max) ->
- IntVal =
- try
- list_to_integer(chttpd:qs_value(Req, Param, integer_to_list(Default)))
- catch
- error:badarg ->
- Msg1 = io_lib:format("~s must be an integer", [Param]),
- throw({query_parse_error, ?l2b(Msg1)})
- end,
- case IntVal >= Min andalso IntVal =< Max of
- true ->
- IntVal;
- false ->
- Msg2 = io_lib:format("~s not in range of [~w,~w]", [Param, Min, Max]),
- throw({query_parse_error, ?l2b(Msg2)})
- end.
-
-validate_rep_props([]) ->
- ok;
-validate_rep_props([{<<"query_params">>, {Params}} | Rest]) ->
- lists:foreach(
- fun
- ({_, V}) when is_binary(V) -> ok;
- ({K, _}) -> throw({bad_request, <<K/binary, " value must be a string.">>})
- end,
- Params
- ),
- validate_rep_props(Rest);
-validate_rep_props([_ | Rest]) ->
- validate_rep_props(Rest).
-
-prepend_val(#vacc{prepend = Prepend}) ->
- case Prepend of
- undefined ->
- "";
- _ ->
- Prepend
- end.
-
-maybe_flush_response(#vacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
- Size > 0 andalso (Size + Len) > Max
-->
- #vacc{buffer = Buffer, resp = Resp} = Acc,
- {ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
- {ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
-maybe_flush_response(Acc0, Data, Len) ->
- #vacc{buffer = Buf, bufsize = Size} = Acc0,
- Acc = Acc0#vacc{
- prepend = ",\r\n",
- buffer = [Buf | Data],
- bufsize = Size + Len
- },
- {ok, Acc}.
-
-docs_acc_new(Req, Db, Threshold) ->
- #vacc{db = Db, req = Req, threshold = Threshold}.
-
-docs_acc_response(#vacc{resp = Resp}) ->
- Resp.
-
-docs_cb({error, Reason}, #vacc{resp = undefined} = Acc) ->
- {ok, Resp} = chttpd:send_error(Acc#vacc.req, Reason),
- {ok, Acc#vacc{resp = Resp}};
-docs_cb(complete, #vacc{resp = undefined} = Acc) ->
- % Nothing in view
- {ok, Resp} = chttpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
- {ok, Acc#vacc{resp = Resp}};
-docs_cb(Msg, #vacc{resp = undefined} = Acc) ->
- %% Start response
- Headers = [],
- {ok, Resp} = chttpd:start_delayed_json_response(Acc#vacc.req, 200, Headers),
- docs_cb(Msg, Acc#vacc{resp = Resp, should_close = true});
-docs_cb({error, Reason}, #vacc{resp = Resp} = Acc) ->
- {ok, Resp1} = chttpd:send_delayed_error(Resp, Reason),
- {ok, Acc#vacc{resp = Resp1}};
-docs_cb(complete, #vacc{resp = Resp, buffer = Buf, threshold = Max} = Acc) ->
- % Finish view output and possibly end the response
- {ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, "\r\n]}", Max),
- case Acc#vacc.should_close of
- true ->
- {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp = Resp2}};
- _ ->
- {ok, Acc#vacc{
- resp = Resp1,
- meta_sent = false,
- row_sent = false,
- prepend = ",\r\n",
- buffer = [],
- bufsize = 0
- }}
- end;
-docs_cb({meta, Meta}, #vacc{meta_sent = false, row_sent = false} = Acc) ->
- % Sending metadata as we've not sent it or any row yet
- Parts =
- case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [io_lib:format("\"total_rows\":~p", [adjust_total(Total)])]
- end ++
- case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [io_lib:format("\"offset\":~p", [Offset])]
- end ++ ["\"docs\":["],
- Chunk = [prepend_val(Acc), "{", string:join(Parts, ","), "\r\n"],
- {ok, AccOut} = maybe_flush_response(Acc, Chunk, iolist_size(Chunk)),
- {ok, AccOut#vacc{prepend = "", meta_sent = true}};
-docs_cb({meta, _Meta}, #vacc{} = Acc) ->
- %% ignore metadata
- {ok, Acc};
-docs_cb({row, Row}, #vacc{meta_sent = false} = Acc) ->
- %% sorted=false and row arrived before meta
- % Adding another row
- Chunk = [prepend_val(Acc), "{\"docs\":[\r\n", row_to_json(Row)],
- maybe_flush_response(Acc#vacc{meta_sent = true, row_sent = true}, Chunk, iolist_size(Chunk));
-docs_cb({row, Row}, #vacc{meta_sent = true} = Acc) ->
- % Adding another row
- Chunk = [prepend_val(Acc), row_to_json(Row)],
- maybe_flush_response(Acc#vacc{row_sent = true}, Chunk, iolist_size(Chunk)).
-
-update_db_name({Props}) ->
- {value, {database, DbName}, Props1} = lists:keytake(database, 1, Props),
- {[{database, normalize_db_name(DbName)} | Props1]}.
-
-normalize_db_name(<<"shards/", _/binary>> = DbName) ->
- mem3:dbname(DbName);
-normalize_db_name(DbName) ->
- DbName.
-
-row_to_json(Row) ->
- Doc0 = couch_util:get_value(doc, Row),
- Doc1 = update_db_name(Doc0),
- ?JSON_ENCODE(Doc1).
-
-%% Adjust Total as there is an automatically created validation design doc
-adjust_total(Total) when is_integer(Total), Total > 0 ->
- Total - 1;
-adjust_total(Total) when is_integer(Total) ->
- 0.
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
deleted file mode 100644
index 6543cf069..000000000
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ /dev/null
@@ -1,282 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_ids).
-
--export([
- replication_id/1,
- replication_id/2,
- convert/1
-]).
-
--include_lib("ibrowse/include/ibrowse.hrl").
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
--include("couch_replicator.hrl").
-
-% replication_id/1 and replication_id/2 will attempt to fetch
-% filter code for filtered replications. If fetching or parsing
-% of the remotely fetched filter code fails, they throw a
-% {filter_fetch_error, Error} exception.
-%
-
-replication_id(#rep{options = Options} = Rep) ->
- BaseId = replication_id(Rep, ?REP_ID_VERSION),
- {BaseId, maybe_append_options([continuous, create_target], Options)}.
-
-% Versioned clauses for generating replication IDs.
-% If a change is made to how replications are identified,
-% please add a new clause and increase ?REP_ID_VERSION.
-
-replication_id(#rep{} = Rep, 4) ->
- UUID = couch_server:get_uuid(),
- SrcInfo = get_v4_endpoint(Rep#rep.source),
- TgtInfo = get_v4_endpoint(Rep#rep.target),
- maybe_append_filters([UUID, SrcInfo, TgtInfo], Rep);
-replication_id(#rep{} = Rep, 3) ->
- UUID = couch_server:get_uuid(),
- Src = get_rep_endpoint(Rep#rep.source),
- Tgt = get_rep_endpoint(Rep#rep.target),
- maybe_append_filters([UUID, Src, Tgt], Rep);
-replication_id(#rep{} = Rep, 2) ->
- {ok, HostName} = inet:gethostname(),
- Port =
- case (catch mochiweb_socket_server:get(couch_httpd, port)) of
- P when is_number(P) ->
- P;
- _ ->
- % On restart we might be called before the couch_httpd process is
- % started.
- % TODO: we might be under an SSL socket server only, or both under
- % SSL and a non-SSL socket.
- % ... mochiweb_socket_server:get(https, port)
- config:get_integer("httpd", "port", 5984)
- end,
- Src = get_rep_endpoint(Rep#rep.source),
- Tgt = get_rep_endpoint(Rep#rep.target),
- maybe_append_filters([HostName, Port, Src, Tgt], Rep);
-replication_id(#rep{} = Rep, 1) ->
- {ok, HostName} = inet:gethostname(),
- Src = get_rep_endpoint(Rep#rep.source),
- Tgt = get_rep_endpoint(Rep#rep.target),
- maybe_append_filters([HostName, Src, Tgt], Rep).
-
--spec convert([_] | binary() | {string(), string()}) -> {string(), string()}.
-convert(Id) when is_list(Id) ->
- convert(?l2b(Id));
-convert(Id0) when is_binary(Id0) ->
- % Spaces can result from mochiweb incorrectly unquoting + characters from
- % the URL path. So undo the incorrect parsing here to avoid forcing
- % users to url encode + characters.
- Id = binary:replace(Id0, <<" ">>, <<"+">>, [global]),
- lists:splitwith(fun(Char) -> Char =/= $+ end, ?b2l(Id));
-convert({BaseId, Ext} = Id) when is_list(BaseId), is_list(Ext) ->
- Id.
-
-% Private functions
-
-maybe_append_filters(
- Base,
- #rep{source = Source, options = Options}
-) ->
- Base2 =
- Base ++
- case couch_replicator_filters:parse(Options) of
- {ok, nil} ->
- [];
- {ok, {view, Filter, QueryParams}} ->
- [Filter, QueryParams];
- {ok, {user, {Doc, Filter}, QueryParams}} ->
- case couch_replicator_filters:fetch(Doc, Filter, Source) of
- {ok, Code} ->
- [Code, QueryParams];
- {error, Error} ->
- throw({filter_fetch_error, Error})
- end;
- {ok, {docids, DocIds}} ->
- [DocIds];
- {ok, {mango, Selector}} ->
- [Selector];
- {error, FilterParseError} ->
- throw({error, FilterParseError})
- end,
- couch_util:to_hex(couch_hash:md5_hash(term_to_binary(Base2))).
-
-maybe_append_options(Options, RepOptions) ->
- lists:foldl(
- fun(Option, Acc) ->
- Acc ++
- case couch_util:get_value(Option, RepOptions, false) of
- true ->
- "+" ++ atom_to_list(Option);
- false ->
- ""
- end
- end,
- [],
- Options
- ).
-
-get_rep_endpoint(#httpdb{url = Url, headers = Headers}) ->
- DefaultHeaders = (#httpdb{})#httpdb.headers,
- {remote, Url, Headers -- DefaultHeaders}.
-
-get_v4_endpoint(#httpdb{} = HttpDb) ->
- {remote, Url, Headers} = get_rep_endpoint(HttpDb),
- {User, _} = couch_replicator_utils:get_basic_auth_creds(HttpDb),
- {Host, NonDefaultPort, Path} = get_v4_url_info(Url),
- % Keep this to ensure checkpoints don't change
- OAuth = undefined,
- {remote, User, Host, NonDefaultPort, Path, Headers, OAuth}.
-
-get_v4_url_info(Url) when is_binary(Url) ->
- get_v4_url_info(binary_to_list(Url));
-get_v4_url_info(Url) ->
- case ibrowse_lib:parse_url(Url) of
- {error, invalid_uri} ->
- % Tolerate errors here to avoid a bad user document
- % crashing the replicator
- {Url, undefined, undefined};
- #url{
- protocol = Schema,
- host = Host,
- port = Port,
- path = Path
- } ->
- NonDefaultPort = get_non_default_port(Schema, Port),
- {Host, NonDefaultPort, Path}
- end.
-
-get_non_default_port(https, 443) ->
- default;
-get_non_default_port(http, 80) ->
- default;
-get_non_default_port(http, 5984) ->
- default;
-get_non_default_port(_Schema, Port) ->
- Port.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-replication_id_convert_test_() ->
- [
- ?_assertEqual(Expected, convert(Id))
- || {Expected, Id} <- [
- {{"abc", ""}, "abc"},
- {{"abc", ""}, <<"abc">>},
- {{"abc", "+x+y"}, <<"abc+x+y">>},
- {{"abc", "+x+y"}, {"abc", "+x+y"}},
- {{"abc", "+x+y"}, <<"abc x y">>}
- ]
- ].
-
-http_v4_endpoint_test_() ->
- [
- ?_assertMatch(
- {remote, User, Host, Port, Path, HeadersNoAuth, undefined},
- begin
- HttpDb = #httpdb{url = Url, headers = Headers, auth_props = Auth},
- HttpDb1 = couch_replicator_utils:normalize_basic_auth(HttpDb),
- get_v4_endpoint(HttpDb1)
- end
- )
- || {{User, Host, Port, Path, HeadersNoAuth}, {Url, Headers, Auth}} <- [
- {
- {undefined, "host", default, "/", []},
- {"http://host", [], []}
- },
- {
- {undefined, "host", default, "/", []},
- {"https://host", [], []}
- },
- {
- {undefined, "host", default, "/", []},
- {"http://host:5984", [], []}
- },
- {
- {undefined, "host", 1, "/", []},
- {"http://host:1", [], []}
- },
- {
- {undefined, "host", 2, "/", []},
- {"https://host:2", [], []}
- },
- {
- {undefined, "host", default, "/", [{"h", "v"}]},
- {"http://host", [{"h", "v"}], []}
- },
- {
- {undefined, "host", default, "/a/b", []},
- {"http://host/a/b", [], []}
- },
- {
- {"user", "host", default, "/", []},
- {"http://user:pass@host", [], []}
- },
- {
- {"user", "host", 3, "/", []},
- {"http://user:pass@host:3", [], []}
- },
- {
- {"user", "host", default, "/", []},
- {"http://user:newpass@host", [], []}
- },
- {
- {"user", "host", default, "/", []},
- {"http://host", [basic_auth("user", "pass")], []}
- },
- {
- {"user", "host", default, "/", []},
- {"http://host", [basic_auth("user", "newpass")], []}
- },
- {
- {"user3", "host", default, "/", []},
- {"http://user1:pass1@host", [basic_auth("user2", "pass2")],
- auth_props("user3", "pass3")}
- },
- {
- {"user2", "host", default, "/", [{"h", "v"}]},
- {"http://host", [{"h", "v"}, basic_auth("user", "pass")],
- auth_props("user2", "pass2")}
- },
- {
- {"user", "host", default, "/", [{"h", "v"}]},
- {"http://host", [{"h", "v"}], auth_props("user", "pass")}
- },
- {
- {undefined, "random_junk", undefined, undefined},
- {"random_junk", [], []}
- },
- {
- {undefined, "host", default, "/", []},
- {"http://host", [{"Authorization", "Basic bad"}], []}
- }
- ]
- ].
-
-basic_auth(User, Pass) ->
- B64Auth = base64:encode_to_string(User ++ ":" ++ Pass),
- {"Authorization", "Basic " ++ B64Auth}.
-
-auth_props(User, Pass) when is_list(User), is_list(Pass) ->
- [
- {<<"basic">>,
- {[
- {<<"username">>, list_to_binary(User)},
- {<<"password">>, list_to_binary(Pass)}
- ]}}
- ].
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_job_sup.erl b/src/couch_replicator/src/couch_replicator_job_sup.erl
deleted file mode 100644
index e3d15c041..000000000
--- a/src/couch_replicator/src/couch_replicator_job_sup.erl
+++ /dev/null
@@ -1,34 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_job_sup).
-
--behaviour(supervisor).
-
--export([
- init/1,
- start_link/0
-]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%%=============================================================================
-%% supervisor callbacks
-%%=============================================================================
-
-init([]) ->
- {ok, {{one_for_one, 3, 10}, []}}.
-
-%%=============================================================================
-%% internal functions
-%%=============================================================================
diff --git a/src/couch_replicator/src/couch_replicator_js_functions.hrl b/src/couch_replicator/src/couch_replicator_js_functions.hrl
deleted file mode 100644
index d41043309..000000000
--- a/src/couch_replicator/src/couch_replicator_js_functions.hrl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(REP_DB_DOC_VALIDATE_FUN, <<"
- function(newDoc, oldDoc, userCtx) {
- function reportError(error_msg) {
- log('Error writing document `' + newDoc._id +
- '\\' to the replicator database: ' + error_msg);
- throw({forbidden: error_msg});
- }
-
- function validateEndpoint(endpoint, fieldName) {
- if ((typeof endpoint !== 'string') &&
- ((typeof endpoint !== 'object') || (endpoint === null))) {
-
- reportError('The `' + fieldName + '\\' property must exist' +
- ' and be either a string or an object.');
- }
-
- if (typeof endpoint === 'object') {
- if ((typeof endpoint.url !== 'string') || !endpoint.url) {
- reportError('The url property must exist in the `' +
- fieldName + '\\' field and must be a non-empty string.');
- }
-
- if ((typeof endpoint.auth !== 'undefined') &&
- ((typeof endpoint.auth !== 'object') ||
- endpoint.auth === null)) {
-
- reportError('`' + fieldName +
- '.auth\\' must be a non-null object.');
- }
-
- if ((typeof endpoint.headers !== 'undefined') &&
- ((typeof endpoint.headers !== 'object') ||
- endpoint.headers === null)) {
-
- reportError('`' + fieldName +
- '.headers\\' must be a non-null object.');
- }
- }
- }
-
- var isReplicator = (userCtx.roles.indexOf('_replicator') >= 0);
- var isAdmin = (userCtx.roles.indexOf('_admin') >= 0);
-
- if (isReplicator) {
- // Always let replicator update the replication document
- return;
- }
-
- if (newDoc._replication_state === 'failed') {
- // Skip validation in case when we update the document with the
- // failed state. In this case it might be malformed. However,
- // replicator will not pay attention to failed documents so this
- // is safe.
- return;
- }
-
- if (!newDoc._deleted) {
- validateEndpoint(newDoc.source, 'source');
- validateEndpoint(newDoc.target, 'target');
-
- if ((typeof newDoc.create_target !== 'undefined') &&
- (typeof newDoc.create_target !== 'boolean')) {
-
- reportError('The `create_target\\' field must be a boolean.');
- }
-
- if ((typeof newDoc.continuous !== 'undefined') &&
- (typeof newDoc.continuous !== 'boolean')) {
-
- reportError('The `continuous\\' field must be a boolean.');
- }
-
- if ((typeof newDoc.doc_ids !== 'undefined') &&
- !isArray(newDoc.doc_ids)) {
-
- reportError('The `doc_ids\\' field must be an array of strings.');
- }
-
- if ((typeof newDoc.selector !== 'undefined') &&
- (typeof newDoc.selector !== 'object')) {
-
- reportError('The `selector\\' field must be an object.');
- }
-
- if ((typeof newDoc.filter !== 'undefined') &&
- ((typeof newDoc.filter !== 'string') || !newDoc.filter)) {
-
- reportError('The `filter\\' field must be a non-empty string.');
- }
-
- if ((typeof newDoc.doc_ids !== 'undefined') &&
- (typeof newDoc.selector !== 'undefined')) {
-
- reportError('`doc_ids\\' field is incompatible with `selector\\'.');
- }
-
- if ( ((typeof newDoc.doc_ids !== 'undefined') ||
- (typeof newDoc.selector !== 'undefined')) &&
- (typeof newDoc.filter !== 'undefined') ) {
-
- reportError('`filter\\' field is incompatible with `selector\\' and `doc_ids\\'.');
- }
-
- if ((typeof newDoc.query_params !== 'undefined') &&
- ((typeof newDoc.query_params !== 'object') ||
- newDoc.query_params === null)) {
-
- reportError('The `query_params\\' field must be an object.');
- }
-
- if (newDoc.user_ctx) {
- var user_ctx = newDoc.user_ctx;
-
- if ((typeof user_ctx !== 'object') || (user_ctx === null)) {
- reportError('The `user_ctx\\' property must be a ' +
- 'non-null object.');
- }
-
- if (!(user_ctx.name === null ||
- (typeof user_ctx.name === 'undefined') ||
- ((typeof user_ctx.name === 'string') &&
- user_ctx.name.length > 0))) {
-
- reportError('The `user_ctx.name\\' property must be a ' +
- 'non-empty string or null.');
- }
-
- if (!isAdmin && (user_ctx.name !== userCtx.name)) {
- reportError('The given `user_ctx.name\\' is not valid');
- }
-
- if (user_ctx.roles && !isArray(user_ctx.roles)) {
- reportError('The `user_ctx.roles\\' property must be ' +
- 'an array of strings.');
- }
-
- if (!isAdmin && user_ctx.roles) {
- for (var i = 0; i < user_ctx.roles.length; i++) {
- var role = user_ctx.roles[i];
-
- if (typeof role !== 'string' || role.length === 0) {
- reportError('Roles must be non-empty strings.');
- }
- if (userCtx.roles.indexOf(role) === -1) {
- reportError('Invalid role (`' + role +
- '\\') in the `user_ctx\\'');
- }
- }
- }
- } else {
- if (!isAdmin) {
- reportError('The `user_ctx\\' property is missing (it is ' +
- 'optional for admins only).');
- }
- }
- } else {
- if (!isAdmin) {
- if (!oldDoc.user_ctx || (oldDoc.user_ctx.name !== userCtx.name)) {
- reportError('Replication documents can only be deleted by ' +
- 'admins or by the users who created them.');
- }
- }
- }
- }
-">>).
diff --git a/src/couch_replicator/src/couch_replicator_notifier.erl b/src/couch_replicator/src/couch_replicator_notifier.erl
deleted file mode 100644
index 451ec5de7..000000000
--- a/src/couch_replicator/src/couch_replicator_notifier.erl
+++ /dev/null
@@ -1,60 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_notifier).
-
--behaviour(gen_event).
--vsn(1).
-
-% public API
--export([start_link/1, stop/1, notify/1]).
-
-% gen_event callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_event/2, handle_call/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link(FunAcc) ->
- couch_event_sup:start_link(
- couch_replication,
- {couch_replicator_notifier, make_ref()},
- FunAcc
- ).
-
-notify(Event) ->
- gen_event:notify(couch_replication, Event).
-
-stop(Pid) ->
- couch_event_sup:stop(Pid).
-
-init(FunAcc) ->
- {ok, FunAcc}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_event(Event, Fun) when is_function(Fun, 1) ->
- Fun(Event),
- {ok, Fun};
-handle_event(Event, {Fun, Acc}) when is_function(Fun, 2) ->
- Acc2 = Fun(Event, Acc),
- {ok, {Fun, Acc2}}.
-
-handle_call(_Msg, State) ->
- {ok, ok, State}.
-
-handle_info(_Msg, State) ->
- {ok, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch_replicator/src/couch_replicator_rate_limiter.erl b/src/couch_replicator/src/couch_replicator_rate_limiter.erl
deleted file mode 100644
index 5d2c184b8..000000000
--- a/src/couch_replicator/src/couch_replicator_rate_limiter.erl
+++ /dev/null
@@ -1,239 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% This module implements rate limiting based on a variation of the additive
-% increase / multiplicative decrease feedback control algorithm.
-%
-% https://en.wikipedia.org/wiki/Additive_increase/multiplicative_decrease
-%
-% This is an adaptive algorithm which converges on the available channel
-% capacity, where each participant (client) doesn't know the capacity a
-% priori, and participants don't communicate or know about each other (so
-% they don't coordinate to divide the capacity among themselves).
-%
-% The algorithm referenced above estimates a rate, whereas the implemented
-% algorithm uses an interval (in milliseconds). It preserves the original
-% semantics, that is, the failure part is multiplicative and the success part
-% is additive. The relationship between rate and interval is: rate = 1000 /
-% interval.
-%
-% There are two main API functions:
-%
-% success(Key) -> IntervalInMilliseconds
-% failure(Key) -> IntervalInMilliseconds
-%
-% Key is any term, typically something like {Method, Url}. The result of each
-% call is the current interval value. The caller might then decide to sleep
-% for that amount of time before or after each request.
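-%
-% A minimal usage sketch (editorial illustration; do_request/2 and ReqFun are
-% hypothetical, not part of this module): a caller could sleep for the current
-% interval before each request and report the outcome back to the limiter:
-%
-%   do_request(Key, ReqFun) ->
-%       timer:sleep(couch_replicator_rate_limiter:interval(Key)),
-%       case ReqFun() of
-%           {ok, Result} ->
-%               couch_replicator_rate_limiter:success(Key),
-%               {ok, Result};
-%           {error, Reason} ->
-%               couch_replicator_rate_limiter:failure(Key),
-%               {error, Reason}
-%       end.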
-
--module(couch_replicator_rate_limiter).
-
--behaviour(gen_server).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
-]).
-
--export([
- interval/1,
- max_interval/0,
- failure/1,
- success/1
-]).
-
-% Types
--type key() :: any().
--type interval() :: non_neg_integer().
--type msec() :: non_neg_integer().
-
-% Definitions
-
-% Main parameters of the algorithm. The factor is the multiplicative part and
-% the base interval is the additive part.
--define(BASE_INTERVAL, 20).
--define(BACKOFF_FACTOR, 1.2).
-
-% If estimated period exceeds a limit, it is clipped to this value. This
-% defines a practical limit of this algorithm. This is driven by real world
-% concerns such as having a connection which sleeps for too long and ends up
-% with socket timeout errors, or replication jobs which occupy a scheduler
-% slot without making any progress.
--define(MAX_INTERVAL, 25000).
-
-% Specify when (threshold) and how much (factor) to decay the estimated period.
-% If there is a long pause between consecutive updates, the estimated period
-% becomes less accurate as more time passes. In such a case, choose to
-% optimistically decay the estimated value, that is, assume a certain rate of
-% successful requests happened. (For reference, the TCP congestion algorithm
-% also handles a variation of this in RFC 5681 under the "Restarting Idle
-% Connections" section).
--define(TIME_DECAY_FACTOR, 2).
--define(TIME_DECAY_THRESHOLD, 1000).
-
-% Limit the rate of updates applied. This controls the rate of change of the
-% estimated value. In colloquial terms it defines how "twitchy" the algorithm
-% is. Or, another way to look at it, it acts as a poor version of a low pass
-% filter. (Some alternative TCP congestion control algorithms, like Westwood+,
-% use something similar to solve the ACK compression problem).
--define(SENSITIVITY_TIME_WINDOW, 80).
-
--record(state, {timer}).
--record(rec, {id, backoff, ts}).
-
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
--spec interval(key()) -> interval().
-interval(Key) ->
- {Interval, _Timestamp} = interval_and_timestamp(Key),
- Interval.
-
--spec max_interval() -> interval().
-max_interval() ->
- ?MAX_INTERVAL.
-
--spec failure(key()) -> interval().
-failure(Key) ->
- {Interval, Timestamp} = interval_and_timestamp(Key),
- update_failure(Key, Interval, Timestamp, now_msec()).
-
--spec success(key()) -> interval().
-success(Key) ->
- {Interval, Timestamp} = interval_and_timestamp(Key),
- update_success(Key, Interval, Timestamp, now_msec()).
-
-% gen_server callbacks
-
-init([]) ->
- couch_replicator_rate_limiter_tables:create(#rec.id),
- {ok, #state{timer = new_timer()}}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(_Msg, _From, State) ->
- {reply, invalid, State}.
-
-handle_cast(_, State) ->
- {noreply, State}.
-
-handle_info(cleanup, #state{timer = Timer}) ->
- erlang:cancel_timer(Timer),
- TIds = couch_replicator_rate_limiter_tables:tids(),
- [cleanup_table(TId, now_msec() - ?MAX_INTERVAL) || TId <- TIds],
- {noreply, #state{timer = new_timer()}}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-% Private functions
-
--spec update_success(any(), interval(), msec(), msec()) -> interval().
-update_success(_Key, _Interval, _Timestamp = 0, _Now) ->
- % No ets entry. Keep it that way and don't insert a new one.
- 0;
-update_success(_Key, Interval, Timestamp, Now) when
- Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW
-->
- % Ignore too frequent updates.
- Interval;
-update_success(Key, Interval, Timestamp, Now) ->
- DecayedInterval = time_decay(Now - Timestamp, Interval),
- AdditiveFactor = additive_factor(DecayedInterval),
- NewInterval = DecayedInterval - AdditiveFactor,
- if
- NewInterval =< 0 ->
- Table = couch_replicator_rate_limiter_tables:term_to_table(Key),
- ets:delete(Table, Key),
- 0;
- NewInterval =< ?BASE_INTERVAL ->
- insert(Key, ?BASE_INTERVAL, Now);
- NewInterval > ?BASE_INTERVAL ->
- insert(Key, NewInterval, Now)
- end.
-
--spec update_failure(any(), interval(), msec(), msec()) -> interval().
-update_failure(_Key, Interval, Timestamp, Now) when
- Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW
-->
- % Ignore too frequent updates.
- Interval;
-update_failure(Key, Interval, _Timestamp, Now) ->
- Interval1 = erlang:max(Interval, ?BASE_INTERVAL),
- Interval2 = round(Interval1 * ?BACKOFF_FACTOR),
- Interval3 = erlang:min(Interval2, ?MAX_INTERVAL),
- insert(Key, Interval3, Now).
-
--spec insert(any(), interval(), msec()) -> interval().
-insert(Key, Interval, Timestamp) ->
- Entry = #rec{id = Key, backoff = Interval, ts = Timestamp},
- Table = couch_replicator_rate_limiter_tables:term_to_table(Key),
- ets:insert(Table, Entry),
- Interval.
-
--spec interval_and_timestamp(key()) -> {interval(), msec()}.
-interval_and_timestamp(Key) ->
- Table = couch_replicator_rate_limiter_tables:term_to_table(Key),
- case ets:lookup(Table, Key) of
- [] ->
- {0, 0};
- [#rec{backoff = Interval, ts = Timestamp}] ->
- {Interval, Timestamp}
- end.
-
--spec time_decay(msec(), interval()) -> interval().
-time_decay(Dt, Interval) when Dt > ?TIME_DECAY_THRESHOLD ->
- DecayedInterval = Interval - ?TIME_DECAY_FACTOR * Dt,
- erlang:max(round(DecayedInterval), 0);
-time_decay(_Dt, Interval) ->
- Interval.
-
-% Calculate the additive factor. Ideally it would be a constant, but in this
-% case it is a step function to help handle larger values as they approach
-% the backoff limit. Larger intervals closer to the limit get a bigger
-% additive step, which adds some pressure away from the limit. This is useful
-% because at the backoff limit the whole replication job is killed, which can
-% be costly in time and in temporary work lost by those jobs.
--spec additive_factor(interval()) -> interval().
-additive_factor(Interval) when Interval > 10000 ->
- ?BASE_INTERVAL * 50;
-additive_factor(Interval) when Interval > 1000 ->
- ?BASE_INTERVAL * 5;
-additive_factor(Interval) when Interval > 100 ->
- ?BASE_INTERVAL * 2;
-additive_factor(_Interval) ->
- ?BASE_INTERVAL.
-
--spec new_timer() -> reference().
-new_timer() ->
- erlang:send_after(?MAX_INTERVAL * 2, self(), cleanup).
-
--spec now_msec() -> msec().
-now_msec() ->
- {Mega, Sec, Micro} = os:timestamp(),
- ((Mega * 1000000) + Sec) * 1000 + Micro div 1000.
-
--spec cleanup_table(atom(), msec()) -> non_neg_integer().
-cleanup_table(Tid, LimitMSec) ->
- Head = #rec{ts = '$1', _ = '_'},
- Guard = {'<', '$1', LimitMSec},
- ets:select_delete(Tid, [{Head, [Guard], [true]}]).
diff --git a/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl b/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
deleted file mode 100644
index 2e2556888..000000000
--- a/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Maintain the ETS tables used by couch_replicator_rate_limiter to hold its
-% backoff state. To reduce contention, the state is sharded across ?SHARDS_N
-% named public tables, and a key is mapped to one of them by hashing the term
-% (see term_to_table/1). The rate limiter process creates the tables via
-% create/1 and periodically cleans them up using tids/0.
-
--module(couch_replicator_rate_limiter_tables).
-
--export([
- create/1,
- tids/0,
- term_to_table/1
-]).
-
--define(SHARDS_N, 16).
-
--spec create(non_neg_integer()) -> ok.
-create(KeyPos) ->
- Opts = [named_table, public, {keypos, KeyPos}, {read_concurrency, true}],
- [ets:new(list_to_atom(TableName), Opts) || TableName <- table_names()],
- ok.
-
--spec tids() -> [atom()].
-tids() ->
- [list_to_existing_atom(TableName) || TableName <- table_names()].
-
--spec term_to_table(any()) -> atom().
-term_to_table(Term) ->
- PHash = erlang:phash2(Term),
- list_to_existing_atom(table_name(PHash rem ?SHARDS_N)).
-
--spec table_names() -> [string()].
-table_names() ->
- [table_name(N) || N <- lists:seq(0, ?SHARDS_N - 1)].
-
--spec table_name(non_neg_integer()) -> string().
-table_name(Id) when is_integer(Id), Id >= 0 andalso Id < ?SHARDS_N ->
- atom_to_list(?MODULE) ++ "_" ++ integer_to_list(Id).
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
deleted file mode 100644
index f544865af..000000000
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ /dev/null
@@ -1,1690 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_scheduler).
-
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3,
- format_status/2
-]).
-
--export([
- add_job/1,
- remove_job/1,
- reschedule/0,
- rep_state/1,
- find_jobs_by_dbname/1,
- find_jobs_by_doc/2,
- job_summary/2,
- health_threshold/0,
- jobs/0,
- job/1,
- restart_job/1,
- update_job_stats/2
-]).
-
-%% config_listener callbacks
--export([
- handle_config_change/5,
- handle_config_terminate/3
-]).
-
-%% for status updater process to allow hot code loading
--export([
- stats_updater_loop/1
-]).
-
--include("couch_replicator.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% definitions
--define(MAX_BACKOFF_EXPONENT, 10).
--define(BACKOFF_INTERVAL_MICROS, 30 * 1000 * 1000).
--define(DEFAULT_HEALTH_THRESHOLD_SEC, 2 * 60).
--define(RELISTEN_DELAY, 5000).
--define(STATS_UPDATE_WAIT, 5000).
-
--define(DEFAULT_MAX_JOBS, 500).
--define(DEFAULT_MAX_CHURN, 20).
--define(DEFAULT_MAX_HISTORY, 20).
--define(DEFAULT_SCHEDULER_INTERVAL, 60000).
-
--record(state, {
- interval = ?DEFAULT_SCHEDULER_INTERVAL,
- timer,
- max_jobs,
- max_churn,
- max_history,
- stats_pid
-}).
-
--record(stats_acc, {
- pending_n = 0 :: non_neg_integer(),
- running_n = 0 :: non_neg_integer(),
- crashed_n = 0 :: non_neg_integer()
-}).
-
-%% public functions
-
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
--spec add_job(#rep{}) -> ok.
-add_job(#rep{} = Rep) when Rep#rep.id /= undefined ->
- case existing_replication(Rep) of
- false ->
- Job = #job{
- id = Rep#rep.id,
- rep = Rep,
- history = [{added, os:timestamp()}]
- },
- gen_server:call(?MODULE, {add_job, Job}, infinity);
- true ->
- ok
- end.
-
--spec remove_job(job_id()) -> ok.
-remove_job(Id) ->
- gen_server:call(?MODULE, {remove_job, Id}, infinity).
-
--spec reschedule() -> ok.
-% Trigger a manual reschedule. Used for testing and/or ops.
-reschedule() ->
- gen_server:call(?MODULE, reschedule, infinity).
-
--spec rep_state(rep_id()) -> #rep{} | nil.
-rep_state(RepId) ->
- case (catch ets:lookup_element(?MODULE, RepId, #job.rep)) of
- {'EXIT', {badarg, _}} ->
- nil;
- Rep ->
- Rep
- end.
-
--spec job_summary(job_id(), non_neg_integer()) -> [_] | nil.
-job_summary(JobId, HealthThreshold) ->
- case job_by_id(JobId) of
- {ok, #job{pid = Pid, history = History, rep = Rep}} ->
- ErrorCount = consecutive_crashes(History, HealthThreshold),
- {State, Info} =
- case {Pid, ErrorCount} of
- {undefined, 0} ->
- case History of
- [{{crashed, Error}, _When} | _] ->
- {crashing, crash_reason_json(Error)};
- [_ | _] ->
- {pending, Rep#rep.stats}
- end;
- {undefined, ErrorCount} when ErrorCount > 0 ->
- [{{crashed, Error}, _When} | _] = History,
- {crashing, crash_reason_json(Error)};
- {Pid, ErrorCount} when is_pid(Pid) ->
- {running, Rep#rep.stats}
- end,
- [
- {source, iolist_to_binary(ejson_url(Rep#rep.source))},
- {target, iolist_to_binary(ejson_url(Rep#rep.target))},
- {state, State},
- {info, couch_replicator_utils:ejson_state_info(Info)},
- {error_count, ErrorCount},
- {last_updated, last_updated(History)},
- {start_time, couch_replicator_utils:iso8601(Rep#rep.start_time)},
- {source_proxy, job_proxy_url(Rep#rep.source)},
- {target_proxy, job_proxy_url(Rep#rep.target)}
- ];
- {error, not_found} ->
- % Job might have just completed
- nil
- end.
-
-job_proxy_url(#httpdb{proxy_url = ProxyUrl}) when is_list(ProxyUrl) ->
- list_to_binary(couch_util:url_strip_password(ProxyUrl));
-job_proxy_url(_Endpoint) ->
- null.
-
-% Health threshold is the minimum amount of time an unhealthy job should run
-% without crashing before it is considered healthy again. HealthThreshold
-% should not be 0, as jobs could start and immediately crash, and it shouldn't
-% be infinity, since then consecutive crashes would accumulate forever even if
-% the job is back to normal.
--spec health_threshold() -> non_neg_integer().
-health_threshold() ->
- config:get_integer(
- "replicator",
- "health_threshold",
- ?DEFAULT_HEALTH_THRESHOLD_SEC
- ).
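% For example, an operator could lower the threshold to 60 seconds at runtime
% (an illustrative value; any positive number of seconds works):
%
%   config:set("replicator", "health_threshold", "60").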
-
--spec find_jobs_by_dbname(binary()) -> list(#rep{}).
-find_jobs_by_dbname(DbName) ->
- Rep = #rep{db_name = DbName, _ = '_'},
- MatchSpec = #job{id = '$1', rep = Rep, _ = '_'},
- [RepId || [RepId] <- ets:match(?MODULE, MatchSpec)].
-
--spec find_jobs_by_doc(binary(), binary()) -> list(#rep{}).
-find_jobs_by_doc(DbName, DocId) ->
- Rep = #rep{db_name = DbName, doc_id = DocId, _ = '_'},
- MatchSpec = #job{id = '$1', rep = Rep, _ = '_'},
- [RepId || [RepId] <- ets:match(?MODULE, MatchSpec)].
-
--spec restart_job(binary() | list() | rep_id()) ->
- {ok, {[_]}} | {error, not_found}.
-restart_job(JobId) ->
- case rep_state(JobId) of
- nil ->
- {error, not_found};
- #rep{} = Rep ->
- ok = remove_job(JobId),
- ok = add_job(Rep),
- job(JobId)
- end.
-
--spec update_job_stats(job_id(), term()) -> ok.
-update_job_stats(JobId, Stats) ->
- gen_server:cast(?MODULE, {update_job_stats, JobId, Stats}).
-
-%% gen_server functions
-
-init(_) ->
- config:enable_feature('scheduler'),
- EtsOpts = [
- named_table,
- {keypos, #job.id},
- {read_concurrency, true},
- {write_concurrency, true}
- ],
- ?MODULE = ets:new(?MODULE, EtsOpts),
- ok = couch_replicator_share:init(),
- ok = config:listen_for_changes(?MODULE, nil),
- Interval = config:get_integer(
- "replicator",
- "interval",
- ?DEFAULT_SCHEDULER_INTERVAL
- ),
- MaxJobs = config:get_integer("replicator", "max_jobs", ?DEFAULT_MAX_JOBS),
- MaxChurn = config:get_integer(
- "replicator",
- "max_churn",
- ?DEFAULT_MAX_CHURN
- ),
- MaxHistory = config:get_integer(
- "replicator",
- "max_history",
- ?DEFAULT_MAX_HISTORY
- ),
- Timer = erlang:send_after(Interval, self(), reschedule),
- State = #state{
- interval = Interval,
- max_jobs = MaxJobs,
- max_churn = MaxChurn,
- max_history = MaxHistory,
- timer = Timer,
- stats_pid = start_stats_updater()
- },
- {ok, State}.
-
-handle_call({add_job, Job}, _From, State) ->
- ok = maybe_remove_job_int(Job#job.id, State),
- true = add_job_int(Job),
- ok = maybe_start_newly_added_job(Job, State),
- couch_stats:increment_counter([couch_replicator, jobs, adds]),
- TotalJobs = ets:info(?MODULE, size),
- couch_stats:update_gauge([couch_replicator, jobs, total], TotalJobs),
- {reply, ok, State};
-handle_call({remove_job, Id}, _From, State) ->
- ok = maybe_remove_job_int(Id, State),
- {reply, ok, State};
-handle_call(reschedule, _From, State) ->
- ok = reschedule(State),
- {reply, ok, State};
-handle_call(_, _From, State) ->
- {noreply, State}.
-
-handle_cast({set_max_jobs, MaxJobs}, State) when
- is_integer(MaxJobs),
- MaxJobs >= 0
-->
- couch_log:notice("~p: max_jobs set to ~B", [?MODULE, MaxJobs]),
- {noreply, State#state{max_jobs = MaxJobs}};
-handle_cast({set_max_churn, MaxChurn}, State) when
- is_integer(MaxChurn),
- MaxChurn > 0
-->
- couch_log:notice("~p: max_churn set to ~B", [?MODULE, MaxChurn]),
- {noreply, State#state{max_churn = MaxChurn}};
-handle_cast({set_max_history, MaxHistory}, State) when
- is_integer(MaxHistory),
- MaxHistory > 0
-->
- couch_log:notice("~p: max_history set to ~B", [?MODULE, MaxHistory]),
- {noreply, State#state{max_history = MaxHistory}};
-handle_cast({set_interval, Interval}, State) when
- is_integer(Interval),
- Interval > 0
-->
- couch_log:notice("~p: interval set to ~B", [?MODULE, Interval]),
- {noreply, State#state{interval = Interval}};
-handle_cast({update_shares, Key, Shares}, State) when
- is_binary(Key),
- is_integer(Shares),
- Shares >= 0
-->
- couch_log:notice("~p: shares for ~s set to ~B", [?MODULE, Key, Shares]),
- couch_replicator_share:update_shares(Key, Shares),
- {noreply, State};
-handle_cast({reset_shares, Key}, State) when is_binary(Key) ->
- couch_log:notice("~p: shares for ~s reset to default", [?MODULE, Key]),
- couch_replicator_share:reset_shares(Key),
- {noreply, State};
-handle_cast({update_job_stats, JobId, Stats}, State) ->
- case rep_state(JobId) of
- nil ->
- ok;
- #rep{} = Rep ->
- NewRep = Rep#rep{stats = Stats},
- true = ets:update_element(?MODULE, JobId, {#job.rep, NewRep})
- end,
- {noreply, State};
-handle_cast(UnexpectedMsg, State) ->
- couch_log:error("~p: received un-expected cast ~p", [?MODULE, UnexpectedMsg]),
- {noreply, State}.
-
-handle_info(reschedule, State) ->
- ok = reschedule(State),
- erlang:cancel_timer(State#state.timer),
- Timer = erlang:send_after(State#state.interval, self(), reschedule),
- {noreply, State#state{timer = Timer}};
-handle_info({'DOWN', _Ref, process, Pid, normal}, State) ->
- {ok, Job} = job_by_pid(Pid),
- couch_log:notice("~p: Job ~p completed normally", [?MODULE, Job#job.id]),
- Interval = State#state.interval,
- couch_replicator_share:charge(Job, Interval, os:timestamp()),
- remove_job_int(Job),
- update_running_jobs_stats(State#state.stats_pid),
- {noreply, State};
-handle_info({'DOWN', _Ref, process, Pid, Reason0}, State) ->
- {ok, Job} = job_by_pid(Pid),
- Reason =
- case Reason0 of
- {shutdown, ShutdownReason} -> ShutdownReason;
- Other -> Other
- end,
- Interval = State#state.interval,
- couch_replicator_share:charge(Job, Interval, os:timestamp()),
- ok = handle_crashed_job(Job, Reason, State),
- {noreply, State};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(_, State) ->
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- couch_replicator_share:clear(),
- ok.
-
-format_status(_Opt, [_PDict, State]) ->
- [
- {max_jobs, State#state.max_jobs},
- {running_jobs, running_job_count()},
- {pending_jobs, pending_job_count()}
- ].
-
-%% config listener functions
-
-handle_config_change("replicator", "max_jobs", V, _, S) ->
- ok = gen_server:cast(?MODULE, {set_max_jobs, list_to_integer(V)}),
- {ok, S};
-handle_config_change("replicator", "max_churn", V, _, S) ->
- ok = gen_server:cast(?MODULE, {set_max_churn, list_to_integer(V)}),
- {ok, S};
-handle_config_change("replicator", "interval", V, _, S) ->
- ok = gen_server:cast(?MODULE, {set_interval, list_to_integer(V)}),
- {ok, S};
-handle_config_change("replicator", "max_history", V, _, S) ->
- ok = gen_server:cast(?MODULE, {set_max_history, list_to_integer(V)}),
- {ok, S};
-handle_config_change("replicator.shares", Key, deleted, _, S) ->
- ok = gen_server:cast(?MODULE, {reset_shares, list_to_binary(Key)}),
- {ok, S};
-handle_config_change("replicator.shares", Key, V, _, S) ->
- ok = gen_server:cast(?MODULE, {update_shares, list_to_binary(Key), list_to_integer(V)}),
- {ok, S};
-handle_config_change(_, _, _, _, S) ->
- {ok, S}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_, _, _) ->
- Pid = whereis(?MODULE),
- erlang:send_after(?RELISTEN_DELAY, Pid, restart_config_listener).
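% The config listener above makes the scheduler tunables changeable at runtime.
% A sketch with an illustrative value:
%
%   config:set("replicator", "max_jobs", "250").
%
% handle_config_change("replicator", "max_jobs", "250", ...) then fires, which
% casts {set_max_jobs, 250} to this server; the new limit is applied on the
% following reschedule cycle.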
-
-%% Private functions
-
-% Handle crashed jobs. Handling differs between transient and permanent jobs.
-% Transient jobs are those posted to the _replicate endpoint; they don't have a
-% db associated with them. When those jobs crash, they are not restarted. That
-% is consistent with the behavior when the node they run on crashes: they do
-% not migrate to other nodes. Permanent jobs are those created from replicator
-% documents. Those jobs, once they pass basic validation and end up in the
-% scheduler, will be retried indefinitely (with appropriate exponential
-% backoff).
--spec handle_crashed_job(#job{}, any(), #state{}) -> ok.
-handle_crashed_job(#job{rep = #rep{db_name = null}} = Job, Reason, State) ->
- Msg = "~p : Transient job ~p failed, removing. Error: ~p",
- ErrorBinary = couch_replicator_utils:rep_error_to_binary(Reason),
- couch_log:error(Msg, [?MODULE, Job#job.id, ErrorBinary]),
- remove_job_int(Job),
- update_running_jobs_stats(State#state.stats_pid),
- ok;
-handle_crashed_job(Job, Reason, State) ->
- ok = update_state_crashed(Job, Reason, State),
- case couch_replicator_doc_processor:update_docs() of
- true ->
- couch_replicator_docs:update_error(Job#job.rep, Reason);
- false ->
- ok
- end,
- case ets:info(?MODULE, size) < State#state.max_jobs of
- true ->
-            % Starting pending jobs is an O(TotalJobsCount) operation. Only do
-            % it if there is a relatively small number of jobs. Otherwise the
-            % scheduler could be blocked if there is a cascade of many jobs
-            % failing in a row.
- start_pending_jobs(State),
- update_running_jobs_stats(State#state.stats_pid),
- ok;
- false ->
- ok
- end.
-
-% Attempt to start a newly added job. First quickly check if total jobs
-% already exceed max jobs, then do a more expensive check which runs a
-% select (an O(n) operation) to check pending jobs specifically.
--spec maybe_start_newly_added_job(#job{}, #state{}) -> ok.
-maybe_start_newly_added_job(Job, State) ->
- MaxJobs = State#state.max_jobs,
- TotalJobs = ets:info(?MODULE, size),
- case TotalJobs < MaxJobs andalso running_job_count() < MaxJobs of
- true ->
- start_job_int(Job, State),
- update_running_jobs_stats(State#state.stats_pid),
- ok;
- false ->
- ok
- end.
-
-% Return up to a given number of the oldest jobs that have not recently
-% crashed. To be memory efficient, use ets:foldl to accumulate jobs.
--spec pending_jobs(non_neg_integer()) -> [#job{}].
-pending_jobs(0) ->
-    % Handle this case since the user could set max_churn to 0. If this were
-    % passed to the other function clause it would crash, as gb_sets:largest
-    % assumes the set is not empty.
- [];
-pending_jobs(Count) when is_integer(Count), Count > 0 ->
- % [{{Priority, LastStart}, Job},...]
- Set0 = gb_sets:new(),
- Now = os:timestamp(),
- Acc0 = {Set0, Now, Count, health_threshold()},
- {Set1, _, _, _} = ets:foldl(fun pending_fold/2, Acc0, ?MODULE),
- [Job || {_PriorityKey, Job} <- gb_sets:to_list(Set1)].
-
-pending_fold(#job{pid = Pid}, Acc) when is_pid(Pid) ->
- Acc;
-pending_fold(Job, {Set, Now, Count, HealthThreshold}) ->
- Healthy = not_recently_crashed(Job, Now, HealthThreshold),
- Set1 =
- case {Healthy, gb_sets:size(Set) >= Count} of
- {true, true} ->
- % Job is healthy but already reached accumulated limit, so might
- % have to replace one of the accumulated jobs
- pending_maybe_replace(Job, Set);
- {true, false} ->
- % Job is healthy and we haven't reached the limit, so add job
- % to accumulator
- gb_sets:add_element({start_priority_key(Job), Job}, Set);
- {false, _} ->
- % This job is not healthy (has crashed too recently), so skip it.
- Set
- end,
- {Set1, Now, Count, HealthThreshold}.
-
-% Replace Job in the accumulator if it has a higher priority (lower priority
-% value) than the lowest-priority job there. Job priority is indexed by
-% {FairSharePriority, LastStarted} tuples. If the FairSharePriority is the
-% same, the last-started timestamp is used to pick. The goal is to keep up to
-% Count of the oldest jobs during the iteration. For example, if the
-% priorities accumulated so far are [5, 7, 11] and the priority of the current
-% job is 6, then 6 < 11, so 11 (the lower-priority job) is dropped and 6 is
-% inserted, resulting in [5, 6, 7]. In the end the result might look like
-% [1, 2, 5], for example.
-%
-pending_maybe_replace(Job, Set) ->
- Key = start_priority_key(Job),
- {LowestPKey, LowestPJob} = gb_sets:largest(Set),
- case Key < LowestPKey of
- true ->
- Set1 = gb_sets:delete({LowestPKey, LowestPJob}, Set),
- gb_sets:add_element({Key, Job}, Set1);
- false ->
- Set
- end.
-
-% Starting priority key is used to order pending jobs such that the ones with a
-% lower priority value and start time would sort first, so they would be the
-% first to run.
-%
-start_priority_key(#job{} = Job) ->
- {couch_replicator_share:priority(Job#job.id), last_started(Job)}.
-
-start_jobs(Count, State) ->
- [start_job_int(Job, State) || Job <- pending_jobs(Count)],
- ok.
-
--spec stop_jobs(non_neg_integer(), boolean(), #state{}) -> non_neg_integer().
-stop_jobs(Count, _, _) when is_integer(Count), Count =< 0 ->
- 0;
-stop_jobs(Count, IsContinuous, State) when is_integer(Count) ->
- Running0 = running_jobs(),
- ContinuousPred = fun(Job) -> is_continuous(Job) =:= IsContinuous end,
- Running1 = lists:filter(ContinuousPred, Running0),
- Running2 = [{stop_priority_key(Job), Job} || Job <- Running1],
- Running3 = lists:sublist(lists:sort(Running2), Count),
- length([stop_job_int(Job, State) || {_SortKey, Job} <- Running3]).
-
-% Lower-priority jobs have higher priority values, so we negate them; that
-% way, when sorted, they come up first. If priorities are equal, jobs are
-% sorted by the lowest start times, as jobs with the lowest start time have
-% been running the longest.
-%
-stop_priority_key(#job{} = Job) ->
- {-couch_replicator_share:priority(Job#job.id), last_started(Job)}.
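% A worked example of the negation (priorities and timestamps are made up):
% two running jobs with {Priority, LastStarted} of {1, {0, 5, 0}} and
% {3, {0, 2, 0}} get stop keys {-1, {0, 5, 0}} and {-3, {0, 2, 0}}, and
%
%   lists:sort([{-1, {0, 5, 0}}, {-3, {0, 2, 0}}]) =:=
%       [{-3, {0, 2, 0}}, {-1, {0, 5, 0}}]
%
% so the job with the higher priority value (lower priority) is stopped first,
% and among equal priorities the oldest-started, longest-running job goes
% first.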
-
-not_recently_crashed(#job{history = History}, Now, HealthThreshold) ->
- case History of
- [{added, _When}] ->
- true;
- [{stopped, _When} | _] ->
- true;
- _ ->
- LatestCrashT = latest_crash_timestamp(History),
- CrashCount = consecutive_crashes(History, HealthThreshold),
- timer:now_diff(Now, LatestCrashT) >= backoff_micros(CrashCount)
- end.
-
-% Count consecutive crashes. A crash happens when there is a `crashed` event
-% within a short period of time (configurable) after any other event. It could
-% be `crashed, started` for jobs crashing quickly after starting, `crashed,
-% crashed` or `crashed, stopped` if the job repeatedly failed to start and was
-% being stopped. Or it could be `crashed, added` if it crashed immediately
-% after being added during start.
-%
-% A streak of "consecutive crashes" ends when the job is seen starting and
-% then running successfully, without crashing, for a period of time. That
-% period of time is the HealthThreshold.
-%
-
--spec consecutive_crashes(history(), non_neg_integer()) -> non_neg_integer().
-consecutive_crashes(History, HealthThreshold) when is_list(History) ->
- consecutive_crashes(History, HealthThreshold, 0).
-
--spec consecutive_crashes(history(), non_neg_integer(), non_neg_integer()) ->
- non_neg_integer().
-consecutive_crashes([], _HealthThreshold, Count) ->
- Count;
-consecutive_crashes(
- [{{crashed, _}, CrashT}, {_, PrevT} = PrevEvent | Rest],
- HealthThreshold,
- Count
-) ->
- case timer:now_diff(CrashT, PrevT) > HealthThreshold * 1000000 of
- true ->
- Count;
- false ->
- consecutive_crashes([PrevEvent | Rest], HealthThreshold, Count + 1)
- end;
-consecutive_crashes(
- [{stopped, _}, {started, _} | _],
- _HealthThreshold,
- Count
-) ->
- Count;
-consecutive_crashes([_ | Rest], HealthThreshold, Count) ->
- consecutive_crashes(Rest, HealthThreshold, Count).
-
--spec latest_crash_timestamp(history()) -> erlang:timestamp().
-latest_crash_timestamp([]) ->
- % Used to avoid special-casing "no crash" when doing now_diff
- {0, 0, 0};
-latest_crash_timestamp([{{crashed, _Reason}, When} | _]) ->
- When;
-latest_crash_timestamp([_Event | Rest]) ->
- latest_crash_timestamp(Rest).
-
--spec backoff_micros(non_neg_integer()) -> non_neg_integer().
-backoff_micros(CrashCount) ->
-    % When calculating the backoff interval, treat the consecutive crash count
-    % (minus one, capped at ?MAX_BACKOFF_EXPONENT) as the exponent in
-    % Base * 2 ^ Exponent, doubling with every consecutive failure, starting
-    % with the base value of ?BACKOFF_INTERVAL_MICROS.
- BackoffExp = erlang:min(CrashCount - 1, ?MAX_BACKOFF_EXPONENT),
- (1 bsl BackoffExp) * ?BACKOFF_INTERVAL_MICROS.
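% Worked values for the defaults above (?BACKOFF_INTERVAL_MICROS = 30 seconds,
% ?MAX_BACKOFF_EXPONENT = 10):
%
%   backoff_micros(1)  -> 30000000     (30 seconds)
%   backoff_micros(2)  -> 60000000     (1 minute)
%   backoff_micros(5)  -> 480000000    (8 minutes)
%   backoff_micros(11) -> 30720000000  (~8.5 hours, the cap for any higher
%                                       crash count)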
-
--spec add_job_int(#job{}) -> boolean().
-add_job_int(#job{} = Job) ->
- couch_replicator_share:job_added(Job),
- ets:insert_new(?MODULE, Job).
-
--spec maybe_remove_job_int(job_id(), #state{}) -> ok.
-maybe_remove_job_int(JobId, State) ->
- case job_by_id(JobId) of
- {ok, Job} ->
- Now = os:timestamp(),
- Interval = State#state.interval,
- couch_replicator_share:charge(Job, Interval, Now),
- ok = stop_job_int(Job, State),
- true = remove_job_int(Job),
- couch_stats:increment_counter([couch_replicator, jobs, removes]),
- TotalJobs = ets:info(?MODULE, size),
- couch_stats:update_gauge(
- [couch_replicator, jobs, total],
- TotalJobs
- ),
- update_running_jobs_stats(State#state.stats_pid),
- ok;
- {error, not_found} ->
- ok
- end.
-
-start_job_int(#job{pid = Pid}, _State) when Pid /= undefined ->
- ok;
-start_job_int(#job{} = Job0, State) ->
- Job = maybe_optimize_job_for_rate_limiting(Job0),
- case couch_replicator_scheduler_sup:start_child(Job#job.rep) of
- {ok, Child} ->
- Ref = monitor(process, Child),
- ok = update_state_started(Job, Child, Ref, State),
- couch_log:notice(
- "~p: Job ~p started as ~p",
- [?MODULE, Job#job.id, Child]
- );
- {error, {already_started, OtherPid}} when node(OtherPid) =:= node() ->
- Ref = monitor(process, OtherPid),
- ok = update_state_started(Job, OtherPid, Ref, State),
- couch_log:notice(
- "~p: Job ~p already running as ~p. Most likely"
- " because replicator scheduler was restarted",
- [?MODULE, Job#job.id, OtherPid]
- );
- {error, {already_started, OtherPid}} when node(OtherPid) =/= node() ->
- CrashMsg = "Duplicate replication running on another node",
- couch_log:notice(
- "~p: Job ~p already running as ~p. Most likely"
- " because a duplicate replication is running on another node",
- [?MODULE, Job#job.id, OtherPid]
- ),
- ok = update_state_crashed(Job, CrashMsg, State);
- {error, Reason} ->
- couch_log:notice(
- "~p: Job ~p failed to start for reason ~p",
- [?MODULE, Job, Reason]
- ),
- ok = update_state_crashed(Job, Reason, State)
- end.
-
--spec stop_job_int(#job{}, #state{}) -> ok | {error, term()}.
-stop_job_int(#job{pid = undefined}, _State) ->
- ok;
-stop_job_int(#job{} = Job, State) ->
- ok = couch_replicator_scheduler_sup:terminate_child(Job#job.pid),
- demonitor(Job#job.monitor, [flush]),
- ok = update_state_stopped(Job, State),
- couch_log:notice(
- "~p: Job ~p stopped as ~p",
- [?MODULE, Job#job.id, Job#job.pid]
- ).
-
--spec remove_job_int(#job{}) -> true.
-remove_job_int(#job{} = Job) ->
- couch_replicator_share:job_removed(Job),
- ets:delete(?MODULE, Job#job.id).
-
--spec running_job_count() -> non_neg_integer().
-running_job_count() ->
- ets:info(?MODULE, size) - pending_job_count().
-
--spec running_jobs() -> [#job{}].
-running_jobs() ->
- ets:select(?MODULE, [{#job{pid = '$1', _ = '_'}, [{is_pid, '$1'}], ['$_']}]).
-
--spec pending_job_count() -> non_neg_integer().
-pending_job_count() ->
- ets:select_count(?MODULE, [{#job{pid = undefined, _ = '_'}, [], [true]}]).
-
--spec job_by_pid(pid()) -> {ok, #job{}} | {error, not_found}.
-job_by_pid(Pid) when is_pid(Pid) ->
- case ets:match_object(?MODULE, #job{pid = Pid, _ = '_'}) of
- [] ->
- {error, not_found};
- [#job{} = Job] ->
- {ok, Job}
- end.
-
--spec job_by_id(job_id()) -> {ok, #job{}} | {error, not_found}.
-job_by_id(Id) ->
- case ets:lookup(?MODULE, Id) of
- [] ->
- {error, not_found};
- [#job{} = Job] ->
- {ok, Job}
- end.
-
--spec update_state_stopped(#job{}, #state{}) -> ok.
-update_state_stopped(Job, State) ->
- Job1 = reset_job_process(Job),
- Job2 = update_history(Job1, stopped, os:timestamp(), State),
- true = ets:insert(?MODULE, Job2),
- couch_stats:increment_counter([couch_replicator, jobs, stops]),
- ok.
-
--spec update_state_started(#job{}, pid(), reference(), #state{}) -> ok.
-update_state_started(Job, Pid, Ref, State) ->
- Job1 = set_job_process(Job, Pid, Ref),
- Job2 = update_history(Job1, started, os:timestamp(), State),
- true = ets:insert(?MODULE, Job2),
- couch_stats:increment_counter([couch_replicator, jobs, starts]),
- ok.
-
--spec update_state_crashed(#job{}, any(), #state{}) -> ok.
-update_state_crashed(Job, Reason, State) ->
- Job1 = reset_job_process(Job),
- Job2 = update_history(Job1, {crashed, Reason}, os:timestamp(), State),
- true = ets:insert(?MODULE, Job2),
- couch_stats:increment_counter([couch_replicator, jobs, crashes]),
- ok.
-
--spec set_job_process(#job{}, pid(), reference()) -> #job{}.
-set_job_process(#job{} = Job, Pid, Ref) when is_pid(Pid), is_reference(Ref) ->
- Job#job{pid = Pid, monitor = Ref}.
-
--spec reset_job_process(#job{}) -> #job{}.
-reset_job_process(#job{} = Job) ->
- Job#job{pid = undefined, monitor = undefined}.
-
--spec reschedule(#state{}) -> ok.
-reschedule(#state{interval = Interval} = State) ->
- couch_replicator_share:update(running_jobs(), Interval, os:timestamp()),
- StopCount = stop_excess_jobs(State, running_job_count()),
- rotate_jobs(State, StopCount),
- update_running_jobs_stats(State#state.stats_pid).
-
--spec stop_excess_jobs(#state{}, non_neg_integer()) -> non_neg_integer().
-stop_excess_jobs(State, Running) ->
- #state{max_jobs = MaxJobs} = State,
- StopCount = max(0, Running - MaxJobs),
- Stopped = stop_jobs(StopCount, true, State),
- OneshotLeft = StopCount - Stopped,
- stop_jobs(OneshotLeft, false, State),
- StopCount.
-
-start_pending_jobs(State) ->
- #state{max_jobs = MaxJobs} = State,
- Running = running_job_count(),
- Pending = pending_job_count(),
- if
- Running < MaxJobs, Pending > 0 ->
- start_jobs(MaxJobs - Running, State);
- true ->
- ok
- end.
-
--spec rotate_jobs(#state{}, non_neg_integer()) -> ok.
-rotate_jobs(State, ChurnSoFar) ->
- #state{max_jobs = MaxJobs, max_churn = MaxChurn} = State,
- Running = running_job_count(),
- Pending = pending_job_count(),
- % Reduce MaxChurn by the number of already stopped jobs in the
- % current rescheduling cycle.
- Churn = max(0, MaxChurn - ChurnSoFar),
- SlotsAvailable = MaxJobs - Running,
- if
- SlotsAvailable >= 0 ->
-            % If there are enough SlotsAvailable, reduce StopCount to avoid
-            % unnecessarily stopping jobs. `stop_jobs/3` ignores 0 or negative
-            % values, so we don't worry about that here.
- StopCount = lists:min([Pending - SlotsAvailable, Running, Churn]),
- stop_jobs(StopCount, true, State),
- StartCount = max(0, MaxJobs - running_job_count()),
- start_jobs(StartCount, State);
- true ->
- ok
- end.
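% A worked rotation example (the numbers are made up): with MaxJobs = 10,
% MaxChurn = 5, ChurnSoFar = 0, Running = 10 and Pending = 8, SlotsAvailable is
% 0, so StopCount = min(8, 10, 5) = 5. Five running jobs are stopped, then
% StartCount = 10 - 5 = 5 and the five oldest pending jobs are started, i.e. at
% most max_churn jobs are rotated per cycle.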
-
--spec last_started(#job{}) -> erlang:timestamp().
-last_started(#job{} = Job) ->
- case lists:keyfind(started, 1, Job#job.history) of
- false ->
- {0, 0, 0};
- {started, When} ->
- When
- end.
-
--spec update_history(#job{}, event_type(), erlang:timestamp(), #state{}) ->
- #job{}.
-update_history(Job, Type, When, State) ->
- History0 = [{Type, When} | Job#job.history],
- History1 = lists:sublist(History0, State#state.max_history),
- Job#job{history = History1}.
-
--spec ejson_url(#httpdb{} | binary()) -> binary().
-ejson_url(#httpdb{} = Httpdb) ->
- couch_util:url_strip_password(Httpdb#httpdb.url);
-ejson_url(DbName) when is_binary(DbName) ->
- DbName.
-
--spec job_ejson(#job{}) -> {[_ | _]}.
-job_ejson(Job) ->
- Rep = Job#job.rep,
- Source = ejson_url(Rep#rep.source),
- Target = ejson_url(Rep#rep.target),
- History = lists:map(
- fun({Type, When}) ->
- EventProps =
- case Type of
- {crashed, Reason} ->
- [{type, crashed}, {reason, crash_reason_json(Reason)}];
- Type ->
- [{type, Type}]
- end,
- {[{timestamp, couch_replicator_utils:iso8601(When)} | EventProps]}
- end,
- Job#job.history
- ),
- {BaseID, Ext} = Job#job.id,
- Pid =
- case Job#job.pid of
- undefined ->
- null;
- P when is_pid(P) ->
- ?l2b(pid_to_list(P))
- end,
- {[
- {id, iolist_to_binary([BaseID, Ext])},
- {pid, Pid},
- {source, iolist_to_binary(Source)},
- {target, iolist_to_binary(Target)},
- {database, Rep#rep.db_name},
- {user, (Rep#rep.user_ctx)#user_ctx.name},
- {doc_id, Rep#rep.doc_id},
- {info, couch_replicator_utils:ejson_state_info(Rep#rep.stats)},
- {history, History},
- {node, node()},
- {start_time, couch_replicator_utils:iso8601(Rep#rep.start_time)}
- ]}.
-
--spec jobs() -> [[tuple()]].
-jobs() ->
- ets:foldl(fun(Job, Acc) -> [job_ejson(Job) | Acc] end, [], ?MODULE).
-
--spec job(job_id()) -> {ok, {[_ | _]}} | {error, not_found}.
-job(JobId) ->
- case job_by_id(JobId) of
- {ok, Job} ->
- {ok, job_ejson(Job)};
- Error ->
- Error
- end.
-
-crash_reason_json({_CrashType, Info}) when is_binary(Info) ->
- Info;
-crash_reason_json(Reason) when is_binary(Reason) ->
- Reason;
-crash_reason_json(Error) ->
- couch_replicator_utils:rep_error_to_binary(Error).
-
--spec last_updated([_]) -> binary().
-last_updated([{_Type, When} | _]) ->
- couch_replicator_utils:iso8601(When).
-
--spec is_continuous(#job{}) -> boolean().
-is_continuous(#job{rep = Rep}) ->
- couch_util:get_value(continuous, Rep#rep.options, false).
-
-% If the job crashed last time because it was rate limited, try to
-% optimize some options to help the job make progress.
--spec maybe_optimize_job_for_rate_limiting(#job{}) -> #job{}.
-maybe_optimize_job_for_rate_limiting(
- Job = #job{
- history =
- [{{crashed, max_backoff}, _} | _]
- }
-) ->
- Opts = [
- {checkpoint_interval, 5000},
- {worker_processes, 2},
- {worker_batch_size, 100},
- {http_connections, 5}
- ],
- Rep = lists:foldl(fun optimize_int_option/2, Job#job.rep, Opts),
- Job#job{rep = Rep};
-maybe_optimize_job_for_rate_limiting(Job) ->
- Job.
-
--spec optimize_int_option({atom(), any()}, #rep{}) -> #rep{}.
-optimize_int_option({Key, Val}, #rep{options = Options} = Rep) ->
- case couch_util:get_value(Key, Options) of
- CurVal when is_integer(CurVal), CurVal > Val ->
- Msg = "~p replication ~p : setting ~p = ~p due to rate limiting",
- couch_log:warning(Msg, [?MODULE, Rep#rep.id, Key, Val]),
- Options1 = lists:keyreplace(Key, 1, Options, {Key, Val}),
- Rep#rep{options = Options1};
- _ ->
- Rep
- end.
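% A worked example (the starting option values are made up): a job whose last
% history event is {crashed, max_backoff} and whose options include
% {worker_processes, 4} and {http_connections, 20} is restarted with
% worker_processes = 2 and http_connections = 5; optimize_int_option/2 only
% lowers values that are above the rate-limit-friendly settings and leaves
% anything already at or below them unchanged.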
-
-% Updater is a separate process. It receives `update_stats` messages and
-% updates scheduler stats from the scheduler jobs table. Updates are
-% performed no more frequently than once per ?STATS_UPDATE_WAIT milliseconds.
-
-update_running_jobs_stats(StatsPid) when is_pid(StatsPid) ->
- StatsPid ! update_stats,
- ok.
-
-start_stats_updater() ->
- erlang:spawn_link(?MODULE, stats_updater_loop, [undefined]).
-
-stats_updater_loop(Timer) ->
- receive
- update_stats when Timer == undefined ->
- TRef = erlang:send_after(?STATS_UPDATE_WAIT, self(), refresh_stats),
- ?MODULE:stats_updater_loop(TRef);
- update_stats when is_reference(Timer) ->
- ?MODULE:stats_updater_loop(Timer);
- refresh_stats ->
- ok = stats_updater_refresh(),
- ?MODULE:stats_updater_loop(undefined);
- Else ->
- erlang:exit({stats_updater_bad_msg, Else})
- end.
-
--spec stats_updater_refresh() -> ok.
-stats_updater_refresh() ->
- #stats_acc{
- pending_n = PendingN,
- running_n = RunningN,
- crashed_n = CrashedN
- } = ets:foldl(fun stats_fold/2, #stats_acc{}, ?MODULE),
- couch_stats:update_gauge([couch_replicator, jobs, pending], PendingN),
- couch_stats:update_gauge([couch_replicator, jobs, running], RunningN),
- couch_stats:update_gauge([couch_replicator, jobs, crashed], CrashedN),
- ok.
-
--spec stats_fold(#job{}, #stats_acc{}) -> #stats_acc{}.
-stats_fold(#job{pid = undefined, history = [{added, _}]}, Acc) ->
- Acc#stats_acc{pending_n = Acc#stats_acc.pending_n + 1};
-stats_fold(#job{pid = undefined, history = [{stopped, _} | _]}, Acc) ->
- Acc#stats_acc{pending_n = Acc#stats_acc.pending_n + 1};
-stats_fold(#job{pid = undefined, history = [{{crashed, _}, _} | _]}, Acc) ->
- Acc#stats_acc{crashed_n = Acc#stats_acc.crashed_n + 1};
-stats_fold(#job{pid = P, history = [{started, _} | _]}, Acc) when is_pid(P) ->
- Acc#stats_acc{running_n = Acc#stats_acc.running_n + 1}.
-
--spec existing_replication(#rep{}) -> boolean().
-existing_replication(#rep{} = NewRep) ->
- case job_by_id(NewRep#rep.id) of
- {ok, #job{rep = CurRep}} ->
- NormCurRep = couch_replicator_utils:normalize_rep(CurRep),
- NormNewRep = couch_replicator_utils:normalize_rep(NewRep),
- NormCurRep == NormNewRep;
- {error, not_found} ->
- false
- end.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-backoff_micros_test_() ->
- BaseInterval = ?BACKOFF_INTERVAL_MICROS,
- [
- ?_assertEqual(R * BaseInterval, backoff_micros(N))
- || {R, N} <- [
- {1, 1},
- {2, 2},
- {4, 3},
- {8, 4},
- {16, 5},
- {32, 6},
- {64, 7},
- {128, 8},
- {256, 9},
- {512, 10},
- {1024, 11},
- {1024, 12}
- ]
- ].
-
-consecutive_crashes_test_() ->
- Threshold = ?DEFAULT_HEALTH_THRESHOLD_SEC,
- [
- ?_assertEqual(R, consecutive_crashes(H, Threshold))
- || {R, H} <- [
- {0, []},
- {0, [added()]},
- {0, [stopped()]},
- {0, [crashed()]},
- {1, [crashed(), added()]},
- {1, [crashed(), crashed()]},
- {1, [crashed(), stopped()]},
- {3, [crashed(), crashed(), crashed(), added()]},
- {2, [crashed(), crashed(), stopped()]},
- {1, [crashed(), started(), added()]},
- {2, [crashed(3), started(2), crashed(1), started(0)]},
- {0, [stopped(3), started(2), crashed(1), started(0)]},
- {1, [crashed(3), started(2), stopped(1), started(0)]},
- {0, [crashed(999), started(0)]},
- {1, [crashed(999), started(998), crashed(997), started(0)]}
- ]
- ].
-
-consecutive_crashes_non_default_threshold_test_() ->
- [
- ?_assertEqual(R, consecutive_crashes(H, T))
- || {R, H, T} <- [
- {0, [crashed(11), started(0)], 10},
- {1, [crashed(10), started(0)], 10}
- ]
- ].
-
-latest_crash_timestamp_test_() ->
- [
- ?_assertEqual({0, R, 0}, latest_crash_timestamp(H))
- || {R, H} <- [
- {0, [added()]},
- {1, [crashed(1)]},
- {3, [crashed(3), started(2), crashed(1), started(0)]},
- {1, [started(3), stopped(2), crashed(1), started(0)]}
- ]
- ].
-
-last_started_test_() ->
- [
- ?_assertEqual({0, R, 0}, last_started(testjob(H)))
- || {R, H} <- [
- {0, [added()]},
- {0, [crashed(1)]},
- {1, [started(1)]},
- {1, [added(), started(1)]},
- {2, [started(2), started(1)]},
- {2, [crashed(3), started(2), started(1)]}
- ]
- ].
-
-longest_running_test() ->
- J0 = testjob([crashed()]),
- J1 = testjob([started(1)]),
- J2 = testjob([started(2)]),
- SortFun = fun(A, B) -> last_started(A) =< last_started(B) end,
- Sort = fun(Jobs) -> lists:sort(SortFun, Jobs) end,
- ?assertEqual([], Sort([])),
- ?assertEqual([J1], Sort([J1])),
- ?assertEqual([J1, J2], Sort([J2, J1])),
- ?assertEqual([J0, J1, J2], Sort([J2, J1, J0])).
-
-scheduler_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_pending_jobs_simple(),
- t_pending_jobs_skip_crashed(),
- t_pending_jobs_skip_running(),
- t_one_job_starts(),
- t_no_jobs_start_if_max_is_0(),
- t_one_job_starts_if_max_is_1(),
- t_max_churn_does_not_throttle_initial_start(),
- t_excess_oneshot_only_jobs(),
- t_excess_continuous_only_jobs(),
- t_excess_prefer_continuous_first(),
- t_stop_oldest_first(),
- t_start_oldest_first(),
- t_jobs_churn_even_if_not_all_max_jobs_are_running(),
- t_jobs_dont_churn_if_there_are_available_running_slots(),
- t_start_only_pending_jobs_do_not_churn_existing_ones(),
- t_dont_stop_if_nothing_pending(),
- t_max_churn_limits_number_of_rotated_jobs(),
- t_existing_jobs(),
- t_if_pending_less_than_running_start_all_pending(),
- t_running_less_than_pending_swap_all_running(),
- t_oneshot_dont_get_rotated(),
- t_rotate_continuous_only_if_mixed(),
- t_oneshot_dont_get_starting_priority(),
- t_oneshot_will_hog_the_scheduler(),
- t_if_excess_is_trimmed_rotation_still_happens(),
- t_if_transient_job_crashes_it_gets_removed(),
- t_if_permanent_job_crashes_it_stays_in_ets(),
- t_job_summary_running(),
- t_job_summary_pending(),
- t_job_summary_crashing_once(),
- t_job_summary_crashing_many_times(),
- t_job_summary_proxy_fields()
- ]
- }
- }.
-
-t_pending_jobs_simple() ->
- ?_test(begin
- Job1 = oneshot(1),
- Job2 = oneshot(2),
- setup_jobs([Job2, Job1]),
- ?assertEqual([], pending_jobs(0)),
- ?assertEqual([Job1], pending_jobs(1)),
- ?assertEqual([Job1, Job2], pending_jobs(2)),
- ?assertEqual([Job1, Job2], pending_jobs(3))
- end).
-
-t_pending_jobs_skip_crashed() ->
- ?_test(begin
- Job = oneshot(1),
- Ts = os:timestamp(),
- History = [crashed(Ts), started(Ts) | Job#job.history],
- Job1 = Job#job{history = History},
- Job2 = oneshot(2),
- Job3 = oneshot(3),
- setup_jobs([Job2, Job1, Job3]),
- ?assertEqual([Job2], pending_jobs(1)),
- ?assertEqual([Job2, Job3], pending_jobs(2)),
- ?assertEqual([Job2, Job3], pending_jobs(3))
- end).
-
-t_pending_jobs_skip_running() ->
- ?_test(begin
- Job1 = continuous(1),
- Job2 = continuous_running(2),
- Job3 = oneshot(3),
- Job4 = oneshot_running(4),
- Jobs = [Job1, Job2, Job3, Job4],
- setup_jobs(Jobs),
- ?assertEqual([Job1, Job3], pending_jobs(4))
- end).
-
-t_one_job_starts() ->
- ?_test(begin
- setup_jobs([oneshot(1)]),
- ?assertEqual({0, 1}, run_stop_count()),
- reschedule(mock_state(?DEFAULT_MAX_JOBS)),
- ?assertEqual({1, 0}, run_stop_count())
- end).
-
-t_no_jobs_start_if_max_is_0() ->
- ?_test(begin
- setup_jobs([oneshot(1)]),
- reschedule(mock_state(0)),
- ?assertEqual({0, 1}, run_stop_count())
- end).
-
-t_one_job_starts_if_max_is_1() ->
- ?_test(begin
- setup_jobs([oneshot(1), oneshot(2)]),
- reschedule(mock_state(1)),
- ?assertEqual({1, 1}, run_stop_count())
- end).
-
-t_max_churn_does_not_throttle_initial_start() ->
- ?_test(begin
- setup_jobs([oneshot(1), oneshot(2)]),
- reschedule(mock_state(?DEFAULT_MAX_JOBS, 0)),
- ?assertEqual({2, 0}, run_stop_count())
- end).
-
-t_excess_oneshot_only_jobs() ->
- ?_test(begin
- setup_jobs([oneshot_running(1), oneshot_running(2)]),
- ?assertEqual({2, 0}, run_stop_count()),
- reschedule(mock_state(1)),
- ?assertEqual({1, 1}, run_stop_count()),
- reschedule(mock_state(0)),
- ?assertEqual({0, 2}, run_stop_count())
- end).
-
-t_excess_continuous_only_jobs() ->
- ?_test(begin
- setup_jobs([continuous_running(1), continuous_running(2)]),
- ?assertEqual({2, 0}, run_stop_count()),
- reschedule(mock_state(1)),
- ?assertEqual({1, 1}, run_stop_count()),
- reschedule(mock_state(0)),
- ?assertEqual({0, 2}, run_stop_count())
- end).
-
-t_excess_prefer_continuous_first() ->
- ?_test(begin
- Jobs = [
- continuous_running(1),
- oneshot_running(2),
- continuous_running(3)
- ],
- setup_jobs(Jobs),
- ?assertEqual({3, 0}, run_stop_count()),
- ?assertEqual({1, 0}, oneshot_run_stop_count()),
- reschedule(mock_state(2)),
- ?assertEqual({2, 1}, run_stop_count()),
- ?assertEqual({1, 0}, oneshot_run_stop_count()),
- reschedule(mock_state(1)),
- ?assertEqual({1, 0}, oneshot_run_stop_count()),
- reschedule(mock_state(0)),
- ?assertEqual({0, 1}, oneshot_run_stop_count())
- end).
-
-t_stop_oldest_first() ->
- ?_test(begin
- Jobs = [
- continuous_running(7),
- continuous_running(4),
- continuous_running(5)
- ],
- setup_jobs(Jobs),
- reschedule(mock_state(2, 1)),
- ?assertEqual({2, 1}, run_stop_count()),
- ?assertEqual([4], jobs_stopped()),
- reschedule(mock_state(1, 1)),
- ?assertEqual([7], jobs_running())
- end).
-
-t_start_oldest_first() ->
- ?_test(begin
- setup_jobs([continuous(7), continuous(2), continuous(5)]),
- reschedule(mock_state(1)),
- ?assertEqual({1, 2}, run_stop_count()),
- ?assertEqual([2], jobs_running()),
- reschedule(mock_state(2)),
- ?assertEqual({2, 1}, run_stop_count()),
- % After rescheduling with max_jobs = 2, 2 was stopped and 5, 7 should
- % be running.
- ?assertEqual([2], jobs_stopped())
- end).
-
-t_jobs_churn_even_if_not_all_max_jobs_are_running() ->
- ?_test(begin
- setup_jobs([
- continuous_running(7),
- continuous(2),
- continuous(5)
- ]),
- reschedule(mock_state(2, 2)),
- ?assertEqual({2, 1}, run_stop_count()),
- ?assertEqual([7], jobs_stopped())
- end).
-
-t_jobs_dont_churn_if_there_are_available_running_slots() ->
- ?_test(begin
- setup_jobs([
- continuous_running(1),
- continuous_running(2)
- ]),
- reschedule(mock_state(2, 2)),
- ?assertEqual({2, 0}, run_stop_count()),
- ?assertEqual([], jobs_stopped()),
- ?assertEqual(0, meck:num_calls(couch_replicator_scheduler_sup, start_child, 1))
- end).
-
-t_start_only_pending_jobs_do_not_churn_existing_ones() ->
- ?_test(begin
- setup_jobs([
- continuous(1),
- continuous_running(2)
- ]),
- reschedule(mock_state(2, 2)),
- ?assertEqual(1, meck:num_calls(couch_replicator_scheduler_sup, start_child, 1)),
- ?assertEqual([], jobs_stopped()),
- ?assertEqual({2, 0}, run_stop_count())
- end).
-
-t_dont_stop_if_nothing_pending() ->
- ?_test(begin
- setup_jobs([continuous_running(1), continuous_running(2)]),
- reschedule(mock_state(2)),
- ?assertEqual({2, 0}, run_stop_count())
- end).
-
-t_max_churn_limits_number_of_rotated_jobs() ->
- ?_test(begin
- Jobs = [
- continuous(1),
- continuous_running(2),
- continuous(3),
- continuous_running(4)
- ],
- setup_jobs(Jobs),
- reschedule(mock_state(2, 1)),
- ?assertEqual([2, 3], jobs_stopped())
- end).
-
-t_if_pending_less_than_running_start_all_pending() ->
- ?_test(begin
- Jobs = [
- continuous(1),
- continuous_running(2),
- continuous(3),
- continuous_running(4),
- continuous_running(5)
- ],
- setup_jobs(Jobs),
- reschedule(mock_state(3)),
- ?assertEqual([1, 2, 5], jobs_running())
- end).
-
-t_running_less_than_pending_swap_all_running() ->
- ?_test(begin
- Jobs = [
- continuous(1),
- continuous(2),
- continuous(3),
- continuous_running(4),
- continuous_running(5)
- ],
- setup_jobs(Jobs),
- reschedule(mock_state(2)),
- ?assertEqual([3, 4, 5], jobs_stopped())
- end).
-
-t_oneshot_dont_get_rotated() ->
- ?_test(begin
- setup_jobs([oneshot_running(1), continuous(2)]),
- reschedule(mock_state(1)),
- ?assertEqual([1], jobs_running())
- end).
-
-t_rotate_continuous_only_if_mixed() ->
- ?_test(begin
- setup_jobs([continuous(1), oneshot_running(2), continuous_running(3)]),
- reschedule(mock_state(2)),
- ?assertEqual([1, 2], jobs_running())
- end).
-
-t_oneshot_dont_get_starting_priority() ->
- ?_test(begin
- setup_jobs([continuous(1), oneshot(2), continuous_running(3)]),
- reschedule(mock_state(1)),
- ?assertEqual([1], jobs_running())
- end).
-
-% This is tested in other test cases; it is here mainly to make explicit a
-% property of one-shot replications -- they can starve other jobs if they
-% "take control" of all the available scheduler slots.
-t_oneshot_will_hog_the_scheduler() ->
- ?_test(begin
- Jobs = [
- oneshot_running(1),
- oneshot_running(2),
- oneshot(3),
- continuous(4)
- ],
- setup_jobs(Jobs),
- reschedule(mock_state(2)),
- ?assertEqual([1, 2], jobs_running())
- end).
-
-t_if_excess_is_trimmed_rotation_still_happens() ->
- ?_test(begin
- Jobs = [
- continuous(1),
- continuous_running(2),
- continuous_running(3)
- ],
- setup_jobs(Jobs),
- reschedule(mock_state(1)),
- ?assertEqual([1], jobs_running())
- end).
-
-t_if_transient_job_crashes_it_gets_removed() ->
- ?_test(begin
- Pid = mock_pid(),
- Rep = continuous_rep(),
- Job = #job{
- id = job1,
- pid = Pid,
- history = [added()],
- rep = Rep#rep{db_name = null}
- },
- setup_jobs([Job]),
- ?assertEqual(1, ets:info(?MODULE, size)),
- State = #state{max_history = 3, stats_pid = self()},
- {noreply, State} = handle_info(
- {'DOWN', r1, process, Pid, failed},
- State
- ),
- ?assertEqual(0, ets:info(?MODULE, size))
- end).
-
-t_if_permanent_job_crashes_it_stays_in_ets() ->
- ?_test(begin
- Pid = mock_pid(),
- Rep = continuous_rep(),
- Job = #job{
- id = job1,
- pid = Pid,
- history = [added()],
- rep = Rep#rep{db_name = <<"db1">>}
- },
- setup_jobs([Job]),
- ?assertEqual(1, ets:info(?MODULE, size)),
- State = #state{
- max_jobs = 1,
- max_history = 3,
- stats_pid = self()
- },
- {noreply, State} = handle_info(
- {'DOWN', r1, process, Pid, failed},
- State
- ),
- ?assertEqual(1, ets:info(?MODULE, size)),
- [Job1] = ets:lookup(?MODULE, job1),
- [Latest | _] = Job1#job.history,
- ?assertMatch({{crashed, failed}, _}, Latest)
- end).
-
-t_existing_jobs() ->
- ?_test(begin
- Rep0 = continuous_rep(<<"s">>, <<"t">>),
- Rep = Rep0#rep{id = job1, db_name = <<"db">>},
- setup_jobs([#job{id = Rep#rep.id, rep = Rep}]),
- NewRep0 = continuous_rep(<<"s">>, <<"t">>),
- NewRep = NewRep0#rep{id = Rep#rep.id, db_name = <<"db">>},
- ?assert(existing_replication(NewRep)),
- ?assertNot(existing_replication(NewRep#rep{source = <<"s1">>})),
- ?assertNot(existing_replication(NewRep#rep{target = <<"t1">>})),
- ?assertNot(existing_replication(NewRep#rep{options = []}))
- end).
-
-t_job_summary_running() ->
- ?_test(begin
- Rep = rep(<<"s">>, <<"t">>),
- Job = #job{
- id = job1,
- pid = mock_pid(),
- history = [added()],
- rep = Rep#rep{db_name = <<"db1">>}
- },
- setup_jobs([Job]),
- Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
- ?assertEqual(running, proplists:get_value(state, Summary)),
- ?assertEqual(null, proplists:get_value(info, Summary)),
- ?assertEqual(0, proplists:get_value(error_count, Summary)),
-
- Stats = [{source_seq, <<"1-abc">>}],
- handle_cast({update_job_stats, job1, Stats}, mock_state(1)),
- Summary1 = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
- ?assertEqual({Stats}, proplists:get_value(info, Summary1))
- end).
-
-t_job_summary_pending() ->
- ?_test(begin
- Job = #job{
- id = job1,
- pid = undefined,
- history = [stopped(20), started(10), added()],
- rep = rep(<<"s">>, <<"t">>)
- },
- setup_jobs([Job]),
- Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
- ?assertEqual(pending, proplists:get_value(state, Summary)),
- ?assertEqual(null, proplists:get_value(info, Summary)),
- ?assertEqual(0, proplists:get_value(error_count, Summary)),
-
- Stats = [{doc_write_failures, 1}],
- handle_cast({update_job_stats, job1, Stats}, mock_state(1)),
- Summary1 = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
- ?assertEqual({Stats}, proplists:get_value(info, Summary1))
- end).
-
-t_job_summary_crashing_once() ->
- ?_test(begin
- Job = #job{
- id = job1,
- history = [crashed(?DEFAULT_HEALTH_THRESHOLD_SEC + 1), started(0)],
- rep = rep(<<"s">>, <<"t">>)
- },
- setup_jobs([Job]),
- Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
- ?assertEqual(crashing, proplists:get_value(state, Summary)),
- Info = proplists:get_value(info, Summary),
- ?assertEqual({[{<<"error">>, <<"some_reason">>}]}, Info),
- ?assertEqual(0, proplists:get_value(error_count, Summary))
- end).
-
-t_job_summary_crashing_many_times() ->
- ?_test(begin
- Job = #job{
- id = job1,
- history = [crashed(4), started(3), crashed(2), started(1)],
- rep = rep(<<"s">>, <<"t">>)
- },
- setup_jobs([Job]),
- Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
- ?assertEqual(crashing, proplists:get_value(state, Summary)),
- Info = proplists:get_value(info, Summary),
- ?assertEqual({[{<<"error">>, <<"some_reason">>}]}, Info),
- ?assertEqual(2, proplists:get_value(error_count, Summary))
- end).
-
-t_job_summary_proxy_fields() ->
- ?_test(begin
- Src = #httpdb{
- url = "https://s",
- proxy_url = "http://u:p@sproxy:12"
- },
- Tgt = #httpdb{
- url = "http://t",
- proxy_url = "socks5://u:p@tproxy:34"
- },
- Job = #job{
- id = job1,
- history = [started(10), added()],
- rep = rep(Src, Tgt)
- },
- setup_jobs([Job]),
- Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
- ?assertEqual(
- <<"http://u:*****@sproxy:12">>,
- proplists:get_value(source_proxy, Summary)
- ),
- ?assertEqual(
- <<"socks5://u:*****@tproxy:34">>,
- proplists:get_value(target_proxy, Summary)
- )
- end).
-
-% Test helper functions
-
-setup_all() ->
- catch ets:delete(?MODULE),
- meck:expect(config, get, 1, []),
- meck:expect(config, get, 2, undefined),
- meck:expect(couch_log, notice, 2, ok),
- meck:expect(couch_log, warning, 2, ok),
- meck:expect(couch_log, error, 2, ok),
- meck:expect(couch_replicator_scheduler_sup, terminate_child, 1, ok),
- meck:expect(couch_stats, increment_counter, 1, ok),
- meck:expect(couch_stats, update_gauge, 2, ok),
- Pid = mock_pid(),
- meck:expect(couch_replicator_scheduler_sup, start_child, 1, {ok, Pid}),
- couch_replicator_share:init().
-
-teardown_all(_) ->
- couch_replicator_share:clear(),
- catch ets:delete(?MODULE),
- meck:unload().
-
-setup() ->
- meck:reset([
- couch_log,
- couch_replicator_scheduler_sup,
- couch_stats,
- config
- ]).
-
-teardown(_) ->
- ok.
-
-setup_jobs(Jobs) when is_list(Jobs) ->
- ?MODULE = ets:new(?MODULE, [named_table, {keypos, #job.id}]),
- ets:insert(?MODULE, Jobs).
-
-all_jobs() ->
- lists:usort(ets:tab2list(?MODULE)).
-
-jobs_stopped() ->
- [Job#job.id || Job <- all_jobs(), Job#job.pid =:= undefined].
-
-jobs_running() ->
- [Job#job.id || Job <- all_jobs(), Job#job.pid =/= undefined].
-
-run_stop_count() ->
- {length(jobs_running()), length(jobs_stopped())}.
-
-oneshot_run_stop_count() ->
- Running = [
- Job#job.id
- || Job <- all_jobs(),
- Job#job.pid =/= undefined,
- not is_continuous(Job)
- ],
- Stopped = [
- Job#job.id
- || Job <- all_jobs(),
- Job#job.pid =:= undefined,
- not is_continuous(Job)
- ],
- {length(Running), length(Stopped)}.
-
-mock_state(MaxJobs) ->
- #state{
- max_jobs = MaxJobs,
- max_churn = ?DEFAULT_MAX_CHURN,
- max_history = ?DEFAULT_MAX_HISTORY,
- stats_pid = self()
- }.
-
-mock_state(MaxJobs, MaxChurn) ->
- #state{
- max_jobs = MaxJobs,
- max_churn = MaxChurn,
- max_history = ?DEFAULT_MAX_HISTORY,
- stats_pid = self()
- }.
-
-rep() ->
- #rep{options = [], user_ctx = #user_ctx{}}.
-
-rep(Src, Tgt) ->
- Rep = rep(),
- Rep#rep{source = Src, target = Tgt}.
-
-continuous_rep() ->
- #rep{options = [{continuous, true}], user_ctx = #user_ctx{}}.
-
-continuous_rep(Src, Tgt) ->
- Rep = continuous_rep(),
- Rep#rep{source = Src, target = Tgt}.
-
-continuous(Id) when is_integer(Id) ->
- Started = Id,
- Hist = [stopped(Started + 1), started(Started), added()],
- #job{
- id = Id,
- history = Hist,
- rep = continuous_rep()
- }.
-
-continuous_running(Id) when is_integer(Id) ->
- Started = Id,
- Pid = mock_pid(),
- #job{
- id = Id,
- history = [started(Started), added()],
- rep = continuous_rep(),
- pid = Pid,
- monitor = monitor(process, Pid)
- }.
-
-oneshot(Id) when is_integer(Id) ->
- Started = Id,
- Hist = [stopped(Started + 1), started(Started), added()],
- #job{id = Id, history = Hist, rep = rep()}.
-
-oneshot_running(Id) when is_integer(Id) ->
- Started = Id,
- Pid = mock_pid(),
- #job{
- id = Id,
- history = [started(Started), added()],
- rep = rep(),
- pid = Pid,
- monitor = monitor(process, Pid)
- }.
-
-testjob(Hist) when is_list(Hist) ->
- #job{history = Hist}.
-
-mock_pid() ->
- list_to_pid("<0.999.999>").
-
-crashed() ->
- crashed(0).
-
-crashed(WhenSec) when is_integer(WhenSec) ->
- {{crashed, some_reason}, {0, WhenSec, 0}};
-crashed({MSec, Sec, USec}) ->
- {{crashed, some_reason}, {MSec, Sec, USec}}.
-
-started() ->
- started(0).
-
-started(WhenSec) when is_integer(WhenSec) ->
- {started, {0, WhenSec, 0}};
-started({MSec, Sec, USec}) ->
- {started, {MSec, Sec, USec}}.
-
-stopped() ->
- stopped(0).
-
-stopped(WhenSec) ->
- {stopped, {0, WhenSec, 0}}.
-
-added() ->
- {added, {0, 0, 0}}.
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
deleted file mode 100644
index 777636691..000000000
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ /dev/null
@@ -1,1182 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_scheduler_job).
-
--behaviour(gen_server).
-
--export([
- start_link/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3,
- format_status/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
--include("couch_replicator.hrl").
-
--import(couch_util, [
- get_value/2,
- get_value/3,
- to_binary/1
-]).
-
--import(couch_replicator_utils, [
- pp_rep_id/1
-]).
-
--define(LOWEST_SEQ, 0).
--define(DEFAULT_CHECKPOINT_INTERVAL, 30000).
--define(STARTUP_JITTER_DEFAULT, 5000).
-
--record(rep_state, {
- rep_details,
- source_name,
- target_name,
- source,
- target,
- history,
- checkpoint_history,
- start_seq,
- committed_seq,
- current_through_seq,
- seqs_in_progress = [],
- highest_seq_done = {0, ?LOWEST_SEQ},
- source_log,
- target_log,
- rep_starttime,
- src_starttime,
- tgt_starttime,
- % checkpoint timer
- timer,
- changes_queue,
- changes_manager,
- changes_reader,
- workers,
- stats = couch_replicator_stats:new(),
- session_id,
- source_seq = nil,
- use_checkpoints = true,
- checkpoint_interval = ?DEFAULT_CHECKPOINT_INTERVAL,
- type = db,
- view = nil
-}).
-
-start_link(#rep{id = {BaseId, Ext}, source = Src, target = Tgt} = Rep) ->
- RepChildId = BaseId ++ Ext,
- Source = couch_replicator_api_wrap:db_uri(Src),
- Target = couch_replicator_api_wrap:db_uri(Tgt),
- ServerName = {global, {?MODULE, Rep#rep.id}},
-
- case gen_server:start_link(ServerName, ?MODULE, Rep, []) of
- {ok, Pid} ->
- {ok, Pid};
- {error, Reason} ->
- couch_log:warning(
- "failed to start replication `~s` (`~s` -> `~s`)",
- [RepChildId, Source, Target]
- ),
- {error, Reason}
- end.
-
-init(InitArgs) ->
- {ok, InitArgs, 0}.
-
-do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx = UserCtx} = Rep) ->
- process_flag(trap_exit, true),
-
- timer:sleep(startup_jitter()),
-
- #rep_state{
- source = Source,
- target = Target,
- source_name = SourceName,
- target_name = TargetName,
- start_seq = {_Ts, StartSeq},
- highest_seq_done = {_, HighestSeq},
- checkpoint_interval = CheckpointInterval
- } = State = init_state(Rep),
-
- NumWorkers = get_value(worker_processes, Options),
- BatchSize = get_value(worker_batch_size, Options),
- {ok, ChangesQueue} = couch_work_queue:new([
- {max_items, BatchSize * NumWorkers * 2},
- {max_size, 100 * 1024 * NumWorkers}
- ]),
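    % A rough sense of scale (assuming, say, worker_batch_size = 500 and
    % worker_processes = 4, illustrative values rather than ones taken from
    % this code): max_items would be 500 * 4 * 2 = 4000 queued changes rows and
    % max_size would be 100 KiB * 4 = 400 KiB, so the queue applies
    % backpressure to the changes reader whenever the workers fall behind.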
- % This starts the _changes reader process. It adds the changes from
- % the source db to the ChangesQueue.
- {ok, ChangesReader} = couch_replicator_changes_reader:start_link(
- StartSeq, Source, ChangesQueue, Options
- ),
-    % Changes manager - responsible for dequeueing batches from the changes
-    % queue and delivering them to the worker processes.
- ChangesManager = spawn_changes_manager(self(), ChangesQueue, BatchSize),
-    % This starts the worker processes. They ask the changes queue manager for
-    % a batch of _changes rows to process -> check which revs are missing in
-    % the target and, for the missing ones, copy them from the source to the
-    % target.
- MaxConns = get_value(http_connections, Options),
- Workers = lists:map(
- fun(_) ->
- couch_stats:increment_counter([couch_replicator, workers_started]),
- {ok, Pid} = couch_replicator_worker:start_link(
- self(), Source, Target, ChangesManager, MaxConns
- ),
- Pid
- end,
- lists:seq(1, NumWorkers)
- ),
-
- couch_task_status:add_task(
- [
- {type, replication},
- {user, UserCtx#user_ctx.name},
- {replication_id, ?l2b(BaseId ++ Ext)},
- {database, Rep#rep.db_name},
- {doc_id, Rep#rep.doc_id},
- {source, ?l2b(SourceName)},
- {target, ?l2b(TargetName)},
- {continuous, get_value(continuous, Options, false)},
- {source_seq, HighestSeq},
- {checkpoint_interval, CheckpointInterval}
- ] ++ rep_stats(State)
- ),
- couch_task_status:set_update_frequency(1000),
-
- % Until OTP R14B03:
- %
- % Restarting a temporary supervised child implies that the original arguments
- % (#rep{} record) specified in the MFA component of the supervisor
- % child spec will always be used whenever the child is restarted.
-    % This implies the same replication performance tuning parameters will
- % always be used. The solution is to delete the child spec (see
- % cancel_replication/1) and then start the replication again, but this is
- % unfortunately not immune to race conditions.
-
- log_replication_start(State),
- couch_log:debug("Worker pids are: ~p", [Workers]),
-
- doc_update_triggered(Rep),
-
- {ok, State#rep_state{
- changes_queue = ChangesQueue,
- changes_manager = ChangesManager,
- changes_reader = ChangesReader,
- workers = Workers
- }}.
-
-handle_call({add_stats, Stats}, From, State) ->
- gen_server:reply(From, ok),
- NewStats = couch_replicator_utils:sum_stats(State#rep_state.stats, Stats),
- {noreply, State#rep_state{stats = NewStats}};
-handle_call(
- {report_seq_done, Seq, StatsInc},
- From,
- #rep_state{
- seqs_in_progress = SeqsInProgress,
- highest_seq_done = HighestDone,
- current_through_seq = ThroughSeq,
- stats = Stats
- } = State
-) ->
- gen_server:reply(From, ok),
- {NewThroughSeq0, NewSeqsInProgress} =
- case SeqsInProgress of
- [] ->
- {Seq, []};
- [Seq | Rest] ->
- {Seq, Rest};
- [_ | _] ->
- {ThroughSeq, ordsets:del_element(Seq, SeqsInProgress)}
- end,
- NewHighestDone = lists:max([HighestDone, Seq]),
- NewThroughSeq =
- case NewSeqsInProgress of
- [] ->
- lists:max([NewThroughSeq0, NewHighestDone]);
- _ ->
- NewThroughSeq0
- end,
- couch_log:debug(
- "Worker reported seq ~p, through seq was ~p, "
- "new through seq is ~p, highest seq done was ~p, "
- "new highest seq done is ~p~n"
- "Seqs in progress were: ~p~nSeqs in progress are now: ~p",
- [
- Seq,
- ThroughSeq,
- NewThroughSeq,
- HighestDone,
- NewHighestDone,
- SeqsInProgress,
- NewSeqsInProgress
- ]
- ),
- NewState = State#rep_state{
- stats = couch_replicator_utils:sum_stats(Stats, StatsInc),
- current_through_seq = NewThroughSeq,
- seqs_in_progress = NewSeqsInProgress,
- highest_seq_done = NewHighestDone
- },
- update_task(NewState),
- {noreply, NewState}.
-
-handle_cast(checkpoint, State) ->
- case do_checkpoint(State) of
- {ok, NewState} ->
- couch_stats:increment_counter([couch_replicator, checkpoints, success]),
- {noreply, NewState#rep_state{timer = start_timer(State)}};
- Error ->
- couch_stats:increment_counter([couch_replicator, checkpoints, failure]),
- {stop, Error, State}
- end;
-handle_cast(
- {report_seq, Seq},
- #rep_state{seqs_in_progress = SeqsInProgress} = State
-) ->
- NewSeqsInProgress = ordsets:add_element(Seq, SeqsInProgress),
- {noreply, State#rep_state{seqs_in_progress = NewSeqsInProgress}}.
-
-handle_info(shutdown, St) ->
- {stop, shutdown, St};
-
-handle_info({'EXIT', Pid, max_backoff}, State) ->
- couch_log:error("Max backoff reached child process ~p", [Pid]),
- {stop, {shutdown, max_backoff}, State};
-
-handle_info({'EXIT', Pid, {shutdown, max_backoff}}, State) ->
- couch_log:error("Max backoff reached child process ~p", [Pid]),
- {stop, {shutdown, max_backoff}, State};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_reader=Pid} = State) ->
- {noreply, State};
-
-handle_info({'EXIT', Pid, Reason0}, #rep_state{changes_reader=Pid} = State) ->
- couch_stats:increment_counter([couch_replicator, changes_reader_deaths]),
- Reason = case Reason0 of
- {changes_req_failed, _, _} = HttpFail ->
- HttpFail;
- {http_request_failed, _, _, {error, {code, Code}}} ->
- {changes_req_failed, Code};
- {http_request_failed, _, _, {error, Err}} ->
- {changes_req_failed, Err};
- Other ->
- {changes_reader_died, Other}
- end,
- couch_log:error("ChangesReader process died with reason: ~p", [Reason]),
- {stop, {shutdown, Reason}, cancel_timer(State)};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_manager = Pid} = State) ->
- {noreply, State};
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{changes_manager = Pid} = State) ->
- couch_stats:increment_counter([couch_replicator, changes_manager_deaths]),
- couch_log:error("ChangesManager process died with reason: ~p", [Reason]),
- {stop, {shutdown, {changes_manager_died, Reason}}, cancel_timer(State)};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_queue=Pid} = State) ->
- {noreply, State};
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{changes_queue=Pid} = State) ->
- couch_stats:increment_counter([couch_replicator, changes_queue_deaths]),
- couch_log:error("ChangesQueue process died with reason: ~p", [Reason]),
- {stop, {shutdown, {changes_queue_died, Reason}}, cancel_timer(State)};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{workers = Workers} = State) ->
- case Workers -- [Pid] of
- Workers ->
- couch_log:error("unknown pid bit the dust ~p ~n",[Pid]),
- {noreply, State#rep_state{workers = Workers}};
- %% not clear why a stop was here before
- %%{stop, {unknown_process_died, Pid, normal}, State};
- [] ->
- catch unlink(State#rep_state.changes_manager),
- catch exit(State#rep_state.changes_manager, kill),
- do_last_checkpoint(State);
- Workers2 ->
- {noreply, State#rep_state{workers = Workers2}}
- end;
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{workers = Workers} = State) ->
- State2 = cancel_timer(State),
- case lists:member(Pid, Workers) of
- false ->
- {stop, {unknown_process_died, Pid, Reason}, State2};
- true ->
- couch_stats:increment_counter([couch_replicator, worker_deaths]),
- StopReason = case Reason of
- {shutdown, _} = Err ->
- Err;
- Other ->
- couch_log:error("Worker ~p died with reason: ~p", [Pid, Reason]),
- {worker_died, Pid, Other}
- end,
- {stop, StopReason, State2}
- end;
-
-handle_info(timeout, InitArgs) ->
- try do_init(InitArgs) of {ok, State} ->
- {noreply, State}
- catch
- exit:{http_request_failed, _, _, max_backoff} ->
- {stop, {shutdown, max_backoff}, {error, InitArgs}};
- ?STACKTRACE(Class, Error, Stack)
- ShutdownReason = {error, replication_start_error(Error)},
- StackTop2 = lists:sublist(Stack, 2),
- % Shutdown state is a hack as it is not really the state of the
- % gen_server (it failed to initialize, so it doesn't have one).
- % Shutdown state is used to pass extra info about why start failed.
- ShutdownState = {error, Class, StackTop2, InitArgs},
- {stop, {shutdown, ShutdownReason}, ShutdownState}
- end;
-
-handle_info({Ref, Tuple}, State) when is_reference(Ref), is_tuple(Tuple) ->
- % Ignore responses from timed-out or retried ibrowse calls. Aliases in
- % Erlang 24 should help with this problem, so we should revisit this clause
- % when we update our minimum Erlang version to >= 24.
- {noreply, State}.
-
-terminate(
- normal,
- #rep_state{
- rep_details = #rep{id = RepId} = Rep,
- checkpoint_history = CheckpointHistory
- } = State
-) ->
- terminate_cleanup(State),
- couch_replicator_notifier:notify({finished, RepId, CheckpointHistory}),
- doc_update_completed(Rep, rep_stats(State));
-terminate(shutdown, #rep_state{rep_details = #rep{id = RepId}} = State) ->
- % Replication stopped via _scheduler_sup:terminate_child/1, which can
- % occur during regular scheduler operation or when a job is removed from
- % the scheduler.
- State1 =
- case do_checkpoint(State) of
- {ok, NewState} ->
- NewState;
- Error ->
- LogMsg = "~p : Failed last checkpoint. Job: ~p Error: ~p",
- couch_log:error(LogMsg, [?MODULE, RepId, Error]),
- State
- end,
- couch_replicator_notifier:notify({stopped, RepId, <<"stopped">>}),
- terminate_cleanup(State1);
-terminate({shutdown, max_backoff}, {error, InitArgs}) ->
- #rep{id = {BaseId, Ext} = RepId} = InitArgs,
- couch_stats:increment_counter([couch_replicator, failed_starts]),
- couch_log:warning("Replication `~s` reached max backoff ", [BaseId ++ Ext]),
- couch_replicator_notifier:notify({error, RepId, max_backoff});
-terminate({shutdown, {error, Error}}, {error, Class, Stack, InitArgs}) ->
- #rep{
- id = {BaseId, Ext} = RepId,
- source = Source0,
- target = Target0,
- doc_id = DocId,
- db_name = DbName
- } = InitArgs,
- Source = couch_replicator_api_wrap:db_uri(Source0),
- Target = couch_replicator_api_wrap:db_uri(Target0),
- RepIdStr = BaseId ++ Ext,
- Msg = "~p:~p: Replication ~s failed to start ~p -> ~p doc ~p:~p stack:~p",
- couch_log:error(Msg, [
- Class,
- Error,
- RepIdStr,
- Source,
- Target,
- DbName,
- DocId,
- Stack
- ]),
- couch_stats:increment_counter([couch_replicator, failed_starts]),
- couch_replicator_notifier:notify({error, RepId, Error});
-terminate({shutdown, max_backoff}, State) ->
- #rep_state{
- source_name = Source,
- target_name = Target,
- rep_details = #rep{id = {BaseId, Ext} = RepId}
- } = State,
- couch_log:error(
- "Replication `~s` (`~s` -> `~s`) reached max backoff",
- [BaseId ++ Ext, Source, Target]
- ),
- terminate_cleanup(State),
- couch_replicator_notifier:notify({error, RepId, max_backoff});
-terminate({shutdown, Reason}, State) ->
- % Unwrap so when reporting we don't have an extra {shutdown, ...} tuple
- % wrapped around the message
- terminate(Reason, State);
-terminate(Reason, State) ->
- #rep_state{
- source_name = Source,
- target_name = Target,
- rep_details = #rep{id = {BaseId, Ext} = RepId}
- } = State,
- couch_log:error(
- "Replication `~s` (`~s` -> `~s`) failed: ~s",
- [BaseId ++ Ext, Source, Target, to_binary(Reason)]
- ),
- terminate_cleanup(State),
- couch_replicator_notifier:notify({error, RepId, Reason}).
-
-terminate_cleanup(State) ->
- update_task(State),
- couch_replicator_api_wrap:db_close(State#rep_state.source),
- couch_replicator_api_wrap:db_close(State#rep_state.target).
-
-code_change(_OldVsn, #rep_state{} = State, _Extra) ->
- {ok, State}.
-
-format_status(_Opt, [_PDict, State]) ->
- #rep_state{
- source = Source,
- target = Target,
- rep_details = RepDetails,
- start_seq = StartSeq,
- source_seq = SourceSeq,
- committed_seq = CommitedSeq,
- current_through_seq = ThroughSeq,
- highest_seq_done = HighestSeqDone,
- session_id = SessionId
- } = state_strip_creds(State),
- #rep{
- id = RepId,
- options = Options,
- doc_id = DocId,
- db_name = DbName
- } = RepDetails,
- [
- {rep_id, RepId},
- {source, couch_replicator_api_wrap:db_uri(Source)},
- {target, couch_replicator_api_wrap:db_uri(Target)},
- {db_name, DbName},
- {doc_id, DocId},
- {options, Options},
- {session_id, SessionId},
- {start_seq, StartSeq},
- {source_seq, SourceSeq},
- {committed_seq, CommitedSeq},
- {current_through_seq, ThroughSeq},
- {highest_seq_done, HighestSeqDone}
- ].
-
-startup_jitter() ->
- Jitter = config:get_integer(
- "replicator",
- "startup_jitter",
- ?STARTUP_JITTER_DEFAULT
- ),
- couch_rand:uniform(erlang:max(1, Jitter)).
-
-headers_strip_creds([], Acc) ->
- lists:reverse(Acc);
-headers_strip_creds([{Key, Value0} | Rest], Acc) ->
- Value =
- case string:to_lower(Key) of
- "authorization" ->
- "****";
- _ ->
- Value0
- end,
- headers_strip_creds(Rest, [{Key, Value} | Acc]).
-
-httpdb_strip_creds(#httpdb{url = Url, headers = Headers} = HttpDb) ->
- HttpDb#httpdb{
- url = couch_util:url_strip_password(Url),
- headers = headers_strip_creds(Headers, [])
- };
-httpdb_strip_creds(LocalDb) ->
- LocalDb.
-
-rep_strip_creds(#rep{source = Source, target = Target} = Rep) ->
- Rep#rep{
- source = httpdb_strip_creds(Source),
- target = httpdb_strip_creds(Target)
- }.
-
-state_strip_creds(#rep_state{rep_details = Rep, source = Source, target = Target} = State) ->
- % #rep_state contains the source and target at the top level and also
- % in the nested #rep_details record
- State#rep_state{
- rep_details = rep_strip_creds(Rep),
- source = httpdb_strip_creds(Source),
- target = httpdb_strip_creds(Target)
- }.
-
-adjust_maxconn(Src = #httpdb{http_connections = 1}, RepId) ->
- Msg = "Adjusting minimum number of HTTP source connections to 2 for ~p",
- couch_log:notice(Msg, [RepId]),
- Src#httpdb{http_connections = 2};
-adjust_maxconn(Src, _RepId) ->
- Src.
-
--spec doc_update_triggered(#rep{}) -> ok.
-doc_update_triggered(#rep{db_name = null}) ->
- ok;
-doc_update_triggered(#rep{id = RepId, doc_id = DocId} = Rep) ->
- case couch_replicator_doc_processor:update_docs() of
- true ->
- couch_replicator_docs:update_triggered(Rep, RepId);
- false ->
- ok
- end,
- couch_log:notice(
- "Document `~s` triggered replication `~s`",
- [DocId, pp_rep_id(RepId)]
- ),
- ok.
-
--spec doc_update_completed(#rep{}, list()) -> ok.
-doc_update_completed(#rep{db_name = null}, _Stats) ->
- ok;
-doc_update_completed(
- #rep{
- id = RepId,
- doc_id = DocId,
- db_name = DbName,
- start_time = StartTime
- },
- Stats0
-) ->
- Stats = Stats0 ++ [{start_time, couch_replicator_utils:iso8601(StartTime)}],
- couch_replicator_docs:update_doc_completed(DbName, DocId, Stats),
- couch_log:notice(
- "Replication `~s` completed (triggered by `~s`)",
- [pp_rep_id(RepId), DocId]
- ),
- ok.
-
-do_last_checkpoint(
- #rep_state{
- seqs_in_progress = [],
- highest_seq_done = {_Ts, ?LOWEST_SEQ}
- } = State
-) ->
- {stop, normal, cancel_timer(State)};
-do_last_checkpoint(
- #rep_state{
- seqs_in_progress = [],
- highest_seq_done = Seq
- } = State
-) ->
- case do_checkpoint(State#rep_state{current_through_seq = Seq}) of
- {ok, NewState} ->
- couch_stats:increment_counter([couch_replicator, checkpoints, success]),
- {stop, normal, cancel_timer(NewState)};
- Error ->
- couch_stats:increment_counter([couch_replicator, checkpoints, failure]),
- {stop, Error, State}
- end.
-
-start_timer(State) ->
- After = State#rep_state.checkpoint_interval,
- case timer:apply_after(After, gen_server, cast, [self(), checkpoint]) of
- {ok, Ref} ->
- Ref;
- Error ->
- couch_log:error("Replicator, error scheduling checkpoint: ~p", [Error]),
- nil
- end.
-
-cancel_timer(#rep_state{timer = nil} = State) ->
- State;
-cancel_timer(#rep_state{timer = Timer} = State) ->
- {ok, cancel} = timer:cancel(Timer),
- State#rep_state{timer = nil}.
-
-init_state(Rep) ->
- #rep{
- id = {BaseId, _Ext},
- source = Src0,
- target = Tgt,
- options = Options,
- type = Type,
- view = View,
- start_time = StartTime,
- stats = ArgStats0
- } = Rep,
- % Adjust minimum number of http source connections to 2 to avoid deadlock
- Src = adjust_maxconn(Src0, BaseId),
- {ok, Source} = couch_replicator_api_wrap:db_open(Src),
- {CreateTargetParams} = get_value(create_target_params, Options, {[]}),
- {ok, Target} = couch_replicator_api_wrap:db_open(
- Tgt,
- get_value(create_target, Options, false),
- CreateTargetParams
- ),
-
- {ok, SourceInfo} = couch_replicator_api_wrap:get_db_info(Source),
- {ok, TargetInfo} = couch_replicator_api_wrap:get_db_info(Target),
-
- [SourceLog, TargetLog] = find_and_migrate_logs([Source, Target], Rep),
-
- {StartSeq0, History} = compare_replication_logs(SourceLog, TargetLog),
-
- ArgStats1 = couch_replicator_stats:new(ArgStats0),
- HistoryStats =
- case History of
- [{[_ | _] = HProps} | _] -> couch_replicator_stats:new(HProps);
- _ -> couch_replicator_stats:new()
- end,
- Stats = couch_replicator_stats:max_stats(ArgStats1, HistoryStats),
-
- StartSeq1 = get_value(since_seq, Options, StartSeq0),
- StartSeq = {0, StartSeq1},
-
- SourceSeq = get_value(<<"update_seq">>, SourceInfo, ?LOWEST_SEQ),
-
- #doc{body = {CheckpointHistory}} = SourceLog,
- State = #rep_state{
- rep_details = Rep,
- source_name = couch_replicator_api_wrap:db_uri(Source),
- target_name = couch_replicator_api_wrap:db_uri(Target),
- source = Source,
- target = Target,
- history = History,
- checkpoint_history = {[{<<"no_changes">>, true} | CheckpointHistory]},
- start_seq = StartSeq,
- current_through_seq = StartSeq,
- committed_seq = StartSeq,
- source_log = SourceLog,
- target_log = TargetLog,
- rep_starttime = StartTime,
- src_starttime = get_value(<<"instance_start_time">>, SourceInfo),
- tgt_starttime = get_value(<<"instance_start_time">>, TargetInfo),
- session_id = couch_uuids:random(),
- source_seq = SourceSeq,
- use_checkpoints = get_value(use_checkpoints, Options, true),
- checkpoint_interval = get_value(
- checkpoint_interval,
- Options,
- ?DEFAULT_CHECKPOINT_INTERVAL
- ),
- type = Type,
- view = View,
- stats = Stats
- },
- State#rep_state{timer = start_timer(State)}.
-
-find_and_migrate_logs(DbList, #rep{id = {BaseId, _}} = Rep) ->
- LogId = ?l2b(?LOCAL_DOC_PREFIX ++ BaseId),
- fold_replication_logs(DbList, ?REP_ID_VERSION, LogId, LogId, Rep, []).
-
-fold_replication_logs([], _Vsn, _LogId, _NewId, _Rep, Acc) ->
- lists:reverse(Acc);
-fold_replication_logs([Db | Rest] = Dbs, Vsn, LogId, NewId, Rep, Acc) ->
- case couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body]) of
- {error, <<"not_found">>} when Vsn > 1 ->
- OldRepId = couch_replicator_utils:replication_id(Rep, Vsn - 1),
- fold_replication_logs(
- Dbs,
- Vsn - 1,
- ?l2b(?LOCAL_DOC_PREFIX ++ OldRepId),
- NewId,
- Rep,
- Acc
- );
- {error, <<"not_found">>} ->
- fold_replication_logs(
- Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [#doc{id = NewId} | Acc]
- );
- {ok, Doc} when LogId =:= NewId ->
- fold_replication_logs(
- Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [Doc | Acc]
- );
- {ok, Doc} ->
- MigratedLog = #doc{id = NewId, body = Doc#doc.body},
- maybe_save_migrated_log(Rep, Db, MigratedLog, Doc#doc.id),
- fold_replication_logs(
- Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [MigratedLog | Acc]
- )
- end.
-
-maybe_save_migrated_log(Rep, Db, #doc{} = Doc, OldId) ->
- case get_value(use_checkpoints, Rep#rep.options, true) of
- true ->
- update_checkpoint(Db, Doc),
- Msg = "Migrated replication checkpoint. Db:~p ~p -> ~p",
- couch_log:notice(Msg, [httpdb_strip_creds(Db), OldId, Doc#doc.id]);
- false ->
- ok
- end.
-
-spawn_changes_manager(Parent, ChangesQueue, BatchSize) ->
- spawn_link(fun() ->
- changes_manager_loop_open(Parent, ChangesQueue, BatchSize, 1)
- end).
-
-changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts) ->
- receive
- {get_changes, From} ->
- case couch_work_queue:dequeue(ChangesQueue, BatchSize) of
- closed ->
- From ! {closed, self()};
- {ok, ChangesOrLastSeqs} ->
- ReportSeq =
- case lists:last(ChangesOrLastSeqs) of
- {last_seq, Seq} ->
- {Ts, Seq};
- #doc_info{high_seq = Seq} ->
- {Ts, Seq}
- end,
- Changes = lists:filter(
- fun
- (#doc_info{}) ->
- true;
- ({last_seq, _Seq}) ->
- false
- end,
- ChangesOrLastSeqs
- ),
- ok = gen_server:cast(Parent, {report_seq, ReportSeq}),
- From ! {changes, self(), Changes, ReportSeq}
- end,
- changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts + 1)
- end.
-
-do_checkpoint(#rep_state{use_checkpoints = false} = State) ->
- NewState = State#rep_state{checkpoint_history = {[{<<"use_checkpoints">>, false}]}},
- {ok, NewState};
-do_checkpoint(#rep_state{current_through_seq = Seq, committed_seq = Seq} = State) ->
- update_task(State),
- {ok, State};
-do_checkpoint(State) ->
- #rep_state{
- source_name = SourceName,
- target_name = TargetName,
- source = Source,
- target = Target,
- history = OldHistory,
- start_seq = {_, StartSeq},
- current_through_seq = {_Ts, NewSeq} = NewTsSeq,
- source_log = SourceLog,
- target_log = TargetLog,
- rep_starttime = ReplicationStartTime,
- src_starttime = SrcInstanceStartTime,
- tgt_starttime = TgtInstanceStartTime,
- stats = Stats,
- rep_details = #rep{options = Options},
- session_id = SessionId
- } = State,
- case commit_to_both(Source, Target) of
- {source_error, Reason} ->
- {checkpoint_commit_failure,
- <<"Failure on source commit: ", (to_binary(Reason))/binary>>};
- {target_error, Reason} ->
- {checkpoint_commit_failure,
- <<"Failure on target commit: ", (to_binary(Reason))/binary>>};
- {SrcInstanceStartTime, TgtInstanceStartTime} ->
- couch_log:notice(
- "recording a checkpoint for `~s` -> `~s` at source update_seq ~p",
- [SourceName, TargetName, NewSeq]
- ),
- LocalStartTime = calendar:now_to_local_time(ReplicationStartTime),
- StartTime = ?l2b(httpd_util:rfc1123_date(LocalStartTime)),
- EndTime = ?l2b(httpd_util:rfc1123_date()),
- NewHistoryEntry =
- {[
- {<<"session_id">>, SessionId},
- {<<"start_time">>, StartTime},
- {<<"end_time">>, EndTime},
- {<<"start_last_seq">>, StartSeq},
- {<<"end_last_seq">>, NewSeq},
- {<<"recorded_seq">>, NewSeq},
- {<<"missing_checked">>, couch_replicator_stats:missing_checked(Stats)},
- {<<"missing_found">>, couch_replicator_stats:missing_found(Stats)},
- {<<"docs_read">>, couch_replicator_stats:docs_read(Stats)},
- {<<"docs_written">>, couch_replicator_stats:docs_written(Stats)},
- {<<"doc_write_failures">>, couch_replicator_stats:doc_write_failures(Stats)}
- ]},
- BaseHistory =
- [
- {<<"session_id">>, SessionId},
- {<<"source_last_seq">>, NewSeq},
- {<<"replication_id_version">>, ?REP_ID_VERSION}
- ] ++
- case get_value(doc_ids, Options) of
- undefined ->
- [];
- _DocIds ->
- % backwards compatibility with the result of a replication by
- % doc IDs in versions 0.11.x and 1.0.x
- % TODO: deprecate (use same history format, simplify code)
- [
- {<<"start_time">>, StartTime},
- {<<"end_time">>, EndTime},
- {<<"docs_read">>, couch_replicator_stats:docs_read(Stats)},
- {<<"docs_written">>, couch_replicator_stats:docs_written(Stats)},
- {<<"doc_write_failures">>,
- couch_replicator_stats:doc_write_failures(Stats)}
- ]
- end,
- % limit history to 50 entries
- NewRepHistory = {
- BaseHistory ++
- [{<<"history">>, lists:sublist([NewHistoryEntry | OldHistory], 50)}]
- },
-
- try
- {SrcRevPos, SrcRevId} = update_checkpoint(
- Source, SourceLog#doc{body = NewRepHistory}, source
- ),
- {TgtRevPos, TgtRevId} = update_checkpoint(
- Target, TargetLog#doc{body = NewRepHistory}, target
- ),
- NewState = State#rep_state{
- checkpoint_history = NewRepHistory,
- committed_seq = NewTsSeq,
- source_log = SourceLog#doc{revs = {SrcRevPos, [SrcRevId]}},
- target_log = TargetLog#doc{revs = {TgtRevPos, [TgtRevId]}}
- },
- update_task(NewState),
- {ok, NewState}
- catch
- throw:{checkpoint_commit_failure, _} = Failure ->
- Failure
- end;
- {SrcInstanceStartTime, _NewTgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<
- "instance_start_time on target database has changed since last checkpoint."
- >>};
- {_NewSrcInstanceStartTime, TgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<
- "instance_start_time on source database has changed since last checkpoint."
- >>};
- {_NewSrcInstanceStartTime, _NewTgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<
- "instance_start_time on source and target database has changed since last checkpoint."
- >>}
- end.
-
-update_checkpoint(Db, Doc, DbType) ->
- try
- update_checkpoint(Db, Doc)
- catch
- throw:{checkpoint_commit_failure, Reason} ->
- throw(
- {checkpoint_commit_failure,
- <<"Error updating the ", (to_binary(DbType))/binary, " checkpoint document: ",
- (to_binary(Reason))/binary>>}
- )
- end.
-
-update_checkpoint(Db, #doc{id = LogId, body = LogBody} = Doc) ->
- try
- case couch_replicator_api_wrap:update_doc(Db, Doc, [delay_commit]) of
- {ok, PosRevId} ->
- PosRevId;
- {error, Reason} ->
- throw({checkpoint_commit_failure, Reason})
- end
- catch
- throw:conflict ->
- case (catch couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body])) of
- {ok, #doc{body = LogBody, revs = {Pos, [RevId | _]}}} ->
- % This means we were able to successfully update the checkpoint
- % doc in a previous attempt, but got a connection error (e.g. a
- % timeout) before receiving the success response. The request was
- % therefore retried and resulted in a conflict, as the revision we
- % sent is no longer the current one. We confirm this by verifying
- % that the doc body we just fetched is the same as the one we just
- % sent.
- {Pos, RevId};
- _ ->
- throw({checkpoint_commit_failure, conflict})
- end
- end.
-
-commit_to_both(Source, Target) ->
- % commit the src async
- ParentPid = self(),
- SrcCommitPid = spawn_link(
- fun() ->
- Result = (catch couch_replicator_api_wrap:ensure_full_commit(Source)),
- ParentPid ! {self(), Result}
- end
- ),
-
- % commit tgt sync
- TargetResult = (catch couch_replicator_api_wrap:ensure_full_commit(Target)),
-
- SourceResult =
- receive
- {SrcCommitPid, Result} ->
- unlink(SrcCommitPid),
- receive
- {'EXIT', SrcCommitPid, _} -> ok
- after 0 -> ok
- end,
- Result;
- {'EXIT', SrcCommitPid, Reason} ->
- {error, Reason}
- end,
- case TargetResult of
- {ok, TargetStartTime} ->
- case SourceResult of
- {ok, SourceStartTime} ->
- {SourceStartTime, TargetStartTime};
- SourceError ->
- {source_error, SourceError}
- end;
- TargetError ->
- {target_error, TargetError}
- end.
-
-compare_replication_logs(SrcDoc, TgtDoc) ->
- #doc{body = {RepRecProps}} = SrcDoc,
- #doc{body = {RepRecPropsTgt}} = TgtDoc,
- case
- get_value(<<"session_id">>, RepRecProps) ==
- get_value(<<"session_id">>, RepRecPropsTgt)
- of
- true ->
- % if the records have the same session id,
- % then we have a valid replication history
- OldSeqNum = get_value(<<"source_last_seq">>, RepRecProps, ?LOWEST_SEQ),
- OldHistory = get_value(<<"history">>, RepRecProps, []),
- {OldSeqNum, OldHistory};
- false ->
- SourceHistory = get_value(<<"history">>, RepRecProps, []),
- TargetHistory = get_value(<<"history">>, RepRecPropsTgt, []),
- couch_log:notice(
- "Replication records differ. "
- "Scanning histories to find a common ancestor.",
- []
- ),
- couch_log:debug(
- "Record on source:~p~nRecord on target:~p~n",
- [RepRecProps, RepRecPropsTgt]
- ),
- compare_rep_history(SourceHistory, TargetHistory)
- end.
-
-compare_rep_history(S, T) when S =:= [] orelse T =:= [] ->
- couch_log:notice("no common ancestry -- performing full replication", []),
- {?LOWEST_SEQ, []};
-compare_rep_history([{S} | SourceRest], [{T} | TargetRest] = Target) ->
- SourceId = get_value(<<"session_id">>, S),
- case has_session_id(SourceId, Target) of
- true ->
- RecordSeqNum = get_value(<<"recorded_seq">>, S, ?LOWEST_SEQ),
- couch_log:notice(
- "found a common replication record with source_seq ~p",
- [RecordSeqNum]
- ),
- {RecordSeqNum, SourceRest};
- false ->
- TargetId = get_value(<<"session_id">>, T),
- case has_session_id(TargetId, SourceRest) of
- true ->
- RecordSeqNum = get_value(<<"recorded_seq">>, T, ?LOWEST_SEQ),
- couch_log:notice(
- "found a common replication record with source_seq ~p",
- [RecordSeqNum]
- ),
- {RecordSeqNum, TargetRest};
- false ->
- compare_rep_history(SourceRest, TargetRest)
- end
- end.
-
-has_session_id(_SessionId, []) ->
- false;
-has_session_id(SessionId, [{Props} | Rest]) ->
- case get_value(<<"session_id">>, Props, nil) of
- SessionId ->
- true;
- _Else ->
- has_session_id(SessionId, Rest)
- end.
-
-get_pending_count(St) ->
- Rep = St#rep_state.rep_details,
- Timeout = get_value(connection_timeout, Rep#rep.options),
- TimeoutMicro = Timeout * 1000,
- case get(pending_count_state) of
- {LastUpdate, PendingCount} ->
- case timer:now_diff(os:timestamp(), LastUpdate) > TimeoutMicro of
- true ->
- NewPendingCount = get_pending_count_int(St),
- put(pending_count_state, {os:timestamp(), NewPendingCount}),
- NewPendingCount;
- false ->
- PendingCount
- end;
- undefined ->
- NewPendingCount = get_pending_count_int(St),
- put(pending_count_state, {os:timestamp(), NewPendingCount}),
- NewPendingCount
- end.
-
-get_pending_count_int(#rep_state{source = #httpdb{} = Db0} = St) ->
- {_, Seq} = St#rep_state.highest_seq_done,
- Db = Db0#httpdb{retries = 3},
- case (catch couch_replicator_api_wrap:get_pending_count(Db, Seq)) of
- {ok, Pending} ->
- Pending;
- _ ->
- null
- end;
-get_pending_count_int(#rep_state{source = Db} = St) ->
- {_, Seq} = St#rep_state.highest_seq_done,
- {ok, Pending} = couch_replicator_api_wrap:get_pending_count(Db, Seq),
- Pending.
-
-update_task(State) ->
- #rep_state{
- rep_details = #rep{id = JobId},
- current_through_seq = {_, ThroughSeq},
- highest_seq_done = {_, HighestSeq}
- } = State,
- Status =
- rep_stats(State) ++
- [
- {source_seq, HighestSeq},
- {through_seq, ThroughSeq}
- ],
- couch_replicator_scheduler:update_job_stats(JobId, Status),
- couch_task_status:update(Status).
-
-rep_stats(State) ->
- #rep_state{
- committed_seq = {_, CommittedSeq},
- stats = Stats
- } = State,
- [
- {revisions_checked, couch_replicator_stats:missing_checked(Stats)},
- {missing_revisions_found, couch_replicator_stats:missing_found(Stats)},
- {docs_read, couch_replicator_stats:docs_read(Stats)},
- {docs_written, couch_replicator_stats:docs_written(Stats)},
- {changes_pending, get_pending_count(State)},
- {doc_write_failures, couch_replicator_stats:doc_write_failures(Stats)},
- {checkpointed_source_seq, CommittedSeq}
- ].
-
-replication_start_error({unauthorized, DbUri}) ->
- {unauthorized, <<"unauthorized to access or create database ", DbUri/binary>>};
-replication_start_error({db_not_found, DbUri}) ->
- {db_not_found, <<"could not open ", DbUri/binary>>};
-replication_start_error(
- {http_request_failed, _Method, Url0, {error, {error, {conn_failed, {error, nxdomain}}}}}
-) ->
- Url = ?l2b(couch_util:url_strip_password(Url0)),
- {nxdomain, <<"could not resolve ", Url/binary>>};
-replication_start_error({http_request_failed, Method0, Url0, {error, {code, Code}}}) when
- is_integer(Code)
-->
- Url = ?l2b(couch_util:url_strip_password(Url0)),
- Method = ?l2b(Method0),
- {http_error_code, Code, <<Method/binary, " ", Url/binary>>};
-replication_start_error(Error) ->
- Error.
-
-log_replication_start(#rep_state{rep_details = Rep} = RepState) ->
- #rep{
- id = {BaseId, Ext},
- doc_id = DocId,
- db_name = DbName,
- options = Options
- } = Rep,
- Id = BaseId ++ Ext,
- Workers = get_value(worker_processes, Options),
- BatchSize = get_value(worker_batch_size, Options),
- #rep_state{
- % credentials already stripped
- source_name = Source,
- % credentials already stripped
- target_name = Target,
- session_id = Sid
- } = RepState,
- From =
- case DbName of
- ShardName when is_binary(ShardName) ->
- io_lib:format("from doc ~s:~s", [mem3:dbname(ShardName), DocId]);
- _ ->
- "from _replicate endpoint"
- end,
- Msg =
- "Starting replication ~s (~s -> ~s) ~s worker_procesess:~p"
- " worker_batch_size:~p session_id:~s",
- couch_log:notice(Msg, [Id, Source, Target, From, Workers, BatchSize, Sid]).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-replication_start_error_test() ->
- ?assertEqual(
- {unauthorized, <<
- "unauthorized to access or create database"
- " http://x/y"
- >>},
- replication_start_error({unauthorized, <<"http://x/y">>})
- ),
- ?assertEqual(
- {db_not_found, <<"could not open http://x/y">>},
- replication_start_error({db_not_found, <<"http://x/y">>})
- ),
- ?assertEqual(
- {nxdomain, <<"could not resolve http://x/y">>},
- replication_start_error(
- {http_request_failed, "GET", "http://x/y",
- {error, {error, {conn_failed, {error, nxdomain}}}}}
- )
- ),
- ?assertEqual(
- {http_error_code, 503, <<"GET http://x/y">>},
- replication_start_error({http_request_failed, "GET", "http://x/y", {error, {code, 503}}})
- ).
-
-scheduler_job_format_status_test() ->
- Source = <<"http://u:p@h1/d1">>,
- Target = <<"http://u:p@h2/d2">>,
- Rep = #rep{
- id = {"base", "+ext"},
- source = couch_replicator_docs:parse_rep_db(Source, [], []),
- target = couch_replicator_docs:parse_rep_db(Target, [], []),
- options = [{create_target, true}],
- doc_id = <<"mydoc">>,
- db_name = <<"mydb">>
- },
- State = #rep_state{
- rep_details = Rep,
- source = Rep#rep.source,
- target = Rep#rep.target,
- session_id = <<"a">>,
- start_seq = <<"1">>,
- source_seq = <<"2">>,
- committed_seq = <<"3">>,
- current_through_seq = <<"4">>,
- highest_seq_done = <<"5">>
- },
- Format = format_status(opts_ignored, [pdict, State]),
- ?assertEqual("http://h1/d1/", proplists:get_value(source, Format)),
- ?assertEqual("http://h2/d2/", proplists:get_value(target, Format)),
- ?assertEqual({"base", "+ext"}, proplists:get_value(rep_id, Format)),
- ?assertEqual([{create_target, true}], proplists:get_value(options, Format)),
- ?assertEqual(<<"mydoc">>, proplists:get_value(doc_id, Format)),
- ?assertEqual(<<"mydb">>, proplists:get_value(db_name, Format)),
- ?assertEqual(<<"a">>, proplists:get_value(session_id, Format)),
- ?assertEqual(<<"1">>, proplists:get_value(start_seq, Format)),
- ?assertEqual(<<"2">>, proplists:get_value(source_seq, Format)),
- ?assertEqual(<<"3">>, proplists:get_value(committed_seq, Format)),
- ?assertEqual(<<"4">>, proplists:get_value(current_through_seq, Format)),
- ?assertEqual(<<"5">>, proplists:get_value(highest_seq_done, Format)).
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_sup.erl b/src/couch_replicator/src/couch_replicator_scheduler_sup.erl
deleted file mode 100644
index 1d5104312..000000000
--- a/src/couch_replicator/src/couch_replicator_scheduler_sup.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_scheduler_sup).
-
--behaviour(supervisor).
-
-%% public api
--export([
- start_link/0,
- start_child/1,
- terminate_child/1
-]).
-
-%% supervisor api
--export([
- init/1
-]).
-
-%% includes
--include("couch_replicator.hrl").
-
-%% public functions
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-start_child(#rep{} = Rep) ->
- supervisor:start_child(?MODULE, [Rep]).
-
-terminate_child(Pid) ->
- supervisor:terminate_child(?MODULE, Pid).
-
-%% supervisor functions
-
-init(_Args) ->
- Start = {couch_replicator_scheduler_job, start_link, []},
- % A crashed job is not entitled to immediate restart.
- Restart = temporary,
- Shutdown = 5000,
- Type = worker,
- Modules = [couch_replicator_scheduler_job],
-
- RestartStrategy = simple_one_for_one,
- MaxR = 10,
- MaxT = 3,
-
- ChildSpec =
- {undefined, Start, Restart, Shutdown, Type, Modules},
- {ok, {{RestartStrategy, MaxR, MaxT}, [ChildSpec]}}.
diff --git a/src/couch_replicator/src/couch_replicator_share.erl b/src/couch_replicator/src/couch_replicator_share.erl
deleted file mode 100644
index 8c9fa029a..000000000
--- a/src/couch_replicator/src/couch_replicator_share.erl
+++ /dev/null
@@ -1,762 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% This module implements the "Fair Share" algorithm by Judy Kay and Piers
-% Lauder [1] and applies it to the scheduling of replication jobs.
-%
-% The main idea is that _replicator dbs can have a configurable number of
-% "shares" assigned to them. A share is an abstract quantity from 1 to 1000,
-% with a default of 100. Jobs from _replicator databases with more shares get a
-% proportionally higher chance to run than those from databases with fewer shares.
-%
-% Every scheduler cycle, running jobs are "charged" based on how much time they
-% spent running during that cycle. At the end of the cycle the accumulated
-% charges for each job, the number of shares configured, and the total number
-% of jobs in the pending queue from the same _replicator db, are used to
-% calculate new priority values for all the jobs. To match the algorithm from
-% the paper, jobs with lower priority values are the ones at the front of the
-% run queue and have a higher chance of running.
-%
-% Here is how charges, shares, and number of sibling jobs affect the
-% priority value:
-%
-% 1) Jobs from dbs with higher configured shares get assigned lower
-% priority values and so stay closer to the front of the queue.
-%
-% 2) Jobs from dbs with many other jobs (many siblings) get assigned a
-% higher priority value, so they get pushed further down the queue
-% and have a lower chance of running.
-%
-% 3) Jobs which run longer accumulate more charges and get assigned a
-% higher priority value and get to wait longer to run.
-%
-% In order to prevent job starvation, all job priorities are periodically
-% decayed (decreased). This effectively moves all the jobs towards the front of
-% the run queue. So, in effect, there are two competing processes: one
-% uniformly moves all jobs to the front, and the other throws them back in
-% proportion to those factors mentioned above. The speed of this uniform
-% priority decay is controlled by the priority_coeff parameter.
-%
-% In order to prevent jobs from low-share dbs from "cheating" by getting
-% deleted and immediately re-added, charges are accumulated using a
-% historically decayed usage value. The speed of the usage decay is controlled
-% by the `usage_coeff = 0.5` parameter.
-%
-% [1] : https://proteusmaster.urcf.drexel.edu/urcfwiki/images/KayLauderFairShare.pdf
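-%
-% As an illustrative sketch (not part of the paper or the description above;
-% the db names and numbers are made up), consider two dbs, each with a single
-% job that ran a full 60 second cycle (usage = 60000000 microseconds). With
-% the priority formula used further below, Priority = (Usage * NumJobs) / Shares^2:
-%
-%   db_a with 100 shares: 60000000 * 1 / (100 * 100) = 6000
-%   db_b with 200 shares: 60000000 * 1 / (200 * 200) = 1500
-%
-% so db_b's job ends up with the lower priority value and sits closer to the
-% front of the run queue.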
-
--module(couch_replicator_share).
-
--export([
- init/0,
- clear/0,
- update_shares/2,
- reset_shares/1,
- job_added/1,
- job_removed/1,
- update/3,
- priority/1,
- charge/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_replicator.hrl").
-
-% Usage coefficient decays historic usage every scheduling cycle. For example,
-% the usage value for a job that ran for 1 minute is 60000000 (i.e. microseconds
-% per minute); if the job then stops running, it will take about 26 cycles
-% (minutes) for it to decay to 0 and for the system to "forget" about it completely:
-%
-% trunc(60000000 * math:pow(0.5, 26)) = 0
-%
--define(DEFAULT_USAGE_COEFF, 0.5).
-
-% Priority coefficient decays all the job priorities such that they slowly
-% drift towards the front of the run queue. This coefficient defines a maximum
-% time window over which this algorithm would operate. For example, if this
-% value is too small (e.g. 0.1), after a few cycles quite a few jobs would end up
-% at priority 0, which would render this algorithm useless. The default value of
-% 0.98 is picked such that if a job ran for one scheduler cycle, then didn't
-% get to run for 7 hours, it would still have priority > 0. 7 hours was picked
-% as it is close enough to 8 hours, which is the default maximum error backoff
-% interval.
-%
-% Example calculation:
-% shares = 100
-% usage after 1 minute cycle run = 60000000
-% initial priority = 60000000 / (100 * 100) = 6000
-% trunc(6000 * math:pow(0.98, 431)) = 0
-% 431 / 60 ~= 7 hrs
-%
--define(DEFAULT_PRIORITY_COEFF, 0.98).
-
--define(MIN_SHARES, 1).
--define(MAX_SHARES, 1000).
--define(DEFAULT_SHARES, 100).
-
--define(SHARES, couch_replicator_shares).
--define(PRIORITIES, couch_replicator_priorities).
--define(USAGE, couch_replicator_usage).
--define(CHARGES, couch_replicator_stopped_usage).
--define(NUM_JOBS, couch_replicator_num_jobs).
-
-init() ->
- EtsOpts = [named_table, public],
- % {Key, Shares}
- ?SHARES = ets:new(?SHARES, EtsOpts),
- % {JobId, Priority}
- ?PRIORITIES = ets:new(?PRIORITIES, EtsOpts),
- % {Key, Usage}
- ?USAGE = ets:new(?USAGE, EtsOpts),
- % {Key, Charges}
- ?CHARGES = ets:new(?CHARGES, EtsOpts),
- % {Key, NumJobs}
- ?NUM_JOBS = ets:new(?NUM_JOBS, EtsOpts),
- lists:foreach(fun({K, V}) -> update_shares(K, V) end, get_config_shares()).
-
-clear() ->
- Tables = [?SHARES, ?PRIORITIES, ?USAGE, ?CHARGES, ?NUM_JOBS],
- lists:foreach(fun(T) -> catch ets:delete(T) end, Tables).
-
-% This should be called when the user updates the replicator.shares config section
-%
-update_shares(Key, Shares) when is_integer(Shares) ->
- ets:insert(?SHARES, {Key, bounded(Shares, ?MIN_SHARES, ?MAX_SHARES)}).
-
-% Called when the config value is deleted and shares are reset to the default
-% value.
-reset_shares(Key) ->
- ets:delete(?SHARES, Key).
-
-job_added(#job{} = Job) ->
- Key = key(Job),
- % If the entry is not present {Key, 0} is used as the default
- ets:update_counter(?NUM_JOBS, Key, 1, {Key, 0}),
- % Update job's priority as if it ran during one scheduler cycle. This is so
- % new jobs don't get to be at priority 0 (highest).
- update_priority(Job).
-
-job_removed(#job{} = Job) ->
- Key = key(Job),
- ets:delete(?PRIORITIES, Job#job.id),
- case ets:update_counter(?NUM_JOBS, Key, -1, {Key, 0}) of
- N when is_integer(N), N =< 0 ->
- ets:delete(?NUM_JOBS, Key);
- N when is_integer(N), N > 0 ->
- ok
- end,
- ok.
-
-% This is the main algorithm update function. It should be called during each
-% rescheduling cycle with a list of running jobs, the interval from the
-% scheduler (in milliseconds), and the current timestamp.
-%
-% This function does all three main steps as described in [1].
-%
-% 1. Update usage from all the charges in the last scheduling cycle
-%
-% 2. Uniformly decay all job priorities
-%
-% 3. Update priorities for all the running jobs based on usage and number of
-% sibling jobs.
-%
-update(RunningJobs, Interval, {_, _, _} = Now) ->
- lists:foreach(fun(Job) -> charge(Job, Interval, Now) end, RunningJobs),
- update_usage(),
- decay_priorities(),
- lists:foreach(fun(Job) -> update_priority(Job) end, RunningJobs).
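-
-% An illustrative, hypothetical call from a scheduling cycle (the real caller
-% and its variables live in the scheduler module; only the 60000 ms interval
-% mirrors the tests below):
-%
-%   couch_replicator_share:update(RunningJobs, 60000, os:timestamp())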
-
-priority(JobId) ->
- % Not found means it was removed because its value was 0
- case ets:lookup(?PRIORITIES, JobId) of
- [{_, Priority}] -> Priority;
- [] -> 0
- end.
-
-charge(#job{pid = undefined}, _, _) ->
- 0;
-charge(#job{} = Job, Interval, {_, _, _} = Now) when is_integer(Interval) ->
- Key = key(Job),
- Charges = job_charges(Job, Interval, Now),
- % If the entry is not present {Key, 0} is used as the default
- ets:update_counter(?CHARGES, Key, Charges, {Key, 0}).
-
-usage(Key) ->
- case ets:lookup(?USAGE, Key) of
- [{_, Usage}] -> Usage;
- [] -> 0
- end.
-
-num_jobs(Key) ->
- case ets:lookup(?NUM_JOBS, Key) of
- [{_, NumJobs}] -> NumJobs;
- [] -> 0
- end.
-
-shares(Key) ->
- case ets:lookup(?SHARES, Key) of
- [{_, Shares}] -> Shares;
- [] -> ?DEFAULT_SHARES
- end.
-
-% In [1] this is described in the "Decay of Process Priorities" section
-%
-decay_priorities() ->
- decay(?PRIORITIES, priority_coeff()),
- % If priority becomes 0, it's removed. When looking it up, if it
- % is missing we assume it is 0
- clear_zero(?PRIORITIES).
-
-% This is the main part of the algorithm. In [1] it is described in the
-% "Priority Adjustment" section.
-%
-update_priority(#job{} = Job) ->
- Id = Job#job.id,
- Key = key(Job),
- Shares = shares(Key),
- Priority = (usage(Key) * num_jobs(Key)) / (Shares * Shares),
- % If the entry is not present {Id, 0} is used as the default
- ets:update_counter(?PRIORITIES, Id, trunc(Priority), {Id, 0}).
-
-% This is the "User-Level Scheduling" part from [1]
-%
-update_usage() ->
- decay(?USAGE, usage_coeff()),
- clear_zero(?USAGE),
- ets:foldl(
- fun({Key, Charges}, _) ->
- % If the entry is not present {Key, 0} is used as the default
- ets:update_counter(?USAGE, Key, Charges, {Key, 0})
- end,
- 0,
- ?CHARGES
- ),
- % Start each interval with a fresh charges table
- ets:delete_all_objects(?CHARGES).
-
-% Private helper functions
-
-decay(Ets, Coeff) when is_atom(Ets) ->
- % Use trunc to ensure the result stays an integer in order for
- % ets:update_counter to work properly. It throws a badarg otherwise.
- Head = {'$1', '$2'},
- Result = {{'$1', {trunc, {'*', '$2', {const, Coeff}}}}},
- ets:select_replace(Ets, [{Head, [], [Result]}]).
-
-clear_zero(Ets) when is_atom(Ets) ->
- ets:select_delete(Ets, [{{'_', '$1'}, [{'=<', '$1', 0}], [true]}]).
-
-key(#job{} = Job) ->
- Rep = Job#job.rep,
- case is_binary(Rep#rep.db_name) of
- true -> mem3:dbname(Rep#rep.db_name);
- false -> (Rep#rep.user_ctx)#user_ctx.name
- end.
-
-% Jobs are charged based on the amount of time the job was running during the
-% last scheduling interval. The time units used are microseconds in order to
-% have large enough usage values so that when the priority is calculated the
-% result won't be rounded down to 0 too easily. The formula for the priority
-% calculation is:
-%
-% Priority = (Usage * NumJobs) / Shares^2
-%
-% Then in the worst case of a single job in the db, running for only one
-% second, with 1000 (max) shares, the priority would be:
-%
-% 1000000 * 1 / (1000^2) = 1
-%
-job_charges(#job{} = Job, IntervalMSec, {_, _, _} = Now) ->
- TimeRunning = timer:now_diff(Now, last_started(Job)),
- IntervalUSec = IntervalMSec * 1000,
- bounded(TimeRunning, 0, IntervalUSec).
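-
-% For illustration (numbers made up): with a 60000 millisecond interval
-% (IntervalUSec = 60000000), a job that has been running for 30 seconds is
-% charged bounded(30000000, 0, 60000000) = 30000000 microseconds, while a job
-% running longer than the interval is clamped to 60000000.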
-
-last_started(#job{} = Job) ->
- case lists:keyfind(started, 1, Job#job.history) of
- % In case the user set the max history too low
- false -> {0, 0, 0};
- {started, When} -> When
- end.
-
-bounded(Val, Min, Max) ->
- max(Min, min(Max, Val)).
-
-% Config helper functions
-
-get_config_shares() ->
- lists:map(
- fun({K, V}) ->
- {list_to_binary(K), int_val(V, ?DEFAULT_SHARES)}
- end,
- config:get("replicator.shares")
- ).
-
-priority_coeff() ->
- % This is the K2 coefficient from [1]
- Default = ?DEFAULT_PRIORITY_COEFF,
- Val = float_val(config:get("replicator", "priority_coeff"), Default),
- bounded(Val, 0.0, 1.0).
-
-usage_coeff() ->
- % This is the K1 coefficient from [1]
- Default = ?DEFAULT_USAGE_COEFF,
- Val = float_val(config:get("replicator", "usage_coeff"), Default),
- bounded(Val, 0.0, 1.0).
-
-int_val(Str, Default) when is_list(Str) ->
- try list_to_integer(Str) of
- Val -> Val
- catch
- error:badarg ->
- Default
- end.
-
-float_val(undefined, Default) ->
- Default;
-float_val(Str, Default) when is_list(Str) ->
- try list_to_float(Str) of
- Val -> Val
- catch
- error:badarg ->
- Default
- end.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_replicator/test/eunit/couch_replicator_test.hrl").
-
--define(DB1, <<"db1">>).
--define(DB2, <<"db2">>).
--define(DB3, <<"db3">>).
--define(J1, <<"j1">>).
--define(J2, <<"j2">>).
--define(J3, <<"j3">>).
-
-fair_share_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF_FE(init_works),
- ?TDEF_FE(shares_are_updated_and_reset),
- ?TDEF_FE(jobs_are_added_and_removed),
- ?TDEF_FE(can_fetch_job_priority),
- ?TDEF_FE(jobs_are_charged),
- ?TDEF_FE(usage_is_updated),
- ?TDEF_FE(priority_coefficient_works),
- ?TDEF_FE(priority_decays_when_jobs_stop_running),
- ?TDEF_FE(priority_increases_when_jobs_run),
- ?TDEF_FE(two_dbs_equal_shares_equal_number_of_jobs),
- ?TDEF_FE(two_dbs_unequal_shares_equal_number_of_jobs),
- ?TDEF_FE(two_dbs_equal_shares_unequal_number_of_jobs),
- ?TDEF_FE(two_dbs_unequal_shares_unequal_number_of_jobs),
- ?TDEF_FE(three_dbs_equal_shares_equal_number_of_jobs),
- ?TDEF_FE(three_dbs_unequal_shares_equal_number_of_jobs),
- ?TDEF_FE(three_dbs_equal_shares_unequal_number_of_jobs),
- ?TDEF_FE(three_dbs_unequal_shares_unequal_number_of_jobs)
- ]
- }
- }.
-
-setup_all() ->
- test_util:start_couch().
-
-teardown_all(Ctx) ->
- config_delete("priority_coeff"),
- config_delete("usage_coeff"),
- config_shares_delete(),
- test_util:stop_couch(Ctx).
-
-setup() ->
- init(),
- ok.
-
-teardown(_) ->
- clear(),
- config_delete("priority_coeff"),
- config_delete("usage_coeff"),
- config_shares_delete().
-
-init_works(_) ->
- Tables = [?SHARES, ?PRIORITIES, ?USAGE, ?CHARGES, ?NUM_JOBS],
- [?assert(is_list(ets:info(T))) || T <- Tables],
- ?assertEqual(#{}, tab2map(?SHARES)),
-
- clear(),
- [?assertEqual(undefined, ets:info(T)) || T <- Tables],
-
- config_share_set("db1", "200"),
- init(),
- ?assertEqual(200, shares(?DB1)),
- ?assertEqual(#{?DB1 => 200}, tab2map(?SHARES)).
-
-shares_are_updated_and_reset(_) ->
- ?assertEqual(#{}, tab2map(?SHARES)),
-
- update_shares(?DB1, 42),
- ?assertEqual(42, shares(?DB1)),
-
- reset_shares(?DB1),
- ?assertEqual(100, shares(?DB1)),
- ?assertEqual(#{}, tab2map(?SHARES)),
-
- % min shares
- update_shares(?DB1, 0),
- ?assertEqual(1, shares(?DB1)),
-
- % max shares
- update_shares(?DB1, 1001),
- ?assertEqual(1000, shares(?DB1)).
-
-jobs_are_added_and_removed(_) ->
- job_added(job(?J1, ?DB1)),
- ?assertEqual(1, num_jobs(?DB1)),
- ?assertEqual(#{?J1 => 0}, tab2map(?PRIORITIES)),
-
- job_added(job(?J2, ?DB1)),
- ?assertEqual(2, num_jobs(?DB1)),
- ?assertEqual(#{?J1 => 0, ?J2 => 0}, tab2map(?PRIORITIES)),
-
- job_added(job(?J3, ?DB2)),
- ?assertEqual(1, num_jobs(?DB2)),
- ?assertEqual(#{?J1 => 0, ?J2 => 0, ?J3 => 0}, tab2map(?PRIORITIES)),
-
- job_removed(job(?J1, ?DB1)),
- ?assertEqual(1, num_jobs(?DB1)),
- ?assertEqual(#{?J2 => 0, ?J3 => 0}, tab2map(?PRIORITIES)),
-
- job_removed(job(?J3, ?DB2)),
- ?assertEqual(0, num_jobs(?DB2)),
- ?assertEqual(0, priority(?J3)),
-
- job_removed(job(?J2, ?DB1)),
- ?assertEqual(0, num_jobs(?DB2)),
- ?assertEqual(#{}, tab2map(?NUM_JOBS)),
- ?assertEqual(0, priority(?J2)),
- ?assertEqual(#{}, tab2map(?PRIORITIES)).
-
-can_fetch_job_priority(_) ->
- job_added(job(?J1, ?DB1)),
- ?assertEqual(0, priority(?J1)),
-
- ets:insert(?PRIORITIES, {?J1, 42}),
- ?assertEqual(42, priority(?J1)),
-
- ets:delete(?PRIORITIES, ?J1),
- ?assertEqual(0, priority(?J1)).
-
-jobs_are_charged(_) ->
- Job1 = running_job(?J1, ?DB1),
- job_added(Job1),
- ?assertEqual(#{}, tab2map(?CHARGES)),
-
- charge(Job1, 1000, {0, 1, 0}),
- ?assertEqual(#{?DB1 => 1000000}, tab2map(?CHARGES)),
-
- % Stopped jobs are not charged
- charge(stop(Job1), 1000, {0, 1, 0}),
- ?assertEqual(#{?DB1 => 1000000}, tab2map(?CHARGES)),
-
- % Only charge up to one interval's worth even if job ran longer
- charge(Job1, 1000, {0, 5, 0}),
- ?assertEqual(#{?DB1 => 2000000}, tab2map(?CHARGES)),
-
- % Charges are accumulated from jobs in same db
- Job2 = running_job(?J2, ?DB1),
- job_added(Job2),
- charge(Job2, 1000, {0, 0, 1}),
- ?assertEqual(#{?DB1 => 2000001}, tab2map(?CHARGES)),
-
- % Charges are not cleared if jobs are removed
- job_removed(Job1),
- job_removed(Job2),
- ?assertEqual(#{?DB1 => 2000001}, tab2map(?CHARGES)).
-
-usage_is_updated(_) ->
- Job = running_job(?J1, ?DB1),
- job_added(Job),
-
- charge(Job, 60000, {0, 60, 0}),
- update_usage(),
- ?assertEqual(60000000, usage(?DB1)),
-
- % Charges table is cleared after usage is updated
- ?assertEqual(#{}, tab2map(?CHARGES)),
-
- % Check that usage decay works
- config_set("usage_coeff", "0.2"),
- update_usage(),
- ?assertEqual(12000000, usage(?DB1)),
-
- config_set("usage_coeff", "0.5"),
- update_usage(),
- ?assertEqual(6000000, usage(?DB1)),
-
- % Check that function both decays and updates from charges
- charge(Job, 60000, {0, 60, 0}),
- update_usage(),
- ?assertEqual(63000000, usage(?DB1)),
-
- % Usage eventually decays to 0 and is removed from the table
- [update_usage() || _ <- lists:seq(1, 100)],
- ?assertEqual(0, usage(?DB1)),
- ?assertEqual(#{}, tab2map(?USAGE)).
-
-priority_coefficient_works(_) ->
- job_added(job(?J1, ?DB1)),
- ets:insert(?PRIORITIES, {?J1, 1000}),
-
- config_set("priority_coeff", "0.8"),
- decay_priorities(),
- ?assertEqual(800, priority(?J1)),
-
- config_set("priority_coeff", "0.5"),
- decay_priorities(),
- ?assertEqual(400, priority(?J1)),
-
- % If non-float junk value is set then the default is used
- config_set("priority_coeff", "junk"),
- decay_priorities(),
- ?assertEqual(392, priority(?J1)),
-
- % Clipped to 1.0 max
- config_set("priority_coeff", "1.1"),
- decay_priorities(),
- ?assertEqual(392, priority(?J1)),
-
- % Clipped to 0.0 min and removed when =< 0
- config_set("priority_coeff", "-0.1"),
- decay_priorities(),
- ?assertEqual(0, priority(?J1)),
- ?assertEqual(#{}, tab2map(?PRIORITIES)).
-
-priority_decays_when_jobs_stop_running(_) ->
- Job = running_job(?J1, ?DB1),
- job_added(Job),
-
- % Ran for one cycle then stop
- {[], Pending} = reschedule(1, {[Job], []}),
-
- % Priority is non-0 initially
- ?assert(priority(?J1) > 0),
-
- % Priority decays to 0 after some cycles
- [reschedule(0, {[], Pending}) || _ <- lists:seq(1, 500)],
- ?assertEqual(0, priority(?J1)).
-
-priority_increases_when_jobs_run(_) ->
- Job = running_job(?J1, ?DB1),
- job_added(Job),
-
- Running = [Job],
- reschedule(0, {Running, []}),
- P1 = priority(?J1),
- ?assert(P1 > 0),
-
- % Priority increases
- reschedule(0, {Running, []}),
- P2 = priority(?J1),
- ?assert(P2 > P1),
-
- % Additive priority increase is balanced out by priority decay
- [reschedule(0, {Running, []}) || _ <- lists:seq(1, 500)],
- Pn = priority(?J1),
- ?assert(Pn > P2),
-
- reschedule(0, {Running, []}),
- Pm = priority(?J1),
- ?assertEqual(Pn, Pm).
-
-two_dbs_equal_shares_equal_number_of_jobs(_) ->
- update_shares(?DB1, 100),
- update_shares(?DB2, 100),
- Jobs = jobs(#{?DB1 => {25, 75}, ?DB2 => {25, 75}}),
- #{?DB1 := Db1, ?DB2 := Db2} = run_scheduler(1000, 10, Jobs),
- ?assert(49 =< Db1 andalso Db1 =< 51),
- ?assert(49 =< Db2 andalso Db2 =< 51).
-
-two_dbs_unequal_shares_equal_number_of_jobs(_) ->
- update_shares(?DB1, 100),
- update_shares(?DB1, 900),
- Jobs = jobs(#{?DB1 => {25, 75}, ?DB2 => {25, 75}}),
- #{?DB1 := Db1, ?DB2 := Db2} = run_scheduler(1000, 10, Jobs),
- ?assert(89 =< Db1 andalso Db1 =< 91),
- ?assert(9 =< Db2 andalso Db2 =< 11).
-
-two_dbs_equal_shares_unequal_number_of_jobs(_) ->
- update_shares(?DB1, 100),
- update_shares(?DB2, 100),
- Jobs = jobs(#{?DB1 => {25, 25}, ?DB2 => {25, 125}}),
- #{?DB1 := Db1, ?DB2 := Db2} = run_scheduler(1000, 10, Jobs),
- ?assert(49 =< Db1 andalso Db1 =< 51),
- ?assert(49 =< Db2 andalso Db2 =< 51).
-
-two_dbs_unequal_shares_unequal_number_of_jobs(_) ->
- update_shares(?DB1, 1),
- update_shares(?DB2, 100),
- Jobs = jobs(#{?DB1 => {25, 25}, ?DB2 => {25, 125}}),
- #{?DB1 := Db1, ?DB2 := Db2} = run_scheduler(1000, 10, Jobs),
- ?assert(0 =< Db1 andalso Db1 =< 2),
- ?assert(98 =< Db2 andalso Db2 =< 100).
-
-three_dbs_equal_shares_equal_number_of_jobs(_) ->
- update_shares(?DB1, 100),
- update_shares(?DB2, 100),
- update_shares(?DB3, 100),
- Jobs = jobs(#{?DB1 => {25, 75}, ?DB2 => {25, 75}, ?DB3 => {25, 75}}),
- #{?DB1 := Db1, ?DB2 := Db2, ?DB3 := Db3} = run_scheduler(1000, 10, Jobs),
- ?assert(32 =< Db1 andalso Db1 =< 34),
- ?assert(32 =< Db2 andalso Db2 =< 34),
- ?assert(32 =< Db3 andalso Db3 =< 34).
-
-three_dbs_unequal_shares_equal_number_of_jobs(_) ->
- update_shares(?DB1, 100),
- update_shares(?DB2, 700),
- update_shares(?DB3, 200),
- Jobs = jobs(#{?DB1 => {25, 75}, ?DB2 => {25, 75}, ?DB3 => {25, 75}}),
- #{?DB1 := Db1, ?DB2 := Db2, ?DB3 := Db3} = run_scheduler(1000, 10, Jobs),
- ?assert(9 =< Db1 andalso Db1 =< 11),
- ?assert(69 =< Db2 andalso Db2 =< 71),
- ?assert(19 =< Db3 andalso Db3 =< 21).
-
-three_dbs_equal_shares_unequal_number_of_jobs(_) ->
- update_shares(?DB1, 100),
- update_shares(?DB2, 100),
- update_shares(?DB3, 100),
- Jobs = jobs(#{?DB1 => {25, 25}, ?DB2 => {25, 100}, ?DB3 => {25, 75}}),
- #{?DB1 := Db1, ?DB2 := Db2, ?DB3 := Db3} = run_scheduler(1000, 10, Jobs),
- ?assert(32 =< Db1 andalso Db1 =< 34),
- ?assert(32 =< Db2 andalso Db2 =< 34),
- ?assert(32 =< Db3 andalso Db3 =< 34).
-
-three_dbs_unequal_shares_unequal_number_of_jobs(_) ->
- update_shares(?DB1, 1000),
- update_shares(?DB2, 100),
- update_shares(?DB3, 1),
- Jobs = jobs(#{?DB1 => {25, 100}, ?DB2 => {25, 125}, ?DB3 => {25, 875}}),
- #{?DB1 := Db1, ?DB2 := Db2, ?DB3 := Db3} = run_scheduler(1000, 10, Jobs),
- ?assert(87 =< Db1 andalso Db1 =< 89),
- ?assert(9 =< Db2 andalso Db2 =< 11),
- ?assert(2 =< Db3 andalso Db3 =< 4).
-
-config_set(K, V) ->
- config:set("replicator", K, V, _Persist = false).
-
-config_delete(K) ->
- config:delete("replicator", K, _Persist = false).
-
-config_share_set(K, V) ->
- config:set("replicator.shares", K, V, _Persist = false).
-
-config_shares_delete() ->
- [
- config:delete("replicator.shares", K, _Persist = false)
- || {K, _} <- config:get("replicator.shares")
- ].
-
-tab2map(T) when is_atom(T) ->
- maps:from_list(ets:tab2list(T)).
-
-job(rand, Db) ->
- job(rand:uniform(1 bsl 59), Db);
-job(Id, Db) ->
- Job = #job{
- id = Id,
- rep = #rep{
- db_name = Db,
- user_ctx = #user_ctx{}
- }
- },
- stop(Job).
-
-running_job(Id, Db) ->
- run(job(Id, Db)).
-
-run(#job{} = Job) ->
- Job#job{
- pid = list_to_pid("<0.9999.999>"),
- history = [{started, {0, 0, 0}}, {added, {0, 0, 0}}]
- }.
-
-stop(#job{} = Job) ->
- Job#job{
- pid = undefined,
- history = [{added, {0, 0, 0}}]
- }.
-
-% Simple scheduler simulator. Start and stop N jobs and do the
-% accounting steps. Return a new list of running and pending jobs. If
-% N is 0 then jobs which were running stay running and jobs which were
-% pending stay pending.
-%
-reschedule(N, {Running, Pending}) ->
- update(Running, 60000, {0, 60, 0}),
-
- RunPr = [{priority(Job#job.id), Job} || Job <- Running],
- StopPr = [{priority(Job#job.id), Job} || Job <- Pending],
-
- {_, Running1} = lists:unzip(lists:reverse(lists:sort(RunPr))),
- {_, Pending1} = lists:unzip(lists:sort(StopPr)),
-
- ToStop = lists:sublist(Running1, N),
- ToStart = lists:sublist(Pending1, N),
-
- Running2 = [run(Job) || Job <- ToStart] ++ Running1 -- ToStop,
- Pending2 = [stop(Job) || Job <- ToStop] ++ Pending1 -- ToStart,
-
- {Running2, Pending2}.
-
-% Run a few scheduling cycles and calculate usage percentage for each db
-%
-run_scheduler(Cycles, Churn, Jobs0) ->
- Acc0 = {#{}, Jobs0},
-
- {Sum, _} = lists:foldl(
- fun(_CycleCnt, {UsageAcc, {Running, _} = Jobs}) ->
- UsageAcc1 = lists:foldl(
- fun(#job{} = Job, Acc) ->
- Db = Job#job.rep#rep.db_name,
- maps:update_with(Db, fun(V) -> V + 1 end, 0, Acc)
- end,
- UsageAcc,
- Running
- ),
- {UsageAcc1, reschedule(Churn, Jobs)}
- end,
- Acc0,
- lists:seq(1, Cycles)
- ),
-
- Total = maps:fold(fun(_, V, Acc) -> Acc + V end, 0, Sum),
- maps:map(fun(_Db, V) -> round(V / Total * 100) end, Sum).
-
-% Dbs = #{Db => {RunningCount, PendingCount}
-%
-jobs(#{} = Dbs) ->
- maps:fold(
- fun(Db, {RCnt, PCnt}, {Running, Pending}) ->
- RJobs = [running_job(rand, Db) || _ <- lists:seq(1, RCnt)],
- PJobs = [job(rand, Db) || _ <- lists:seq(1, PCnt)],
- [job_added(Job) || Job <- RJobs ++ PJobs],
- {Running ++ RJobs, Pending ++ PJobs}
- end,
- {[], []},
- Dbs
- ).
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_stats.erl b/src/couch_replicator/src/couch_replicator_stats.erl
deleted file mode 100644
index e1f23a1bc..000000000
--- a/src/couch_replicator/src/couch_replicator_stats.erl
+++ /dev/null
@@ -1,87 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_stats).
-
--export([
- new/0,
- new/1,
- get/2,
- increment/2,
- sum_stats/2,
- max_stats/2
-]).
-
--export([
- missing_checked/1,
- missing_found/1,
- docs_read/1,
- docs_written/1,
- doc_write_failures/1
-]).
-
-new() ->
- orddict:new().
-
-new(Initializers0) when is_list(Initializers0) ->
- Initializers1 = lists:filtermap(fun fmap/1, Initializers0),
- orddict:from_list(Initializers1).
-
-missing_checked(Stats) ->
- get(missing_checked, Stats).
-
-missing_found(Stats) ->
- get(missing_found, Stats).
-
-docs_read(Stats) ->
- get(docs_read, Stats).
-
-docs_written(Stats) ->
- get(docs_written, Stats).
-
-doc_write_failures(Stats) ->
- get(doc_write_failures, Stats).
-
-get(Field, Stats) ->
- case orddict:find(Field, Stats) of
- {ok, Value} ->
- Value;
- error ->
- 0
- end.
-
-increment(Field, Stats) ->
- orddict:update_counter(Field, 1, Stats).
-
-sum_stats(S1, S2) ->
- orddict:merge(fun(_, V1, V2) -> V1 + V2 end, S1, S2).
-
-max_stats(S1, S2) ->
- orddict:merge(fun(_, V1, V2) -> max(V1, V2) end, S1, S2).
-
-% Handle initializing from a status object, which uses the same values but
-% different field names, as well as from ejson props from the checkpoint
-% history
-%
-fmap({missing_found, _}) -> true;
-fmap({missing_revisions_found, V}) -> {true, {missing_found, V}};
-fmap({<<"missing_found">>, V}) -> {true, {missing_found, V}};
-fmap({missing_checked, _}) -> true;
-fmap({revisions_checked, V}) -> {true, {missing_checked, V}};
-fmap({<<"missing_checked">>, V}) -> {true, {missing_checked, V}};
-fmap({docs_read, _}) -> true;
-fmap({<<"docs_read">>, V}) -> {true, {docs_read, V}};
-fmap({docs_written, _}) -> true;
-fmap({<<"docs_written">>, V}) -> {true, {docs_written, V}};
-fmap({doc_write_failures, _}) -> true;
-fmap({<<"doc_write_failures">>, V}) -> {true, {doc_write_failures, V}};
-fmap({_, _}) -> false.
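-
-% Illustrative only (not from the original module): how fmap/1 normalizes
-% mixed initializers when building a stats orddict; the values are made up.
-% The unrecognized key is dropped, and revisions_checked / <<"docs_read">>
-% are mapped to their canonical names:
-%
-%   1> couch_replicator_stats:new([{revisions_checked, 5},
-%                                  {<<"docs_read">>, 2},
-%                                  {unknown, 1}]).
-%   [{docs_read,2},{missing_checked,5}]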
diff --git a/src/couch_replicator/src/couch_replicator_sup.erl b/src/couch_replicator/src/couch_replicator_sup.erl
deleted file mode 100644
index 33eee8659..000000000
--- a/src/couch_replicator/src/couch_replicator_sup.erl
+++ /dev/null
@@ -1,40 +0,0 @@
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
- Children = [
- {couch_replication_event, {gen_event, start_link, [{local, couch_replication}]}, permanent,
- brutal_kill, worker, dynamic},
- {couch_replicator_clustering, {couch_replicator_clustering, start_link, []}, permanent,
- brutal_kill, worker, [couch_replicator_clustering]},
- {couch_replicator_connection, {couch_replicator_connection, start_link, []}, permanent,
- brutal_kill, worker, [couch_replicator_connection]},
- {couch_replicator_rate_limiter, {couch_replicator_rate_limiter, start_link, []}, permanent,
- brutal_kill, worker, [couch_replicator_rate_limiter]},
- {couch_replicator_scheduler_sup, {couch_replicator_scheduler_sup, start_link, []},
- permanent, infinity, supervisor, [couch_replicator_scheduler_sup]},
- {couch_replicator_scheduler, {couch_replicator_scheduler, start_link, []}, permanent,
- brutal_kill, worker, [couch_replicator_scheduler]},
- {couch_replicator_doc_processor, {couch_replicator_doc_processor, start_link, []},
- permanent, brutal_kill, worker, [couch_replicator_doc_processor]},
- {couch_replicator_db_changes, {couch_replicator_db_changes, start_link, []}, permanent,
- brutal_kill, worker, [couch_multidb_changes]}
- ],
- {ok, {{rest_for_one, 10, 1}, Children}}.
diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl
deleted file mode 100644
index b2bc34078..000000000
--- a/src/couch_replicator/src/couch_replicator_utils.erl
+++ /dev/null
@@ -1,572 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_utils).
-
--export([
- parse_rep_doc/2,
- replication_id/2,
- sum_stats/2,
- is_deleted/1,
- rep_error_to_binary/1,
- get_json_value/2,
- get_json_value/3,
- pp_rep_id/1,
- iso8601/1,
- filter_state/3,
- normalize_rep/1,
- ejson_state_info/1,
- get_basic_auth_creds/1,
- remove_basic_auth_creds/1,
- normalize_basic_auth/1
-]).
-
--include_lib("ibrowse/include/ibrowse.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("couch_replicator.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
--import(couch_util, [
- get_value/2,
- get_value/3
-]).
-
-rep_error_to_binary(Error) ->
- couch_util:to_binary(error_reason(Error)).
-
-error_reason({shutdown, Error}) ->
- error_reason(Error);
-error_reason({error, {Error, Reason}}) when
- is_atom(Error), is_binary(Reason)
-->
- io_lib:format("~s: ~s", [Error, Reason]);
-error_reason({error, Reason}) ->
- Reason;
-error_reason(Reason) ->
- Reason.
-
-get_json_value(Key, Props) ->
- get_json_value(Key, Props, undefined).
-
-get_json_value(Key, Props, Default) when is_atom(Key) ->
- Ref = make_ref(),
- case get_value(Key, Props, Ref) of
- Ref ->
- get_value(?l2b(atom_to_list(Key)), Props, Default);
- Else ->
- Else
- end;
-get_json_value(Key, Props, Default) when is_binary(Key) ->
- Ref = make_ref(),
- case get_value(Key, Props, Ref) of
- Ref ->
- get_value(list_to_atom(?b2l(Key)), Props, Default);
- Else ->
- Else
- end.
-
-% pretty-print replication id
--spec pp_rep_id(#rep{} | rep_id()) -> string().
-pp_rep_id(#rep{id = RepId}) ->
- pp_rep_id(RepId);
-pp_rep_id({Base, Extension}) ->
- Base ++ Extension.
-
-% NV: TODO: this function is not used outside the api wrap module;
-% consider moving it there during final cleanup
-is_deleted(Change) ->
- get_json_value(<<"deleted">>, Change, false).
-
-% NV: TODO: these proxy a few functions which used to live here; later remove
-% them and point callers directly at their respective modules
-replication_id(Rep, Version) ->
- couch_replicator_ids:replication_id(Rep, Version).
-
-sum_stats(S1, S2) ->
- couch_replicator_stats:sum_stats(S1, S2).
-
-parse_rep_doc(Props, UserCtx) ->
- couch_replicator_docs:parse_rep_doc(Props, UserCtx).
-
--spec iso8601(erlang:timestamp()) -> binary().
-iso8601({_Mega, _Sec, _Micro} = Timestamp) ->
- {{Y, Mon, D}, {H, Min, S}} = calendar:now_to_universal_time(Timestamp),
- Format = "~B-~2..0B-~2..0BT~2..0B:~2..0B:~2..0BZ",
- iolist_to_binary(io_lib:format(Format, [Y, Mon, D, H, Min, S])).
-
-%% Filter replication info ejson by the state provided. If it matches, return
-%% the input value; if it doesn't, return 'skip'. This is used from the
-%% replicator fabric coordinator and worker.
--spec filter_state(atom(), [atom()], {[_ | _]}) -> {[_ | _]} | skip.
-filter_state(null = _State, _States, _Info) ->
- skip;
-filter_state(_ = _State, [] = _States, Info) ->
- Info;
-filter_state(State, States, Info) ->
- case lists:member(State, States) of
- true ->
- Info;
- false ->
- skip
- end.
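-
-% Illustrative only (not from the original file): the intended behavior of
-% filter_state/3 above, with made-up arguments.
-%
-%   filter_state(running, [running, pending], Info)  -> Info
-%   filter_state(failed, [running, pending], _Info)  -> skip
-%   filter_state(failed, [], Info)                   -> Info   (no filter set)
-%   filter_state(null, [running], _Info)             -> skip   (state unknown)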
-
-remove_basic_auth_from_headers(Headers) ->
- Headers1 = mochiweb_headers:make(Headers),
- case mochiweb_headers:get_value("Authorization", Headers1) of
- undefined ->
- {{undefined, undefined}, Headers};
- Auth ->
- {Basic, Base64} = lists:splitwith(fun(X) -> X =/= $\s end, Auth),
- maybe_remove_basic_auth(string:to_lower(Basic), Base64, Headers1)
- end.
-
-maybe_remove_basic_auth("basic", " " ++ Base64, Headers) ->
- Headers1 = mochiweb_headers:delete_any("Authorization", Headers),
- {decode_basic_creds(Base64), mochiweb_headers:to_list(Headers1)};
-maybe_remove_basic_auth(_, _, Headers) ->
- {{undefined, undefined}, mochiweb_headers:to_list(Headers)}.
-
-decode_basic_creds(Base64) ->
- try re:split(base64:decode(Base64), ":", [{return, list}, {parts, 2}]) of
- [User, Pass] ->
- {User, Pass};
- _ ->
- {undefined, undefined}
- catch
- % Tolerate invalid B64 values here to avoid crashing the replicator
- error:function_clause ->
- {undefined, undefined}
- end.
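-
-% Illustrative only (not from the original file): decode_basic_creds/1 splits
-% a decoded "user:pass" pair; the credentials below are made up. An
-% undecodable value falls into the catch clause above and yields
-% {undefined, undefined}.
-%
-%   decode_basic_creds(base64:encode_to_string("joe:s3cret"))
-%       -> {"joe", "s3cret"}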
-
-% Normalize a #rep{} record such that it doesn't contain time-dependent fields
-% or pids (like httpc pools), and options / props are sorted. This function is
-% used during comparisons.
--spec normalize_rep(#rep{} | nil) -> #rep{} | nil.
-normalize_rep(nil) ->
- nil;
-normalize_rep(#rep{} = Rep) ->
- #rep{
- source = couch_replicator_api_wrap:normalize_db(Rep#rep.source),
- target = couch_replicator_api_wrap:normalize_db(Rep#rep.target),
- % already sorted in make_options/1
- options = Rep#rep.options,
- type = Rep#rep.type,
- view = Rep#rep.view,
- doc_id = Rep#rep.doc_id,
- db_name = Rep#rep.db_name
- }.
-
--spec ejson_state_info(binary() | nil) -> binary() | null.
-ejson_state_info(nil) ->
- null;
-ejson_state_info(Info) when is_binary(Info) ->
- {[{<<"error">>, Info}]};
-ejson_state_info([]) ->
- % Status not set yet => null for compatibility reasons
- null;
-ejson_state_info([{_, _} | _] = Info) ->
- {Info};
-ejson_state_info(Info) ->
- ErrMsg = couch_replicator_utils:rep_error_to_binary(Info),
- {[{<<"error">>, ErrMsg}]}.
-
--spec get_basic_auth_creds(#httpdb{}) ->
- {string(), string()} | {undefined, undefined}.
-get_basic_auth_creds(#httpdb{auth_props = AuthProps}) ->
- case couch_util:get_value(<<"basic">>, AuthProps) of
- undefined ->
- {undefined, undefined};
- {UserPass} when is_list(UserPass) ->
- User = couch_util:get_value(<<"username">>, UserPass),
- Pass = couch_util:get_value(<<"password">>, UserPass),
- case {User, Pass} of
- _ when is_binary(User), is_binary(Pass) ->
- {binary_to_list(User), binary_to_list(Pass)};
- _Other ->
- {undefined, undefined}
- end;
- _Other ->
- {undefined, undefined}
- end.
-
--spec remove_basic_auth_creds(#httpdb{}) -> #httpdb{}.
-remove_basic_auth_creds(#httpdb{auth_props = Props} = HttpDb) ->
- Props1 = lists:keydelete(<<"basic">>, 1, Props),
- HttpDb#httpdb{auth_props = Props1}.
-
--spec set_basic_auth_creds(string(), string(), #httpdb{}) -> #httpdb{}.
-set_basic_auth_creds(undefined, undefined, #httpdb{} = HttpDb) ->
- HttpDb;
-set_basic_auth_creds(User, Pass, #httpdb{} = HttpDb) when
- is_list(User), is_list(Pass)
-->
- HttpDb1 = remove_basic_auth_creds(HttpDb),
- Props = HttpDb1#httpdb.auth_props,
- UserPass =
- {[
- {<<"username">>, list_to_binary(User)},
- {<<"password">>, list_to_binary(Pass)}
- ]},
- Props1 = lists:keystore(<<"basic">>, 1, Props, {<<"basic">>, UserPass}),
- HttpDb1#httpdb{auth_props = Props1}.
-
--spec extract_creds_from_url(string()) ->
- {ok, {string() | undefined, string() | undefined}, string()}
- | {error, term()}.
-extract_creds_from_url(Url) ->
- case ibrowse_lib:parse_url(Url) of
- {error, Error} ->
- {error, Error};
- #url{username = undefined, password = undefined} ->
- {ok, {undefined, undefined}, Url};
- #url{protocol = Proto, username = User, password = Pass} ->
- % Excise user and pass parts from the url. Try to keep the host,
- % port and path as they were in the original.
- Prefix = lists:concat([Proto, "://", User, ":", Pass, "@"]),
- Suffix = lists:sublist(Url, length(Prefix) + 1, length(Url) + 1),
- NoCreds = lists:concat([Proto, "://", Suffix]),
- {ok, {User, Pass}, NoCreds}
- end.
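-
-% Illustrative only (not from the original file): extract_creds_from_url/1 on
-% a made-up url with and without a userinfo part.
-%
-%   extract_creds_from_url("http://adm:pass@localhost:5984/db")
-%       -> {ok, {"adm", "pass"}, "http://localhost:5984/db"}
-%   extract_creds_from_url("http://localhost:5984/db")
-%       -> {ok, {undefined, undefined}, "http://localhost:5984/db"}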
-
-% Normalize basic auth credentials so they are set only in the auth props
-% object. If multiple basic auth credentials are provided, the resulting
-% credentials are picked in the following order of precedence:
-% 1) {"auth": {"basic": {"username": ..., "password": ...}}}
-% 2) URL userinfo part
-% 3) "Authorization": "Basic $base64" header
-%
--spec normalize_basic_auth(#httpdb{}) -> #httpdb{}.
-normalize_basic_auth(#httpdb{} = HttpDb) ->
- #httpdb{url = Url, headers = Headers} = HttpDb,
- {HeaderCreds, HeadersNoCreds} = remove_basic_auth_from_headers(Headers),
- {UrlCreds, UrlWithoutCreds} =
- case extract_creds_from_url(Url) of
- {ok, Creds = {_, _}, UrlNoCreds} ->
- {Creds, UrlNoCreds};
- {error, _Error} ->
- % Don't crash replicator if user provided an invalid
- % userinfo part
- {undefined, undefined}
- end,
- AuthCreds = {_, _} = get_basic_auth_creds(HttpDb),
- HttpDb1 = remove_basic_auth_creds(HttpDb#httpdb{
- url = UrlWithoutCreds,
- headers = HeadersNoCreds
- }),
- {User, Pass} =
- case {AuthCreds, UrlCreds, HeaderCreds} of
- {{U, P}, {_, _}, {_, _}} when is_list(U), is_list(P) -> {U, P};
- {{_, _}, {U, P}, {_, _}} when is_list(U), is_list(P) -> {U, P};
- {{_, _}, {_, _}, {U, P}} -> {U, P}
- end,
- set_basic_auth_creds(User, Pass, HttpDb1).
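-
-% Illustrative only (not from the original file): the precedence above with
-% made-up credentials. When both a url userinfo part and an "Authorization"
-% header are present, the userinfo pair wins, and both are stripped from the
-% returned #httpdb{}:
-%
-%   normalize_basic_auth(#httpdb{
-%       url = "http://u1:p1@h/db",
-%       headers = [{"Authorization", "Basic " ++ base64:encode_to_string("u2:p2")}]
-%   })
-%   yields url = "http://h/db" and
-%   auth_props = [{<<"basic">>, {[{<<"username">>, <<"u1">>},
-%                                 {<<"password">>, <<"p1">>}]}}]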
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-remove_basic_auth_from_headers_test_() ->
- [
- ?_assertMatch(
- {{User, Pass}, NoAuthHeaders},
- remove_basic_auth_from_headers(Headers)
- )
- || {{User, Pass, NoAuthHeaders}, Headers} <- [
- {
- {undefined, undefined, []},
- []
- },
- {
- {undefined, undefined, [{"h", "v"}]},
- [{"h", "v"}]
- },
- {
- {undefined, undefined, [{"Authorization", "junk"}]},
- [{"Authorization", "junk"}]
- },
- {
- {undefined, undefined, []},
- [{"Authorization", "basic X"}]
- },
- {
- {"user", "pass", []},
- [{"Authorization", "Basic " ++ b64creds("user", "pass")}]
- },
- {
- {"user", "pass", []},
- [{"AuThorization", "Basic " ++ b64creds("user", "pass")}]
- },
- {
- {"user", "pass", []},
- [{"Authorization", "bAsIc " ++ b64creds("user", "pass")}]
- },
- {
- {"user", "pass", [{"h", "v"}]},
- [
- {"Authorization", "Basic " ++ b64creds("user", "pass")},
- {"h", "v"}
- ]
- }
- ]
- ].
-
-b64creds(User, Pass) ->
- base64:encode_to_string(User ++ ":" ++ Pass).
-
-normalize_rep_test_() ->
- {
- setup,
- fun() ->
- meck:expect(
- config,
- get,
- fun(_, _, Default) -> Default end
- )
- end,
- fun(_) -> meck:unload() end,
- ?_test(begin
- EJson1 =
- {[
- {<<"source">>, <<"http://host.com/source_db">>},
- {<<"target">>, <<"http://target.local/db">>},
- {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]},
- {<<"other_field">>, <<"some_value">>}
- ]},
- Rep1 = couch_replicator_docs:parse_rep_doc_without_id(EJson1),
- EJson2 =
- {[
- {<<"other_field">>, <<"unrelated">>},
- {<<"target">>, <<"http://target.local/db">>},
- {<<"source">>, <<"http://host.com/source_db">>},
- {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]},
- {<<"other_field2">>, <<"unrelated2">>}
- ]},
- Rep2 = couch_replicator_docs:parse_rep_doc_without_id(EJson2),
- ?assertEqual(normalize_rep(Rep1), normalize_rep(Rep2))
- end)
- }.
-
-get_basic_auth_creds_test() ->
- Check = fun(Props) ->
- get_basic_auth_creds(#httpdb{auth_props = Props})
- end,
-
- ?assertEqual({undefined, undefined}, Check([])),
-
- ?assertEqual({undefined, undefined}, Check([null])),
-
- ?assertEqual({undefined, undefined}, Check([{<<"other">>, <<"x">>}])),
-
- ?assertEqual({undefined, undefined}, Check([{<<"basic">>, []}])),
-
- UserPass1 = {[{<<"username">>, <<"u">>}, {<<"password">>, <<"p">>}]},
- ?assertEqual({"u", "p"}, Check([{<<"basic">>, UserPass1}])),
-
- UserPass3 = {[{<<"username">>, <<"u">>}, {<<"password">>, null}]},
- ?assertEqual({undefined, undefined}, Check([{<<"basic">>, UserPass3}])).
-
-remove_basic_auth_creds_test() ->
- Check = fun(Props) ->
- HttpDb = remove_basic_auth_creds(#httpdb{auth_props = Props}),
- HttpDb#httpdb.auth_props
- end,
-
- ?assertEqual([], Check([])),
-
- ?assertEqual([{<<"other">>, {[]}}], Check([{<<"other">>, {[]}}])),
-
- ?assertEqual(
- [],
- Check([
- {<<"basic">>,
- {[
- {<<"username">>, <<"u">>},
- {<<"password">>, <<"p">>}
- ]}}
- ])
- ),
-
- ?assertEqual(
- [{<<"other">>, {[]}}],
- Check([
- {<<"basic">>,
- {[
- {<<"username">>, <<"u">>},
- {<<"password">>, <<"p">>}
- ]}},
- {<<"other">>, {[]}}
- ])
- ).
-
-set_basic_auth_creds_test() ->
- Check = fun(User, Pass, Props) ->
- HttpDb = set_basic_auth_creds(User, Pass, #httpdb{auth_props = Props}),
- HttpDb#httpdb.auth_props
- end,
-
- ?assertEqual([], Check(undefined, undefined, [])),
-
- ?assertEqual(
- [{<<"other">>, {[]}}],
- Check(
- undefined,
- undefined,
- [{<<"other">>, {[]}}]
- )
- ),
-
- ?assertEqual(
- [
- {<<"basic">>,
- {[
- {<<"username">>, <<"u">>},
- {<<"password">>, <<"p">>}
- ]}}
- ],
- Check("u", "p", [])
- ),
-
- ?assertEqual(
- [
- {<<"other">>, {[]}},
- {<<"basic">>,
- {[
- {<<"username">>, <<"u">>},
- {<<"password">>, <<"p">>}
- ]}}
- ],
- Check("u", "p", [{<<"other">>, {[]}}])
- ).
-
-normalize_basic_creds_test_() ->
- DefaultHeaders = (#httpdb{})#httpdb.headers,
- [
- ?_assertEqual(Expect, normalize_basic_auth(Input))
- || {Input, Expect} <- [
- {
- #httpdb{url = "http://u:p@x.y/db"},
- #httpdb{url = "http://x.y/db", auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{url = "http://u:p@h:80/db"},
- #httpdb{url = "http://h:80/db", auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{url = "https://u:p@h/db"},
- #httpdb{url = "https://h/db", auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{url = "http://u:p@[2001:db8:a1b:12f9::1]/db"},
- #httpdb{
- url = "http://[2001:db8:a1b:12f9::1]/db",
- auth_props = auth_props("u", "p")
- }
- },
- {
- #httpdb{
- url = "http://h/db",
- headers =
- DefaultHeaders ++
- [
- {"Authorization", "Basic " ++ b64creds("u", "p")}
- ]
- },
- #httpdb{url = "http://h/db", auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{
- url = "http://h/db",
- headers =
- DefaultHeaders ++
- [
- {"Authorization", "Basic " ++ b64creds("u", "p@")}
- ]
- },
- #httpdb{url = "http://h/db", auth_props = auth_props("u", "p@")}
- },
- {
- #httpdb{
- url = "http://h/db",
- headers =
- DefaultHeaders ++
- [
- {"Authorization", "Basic " ++ b64creds("u", "p@%40")}
- ]
- },
- #httpdb{url = "http://h/db", auth_props = auth_props("u", "p@%40")}
- },
- {
- #httpdb{
- url = "http://h/db",
- headers =
- DefaultHeaders ++
- [
- {"aUthoriZation", "bASIC " ++ b64creds("U", "p")}
- ]
- },
- #httpdb{url = "http://h/db", auth_props = auth_props("U", "p")}
- },
- {
- #httpdb{
- url = "http://u1:p1@h/db",
- headers =
- DefaultHeaders ++
- [
- {"Authorization", "Basic " ++ b64creds("u2", "p2")}
- ]
- },
- #httpdb{url = "http://h/db", auth_props = auth_props("u1", "p1")}
- },
- {
- #httpdb{
- url = "http://u1:p1@h/db",
- auth_props = [
- {<<"basic">>,
- {[
- {<<"username">>, <<"u2">>},
- {<<"password">>, <<"p2">>}
- ]}}
- ]
- },
- #httpdb{url = "http://h/db", auth_props = auth_props("u2", "p2")}
- },
- {
- #httpdb{
- url = "http://u1:p1@h/db",
- auth_props = [
- {<<"basic">>,
- {[
- {<<"username">>, <<"u2">>},
- {<<"password">>, <<"p2">>}
- ]}}
- ],
- headers =
- DefaultHeaders ++
- [
- {"Authorization", "Basic " ++ b64creds("u3", "p3")}
- ]
- },
- #httpdb{url = "http://h/db", auth_props = auth_props("u2", "p2")}
- }
- ]
- ].
-
-auth_props(User, Pass) when is_list(User), is_list(Pass) ->
- [
- {<<"basic">>,
- {[
- {<<"username">>, list_to_binary(User)},
- {<<"password">>, list_to_binary(Pass)}
- ]}}
- ].
-
--endif.
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
deleted file mode 100644
index f66a019e2..000000000
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ /dev/null
@@ -1,546 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_worker).
--behaviour(gen_server).
--vsn(1).
-
-% public API
--export([start_link/5]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
--export([format_status/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
--include("couch_replicator.hrl").
-
-% TODO: maybe make both buffer max sizes configurable
-
-% for remote targets
--define(DOC_BUFFER_BYTE_SIZE, 512 * 1024).
-% 10 seconds (in microseconds)
--define(STATS_DELAY, 10000000).
--define(MISSING_DOC_RETRY_MSEC, 2000).
-
--import(couch_util, [
- to_binary/1,
- get_value/3
-]).
-
--record(batch, {
- docs = [],
- size = 0
-}).
-
--record(state, {
- cp,
- loop,
- max_parallel_conns,
- source,
- target,
- readers = [],
- writer = nil,
- pending_fetch = nil,
- flush_waiter = nil,
- stats = couch_replicator_stats:new(),
- batch = #batch{}
-}).
-
-start_link(Cp, #httpdb{} = Source, Target, ChangesManager, MaxConns) ->
- gen_server:start_link(
- ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []
- ).
-
-init({Cp, Source, Target, ChangesManager, MaxConns}) ->
- process_flag(trap_exit, true),
- Parent = self(),
- LoopPid = spawn_link(fun() ->
- queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
- end),
- erlang:put(last_stats_report, os:timestamp()),
- State = #state{
- cp = Cp,
- max_parallel_conns = MaxConns,
- loop = LoopPid,
- source = Source,
- target = Target
- },
- {ok, State}.
-
-handle_call(
- {fetch_doc, {_Id, _Revs, _PAs} = Params},
- {Pid, _} = From,
- #state{
- loop = Pid,
- readers = Readers,
- pending_fetch = nil,
- source = Src,
- target = Tgt,
- max_parallel_conns = MaxConns
- } = State
-) ->
- case length(Readers) of
- Size when Size < MaxConns ->
- Reader = spawn_doc_reader(Src, Tgt, Params),
- NewState = State#state{
- readers = [Reader | Readers]
- },
- {reply, ok, NewState};
- _ ->
- NewState = State#state{
- pending_fetch = {From, Params}
- },
- {noreply, NewState}
- end;
-handle_call({batch_doc, Doc}, From, State) ->
- gen_server:reply(From, ok),
- {noreply, maybe_flush_docs(Doc, State)};
-handle_call({add_stats, IncStats}, From, #state{stats = Stats} = State) ->
- gen_server:reply(From, ok),
- NewStats = couch_replicator_utils:sum_stats(Stats, IncStats),
- NewStats2 = maybe_report_stats(State#state.cp, NewStats),
- {noreply, State#state{stats = NewStats2}};
-handle_call(
- flush,
- {Pid, _} = From,
- #state{
- loop = Pid,
- writer = nil,
- flush_waiter = nil,
- target = Target,
- batch = Batch
- } = State
-) ->
- State2 =
- case State#state.readers of
- [] ->
- State#state{writer = spawn_writer(Target, Batch)};
- _ ->
- State
- end,
- {noreply, State2#state{flush_waiter = From}}.
-
-handle_cast(Msg, State) ->
- {stop, {unexpected_async_call, Msg}, State}.
-
-handle_info({'EXIT', Pid, normal}, #state{loop = Pid} = State) ->
- #state{
- batch = #batch{docs = []},
- readers = [],
- writer = nil,
- pending_fetch = nil,
- flush_waiter = nil
- } = State,
- {stop, normal, State};
-handle_info({'EXIT', Pid, normal}, #state{writer = Pid} = State) ->
- {noreply, after_full_flush(State)};
-handle_info({'EXIT', Pid, normal}, #state{writer = nil} = State) ->
- #state{
- readers = Readers,
- writer = Writer,
- batch = Batch,
- source = Source,
- target = Target,
- pending_fetch = Fetch,
- flush_waiter = FlushWaiter
- } = State,
- case Readers -- [Pid] of
- Readers ->
- {noreply, State};
- Readers2 ->
- State2 =
- case Fetch of
- nil ->
- case
- (FlushWaiter =/= nil) andalso (Writer =:= nil) andalso
- (Readers2 =:= [])
- of
- true ->
- State#state{
- readers = Readers2,
- writer = spawn_writer(Target, Batch)
- };
- false ->
- State#state{readers = Readers2}
- end;
- {From, FetchParams} ->
- Reader = spawn_doc_reader(Source, Target, FetchParams),
- gen_server:reply(From, ok),
- State#state{
- readers = [Reader | Readers2],
- pending_fetch = nil
- }
- end,
- {noreply, State2}
- end;
-handle_info({'EXIT', _Pid, max_backoff}, State) ->
- {stop, {shutdown, max_backoff}, State};
-handle_info({'EXIT', _Pid, {bulk_docs_failed, _, _} = Err}, State) ->
- {stop, {shutdown, Err}, State};
-handle_info({'EXIT', _Pid, {revs_diff_failed, _, _} = Err}, State) ->
- {stop, {shutdown, Err}, State};
-handle_info({'EXIT', _Pid, {http_request_failed, _, _, _} = Err}, State) ->
- {stop, {shutdown, Err}, State};
-handle_info({'EXIT', Pid, Reason}, State) ->
- {stop, {process_died, Pid, Reason}, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-format_status(_Opt, [_PDict, State]) ->
- #state{
- cp = MainJobPid,
- loop = LoopPid,
- source = Source,
- target = Target,
- readers = Readers,
- pending_fetch = PendingFetch,
- batch = #batch{size = BatchSize}
- } = State,
- [
- {main_pid, MainJobPid},
- {loop, LoopPid},
- {source, couch_replicator_api_wrap:db_uri(Source)},
- {target, couch_replicator_api_wrap:db_uri(Target)},
- {num_readers, length(Readers)},
- {pending_fetch, PendingFetch},
- {batch_size, BatchSize}
- ].
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) ->
- ChangesManager ! {get_changes, self()},
- receive
- {closed, ChangesManager} ->
- ok;
- {changes, ChangesManager, [], ReportSeq} ->
- Stats = couch_replicator_stats:new(),
- ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
- queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager);
- {changes, ChangesManager, Changes, ReportSeq} ->
- {IdRevs, Stats0} = find_missing(Changes, Target),
- ok = gen_server:call(Parent, {add_stats, Stats0}, infinity),
- remote_process_batch(IdRevs, Parent),
- {ok, Stats} = gen_server:call(Parent, flush, infinity),
- ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
- erlang:put(last_stats_report, os:timestamp()),
- couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
- queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
- end.
-
-remote_process_batch([], _Parent) ->
- ok;
-remote_process_batch([{Id, Revs, PAs} | Rest], Parent) ->
- % When the source is a remote database, we fetch a single document revision
- % per HTTP request. This is mostly to facilitate retrying of HTTP requests
- % due to transient network failures. It also helps avoid exceeding the maximum
- % URL length allowed by proxies and Mochiweb.
- lists:foreach(
- fun(Rev) ->
- ok = gen_server:call(Parent, {fetch_doc, {Id, [Rev], PAs}}, infinity)
- end,
- Revs
- ),
- remote_process_batch(Rest, Parent).
-
-spawn_doc_reader(Source, Target, FetchParams) ->
- Parent = self(),
- spawn_link(fun() ->
- fetch_doc(
- Source, FetchParams, fun remote_doc_handler/2, {Parent, Target}
- )
- end).
-
-fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) ->
- try
- couch_replicator_api_wrap:open_doc_revs(
- Source, Id, Revs, [{atts_since, PAs}, latest], DocHandler, Acc
- )
- catch
- throw:missing_doc ->
- couch_log:error(
- "Retrying fetch and update of document `~s` as it is "
- "unexpectedly missing. Missing revisions are: ~s",
- [Id, couch_doc:revs_to_strs(Revs)]
- ),
- WaitMSec = config:get_integer(
- "replicator",
- "missing_doc_retry_msec",
- ?MISSING_DOC_RETRY_MSEC
- ),
- timer:sleep(WaitMSec),
- couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc);
- throw:{missing_stub, _} ->
- couch_log:error(
- "Retrying fetch and update of document `~s` due to out of "
- "sync attachment stubs. Missing revisions are: ~s",
- [Id, couch_doc:revs_to_strs(Revs)]
- ),
- WaitMSec = config:get_integer(
- "replicator",
- "missing_doc_retry_msec",
- ?MISSING_DOC_RETRY_MSEC
- ),
- timer:sleep(WaitMSec),
- couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc)
- end.
-
-remote_doc_handler(
- {ok, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc},
- Acc
-) ->
- % Flush design docs in their own PUT requests to correctly process
- % authorization failures for design doc updates.
- couch_log:debug("Worker flushing design doc", []),
- doc_handler_flush_doc(Doc, Acc);
-remote_doc_handler({ok, #doc{atts = [_ | _]} = Doc}, Acc) ->
- % Immediately flush documents with attachments received from a remote
- % source. The data property of each attachment is a function that starts
- % streaming the attachment data from the remote source, therefore it's
- % convenient to call it ASAP to avoid ibrowse inactivity timeouts.
- couch_log:debug("Worker flushing doc with attachments", []),
- doc_handler_flush_doc(Doc, Acc);
-remote_doc_handler({ok, #doc{atts = []} = Doc}, {Parent, _} = Acc) ->
- ok = gen_server:call(Parent, {batch_doc, Doc}, infinity),
- {ok, Acc};
-remote_doc_handler({{not_found, missing}, _}, _Acc) ->
- throw(missing_doc).
-
-doc_handler_flush_doc(#doc{} = Doc, {Parent, Target} = Acc) ->
- Stats = couch_replicator_stats:new([{docs_read, 1}]),
- Success = (flush_doc(Target, Doc) =:= ok),
- {Result, Stats2} =
- case Success of
- true ->
- {{ok, Acc}, couch_replicator_stats:increment(docs_written, Stats)};
- false ->
- {{skip, Acc}, couch_replicator_stats:increment(doc_write_failures, Stats)}
- end,
- ok = gen_server:call(Parent, {add_stats, Stats2}, infinity),
- Result.
-
-spawn_writer(Target, #batch{docs = DocList, size = Size}) ->
- case {Target, Size > 0} of
- {#httpdb{}, true} ->
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]);
- _ ->
- ok
- end,
- Parent = self(),
- spawn_link(
- fun() ->
- Stats = flush_docs(Target, DocList),
- ok = gen_server:call(Parent, {add_stats, Stats}, infinity)
- end
- ).
-
-after_full_flush(#state{stats = Stats, flush_waiter = Waiter} = State) ->
- gen_server:reply(Waiter, {ok, Stats}),
- erlang:put(last_stats_report, os:timestamp()),
- State#state{
- stats = couch_replicator_stats:new(),
- flush_waiter = nil,
- writer = nil,
- batch = #batch{}
- }.
-
-maybe_flush_docs(Doc, State) ->
- #state{
- target = Target,
- batch = Batch,
- stats = Stats,
- cp = Cp
- } = State,
- {Batch2, WStats} = maybe_flush_docs(Target, Batch, Doc),
- Stats2 = couch_replicator_stats:sum_stats(Stats, WStats),
- Stats3 = couch_replicator_stats:increment(docs_read, Stats2),
- Stats4 = maybe_report_stats(Cp, Stats3),
- State#state{stats = Stats4, batch = Batch2}.
-
-maybe_flush_docs(#httpdb{} = Target, Batch, Doc) ->
- #batch{docs = DocAcc, size = SizeAcc} = Batch,
- JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
- case SizeAcc + iolist_size(JsonDoc) of
- SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
- Stats = flush_docs(Target, [JsonDoc | DocAcc]),
- {#batch{}, Stats};
- SizeAcc2 ->
- Stats = couch_replicator_stats:new(),
- {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
- end.
-
-flush_docs(_Target, []) ->
- couch_replicator_stats:new();
-flush_docs(Target, DocList) ->
- FlushResult = couch_replicator_api_wrap:update_docs(
- Target,
- DocList,
- [delay_commit],
- replicated_changes
- ),
- handle_flush_docs_result(FlushResult, Target, DocList).
-
-handle_flush_docs_result({error, request_body_too_large}, _Target, [Doc]) ->
- couch_log:error("Replicator: failed to write doc ~p. Too large", [Doc]),
- couch_replicator_stats:new([{doc_write_failures, 1}]);
-handle_flush_docs_result({error, request_body_too_large}, Target, DocList) ->
- Len = length(DocList),
- {DocList1, DocList2} = lists:split(Len div 2, DocList),
- couch_log:notice(
- "Replicator: couldn't write batch of size ~p to ~p because"
- " request body is too large. Splitting batch into 2 separate batches of"
- " sizes ~p and ~p",
- [
- Len,
- couch_replicator_api_wrap:db_uri(Target),
- length(DocList1),
- length(DocList2)
- ]
- ),
- Stats1 = flush_docs(Target, DocList1),
- Stats2 = flush_docs(Target, DocList2),
- couch_replicator_stats:sum_stats(Stats1, Stats2);
-handle_flush_docs_result({ok, Errors}, Target, DocList) ->
- DbUri = couch_replicator_api_wrap:db_uri(Target),
- lists:foreach(
- fun({Props}) ->
- couch_log:error(
- "Replicator: couldn't write document `~s`, revision"
- " `~s`, to target database `~s`. Error: `~s`, reason: `~s`.",
- [
- get_value(id, Props, ""),
- get_value(rev, Props, ""),
- DbUri,
- get_value(error, Props, ""),
- get_value(reason, Props, "")
- ]
- )
- end,
- Errors
- ),
- couch_replicator_stats:new([
- {docs_written, length(DocList) - length(Errors)},
- {doc_write_failures, length(Errors)}
- ]);
-handle_flush_docs_result({error, {bulk_docs_failed, _, _} = Err}, _, _) ->
- exit(Err).
-
-flush_doc(Target, #doc{id = Id, revs = {Pos, [RevId | _]}} = Doc) ->
- try couch_replicator_api_wrap:update_doc(Target, Doc, [], replicated_changes) of
- {ok, _} ->
- ok;
- Error ->
- couch_log:error(
- "Replicator: error writing document `~s` to `~s`: ~s",
- [Id, couch_replicator_api_wrap:db_uri(Target), couch_util:to_binary(Error)]
- ),
- Error
- catch
- throw:{missing_stub, _} = MissingStub ->
- throw(MissingStub);
- throw:{Error, Reason} ->
- couch_log:error(
- "Replicator: couldn't write document `~s`, revision `~s`,"
- " to target database `~s`. Error: `~s`, reason: `~s`.",
- [
- Id,
- couch_doc:rev_to_str({Pos, RevId}),
- couch_replicator_api_wrap:db_uri(Target),
- to_binary(Error),
- to_binary(Reason)
- ]
- ),
- {error, Error};
- throw:Err ->
- couch_log:error(
- "Replicator: couldn't write document `~s`, revision `~s`,"
- " to target database `~s`. Error: `~s`.",
- [
- Id,
- couch_doc:rev_to_str({Pos, RevId}),
- couch_replicator_api_wrap:db_uri(Target),
- to_binary(Err)
- ]
- ),
- {error, Err}
- end.
-
-find_missing(DocInfos, Target) ->
- {IdRevs, AllRevsCount} = lists:foldr(
- fun
- (#doc_info{revs = []}, {IdRevAcc, CountAcc}) ->
- {IdRevAcc, CountAcc};
- (#doc_info{id = Id, revs = RevsInfo}, {IdRevAcc, CountAcc}) ->
- Revs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
- {[{Id, Revs} | IdRevAcc], CountAcc + length(Revs)}
- end,
- {[], 0},
- DocInfos
- ),
-
- Missing =
- case couch_replicator_api_wrap:get_missing_revs(Target, IdRevs) of
- {ok, Result} -> Result;
- {error, Error} -> exit(Error)
- end,
- MissingRevsCount = lists:foldl(
- fun({_Id, MissingRevs, _PAs}, Acc) -> Acc + length(MissingRevs) end,
- 0,
- Missing
- ),
- Stats = couch_replicator_stats:new([
- {missing_checked, AllRevsCount},
- {missing_found, MissingRevsCount}
- ]),
- {Missing, Stats}.
-
-maybe_report_stats(Cp, Stats) ->
- Now = os:timestamp(),
- case timer:now_diff(erlang:get(last_stats_report), Now) >= ?STATS_DELAY of
- true ->
- ok = gen_server:call(Cp, {add_stats, Stats}, infinity),
- erlang:put(last_stats_report, Now),
- couch_replicator_stats:new();
- false ->
- Stats
- end.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-replication_worker_format_status_test() ->
- State = #state{
- cp = self(),
- loop = self(),
- source = #httpdb{url = "http://u:p@h/d1"},
- target = #httpdb{url = "http://u:p@h/d2"},
- readers = [r1, r2, r3],
- pending_fetch = nil,
- batch = #batch{size = 5}
- },
- Format = format_status(opts_ignored, [pdict, State]),
- ?assertEqual(self(), proplists:get_value(main_pid, Format)),
- ?assertEqual(self(), proplists:get_value(loop, Format)),
- ?assertEqual("http://u:*****@h/d1", proplists:get_value(source, Format)),
- ?assertEqual("http://u:*****@h/d2", proplists:get_value(target, Format)),
- ?assertEqual(3, proplists:get_value(num_readers, Format)),
- ?assertEqual(nil, proplists:get_value(pending_fetch, Format)),
- ?assertEqual(5, proplists:get_value(batch_size, Format)).
-
--endif.
diff --git a/src/couch_replicator/src/json_stream_parse.erl b/src/couch_replicator/src/json_stream_parse.erl
deleted file mode 100644
index 3478b9830..000000000
--- a/src/couch_replicator/src/json_stream_parse.erl
+++ /dev/null
@@ -1,425 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(json_stream_parse).
-
--export([events/2, to_ejson/1, collect_object/2]).
-
--define(IS_WS(X), (X == $\ orelse X == $\t orelse X == $\n orelse X == $\r)).
--define(IS_DELIM(X), (X == $} orelse X == $] orelse X == $,)).
--define(IS_DIGIT(X), (X >= $0 andalso X =< $9)).
-
-% Parses the json into events.
-%
-% The DataFun param is a function that produces the data for parsing. When
-% called it must yield a tuple, or the atom done. The first element in the
-% tuple is the data itself, and the second element is a function to be called
-% next to get the next chunk of data in the stream.
-%
-% The EventFun is called every time a json element is parsed. It must produce
-% a new function to be called for the next event.
-%
-% Events happen each time a new element in the json string is parsed.
-% For simple value types, the data itself is returned:
-% Strings
-% Integers
-% Floats
-% true
-% false
-% null
-%
-% For arrays, the start of the array is signaled by the event array_start
-% atom. The end is signaled by array_end. The events before the end are the
-% values, or nested values.
-%
-% For objects, the start of the object is signaled by the event object_start
-% atom. The end is signaled by object_end. Each key is signaled by
-% {key, KeyString}, and the following event is the value, or start of the
-% value (array_start, object_start).
-%
-events(Data, EventFun) when is_list(Data) ->
- events(list_to_binary(Data), EventFun);
-events(Data, EventFun) when is_binary(Data) ->
- events(fun() -> {Data, fun() -> done end} end, EventFun);
-events(DataFun, EventFun) ->
- parse_one(DataFun, EventFun, <<>>).
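-
-% Illustrative only (not from the original file): a minimal call showing the
-% callback shape events/2 expects (each call to the event fun returns the fun
-% used for the next event), reusing collect_events/2 defined below. The json
-% input is made up.
-%
-%   events(<<"{\"a\": [1, true]}">>, fun(Ev) -> collect_events(Ev, []) end)
-%
-%   emits, in order: object_start, {key, <<"a">>}, array_start, 1, true,
-%   array_end, object_end.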
-
-% Converts the JSON directly to its Erlang (ejson) representation.
-to_ejson(DF) ->
- {_DF2, EF, _Rest} = events(DF, fun(Ev) -> collect_events(Ev, []) end),
- [[EJson]] = make_ejson(EF(get_results), [[]]),
- EJson.
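-
-% Illustrative only (not from the original file): to_ejson/1 on a small
-% made-up document, producing the usual ejson terms.
-%
-%   to_ejson(<<"{\"a\": [1, true], \"b\": null}">>)
-%       -> {[{<<"a">>, [1, true]}, {<<"b">>, null}]}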
-
-% This function is used to return complete objects while parsing streams.
-%
-% Return this function from inside an event function right after getting an
-% object_start event. It then collects the remaining events for that object
-% and converts it to its Erlang (ejson) representation.
-%
-% It then calls your ReturnControl function with the Erlang object. Your
-% ReturnControl function should then yield another event function.
-%
-% This example stream parses an array of objects, calling
-% fun do_something_with_the_object/1 for each object.
-%
-% ev_array(array_start) ->
-% fun(Ev) -> ev_object_loop(Ev) end.
-%
-% ev_object_loop(object_start) ->
-% fun(Ev) ->
-% json_stream_parse:collect_object(Ev,
-% fun(Obj) ->
-% do_something_with_the_object(Obj),
-% fun(Ev2) -> ev_object_loop(Ev2) end
-% end)
-% end;
-% ev_object_loop(array_end) ->
-% ok.
-%
-% % invoke the parse
-% main() ->
-% ...
-% events(Data, fun(Ev) -> ev_array(Ev) end).
-
-collect_object(Ev, ReturnControl) ->
- collect_object(Ev, 0, ReturnControl, [object_start]).
-
-% internal functions
-
-parse_one(DF, EF, Acc) ->
- case toke(DF, Acc) of
- none ->
- none;
- {Token, DF2, Rest} ->
- case Token of
- "{" ->
- EF2 = EF(object_start),
- {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
- {DF3, EF3(object_end), Rest2};
- "[" ->
- EF2 = EF(array_start),
- {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
- {DF3, EF3(array_end), Rest2};
- Int when is_integer(Int) ->
- {DF2, EF(Int), Rest};
- Float when is_float(Float) ->
- {DF2, EF(Float), Rest};
- Atom when is_atom(Atom) ->
- {DF2, EF(Atom), Rest};
- String when is_binary(String) ->
- {DF2, EF(String), Rest};
- _OtherToken ->
- err(unexpected_token)
- end
- end.
-
-must_parse_one(DF, EF, Acc, Error) ->
- case parse_one(DF, EF, Acc) of
- none ->
- err(Error);
- Else ->
- Else
- end.
-
-must_toke(DF, Data, Error) ->
- case toke(DF, Data) of
- none ->
- err(Error);
- Result ->
- Result
- end.
-
-toke(DF, <<>>) ->
- case DF() of
- done ->
- none;
- {Data, DF2} ->
- toke(DF2, Data)
- end;
-toke(DF, <<C, Rest/binary>>) when ?IS_WS(C) ->
- toke(DF, Rest);
-toke(DF, <<${, Rest/binary>>) ->
- {"{", DF, Rest};
-toke(DF, <<$}, Rest/binary>>) ->
- {"}", DF, Rest};
-toke(DF, <<$[, Rest/binary>>) ->
- {"[", DF, Rest};
-toke(DF, <<$], Rest/binary>>) ->
- {"]", DF, Rest};
-toke(DF, <<$", Rest/binary>>) ->
- toke_string(DF, Rest, []);
-toke(DF, <<$,, Rest/binary>>) ->
- {",", DF, Rest};
-toke(DF, <<$:, Rest/binary>>) ->
- {":", DF, Rest};
-toke(DF, <<$-, Rest/binary>>) ->
- {<<C, _/binary>> = Data, DF2} = must_df(DF, 1, Rest, expected_number),
- case ?IS_DIGIT(C) of
- true ->
- toke_number_leading(DF2, Data, "-");
- false ->
- err(expected_number)
- end;
-toke(DF, <<C, _/binary>> = Data) when ?IS_DIGIT(C) ->
- toke_number_leading(DF, Data, []);
-toke(DF, <<$t, Rest/binary>>) ->
- {Data, DF2} = must_match(<<"rue">>, DF, Rest),
- {true, DF2, Data};
-toke(DF, <<$f, Rest/binary>>) ->
- {Data, DF2} = must_match(<<"alse">>, DF, Rest),
- {false, DF2, Data};
-toke(DF, <<$n, Rest/binary>>) ->
- {Data, DF2} = must_match(<<"ull">>, DF, Rest),
- {null, DF2, Data};
-toke(_, _) ->
- err(bad_token).
-
-must_match(Pattern, DF, Data) ->
- Size = size(Pattern),
- case must_df(DF, Size, Data, bad_token) of
- {<<Pattern:Size/binary, Data2/binary>>, DF2} ->
- {Data2, DF2};
- {_, _} ->
- err(bad_token)
- end.
-
-must_df(DF, Error) ->
- case DF() of
- done ->
- err(Error);
- {Data, DF2} ->
- {Data, DF2}
- end.
-
-must_df(DF, NeedLen, Acc, Error) ->
- if
- size(Acc) >= NeedLen ->
- {Acc, DF};
- true ->
- case DF() of
- done ->
- err(Error);
- {Data, DF2} ->
- must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
- end
- end.
-
-parse_object(DF, EF, Acc) ->
- case must_toke(DF, Acc, unterminated_object) of
- {String, DF2, Rest} when is_binary(String) ->
- EF2 = EF({key, String}),
- case must_toke(DF2, Rest, unterminated_object) of
- {":", DF3, Rest2} ->
- {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
- case must_toke(DF4, Rest3, unterminated_object) of
- {",", DF5, Rest4} ->
- parse_object(DF5, EF3, Rest4);
- {"}", DF5, Rest4} ->
- {DF5, EF3, Rest4};
- {_, _, _} ->
- err(unexpected_token)
- end;
- _Else ->
- err(expected_colon)
- end;
- {"}", DF2, Rest} ->
- {DF2, EF, Rest};
- {_, _, _} ->
- err(unexpected_token)
- end.
-
-parse_array0(DF, EF, Acc) ->
- case toke(DF, Acc) of
- none ->
- err(unterminated_array);
- {",", DF2, Rest} ->
- parse_array(DF2, EF, Rest);
- {"]", DF2, Rest} ->
- {DF2, EF, Rest};
- _ ->
- err(unexpected_token)
- end.
-
-parse_array(DF, EF, Acc) ->
- case toke(DF, Acc) of
- none ->
- err(unterminated_array);
- {Token, DF2, Rest} ->
- case Token of
- "{" ->
- EF2 = EF(object_start),
- {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
- parse_array0(DF3, EF3(object_end), Rest2);
- "[" ->
- EF2 = EF(array_start),
- {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
- parse_array0(DF3, EF3(array_end), Rest2);
- Int when is_integer(Int) ->
- parse_array0(DF2, EF(Int), Rest);
- Float when is_float(Float) ->
- parse_array0(DF2, EF(Float), Rest);
- Atom when is_atom(Atom) ->
- parse_array0(DF2, EF(Atom), Rest);
- String when is_binary(String) ->
- parse_array0(DF2, EF(String), Rest);
- "]" ->
- {DF2, EF, Rest};
- _ ->
- err(unexpected_token)
- end
- end.
-
-toke_string(DF, <<>>, Acc) ->
- {Data, DF2} = must_df(DF, unterminated_string),
- toke_string(DF2, Data, Acc);
-toke_string(DF, <<$\\, $", Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [$" | Acc]);
-toke_string(DF, <<$\\, $\\, Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [$\\ | Acc]);
-toke_string(DF, <<$\\, $/, Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [$/ | Acc]);
-toke_string(DF, <<$\\, $b, Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [$\b | Acc]);
-toke_string(DF, <<$\\, $f, Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [$\f | Acc]);
-toke_string(DF, <<$\\, $n, Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [$\n | Acc]);
-toke_string(DF, <<$\\, $r, Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [$\r | Acc]);
-toke_string(DF, <<$\\, $t, Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [$\t | Acc]);
-toke_string(DF, <<$\\, $u, Rest/binary>>, Acc) ->
- {<<A, B, C, D, Data/binary>>, DF2} = must_df(DF, 4, Rest, missing_hex),
- UTFChar = erlang:list_to_integer([A, B, C, D], 16),
- if
- UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
- err(invalid_utf_char);
- true ->
- ok
- end,
- Chars = xmerl_ucs:to_utf8(UTFChar),
- toke_string(DF2, Data, lists:reverse(Chars) ++ Acc);
-toke_string(DF, <<$\\>>, Acc) ->
- {Data, DF2} = must_df(DF, unterminated_string),
- toke_string(DF2, <<$\\, Data/binary>>, Acc);
-toke_string(_DF, <<$\\, _/binary>>, _Acc) ->
- err(bad_escape);
-toke_string(DF, <<$", Rest/binary>>, Acc) ->
- {list_to_binary(lists:reverse(Acc)), DF, Rest};
-toke_string(DF, <<C, Rest/binary>>, Acc) ->
- toke_string(DF, Rest, [C | Acc]).
-
-toke_number_leading(DF, <<Digit, Rest/binary>>, Acc) when
- ?IS_DIGIT(Digit)
-->
- toke_number_leading(DF, Rest, [Digit | Acc]);
-toke_number_leading(DF, <<C, _/binary>> = Rest, Acc) when
- ?IS_WS(C) orelse ?IS_DELIM(C)
-->
- {list_to_integer(lists:reverse(Acc)), DF, Rest};
-toke_number_leading(DF, <<>>, Acc) ->
- case DF() of
- done ->
- {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_leading(DF2, Data, Acc)
- end;
-toke_number_leading(DF, <<$., Rest/binary>>, Acc) ->
- toke_number_trailing(DF, Rest, [$. | Acc]);
-toke_number_leading(DF, <<$e, Rest/binary>>, Acc) ->
- toke_number_exponent(DF, Rest, [$e, $0, $. | Acc]);
-toke_number_leading(DF, <<$E, Rest/binary>>, Acc) ->
- toke_number_exponent(DF, Rest, [$e, $0, $. | Acc]);
-toke_number_leading(_, _, _) ->
- err(unexpected_character_in_number).
-
-toke_number_trailing(DF, <<Digit, Rest/binary>>, Acc) when
- ?IS_DIGIT(Digit)
-->
- toke_number_trailing(DF, Rest, [Digit | Acc]);
-toke_number_trailing(DF, <<C, _/binary>> = Rest, Acc) when
- ?IS_WS(C) orelse ?IS_DELIM(C)
-->
- {list_to_float(lists:reverse(Acc)), DF, Rest};
-toke_number_trailing(DF, <<>>, Acc) ->
- case DF() of
- done ->
- {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_trailing(DF2, Data, Acc)
- end;
-toke_number_trailing(DF, <<"e", Rest/binary>>, [C | _] = Acc) when C /= $. ->
- toke_number_exponent(DF, Rest, [$e | Acc]);
-toke_number_trailing(DF, <<"E", Rest/binary>>, [C | _] = Acc) when C /= $. ->
- toke_number_exponent(DF, Rest, [$e | Acc]);
-toke_number_trailing(_, _, _) ->
- err(unexpected_character_in_number).
-
-toke_number_exponent(DF, <<Digit, Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
- toke_number_exponent(DF, Rest, [Digit | Acc]);
-toke_number_exponent(DF, <<Sign, Rest/binary>>, [$e | _] = Acc) when
- Sign == $+ orelse Sign == $-
-->
- toke_number_exponent(DF, Rest, [Sign | Acc]);
-toke_number_exponent(DF, <<C, _/binary>> = Rest, Acc) when
- ?IS_WS(C) orelse ?IS_DELIM(C)
-->
- {list_to_float(lists:reverse(Acc)), DF, Rest};
-toke_number_exponent(DF, <<>>, Acc) ->
- case DF() of
- done ->
- {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_exponent(DF2, Data, Acc)
- end;
-toke_number_exponent(_, _, _) ->
- err(unexpected_character_in_number).
-
-err(Error) ->
- throw({parse_error, Error}).
-
-make_ejson([], Stack) ->
- Stack;
-make_ejson([array_start | RevEvs], [ArrayValues, PrevValues | RestStack]) ->
- make_ejson(RevEvs, [[ArrayValues | PrevValues] | RestStack]);
-make_ejson([array_end | RevEvs], Stack) ->
- make_ejson(RevEvs, [[] | Stack]);
-make_ejson([object_start | RevEvs], [ObjValues, PrevValues | RestStack]) ->
- make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
-make_ejson([object_end | RevEvs], Stack) ->
- make_ejson(RevEvs, [[] | Stack]);
-make_ejson([{key, String} | RevEvs], [[PrevValue | RestObject] | RestStack] = _Stack) ->
- make_ejson(RevEvs, [[{String, PrevValue} | RestObject] | RestStack]);
-make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
- make_ejson(RevEvs, [[Value | Vals] | RestStack]).
-
-collect_events(get_results, Acc) ->
- Acc;
-collect_events(Ev, Acc) ->
- fun(NextEv) -> collect_events(NextEv, [Ev | Acc]) end.
-
-collect_object(object_end, 0, ReturnControl, Acc) ->
- [[Obj]] = make_ejson([object_end | Acc], [[]]),
- ReturnControl(Obj);
-collect_object(object_end, NestCount, ReturnControl, Acc) ->
- fun(Ev) ->
- collect_object(Ev, NestCount - 1, ReturnControl, [object_end | Acc])
- end;
-collect_object(object_start, NestCount, ReturnControl, Acc) ->
- fun(Ev) ->
- collect_object(Ev, NestCount + 1, ReturnControl, [object_start | Acc])
- end;
-collect_object(Ev, NestCount, ReturnControl, Acc) ->
- fun(Ev2) ->
- collect_object(Ev2, NestCount, ReturnControl, [Ev | Acc])
- end.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl
deleted file mode 100644
index 2d58f847e..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_attachments_too_large).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
-setup(_) ->
- Ctx = test_util:start_couch([couch_replicator]),
- Source = create_db(),
- create_doc_with_attachment(Source, <<"doc">>, 1000),
- Target = create_db(),
- {Ctx, {Source, Target}}.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- delete_db(Source),
- delete_db(Target),
- config:delete("couchdb", "max_attachment_size"),
- ok = test_util:stop_couch(Ctx).
-
-attachment_too_large_replication_test_() ->
- Pairs = [{remote, remote}],
- {
- "Attachment size too large replication tests",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Pair, fun should_succeed/2} || Pair <- Pairs] ++
- [{Pair, fun should_fail/2} || Pair <- Pairs]
- }
- }.
-
-should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)}
- ]},
- config:set("couchdb", "max_attachment_size", "1000", _Persist = false),
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- ?_assertEqual(ok, couch_replicator_test_helper:compare_dbs(Source, Target)).
-
-should_fail({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)}
- ]},
- config:set("couchdb", "max_attachment_size", "999", _Persist = false),
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- ?_assertError(
- {badmatch, {not_found, missing}},
- couch_replicator_test_helper:compare_dbs(Source, Target)
- ).
-
-create_db() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-create_doc_with_attachment(DbName, DocId, AttSize) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc = #doc{id = DocId, atts = att(AttSize)},
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- couch_db:close(Db),
- ok.
-
-att(Size) when is_integer(Size), Size >= 1 ->
- [
- couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, fun(_Bytes) ->
- <<<<"x">> || _ <- lists:seq(1, Size)>>
- end}
- ])
- ].
-
-delete_db(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-
-db_url(remote, DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
deleted file mode 100644
index 1c093d58c..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
+++ /dev/null
@@ -1,523 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_compact_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
--import(couch_replicator_test_helper, [
- db_url/1,
- get_pid/1
-]).
-
--define(ATTFILE, filename:join([?FIXTURESDIR, "logo.png"])).
--define(DELAY, 500).
--define(TIMEOUT, 360000).
--define(TIMEOUT_WRITER, 100000).
--define(TIMEOUT_EUNIT, ?TIMEOUT div 1000 + 70).
--define(WRITE_BATCH_SIZE, 25).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-setup(remote) ->
- {remote, setup()};
-setup({A, B}) ->
- Ctx = test_util:start_couch([couch_replicator]),
- Source = setup(A),
- Target = setup(B),
- {Ctx, {Source, Target}}.
-
-teardown({remote, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- teardown(Source),
- teardown(Target),
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
-compact_test_() ->
- Pairs = [{remote, remote}],
- {
- "Compaction during replication tests",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {Pair, fun should_populate_replicate_compact/2}
- || Pair <- Pairs
- ]
- }
- }.
-
-should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) ->
- {ok, RepPid, RepId} = replicate(Source, Target),
- {
- lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_run_replication(RepPid, RepId, Source, Target),
- should_all_processes_be_alive(RepPid, Source, Target),
- should_populate_and_compact(RepPid, Source, Target, 50, 3),
- should_wait_target_in_sync(Source, Target),
- should_ensure_replication_still_running(RepPid, RepId, Source, Target),
- should_cancel_replication(RepId, RepPid),
- should_compare_databases(Source, Target)
- ]}
- }.
-
-should_all_processes_be_alive(RepPid, Source, Target) ->
- ?_test(begin
- {ok, SourceDb} = reopen_db(Source),
- {ok, TargetDb} = reopen_db(Target),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(SourceDb))),
- ?assert(is_process_alive(couch_db:get_pid(TargetDb)))
- end).
-
-should_run_replication(RepPid, RepId, Source, Target) ->
- ?_test(check_active_tasks(RepPid, RepId, Source, Target)).
-
-should_ensure_replication_still_running(RepPid, RepId, Source, Target) ->
- ?_test(check_active_tasks(RepPid, RepId, Source, Target)).
-
-check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
- Source =
- case Src of
- {remote, NameSrc} ->
- <<(db_url(NameSrc))/binary, $/>>;
- _ ->
- Src
- end,
- Target =
- case Tgt of
- {remote, NameTgt} ->
- <<(db_url(NameTgt))/binary, $/>>;
- _ ->
- Tgt
- end,
- FullRepId = ?l2b(BaseId ++ Ext),
- Pid = ?l2b(pid_to_list(RepPid)),
- RepTasks = wait_for_task_status(),
- ?assertNotEqual(timeout, RepTasks),
- [RepTask] = RepTasks,
- ?assertEqual(Pid, couch_util:get_value(pid, RepTask)),
- ?assertEqual(FullRepId, couch_util:get_value(replication_id, RepTask)),
- ?assertEqual(true, couch_util:get_value(continuous, RepTask)),
- ?assertEqual(Source, couch_util:get_value(source, RepTask)),
- ?assertEqual(Target, couch_util:get_value(target, RepTask)),
- ?assert(is_integer(couch_util:get_value(docs_read, RepTask))),
- ?assert(is_integer(couch_util:get_value(docs_written, RepTask))),
- ?assert(is_integer(couch_util:get_value(doc_write_failures, RepTask))),
- ?assert(is_integer(couch_util:get_value(revisions_checked, RepTask))),
- ?assert(is_integer(couch_util:get_value(missing_revisions_found, RepTask))),
- ?assert(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask))),
- ?assert(is_integer(couch_util:get_value(source_seq, RepTask))),
- Pending = couch_util:get_value(changes_pending, RepTask),
- ?assert(is_integer(Pending)).
-
-replication_tasks() ->
- lists:filter(
- fun(P) ->
- couch_util:get_value(type, P) =:= replication
- end,
- couch_task_status:all()
- ).
-
-wait_for_task_status() ->
- test_util:wait(fun() ->
- case replication_tasks() of
- [] ->
- wait;
- Tasks ->
- Tasks
- end
- end).
-
-should_cancel_replication(RepId, RepPid) ->
- ?_assertNot(begin
- ok = couch_replicator_scheduler:remove_job(RepId),
- is_process_alive(RepPid)
- end).
-
-should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) ->
- {timeout, ?TIMEOUT_EUNIT,
- ?_test(begin
- {ok, SourceDb0} = reopen_db(Source),
- Writer = spawn_writer(SourceDb0),
- lists:foreach(
- fun(N) ->
- {ok, SourceDb} = reopen_db(Source),
- {ok, TargetDb} = reopen_db(Target),
- pause_writer(Writer),
-
- compact_db("source", SourceDb),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(SourceDb))),
- wait_for_compaction("source", SourceDb),
-
- compact_db("target", TargetDb),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(TargetDb))),
- wait_for_compaction("target", TargetDb),
-
- {ok, SourceDb2} = reopen_db(SourceDb),
- {ok, TargetDb2} = reopen_db(TargetDb),
-
- resume_writer(Writer),
- wait_writer(Writer, BatchSize * N),
-
- compact_db("source", SourceDb2),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(SourceDb2))),
- pause_writer(Writer),
- wait_for_compaction("source", SourceDb2),
- resume_writer(Writer),
-
- compact_db("target", TargetDb2),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(TargetDb2))),
- pause_writer(Writer),
- wait_for_compaction("target", TargetDb2),
- resume_writer(Writer)
- end,
- lists:seq(1, Rounds)
- ),
- stop_writer(Writer)
- end)}.
-
-should_wait_target_in_sync({remote, Source}, Target) ->
- should_wait_target_in_sync(Source, Target);
-should_wait_target_in_sync(Source, {remote, Target}) ->
- should_wait_target_in_sync(Source, Target);
-should_wait_target_in_sync(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT,
- ?_assert(begin
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
- ok = couch_db:close(SourceDb),
- SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
- wait_target_in_sync_loop(SourceDocCount, Target, 300)
- end)}.
-
-wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Could not get source and target databases in sync"}
- ]}
- );
-wait_target_in_sync_loop(DocCount, {remote, TargetName}, RetriesLeft) ->
- wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft);
-wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
- {ok, Target} = couch_db:open_int(TargetName, []),
- {ok, TargetInfo} = couch_db:get_db_info(Target),
- ok = couch_db:close(Target),
- TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
- case TargetDocCount == DocCount of
- true ->
- true;
- false ->
- ok = timer:sleep(?DELAY),
- wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
- end.
-
-should_compare_databases({remote, Source}, Target) ->
- should_compare_databases(Source, Target);
-should_compare_databases(Source, {remote, Target}) ->
- should_compare_databases(Source, Target);
-should_compare_databases(Source, Target) ->
- {timeout, 35,
- ?_test(begin
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, TargetDb} = couch_db:open_int(Target, []),
- Fun = fun(FullDocInfo, Acc) ->
- {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
- {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
- DocId = couch_util:get_value(<<"_id">>, Props),
- DocTarget =
- case couch_db:open_doc(TargetDb, DocId) of
- {ok, DocT} ->
- DocT;
- Error ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- lists:concat([
- "Error opening document '",
- ?b2l(DocId),
- "' from target: ",
- couch_util:to_list(Error)
- ])}
- ]}
- )
- end,
- DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
- ?assertEqual(DocJson, DocTargetJson),
- {ok, Acc}
- end,
- {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb)
- end)}.
-
-reopen_db({remote, Db}) ->
- reopen_db(Db);
-reopen_db(DbName) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- ok = couch_db:close(Db),
- {ok, Db};
-reopen_db(Db) ->
- reopen_db(couch_db:name(Db)).
-
-compact_db(Type, Db0) ->
- Name = couch_db:name(Db0),
- {ok, Db} = couch_db:open_int(Name, []),
- {ok, CompactPid} = couch_db:start_compact(Db),
- MonRef = erlang:monitor(process, CompactPid),
- receive
- {'DOWN', MonRef, process, CompactPid, normal} ->
- ok;
- {'DOWN', MonRef, process, CompactPid, noproc} ->
- ok;
- {'DOWN', MonRef, process, CompactPid, Reason} ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- lists:concat([
- "Error compacting ",
- Type,
- " database ",
- ?b2l(Name),
- ": ",
- couch_util:to_list(Reason)
- ])}
- ]}
- )
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- lists:concat([
- "Compaction for ",
- Type,
- " database ",
- ?b2l(Name),
- " didn't finish"
- ])}
- ]}
- )
- end,
- ok = couch_db:close(Db).
-
-wait_for_compaction(Type, Db) ->
- case couch_db:wait_for_compaction(Db) of
- ok ->
- ok;
- {error, noproc} ->
- ok;
- {error, Reason} ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- lists:concat([
- "Compaction of ",
- Type,
- " database failed with: ",
- Reason
- ])}
- ]}
- )
- end.
-
-replicate({remote, Db}, Target) ->
- replicate(db_url(Db), Target);
-replicate(Source, {remote, Db}) ->
- replicate(Source, db_url(Db));
-replicate(Source, Target) ->
- RepObject =
- {[
- {<<"source">>, Source},
- {<<"target">>, Target},
- {<<"continuous">>, true}
- ]},
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
- ok = couch_replicator_scheduler:add_job(Rep),
- couch_replicator_scheduler:reschedule(),
- Pid = get_pid(Rep#rep.id),
- {ok, Pid, Rep#rep.id}.
-
-wait_writer(Pid, NumDocs) ->
- case get_writer_num_docs_written(Pid) of
- N when N >= NumDocs ->
- ok;
- _ ->
- wait_writer(Pid, NumDocs)
- end.
-
-spawn_writer(Db) ->
- Parent = self(),
- Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
- Pid.
-
-pause_writer(Pid) ->
- Ref = make_ref(),
- Pid ! {pause, Ref},
- receive
- {paused, Ref} ->
- ok
- after ?TIMEOUT_WRITER ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Failed to pause source database writer"}
- ]}
- )
- end.
-
-resume_writer(Pid) ->
- Ref = make_ref(),
- Pid ! {continue, Ref},
- receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT_WRITER ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Failed to pause source database writer"}
- ]}
- )
- end.
-
-get_writer_num_docs_written(Pid) ->
- Ref = make_ref(),
- Pid ! {get_count, Ref},
- receive
- {count, Ref, Count} ->
- Count
- after ?TIMEOUT_WRITER ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- "Timeout getting number of documents written"
- " from source database writer"}
- ]}
- )
- end.
-
-stop_writer(Pid) ->
- Ref = make_ref(),
- Pid ! {stop, Ref},
- receive
- {stopped, Ref, DocsWritten} ->
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, _Reason} ->
- DocsWritten
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout stopping source database writer"}
- ]}
- )
- end
- after ?TIMEOUT_WRITER ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout stopping source database writer"}
- ]}
- )
- end.
-
-writer_loop(Db0, Parent, Counter) ->
- DbName = couch_db:name(Db0),
- {ok, Data} = file:read_file(?ATTFILE),
- maybe_pause(Parent, Counter),
- Docs = lists:map(
- fun(I) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?l2b(integer_to_list(Counter + I))},
- {<<"value">>, Counter + I},
- {<<"_attachments">>,
- {[
- {<<"icon1.png">>,
- {[
- {<<"data">>, base64:encode(Data)},
- {<<"content_type">>, <<"image/png">>}
- ]}},
- {<<"icon2.png">>,
- {[
- {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))},
- {<<"content_type">>, <<"image/png">>}
- ]}}
- ]}}
- ]}
- )
- end,
- lists:seq(1, ?WRITE_BATCH_SIZE)
- ),
- maybe_pause(Parent, Counter),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- ok = couch_db:close(Db),
- receive
- {get_count, Ref} ->
- Parent ! {count, Ref, Counter + ?WRITE_BATCH_SIZE},
- writer_loop(Db, Parent, Counter + ?WRITE_BATCH_SIZE);
- {stop, Ref} ->
- Parent ! {stopped, Ref, Counter + ?WRITE_BATCH_SIZE}
- after 0 ->
- timer:sleep(?DELAY),
- writer_loop(Db, Parent, Counter + ?WRITE_BATCH_SIZE)
- end.
-
-maybe_pause(Parent, Counter) ->
- receive
- {get_count, Ref} ->
- Parent ! {count, Ref, Counter};
- {pause, Ref} ->
- Parent ! {paused, Ref},
- receive
- {continue, Ref2} ->
- Parent ! {ok, Ref2}
- end
- after 0 ->
- ok
- end.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
deleted file mode 100644
index 7adbb6852..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
+++ /dev/null
@@ -1,237 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_connection_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- Host = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = config:get("httpd", "port", "5984"),
- {Host, Port}.
-
-teardown(_) ->
- ok.
-
-httpc_pool_test_() ->
- {
- "replicator connection sharing tests",
- {
- setup,
- fun() -> test_util:start_couch([couch_replicator]) end,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun connections_shared_after_release/1,
- fun connections_not_shared_after_owner_death/1,
- fun idle_connections_closed/1,
- fun test_owner_monitors/1,
- fun worker_discards_creds_on_create/1,
- fun worker_discards_url_creds_after_request/1,
- fun worker_discards_creds_in_headers_after_request/1,
- fun worker_discards_proxy_creds_after_request/1
- ]
- }
- }
- }.
-
-connections_shared_after_release({Host, Port}) ->
- ?_test(begin
- URL = "http://" ++ Host ++ ":" ++ Port,
- Self = self(),
- {ok, Pid} = couch_replicator_connection:acquire(URL),
- couch_replicator_connection:release(Pid),
- spawn(fun() ->
- Self ! couch_replicator_connection:acquire(URL)
- end),
- receive
- {ok, Pid2} ->
- ?assertEqual(Pid, Pid2)
- end
- end).
-
-connections_not_shared_after_owner_death({Host, Port}) ->
- ?_test(begin
- URL = "http://" ++ Host ++ ":" ++ Port,
- Self = self(),
- spawn(fun() ->
- Self ! couch_replicator_connection:acquire(URL),
- error("simulate division by zero without compiler warning")
- end),
- receive
- {ok, Pid} ->
- {ok, Pid2} = couch_replicator_connection:acquire(URL),
- ?assertNotEqual(Pid, Pid2),
- MRef = monitor(process, Pid),
- receive
- {'DOWN', MRef, process, Pid, _Reason} ->
- ?assert(not is_process_alive(Pid));
- Other ->
- throw(Other)
- end
- end
- end).
-
-idle_connections_closed({Host, Port}) ->
- ?_test(begin
- URL = "http://" ++ Host ++ ":" ++ Port,
- {ok, Pid} = couch_replicator_connection:acquire(URL),
- couch_replicator_connection ! close_idle_connections,
- ?assert(ets:member(couch_replicator_connection, Pid)),
- % block until idle connections have closed
- sys:get_status(couch_replicator_connection),
- couch_replicator_connection:release(Pid),
- couch_replicator_connection ! close_idle_connections,
- % block until idle connections have closed
- sys:get_status(couch_replicator_connection),
- ?assert(not ets:member(couch_replicator_connection, Pid))
- end).
-
-test_owner_monitors({Host, Port}) ->
- ?_test(begin
- URL = "http://" ++ Host ++ ":" ++ Port,
- {ok, Worker0} = couch_replicator_connection:acquire(URL),
- assert_monitors_equal([{process, self()}]),
- couch_replicator_connection:release(Worker0),
- assert_monitors_equal([]),
- {Workers, Monitors} = lists:foldl(
- fun(_, {WAcc, MAcc}) ->
- {ok, Worker1} = couch_replicator_connection:acquire(URL),
- MAcc1 = [{process, self()} | MAcc],
- assert_monitors_equal(MAcc1),
- {[Worker1 | WAcc], MAcc1}
- end,
- {[], []},
- lists:seq(1, 5)
- ),
- lists:foldl(
- fun(Worker2, Acc) ->
- [_ | NewAcc] = Acc,
- couch_replicator_connection:release(Worker2),
- assert_monitors_equal(NewAcc),
- NewAcc
- end,
- Monitors,
- Workers
- )
- end).
-
-worker_discards_creds_on_create({Host, Port}) ->
- ?_test(begin
- {User, Pass, B64Auth} = user_pass(),
- URL = "http://" ++ User ++ ":" ++ Pass ++ "@" ++ Host ++ ":" ++ Port,
- {ok, WPid} = couch_replicator_connection:acquire(URL),
- Internals = worker_internals(WPid),
- ?assert(string:str(Internals, B64Auth) =:= 0),
- ?assert(string:str(Internals, Pass) =:= 0)
- end).
-
-worker_discards_url_creds_after_request({Host, _}) ->
- ?_test(begin
- {User, Pass, B64Auth} = user_pass(),
- {Port, ServerPid} = server(),
- PortStr = integer_to_list(Port),
- URL = "http://" ++ User ++ ":" ++ Pass ++ "@" ++ Host ++ ":" ++ PortStr,
- {ok, WPid} = couch_replicator_connection:acquire(URL),
- ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], [])),
- Internals = worker_internals(WPid),
- ?assert(string:str(Internals, B64Auth) =:= 0),
- ?assert(string:str(Internals, Pass) =:= 0),
- couch_replicator_connection:release(WPid),
- unlink(ServerPid),
- exit(ServerPid, kill)
- end).
-
-worker_discards_creds_in_headers_after_request({Host, _}) ->
- ?_test(begin
- {_User, Pass, B64Auth} = user_pass(),
- {Port, ServerPid} = server(),
- PortStr = integer_to_list(Port),
- URL = "http://" ++ Host ++ ":" ++ PortStr,
- {ok, WPid} = couch_replicator_connection:acquire(URL),
- Headers = [{"Authorization", "Basic " ++ B64Auth}],
- ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, Headers, [])),
- Internals = worker_internals(WPid),
- ?assert(string:str(Internals, B64Auth) =:= 0),
- ?assert(string:str(Internals, Pass) =:= 0),
- couch_replicator_connection:release(WPid),
- unlink(ServerPid),
- exit(ServerPid, kill)
- end).
-
-worker_discards_proxy_creds_after_request({Host, _}) ->
- ?_test(begin
- {User, Pass, B64Auth} = user_pass(),
- {Port, ServerPid} = server(),
- PortStr = integer_to_list(Port),
- URL = "http://" ++ Host ++ ":" ++ PortStr,
- {ok, WPid} = couch_replicator_connection:acquire(URL),
- Opts = [
- {proxy_host, Host},
- {proxy_port, Port},
- {proxy_user, User},
- {proxy_pass, Pass}
- ],
- ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], Opts)),
- Internals = worker_internals(WPid),
- ?assert(string:str(Internals, B64Auth) =:= 0),
- ?assert(string:str(Internals, Pass) =:= 0),
- couch_replicator_connection:release(WPid),
- unlink(ServerPid),
- exit(ServerPid, kill)
- end).
-
-send_req(WPid, URL, Headers, Opts) ->
- ibrowse:send_req_direct(WPid, URL, Headers, get, [], Opts).
-
-user_pass() ->
- User = "specialuser",
- Pass = "averysecretpassword",
- B64Auth = ibrowse_lib:encode_base64(User ++ ":" ++ Pass),
- {User, Pass, B64Auth}.
-
-worker_internals(Pid) ->
- Dict = io_lib:format("~p", [erlang:process_info(Pid, dictionary)]),
- State = io_lib:format("~p", [sys:get_state(Pid)]),
- lists:flatten([Dict, State]).
-
-server() ->
- {ok, LSock} = gen_tcp:listen(0, [{recbuf, 256}, {active, false}]),
- {ok, LPort} = inet:port(LSock),
- SPid = spawn_link(fun() -> server_responder(LSock) end),
- {LPort, SPid}.
-
-server_responder(LSock) ->
- {ok, Sock} = gen_tcp:accept(LSock),
- case gen_tcp:recv(Sock, 0) of
- {ok, Data} ->
- % sanity check that all the request data was received
- ?assert(lists:prefix("GET ", Data)),
- ?assert(lists:suffix("\r\n\r\n", Data)),
- Res = ["HTTP/1.1 200 OK", "Content-Length: 0", "\r\n"],
- ok = gen_tcp:send(Sock, string:join(Res, "\r\n"));
- Other ->
- gen_tcp:close(Sock),
- throw({replication_eunit_tcp_server_crashed, Other})
- end,
- server_responder(LSock).
-
-assert_monitors_equal(ShouldBe) ->
- sys:get_status(couch_replicator_connection),
- {monitors, Monitors} = process_info(whereis(couch_replicator_connection), monitors),
- ?assertEqual(Monitors, ShouldBe).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
deleted file mode 100644
index 8adcd25bd..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
+++ /dev/null
@@ -1,144 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_create_target_with_options_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
--define(USERNAME, "rep_admin").
--define(PASSWORD, "secret").
-
-setup() ->
- Ctx = test_util:start_couch([fabric, mem3, couch_replicator, chttpd]),
- Hashed = couch_passwords:hash_admin_password(?PASSWORD),
- ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist = false),
- Source = ?tempdb(),
- Target = ?tempdb(),
- {Ctx, {Source, Target}}.
-
-teardown({Ctx, {_Source, _Target}}) ->
- config:delete("admins", ?USERNAME),
- ok = test_util:stop_couch(Ctx).
-
-create_target_with_options_replication_test_() ->
- {
- "Create target with range partitions tests",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_create_target_with_q_4/1,
- fun should_create_target_with_q_2_n_1/1,
- fun should_create_target_with_default/1,
- fun should_not_create_target_with_q_any/1
- ]
- }
- }.
-
-should_create_target_with_q_4({_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)},
- {<<"create_target">>, true},
- {<<"create_target_params">>, {[{<<"q">>, <<"4">>}]}}
- ]},
- create_db(Source),
- create_doc(Source),
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
-
- {ok, TargetInfo} = fabric:get_db_info(Target),
- {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
- delete_db(Source),
- delete_db(Target),
- ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)).
-
-should_create_target_with_q_2_n_1({_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)},
- {<<"create_target">>, true},
- {<<"create_target_params">>, {[{<<"q">>, <<"2">>}, {<<"n">>, <<"1">>}]}}
- ]},
- create_db(Source),
- create_doc(Source),
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
-
- {ok, TargetInfo} = fabric:get_db_info(Target),
- {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
- delete_db(Source),
- delete_db(Target),
- [
- ?_assertEqual(2, couch_util:get_value(q, ClusterInfo)),
- ?_assertEqual(1, couch_util:get_value(n, ClusterInfo))
- ].
-
-should_create_target_with_default({_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)},
- {<<"create_target">>, true}
- ]},
- create_db(Source),
- create_doc(Source),
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
-
- {ok, TargetInfo} = fabric:get_db_info(Target),
- {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
- Q = config:get_integer("cluster", "q", 2),
- delete_db(Source),
- delete_db(Target),
- ?_assertEqual(Q, couch_util:get_value(q, ClusterInfo)).
-
-should_not_create_target_with_q_any({_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)},
- {<<"create_target">>, false},
- {<<"create_target_params">>, {[{<<"q">>, <<"1">>}]}}
- ]},
- create_db(Source),
- create_doc(Source),
- {error, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- DbExist = is_list(catch mem3:shards(Target)),
- delete_db(Source),
- ?_assertEqual(false, DbExist).
-
-create_doc(DbName) ->
- Body = {[{<<"foo">>, <<"bar">>}]},
- NewDoc = #doc{body = Body},
- {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]).
-
-create_db(DbName) ->
- ok = fabric:create_db(DbName, [?ADMIN_CTX]).
-
-delete_db(DbName) ->
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
-
-db_url(DbName) ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- ?l2b(
- io_lib:format("http://~s:~s@~s:~b/~s", [
- ?USERNAME,
- ?PASSWORD,
- Addr,
- Port,
- DbName
- ])
- ).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl
deleted file mode 100644
index 7778bd77d..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl
+++ /dev/null
@@ -1,260 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_error_reporting_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
-setup_all() ->
- test_util:start_couch([couch_replicator, chttpd, mem3, fabric]).
-
-teardown_all(Ctx) ->
- ok = test_util:stop_couch(Ctx).
-
-setup() ->
- meck:unload(),
- Source = setup_db(),
- Target = setup_db(),
- {Source, Target}.
-
-teardown({Source, Target}) ->
- meck:unload(),
- teardown_db(Source),
- teardown_db(Target),
- ok.
-
-error_reporting_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun t_fail_bulk_docs/1,
- fun t_fail_changes_reader/1,
- fun t_fail_revs_diff/1,
- fun t_fail_changes_queue/1,
- fun t_fail_changes_manager/1,
- fun t_fail_changes_reader_proc/1
- ]
- }
- }.
-
-t_fail_bulk_docs({Source, Target}) ->
- ?_test(begin
- populate_db(Source, 1, 5),
- {ok, RepId} = replicate(Source, Target),
- wait_target_in_sync(Source, Target),
-
- {ok, Listener} = rep_result_listener(RepId),
- mock_fail_req("/_bulk_docs", {ok, "403", [], [<<"{\"x\":\"y\"}">>]}),
- populate_db(Source, 6, 6),
-
- {error, Result} = wait_rep_result(RepId),
- ?assertEqual({bulk_docs_failed, 403, {[{<<"x">>, <<"y">>}]}}, Result),
-
- couch_replicator_notifier:stop(Listener)
- end).
-
-t_fail_changes_reader({Source, Target}) ->
- ?_test(begin
- populate_db(Source, 1, 5),
- {ok, RepId} = replicate(Source, Target),
- wait_target_in_sync(Source, Target),
-
- {ok, Listener} = rep_result_listener(RepId),
- mock_fail_req("/_changes", {ok, "418", [], [<<"{\"x\":\"y\"}">>]}),
- populate_db(Source, 6, 6),
-
- {error, Result} = wait_rep_result(RepId),
- ?assertEqual({changes_req_failed, 418, {[{<<"x">>, <<"y">>}]}}, Result),
-
- couch_replicator_notifier:stop(Listener)
- end).
-
-t_fail_revs_diff({Source, Target}) ->
- ?_test(begin
- populate_db(Source, 1, 5),
- {ok, RepId} = replicate(Source, Target),
- wait_target_in_sync(Source, Target),
-
- {ok, Listener} = rep_result_listener(RepId),
- mock_fail_req("/_revs_diff", {ok, "407", [], [<<"{\"x\":\"y\"}">>]}),
- populate_db(Source, 6, 6),
-
- {error, Result} = wait_rep_result(RepId),
- ?assertEqual({revs_diff_failed, 407, {[{<<"x">>, <<"y">>}]}}, Result),
-
- couch_replicator_notifier:stop(Listener)
- end).
-
-t_fail_changes_queue({Source, Target}) ->
- ?_test(begin
- populate_db(Source, 1, 5),
- {ok, RepId} = replicate(Source, Target),
- wait_target_in_sync(Source, Target),
-
- RepPid = couch_replicator_test_helper:get_pid(RepId),
- State = sys:get_state(RepPid),
- ChangesQueue = element(20, State),
- ?assert(is_process_alive(ChangesQueue)),
-
- {ok, Listener} = rep_result_listener(RepId),
- exit(ChangesQueue, boom),
-
- {error, Result} = wait_rep_result(RepId),
- ?assertEqual({changes_queue_died, boom}, Result),
- couch_replicator_notifier:stop(Listener)
- end).
-
-t_fail_changes_manager({Source, Target}) ->
- ?_test(begin
- populate_db(Source, 1, 5),
- {ok, RepId} = replicate(Source, Target),
- wait_target_in_sync(Source, Target),
-
- RepPid = couch_replicator_test_helper:get_pid(RepId),
- State = sys:get_state(RepPid),
- ChangesManager = element(21, State),
- ?assert(is_process_alive(ChangesManager)),
-
- {ok, Listener} = rep_result_listener(RepId),
- exit(ChangesManager, bam),
-
- {error, Result} = wait_rep_result(RepId),
- ?assertEqual({changes_manager_died, bam}, Result),
- couch_replicator_notifier:stop(Listener)
- end).
-
-t_fail_changes_reader_proc({Source, Target}) ->
- ?_test(begin
- populate_db(Source, 1, 5),
- {ok, RepId} = replicate(Source, Target),
- wait_target_in_sync(Source, Target),
-
- RepPid = couch_replicator_test_helper:get_pid(RepId),
- State = sys:get_state(RepPid),
- ChangesReader = element(22, State),
- ?assert(is_process_alive(ChangesReader)),
-
- {ok, Listener} = rep_result_listener(RepId),
- exit(ChangesReader, kapow),
-
- {error, Result} = wait_rep_result(RepId),
- ?assertEqual({changes_reader_died, kapow}, Result),
- couch_replicator_notifier:stop(Listener)
- end).
-
-mock_fail_req(Path, Return) ->
- meck:expect(
- ibrowse,
- send_req_direct,
- fun(W, Url, Headers, Meth, Body, Opts, TOut) ->
- Args = [W, Url, Headers, Meth, Body, Opts, TOut],
- {ok, {_, _, _, _, UPath, _}} = http_uri:parse(Url),
- case lists:suffix(Path, UPath) of
- true -> Return;
- false -> meck:passthrough(Args)
- end
- end
- ).
-
-rep_result_listener(RepId) ->
- ReplyTo = self(),
- {ok, _Listener} = couch_replicator_notifier:start_link(
- fun
- ({_, RepId2, _} = Ev) when RepId2 =:= RepId ->
- ReplyTo ! Ev;
- (_) ->
- ok
- end
- ).
-
-wait_rep_result(RepId) ->
- receive
- {finished, RepId, RepResult} -> {ok, RepResult};
- {error, RepId, Reason} -> {error, Reason}
- end.
-
-setup_db() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-teardown_db(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-
-populate_db(DbName, Start, End) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Docs = lists:foldl(
- fun(DocIdCounter, Acc) ->
- Id = integer_to_binary(DocIdCounter),
- Doc = #doc{id = Id, body = {[]}},
- [Doc | Acc]
- end,
- [],
- lists:seq(Start, End)
- ),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- ok = couch_db:close(Db).
-
-wait_target_in_sync(Source, Target) ->
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
- ok = couch_db:close(SourceDb),
- SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
- wait_target_in_sync_loop(SourceDocCount, Target, 300).
-
-wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Could not get source and target databases in sync"}
- ]}
- );
-wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
- {ok, Target} = couch_db:open_int(TargetName, []),
- {ok, TargetInfo} = couch_db:get_db_info(Target),
- ok = couch_db:close(Target),
- TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
- case TargetDocCount == DocCount of
- true ->
- true;
- false ->
- ok = timer:sleep(500),
- wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
- end.
-
-replicate(Source, Target) ->
- SrcUrl = couch_replicator_test_helper:db_url(Source),
- TgtUrl = couch_replicator_test_helper:db_url(Target),
- RepObject =
- {[
- {<<"source">>, SrcUrl},
- {<<"target">>, TgtUrl},
- {<<"continuous">>, true},
- {<<"worker_processes">>, 1},
- {<<"retries_per_request">>, 1},
- % Low connection timeout so _changes feed gets restarted quicker
- {<<"connection_timeout">>, 3000}
- ]},
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
- ok = couch_replicator_scheduler:add_job(Rep),
- couch_replicator_scheduler:reschedule(),
- {ok, Rep#rep.id}.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl
deleted file mode 100644
index b77b83daa..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl
+++ /dev/null
@@ -1,262 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_filtered_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
--define(DDOC,
- {[
- {<<"_id">>, <<"_design/filter_ddoc">>},
- {<<"filters">>,
- {[
- {<<"testfilter">>, <<
- "\n"
- " function(doc, req){if (doc.class == 'mammal') return true;}\n"
- " "
- >>},
- {<<"queryfilter">>, <<
- "\n"
- " function(doc, req) {\n"
- " if (doc.class && req.query.starts) {\n"
- " return doc.class.indexOf(req.query.starts) === 0;\n"
- " }\n"
- " else {\n"
- " return false;\n"
- " }\n"
- " }\n"
- " "
- >>}
- ]}},
- {<<"views">>,
- {[
- {<<"mammals">>,
- {[
- {<<"map">>, <<
- "\n"
- " function(doc) {\n"
- " if (doc.class == 'mammal') {\n"
- " emit(doc._id, null);\n"
- " }\n"
- " }\n"
- " "
- >>}
- ]}}
- ]}}
- ]}
-).
-
-setup(_) ->
- Ctx = test_util:start_couch([couch_replicator]),
- Source = create_db(),
- create_docs(Source),
- Target = create_db(),
- {Ctx, {Source, Target}}.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- delete_db(Source),
- delete_db(Target),
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
-filtered_replication_test_() ->
- Pairs = [{remote, remote}],
- {
- "Filtered replication tests",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Pair, fun should_succeed/2} || Pair <- Pairs]
- }
- }.
-
-query_filtered_replication_test_() ->
- Pairs = [{remote, remote}],
- {
- "Filtered with query replication tests",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Pair, fun should_succeed_with_query/2} || Pair <- Pairs]
- }
- }.
-
-view_filtered_replication_test_() ->
- Pairs = [{remote, remote}],
- {
- "Filtered with a view replication tests",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Pair, fun should_succeed_with_view/2} || Pair <- Pairs]
- }
- }.
-
-should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)},
- {<<"filter">>, <<"filter_ddoc/testfilter">>}
- ]},
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- %% FilteredFun is an Erlang version of following JS function
- %% function(doc, req){if (doc.class == 'mammal') return true;}
- FilterFun = fun(_DocId, {Props}) ->
- couch_util:get_value(<<"class">>, Props) == <<"mammal">>
- end,
- {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
- {"Target DB has proper number of docs",
- ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
- {"Target DB doesn't have deleted docs",
- ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
- {"All the docs filtered as expected",
- ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
- ]}.
-
-should_succeed_with_query({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)},
- {<<"filter">>, <<"filter_ddoc/queryfilter">>},
- {<<"query_params">>,
- {[
- {<<"starts">>, <<"a">>}
- ]}}
- ]},
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- FilterFun = fun(_DocId, {Props}) ->
- case couch_util:get_value(<<"class">>, Props) of
- <<"a", _/binary>> -> true;
- _ -> false
- end
- end,
- {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
- {"Target DB has proper number of docs",
- ?_assertEqual(2, proplists:get_value(doc_count, TargetDbInfo))},
- {"Target DB doesn't have deleted docs",
- ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
- {"All the docs filtered as expected",
- ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
- ]}.
-
-should_succeed_with_view({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)},
- {<<"filter">>, <<"_view">>},
- {<<"query_params">>,
- {[
- {<<"view">>, <<"filter_ddoc/mammals">>}
- ]}}
- ]},
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- FilterFun = fun(_DocId, {Props}) ->
- couch_util:get_value(<<"class">>, Props) == <<"mammal">>
- end,
- {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
- {"Target DB has proper number of docs",
- ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
- {"Target DB doesn't have deleted docs",
- ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
- {"All the docs filtered as expected",
- ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
- ]}.
-
-compare_dbs(Source, Target, FilterFun) ->
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, TargetDb} = couch_db:open_int(Target, []),
- {ok, TargetDbInfo} = couch_db:get_db_info(TargetDb),
- Fun = fun(FullDocInfo, Acc) ->
- {ok, DocId, SourceDoc} = read_doc(SourceDb, FullDocInfo),
- TargetReply = read_doc(TargetDb, DocId),
- case FilterFun(DocId, SourceDoc) of
- true ->
- ValidReply = {ok, DocId, SourceDoc} == TargetReply,
- {ok, [ValidReply | Acc]};
- false ->
- ValidReply = {not_found, missing} == TargetReply,
- {ok, [ValidReply | Acc]}
- end
- end,
- {ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb),
- {ok, TargetDbInfo, AllReplies}.
-
-read_doc(Db, DocIdOrInfo) ->
- case couch_db:open_doc(Db, DocIdOrInfo) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, [attachments]),
- DocId = couch_util:get_value(<<"_id">>, Props),
- {ok, DocId, {Props}};
- Error ->
- Error
- end.
-
-create_db() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-create_docs(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj(?DDOC),
- Doc1 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc1">>},
- {<<"class">>, <<"mammal">>},
- {<<"value">>, 1}
- ]}
- ),
- Doc2 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc2">>},
- {<<"class">>, <<"amphibians">>},
- {<<"value">>, 2}
- ]}
- ),
- Doc3 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc3">>},
- {<<"class">>, <<"reptiles">>},
- {<<"value">>, 3}
- ]}
- ),
- Doc4 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc4">>},
- {<<"class">>, <<"arthropods">>},
- {<<"value">>, 2}
- ]}
- ),
- {ok, _} = couch_db:update_docs(Db, [DDoc, Doc1, Doc2, Doc3, Doc4]),
- couch_db:close(Db).
-
-delete_db(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-
-db_url(remote, DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl
deleted file mode 100644
index 31f1da48e..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl
+++ /dev/null
@@ -1,178 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_httpc_pool_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- spawn_pool().
-
-teardown(Pool) ->
- stop_pool(Pool).
-
-httpc_pool_test_() ->
- {
- "httpc pool tests",
- {
- setup,
- fun() -> test_util:start_couch([couch_replicator]) end,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_block_new_clients_when_full/1,
- fun should_replace_worker_on_death/1
- ]
- }
- }
- }.
-
-should_block_new_clients_when_full(Pool) ->
- ?_test(begin
- Client1 = spawn_client(Pool),
- Client2 = spawn_client(Pool),
- Client3 = spawn_client(Pool),
-
- ?assertEqual(ok, ping_client(Client1)),
- ?assertEqual(ok, ping_client(Client2)),
- ?assertEqual(ok, ping_client(Client3)),
-
- Worker1 = get_client_worker(Client1, "1"),
- Worker2 = get_client_worker(Client2, "2"),
- Worker3 = get_client_worker(Client3, "3"),
-
- ?assert(is_process_alive(Worker1)),
- ?assert(is_process_alive(Worker2)),
- ?assert(is_process_alive(Worker3)),
-
- ?assertNotEqual(Worker1, Worker2),
- ?assertNotEqual(Worker2, Worker3),
- ?assertNotEqual(Worker3, Worker1),
-
- Client4 = spawn_client(Pool),
- ?assertEqual(timeout, ping_client(Client4)),
-
- ?assertEqual(ok, stop_client(Client1)),
- ?assertEqual(ok, ping_client(Client4)),
-
- Worker4 = get_client_worker(Client4, "4"),
- ?assertEqual(Worker1, Worker4),
-
- lists:foreach(
- fun(C) ->
- ?assertEqual(ok, stop_client(C))
- end,
- [Client2, Client3, Client4]
- )
- end).
-
-should_replace_worker_on_death(Pool) ->
- ?_test(begin
- Client1 = spawn_client(Pool),
- ?assertEqual(ok, ping_client(Client1)),
- Worker1 = get_client_worker(Client1, "1"),
- ?assert(is_process_alive(Worker1)),
-
- ?assertEqual(ok, kill_client_worker(Client1)),
- ?assertNot(is_process_alive(Worker1)),
- ?assertEqual(ok, stop_client(Client1)),
-
- Client2 = spawn_client(Pool),
- ?assertEqual(ok, ping_client(Client2)),
- Worker2 = get_client_worker(Client2, "2"),
- ?assert(is_process_alive(Worker2)),
-
- ?assertNotEqual(Worker1, Worker2),
- ?assertEqual(ok, stop_client(Client2))
- end).
-
-spawn_client(Pool) ->
- Parent = self(),
- Ref = make_ref(),
- Pid = spawn(fun() ->
- {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
- loop(Parent, Ref, Worker, Pool)
- end),
- {Pid, Ref}.
-
-ping_client({Pid, Ref}) ->
- Pid ! ping,
- receive
- {pong, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-get_client_worker({Pid, Ref}, ClientName) ->
- Pid ! get_worker,
- receive
- {worker, Ref, Worker} ->
- Worker
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout getting client " ++ ClientName ++ " worker"}
- ]}
- )
- end.
-
-stop_client({Pid, Ref}) ->
- Pid ! stop,
- receive
- {stop, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-kill_client_worker({Pid, Ref}) ->
- Pid ! get_worker,
- receive
- {worker, Ref, Worker} ->
- exit(Worker, kill),
- ok
- after ?TIMEOUT ->
- timeout
- end.
-
-loop(Parent, Ref, Worker, Pool) ->
- receive
- ping ->
- Parent ! {pong, Ref},
- loop(Parent, Ref, Worker, Pool);
- get_worker ->
- Parent ! {worker, Ref, Worker},
- loop(Parent, Ref, Worker, Pool);
- stop ->
- couch_replicator_httpc_pool:release_worker(Pool, Worker),
- Parent ! {stop, Ref}
- end.
-
-spawn_pool() ->
- Host = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = config:get("httpd", "port", "5984"),
- {ok, Pool} = couch_replicator_httpc_pool:start_link(
- "http://" ++ Host ++ ":" ++ Port, [{max_connections, 3}]
- ),
- Pool.
-
-stop_pool(Pool) ->
- ok = couch_replicator_httpc_pool:stop(Pool).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl
deleted file mode 100644
index 9ed415a29..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl
+++ /dev/null
@@ -1,86 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_id_too_long_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
-setup(_) ->
- Ctx = test_util:start_couch([couch_replicator]),
- Source = create_db(),
- create_doc(Source),
- Target = create_db(),
- {Ctx, {Source, Target}}.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- delete_db(Source),
- delete_db(Target),
- config:set("replicator", "max_document_id_length", "infinity"),
- ok = test_util:stop_couch(Ctx).
-
-id_too_long_replication_test_() ->
- Pairs = [{remote, remote}],
- {
- "Doc id too long tests",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Pair, fun should_succeed/2} || Pair <- Pairs] ++
- [{Pair, fun should_fail/2} || Pair <- Pairs]
- }
- }.
-
-should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)}
- ]},
- config:set("replicator", "max_document_id_length", "5"),
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- ?_assertEqual(ok, couch_replicator_test_helper:compare_dbs(Source, Target)).
-
-should_fail({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)}
- ]},
- config:set("replicator", "max_document_id_length", "4"),
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- ?_assertError(
- {badmatch, {not_found, missing}},
- couch_replicator_test_helper:compare_dbs(Source, Target)
- ).
-
-create_db() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-create_doc(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc = couch_doc:from_json_obj({[{<<"_id">>, <<"12345">>}]}),
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- couch_db:close(Db).
-
-delete_db(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-
-db_url(remote, DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl
deleted file mode 100644
index 2f0e2a1f0..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl
+++ /dev/null
@@ -1,127 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_large_atts_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_replicator_test_helper, [
- db_url/1,
- replicate/2,
- compare_dbs/2
-]).
-
--define(ATT_SIZE_1, 2 * 1024 * 1024).
--define(ATT_SIZE_2, round(6.6 * 1024 * 1024)).
--define(DOCS_COUNT, 11).
--define(TIMEOUT_EUNIT, 120).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-setup(remote) ->
- {remote, setup()};
-setup({A, B}) ->
- Ctx = test_util:start_couch([couch_replicator]),
- config:set("attachments", "compressible_types", "text/*", false),
- Source = setup(A),
- Target = setup(B),
- {Ctx, {Source, Target}}.
-
-teardown({remote, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- teardown(Source),
- teardown(Target),
-
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
-large_atts_test_() ->
- Pairs = [{remote, remote}],
- {
- "Replicate docs with large attachments",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {Pair, fun should_populate_replicate_compact/2}
- || Pair <- Pairs
- ]
- }
- }.
-
-should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) ->
- {
- lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source(Source),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target)
- ]}
- }.
-
-should_populate_source({remote, Source}) ->
- should_populate_source(Source);
-should_populate_source(Source) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, ?DOCS_COUNT))}.
-
-should_replicate({remote, Source}, Target) ->
- should_replicate(db_url(Source), Target);
-should_replicate(Source, {remote, Target}) ->
- should_replicate(Source, db_url(Target));
-should_replicate(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
-
-should_compare_databases({remote, Source}, Target) ->
- should_compare_databases(Source, Target);
-should_compare_databases(Source, {remote, Target}) ->
- should_compare_databases(Source, Target);
-should_compare_databases(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
-
-populate_db(DbName, DocCount) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Docs = lists:foldl(
- fun(DocIdCounter, Acc) ->
- Doc = #doc{
- id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
- body = {[]},
- atts = [
- att(<<"att1">>, ?ATT_SIZE_1, <<"text/plain">>),
- att(<<"att2">>, ?ATT_SIZE_2, <<"app/binary">>)
- ]
- },
- [Doc | Acc]
- end,
- [],
- lists:seq(1, DocCount)
- ),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- couch_db:close(Db).
-
-att(Name, Size, Type) ->
- couch_att:new([
- {name, Name},
- {type, Type},
- {att_len, Size},
- {data, fun(Count) -> crypto:strong_rand_bytes(Count) end}
- ]).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
deleted file mode 100644
index 86daa808f..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
+++ /dev/null
@@ -1,229 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_many_leaves_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_replicator_test_helper, [
- db_url/1,
- replicate/2
-]).
-
--define(DOCS_CONFLICTS, [
- {<<"doc1">>, 10},
- % use some _design docs as well to test the special handling for them
- {<<"_design/doc2">>, 100},
- % a number > MaxURLlength (7000) / length(DocRevisionString)
- {<<"doc3">>, 210}
-]).
--define(NUM_ATTS, 2).
--define(TIMEOUT_EUNIT, 60).
--define(i2l(I), integer_to_list(I)).
--define(io2b(Io), iolist_to_binary(Io)).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-setup(remote) ->
- {remote, setup()};
-setup({A, B}) ->
- Ctx = test_util:start_couch([couch_replicator]),
- Source = setup(A),
- Target = setup(B),
- {Ctx, {Source, Target}}.
-
-teardown({remote, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- teardown(Source),
- teardown(Target),
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
-docs_with_many_leaves_test_() ->
- Pairs = [{remote, remote}],
- {
- "Replicate documents with many leaves",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {Pair, fun should_populate_replicate_compact/2}
- || Pair <- Pairs
- ]
- }
- }.
-
-should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) ->
- {
- lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source(Source),
- should_replicate(Source, Target),
- should_verify_target(Source, Target),
- should_add_attachments_to_source(Source),
- should_replicate(Source, Target),
- should_verify_target(Source, Target)
- ]}
- }.
-
-should_populate_source({remote, Source}) ->
- should_populate_source(Source);
-should_populate_source(Source) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}.
-
-should_replicate({remote, Source}, Target) ->
- should_replicate(db_url(Source), Target);
-should_replicate(Source, {remote, Target}) ->
- should_replicate(Source, db_url(Target));
-should_replicate(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
-
-should_verify_target({remote, Source}, Target) ->
- should_verify_target(Source, Target);
-should_verify_target(Source, {remote, Target}) ->
- should_verify_target(Source, Target);
-should_verify_target(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT,
- ?_test(begin
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, TargetDb} = couch_db:open_int(Target, []),
- verify_target(SourceDb, TargetDb, ?DOCS_CONFLICTS),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb)
- end)}.
-
-should_add_attachments_to_source({remote, Source}) ->
- should_add_attachments_to_source(Source);
-should_add_attachments_to_source(Source) ->
- {timeout, ?TIMEOUT_EUNIT,
- ?_test(begin
- {ok, SourceDb} = couch_db:open_int(Source, [?ADMIN_CTX]),
- add_attachments(SourceDb, ?NUM_ATTS, ?DOCS_CONFLICTS),
- ok = couch_db:close(SourceDb)
- end)}.
-
-populate_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- lists:foreach(
- fun({DocId, NumConflicts}) ->
- Value = <<"0">>,
- Doc = #doc{
- id = DocId,
- body = {[{<<"value">>, Value}]}
- },
- {ok, _} = couch_db:update_doc(Db, Doc, [?ADMIN_CTX]),
- {ok, _} = add_doc_siblings(Db, DocId, NumConflicts)
- end,
- ?DOCS_CONFLICTS
- ),
- couch_db:close(Db).
-
-add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
- add_doc_siblings(Db, DocId, NumLeaves, [], []).
-
-add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
- {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
- {ok, AccRevs};
-add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
- Value = ?l2b(?i2l(NumLeaves)),
- Rev = couch_hash:md5_hash(Value),
- Doc = #doc{
- id = DocId,
- revs = {1, [Rev]},
- body = {[{<<"value">>, Value}]}
- },
- add_doc_siblings(
- Db,
- DocId,
- NumLeaves - 1,
- [Doc | AccDocs],
- [{1, Rev} | AccRevs]
- ).
-
-verify_target(_SourceDb, _TargetDb, []) ->
- ok;
-verify_target(SourceDb, TargetDb, [{DocId, NumConflicts} | Rest]) ->
- {ok, SourceLookups} = couch_db:open_doc_revs(
- SourceDb,
- DocId,
- all,
- [conflicts, deleted_conflicts]
- ),
- {ok, TargetLookups} = couch_db:open_doc_revs(
- TargetDb,
- DocId,
- all,
- [conflicts, deleted_conflicts]
- ),
- SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
- TargetDocs = [Doc || {ok, Doc} <- TargetLookups],
- Total = NumConflicts + 1,
- ?assertEqual(Total, length(TargetDocs)),
- lists:foreach(
- fun({SourceDoc, TargetDoc}) ->
- SourceJson = couch_doc:to_json_obj(SourceDoc, [attachments]),
- TargetJson = couch_doc:to_json_obj(TargetDoc, [attachments]),
- ?assertEqual(SourceJson, TargetJson)
- end,
- lists:zip(SourceDocs, TargetDocs)
- ),
- verify_target(SourceDb, TargetDb, Rest).
-
-add_attachments(_SourceDb, _NumAtts, []) ->
- ok;
-add_attachments(SourceDb, NumAtts, [{DocId, NumConflicts} | Rest]) ->
- {ok, SourceLookups} = couch_db:open_doc_revs(SourceDb, DocId, all, []),
- SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
- Total = NumConflicts + 1,
- ?assertEqual(Total, length(SourceDocs)),
- NewDocs = lists:foldl(
- fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
- NewAtts = lists:foldl(
- fun(I, AttAcc) ->
- AttData = crypto:strong_rand_bytes(100),
- NewAtt = couch_att:new([
- {name,
- ?io2b([
- "att_",
- ?i2l(I),
- "_",
- couch_doc:rev_to_str({Pos, Rev})
- ])},
- {type, <<"application/foobar">>},
- {att_len, byte_size(AttData)},
- {data, AttData}
- ]),
- [NewAtt | AttAcc]
- end,
- [],
- lists:seq(1, NumAtts)
- ),
- [Doc#doc{atts = Atts ++ NewAtts} | Acc]
- end,
- [],
- SourceDocs
- ),
- {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []),
- NewRevs = [R || {ok, R} <- UpdateResults],
- ?assertEqual(length(NewDocs), length(NewRevs)),
- add_attachments(SourceDb, NumAtts, Rest).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl
deleted file mode 100644
index ff3b5ee98..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_missing_stubs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_replicator_test_helper, [
- db_url/1,
- replicate/2,
- compare_dbs/2
-]).
-
--define(REVS_LIMIT, 3).
--define(TIMEOUT_EUNIT, 30).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-setup(remote) ->
- {remote, setup()};
-setup({A, B}) ->
- Ctx = test_util:start_couch([couch_replicator]),
- Source = setup(A),
- Target = setup(B),
- {Ctx, {Source, Target}}.
-
-teardown({remote, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- teardown(Source),
- teardown(Target),
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
-missing_stubs_test_() ->
- Pairs = [{remote, remote}],
- {
- "Replicate docs with missing stubs (COUCHDB-1365)",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {Pair, fun should_replicate_docs_with_missed_att_stubs/2}
- || Pair <- Pairs
- ]
- }
- }.
-
-should_replicate_docs_with_missed_att_stubs({From, To}, {_Ctx, {Source, Target}}) ->
- {
- lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source(Source),
- should_set_target_revs_limit(Target, ?REVS_LIMIT),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target),
- should_update_source_docs(Source, ?REVS_LIMIT * 2),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target)
- ]}
- }.
-
-should_populate_source({remote, Source}) ->
- should_populate_source(Source);
-should_populate_source(Source) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}.
-
-should_replicate({remote, Source}, Target) ->
- should_replicate(db_url(Source), Target);
-should_replicate(Source, {remote, Target}) ->
- should_replicate(Source, db_url(Target));
-should_replicate(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
-
-should_set_target_revs_limit({remote, Target}, RevsLimit) ->
- should_set_target_revs_limit(Target, RevsLimit);
-should_set_target_revs_limit(Target, RevsLimit) ->
- ?_test(begin
- {ok, Db} = couch_db:open_int(Target, [?ADMIN_CTX]),
- ?assertEqual(ok, couch_db:set_revs_limit(Db, RevsLimit)),
- ok = couch_db:close(Db)
- end).
-
-should_compare_databases({remote, Source}, Target) ->
- should_compare_databases(Source, Target);
-should_compare_databases(Source, {remote, Target}) ->
- should_compare_databases(Source, Target);
-should_compare_databases(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
-
-should_update_source_docs({remote, Source}, Times) ->
- should_update_source_docs(Source, Times);
-should_update_source_docs(Source, Times) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(update_db_docs(Source, Times))}.
-
-populate_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- AttData = crypto:strong_rand_bytes(6000),
- Doc = #doc{
- id = <<"doc1">>,
- atts = [
- couch_att:new([
- {name, <<"doc1_att1">>},
- {type, <<"application/foobar">>},
- {att_len, byte_size(AttData)},
- {data, AttData}
- ])
- ]
- },
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- couch_db:close(Db).
-
-update_db_docs(DbName, Times) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = couch_db:fold_docs(
- Db,
- fun(FDI, Acc) -> db_fold_fun(FDI, Acc) end,
- {DbName, Times},
- []
- ),
- ok = couch_db:close(Db).
-
-db_fold_fun(FullDocInfo, {DbName, Times}) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Doc} = couch_db:open_doc(Db, FullDocInfo),
- lists:foldl(
- fun(_, {Pos, RevId}) ->
- {ok, Db2} = couch_db:reopen(Db),
- NewDocVersion = Doc#doc{
- revs = {Pos, [RevId]},
- body = {[{<<"value">>, base64:encode(crypto:strong_rand_bytes(100))}]}
- },
- {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []),
- NewRev
- end,
- {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))},
- lists:seq(1, Times)
- ),
- ok = couch_db:close(Db),
- {ok, {DbName, Times}}.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl
deleted file mode 100644
index ca1816b33..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl
+++ /dev/null
@@ -1,123 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_proxy_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
--include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
-setup() ->
- ok.
-
-teardown(_) ->
- ok.
-
-replicator_proxy_test_() ->
- {
- "replicator proxy tests",
- {
- setup,
- fun() -> test_util:start_couch([couch_replicator]) end,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun parse_rep_doc_without_proxy/1,
- fun parse_rep_doc_with_proxy/1,
- fun parse_rep_source_target_proxy/1,
- fun mutually_exclusive_proxy_and_source_proxy/1,
- fun mutually_exclusive_proxy_and_target_proxy/1
- ]
- }
- }
- }.
-
-parse_rep_doc_without_proxy(_) ->
- ?_test(begin
- NoProxyDoc =
- {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>}
- ]},
- Rep = couch_replicator_docs:parse_rep_doc(NoProxyDoc),
- ?assertEqual((Rep#rep.source)#httpdb.proxy_url, undefined),
- ?assertEqual((Rep#rep.target)#httpdb.proxy_url, undefined)
- end).
-
-parse_rep_doc_with_proxy(_) ->
- ?_test(begin
- ProxyURL = <<"http://myproxy.com">>,
- ProxyDoc =
- {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>},
- {<<"proxy">>, ProxyURL}
- ]},
- Rep = couch_replicator_docs:parse_rep_doc(ProxyDoc),
- ?assertEqual((Rep#rep.source)#httpdb.proxy_url, binary_to_list(ProxyURL)),
- ?assertEqual((Rep#rep.target)#httpdb.proxy_url, binary_to_list(ProxyURL))
- end).
-
-parse_rep_source_target_proxy(_) ->
- ?_test(begin
- SrcProxyURL = <<"http://mysrcproxy.com">>,
- TgtProxyURL = <<"http://mytgtproxy.com:9999">>,
- ProxyDoc =
- {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>},
- {<<"source_proxy">>, SrcProxyURL},
- {<<"target_proxy">>, TgtProxyURL}
- ]},
- Rep = couch_replicator_docs:parse_rep_doc(ProxyDoc),
- ?assertEqual(
- (Rep#rep.source)#httpdb.proxy_url,
- binary_to_list(SrcProxyURL)
- ),
- ?assertEqual(
- (Rep#rep.target)#httpdb.proxy_url,
- binary_to_list(TgtProxyURL)
- )
- end).
-
-mutually_exclusive_proxy_and_source_proxy(_) ->
- ?_test(begin
- ProxyDoc =
- {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>},
- {<<"proxy">>, <<"oldstyleproxy.local">>},
- {<<"source_proxy">>, <<"sourceproxy.local">>}
- ]},
- ?assertThrow(
- {bad_rep_doc, _},
- couch_replicator_docs:parse_rep_doc(ProxyDoc)
- )
- end).
-
-mutually_exclusive_proxy_and_target_proxy(_) ->
- ?_test(begin
- ProxyDoc =
- {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>},
- {<<"proxy">>, <<"oldstyleproxy.local">>},
- {<<"target_proxy">>, <<"targetproxy.local">>}
- ]},
- ?assertThrow(
- {bad_rep_doc, _},
- couch_replicator_docs:parse_rep_doc(ProxyDoc)
- )
- end).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl
deleted file mode 100644
index a214d4607..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl
+++ /dev/null
@@ -1,79 +0,0 @@
--module(couch_replicator_rate_limiter_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-rate_limiter_test_() ->
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_new_key(),
- t_1_failure(),
- t_2_failures_back_to_back(),
- t_2_failures(),
- t_success_threshold(),
- t_1_failure_2_successes()
- ]
- }.
-
-t_new_key() ->
- ?_test(begin
- ?assertEqual(0, couch_replicator_rate_limiter:interval({"foo", get}))
- end).
-
-t_1_failure() ->
- ?_test(begin
- ?assertEqual(24, couch_replicator_rate_limiter:failure({"foo", get}))
- end).
-
-t_2_failures() ->
- ?_test(begin
- couch_replicator_rate_limiter:failure({"foo", get}),
- low_pass_filter_delay(),
- Interval = couch_replicator_rate_limiter:failure({"foo", get}),
- ?assertEqual(29, Interval)
- end).
-
-t_2_failures_back_to_back() ->
- ?_test(begin
- couch_replicator_rate_limiter:failure({"foo", get}),
- Interval = couch_replicator_rate_limiter:failure({"foo", get}),
- ?assertEqual(24, Interval)
- end).
-
-t_success_threshold() ->
- ?_test(begin
- Interval = couch_replicator_rate_limiter:success({"foo", get}),
- ?assertEqual(0, Interval),
- Interval = couch_replicator_rate_limiter:success({"foo", get}),
- ?assertEqual(0, Interval)
- end).
-
-t_1_failure_2_successes() ->
- ?_test(begin
- couch_replicator_rate_limiter:failure({"foo", get}),
- low_pass_filter_delay(),
- Succ1 = couch_replicator_rate_limiter:success({"foo", get}),
- ?assertEqual(20, Succ1),
- low_pass_filter_delay(),
- Succ2 = couch_replicator_rate_limiter:success({"foo", get}),
- ?assertEqual(0, Succ2)
- end).
-
-low_pass_filter_delay() ->
- timer:sleep(100).
-
-setup() ->
- {ok, Pid} = couch_replicator_rate_limiter:start_link(),
- Pid.
-
-teardown(Pid) ->
- Ref = erlang:monitor(process, Pid),
- unlink(Pid),
- exit(Pid, kill),
- receive
- {'DOWN', Ref, process, Pid, _} ->
- ok
- end,
- ok.
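The tests above exercise the rate limiter's whole public surface: interval/1 returns the current backoff (0 for a new key), failure/1 grows it, and success/1 decays it back toward zero. A minimal sketch of the consumer pattern those semantics imply, assuming a hypothetical send_request/0 and an example key; this is not code from the module itself:

    Key = {"http://example.org/db", get},
    timer:sleep(couch_replicator_rate_limiter:interval(Key)),
    case send_request() of
        {error, request_timeout} ->
            % backend is struggling; widen the interval for this key
            couch_replicator_rate_limiter:failure(Key);
        {ok, _Code, _Headers, _Body} ->
            % healthy response; let the interval decay again
            couch_replicator_rate_limiter:success(Key)
    end.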
diff --git a/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
deleted file mode 100644
index 9ffcc9e2c..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
+++ /dev/null
@@ -1,287 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_retain_stats_between_job_runs).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
--define(DELAY, 500).
--define(TIMEOUT, 60000).
-
-setup_all() ->
- test_util:start_couch([couch_replicator, chttpd, mem3, fabric]).
-
-teardown_all(Ctx) ->
- ok = test_util:stop_couch(Ctx).
-
-setup() ->
- Source = setup_db(),
- Target = setup_db(),
- {Source, Target}.
-
-teardown({Source, Target}) ->
- teardown_db(Source),
- teardown_db(Target),
- ok.
-
-stats_retained_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun t_stats_retained_by_scheduler/1,
- fun t_stats_retained_on_job_removal/1
- ]
- }
- }.
-
-t_stats_retained_by_scheduler({Source, Target}) ->
- ?_test(begin
- {ok, _} = add_vdu(Target),
- populate_db_reject_even_docs(Source, 1, 10),
- {ok, RepPid, RepId} = replicate(Source, Target),
- wait_target_in_sync(6, Target),
-
- check_active_tasks(10, 5, 5),
- check_scheduler_jobs(10, 5, 5),
-
- stop_job(RepPid),
- check_scheduler_jobs(10, 5, 5),
-
- start_job(),
- check_active_tasks(10, 5, 5),
- check_scheduler_jobs(10, 5, 5),
- couch_replicator_scheduler:remove_job(RepId)
- end).
-
-t_stats_retained_on_job_removal({Source, Target}) ->
- ?_test(begin
- {ok, _} = add_vdu(Target),
- populate_db_reject_even_docs(Source, 1, 10),
- {ok, _, RepId} = replicate(Source, Target),
- % 5 + 1 vdu
- wait_target_in_sync(6, Target),
-
- check_active_tasks(10, 5, 5),
- check_scheduler_jobs(10, 5, 5),
-
- couch_replicator_scheduler:remove_job(RepId),
-
- populate_db_reject_even_docs(Source, 11, 20),
- {ok, _, RepId} = replicate(Source, Target),
- % 6 + 5
- wait_target_in_sync(11, Target),
-
- check_scheduler_jobs(20, 10, 10),
- check_active_tasks(20, 10, 10),
-
- couch_replicator_scheduler:remove_job(RepId),
-
- populate_db_reject_even_docs(Source, 21, 30),
- {ok, _, RepId} = replicate(Source, Target),
- % 11 + 5
- wait_target_in_sync(16, Target),
-
- check_scheduler_jobs(30, 15, 15),
- check_active_tasks(30, 15, 15),
-
- couch_replicator_scheduler:remove_job(RepId)
- end).
-
-setup_db() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-teardown_db(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-stop_job(RepPid) ->
- Ref = erlang:monitor(process, RepPid),
- gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 0}),
- couch_replicator_scheduler:reschedule(),
- receive
- {'DOWN', Ref, _, _, _} -> ok
- after ?TIMEOUT ->
- erlang:error(timeout)
- end.
-
-start_job() ->
- gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 500}),
- couch_replicator_scheduler:reschedule().
-
-check_active_tasks(DocsRead, DocsWritten, DocsFailed) ->
- RepTask = wait_for_task_status(DocsWritten),
- ?assertNotEqual(timeout, RepTask),
- ?assertEqual(DocsRead, couch_util:get_value(docs_read, RepTask)),
- ?assertEqual(DocsWritten, couch_util:get_value(docs_written, RepTask)),
- ?assertEqual(
- DocsFailed,
- couch_util:get_value(
- doc_write_failures,
- RepTask
- )
- ).
-
-check_scheduler_jobs(DocsRead, DocsWritten, DocFailed) ->
- Info = wait_scheduler_info(DocsWritten),
- ?assert(maps:is_key(<<"changes_pending">>, Info)),
- ?assert(maps:is_key(<<"doc_write_failures">>, Info)),
- ?assert(maps:is_key(<<"docs_read">>, Info)),
- ?assert(maps:is_key(<<"docs_written">>, Info)),
- ?assert(maps:is_key(<<"missing_revisions_found">>, Info)),
- ?assert(maps:is_key(<<"checkpointed_source_seq">>, Info)),
- ?assert(maps:is_key(<<"source_seq">>, Info)),
- ?assert(maps:is_key(<<"revisions_checked">>, Info)),
- ?assertMatch(#{<<"docs_read">> := DocsRead}, Info),
- ?assertMatch(#{<<"docs_written">> := DocsWritten}, Info),
- ?assertMatch(#{<<"doc_write_failures">> := DocFailed}, Info).
-
-replication_tasks() ->
- lists:filter(
- fun(P) ->
- couch_util:get_value(type, P) =:= replication
- end,
- couch_task_status:all()
- ).
-
-wait_for_task_status(DocsWritten) ->
- test_util:wait(fun() ->
- case replication_tasks() of
- [] ->
- wait;
- [RepTask] ->
- case couch_util:get_value(docs_written, RepTask) of
- DocsWritten -> RepTask;
- _Other -> wait
- end
- end
- end).
-
-wait_scheduler_info(DocsWritten) ->
- test_util:wait(fun() ->
- case scheduler_jobs() of
- [] ->
- wait;
- [#{<<"info">> := null}] ->
- wait;
- [#{<<"info">> := Info}] ->
- case maps:get(<<"docs_written">>, Info, undefined) of
- DocsWritten -> Info;
- _Other -> wait
- end
- end
- end).
-
-populate_db_reject_even_docs(DbName, Start, End) ->
- BodyFun = fun(Id) ->
- case Id rem 2 == 0 of
- true -> {[{<<"nope">>, true}]};
- false -> {[]}
- end
- end,
- populate_db(DbName, Start, End, BodyFun).
-
-populate_db(DbName, Start, End, BodyFun) when is_function(BodyFun, 1) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Docs = lists:foldl(
- fun(DocIdCounter, Acc) ->
- Id = integer_to_binary(DocIdCounter),
- Doc = #doc{id = Id, body = BodyFun(DocIdCounter)},
- [Doc | Acc]
- end,
- [],
- lists:seq(Start, End)
- ),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- ok = couch_db:close(Db).
-
-wait_target_in_sync(DocCount, Target) when is_integer(DocCount) ->
- wait_target_in_sync_loop(DocCount, Target, 300).
-
-wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Could not get source and target databases in sync"}
- ]}
- );
-wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
- {ok, Target} = couch_db:open_int(TargetName, []),
- {ok, TargetInfo} = couch_db:get_db_info(Target),
- ok = couch_db:close(Target),
- TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
- case TargetDocCount == DocCount of
- true ->
- true;
- false ->
- ok = timer:sleep(?DELAY),
- wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
- end.
-
-replicate(Source, Target) ->
- SrcUrl = couch_replicator_test_helper:db_url(Source),
- TgtUrl = couch_replicator_test_helper:db_url(Target),
- RepObject =
- {[
- {<<"source">>, SrcUrl},
- {<<"target">>, TgtUrl},
- {<<"continuous">>, true}
- ]},
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
- ok = couch_replicator_scheduler:add_job(Rep),
- couch_replicator_scheduler:reschedule(),
- Pid = couch_replicator_test_helper:get_pid(Rep#rep.id),
- {ok, Pid, Rep#rep.id}.
-
-scheduler_jobs() ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:flatten(io_lib:format("http://~s:~b/_scheduler/jobs", [Addr, Port])),
- {ok, 200, _, Body} = test_request:get(Url, []),
- Json = jiffy:decode(Body, [return_maps]),
- maps:get(<<"jobs">>, Json).
-
-vdu() ->
- <<
- "function(newDoc, oldDoc, userCtx) {\n"
- " if(newDoc.nope === true) {\n"
- " throw({forbidden: 'nope'});\n"
- " } else {\n"
- " return;\n"
- " }\n"
- " }"
- >>.
-
-add_vdu(DbName) ->
- DocProps = [
- {<<"_id">>, <<"_design/vdu">>},
- {<<"language">>, <<"javascript">>},
- {<<"validate_doc_update">>, vdu()}
- ],
- Doc = couch_doc:from_json_obj({DocProps}, []),
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- try
- {ok, _Rev} = couch_db:update_doc(Db, Doc, [])
- after
- couch_db:close(Db)
- end.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl
deleted file mode 100644
index 8f61a638c..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl
+++ /dev/null
@@ -1,122 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_selector_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
-setup(_) ->
- Ctx = test_util:start_couch([couch_replicator]),
- Source = create_db(),
- create_docs(Source),
- Target = create_db(),
- {Ctx, {Source, Target}}.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- delete_db(Source),
- delete_db(Target),
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
-selector_replication_test_() ->
- Pairs = [{remote, remote}],
- {
- "Selector filtered replication tests",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Pair, fun should_succeed/2} || Pair <- Pairs]
- }
- }.
-
-should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject =
- {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)},
- {<<"selector">>, {[{<<"_id">>, <<"doc2">>}]}}
- ]},
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- %% FilterFun is an Erlang version of the following mango selector
- FilterFun = fun(_DocId, {Props}) ->
- couch_util:get_value(<<"_id">>, Props) == <<"doc2">>
- end,
- {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
- {"Target DB has proper number of docs",
- ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
- {"All the docs selected as expected",
- ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
- ]}.
-
-compare_dbs(Source, Target, FilterFun) ->
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, TargetDb} = couch_db:open_int(Target, []),
- {ok, TargetDbInfo} = couch_db:get_db_info(TargetDb),
- Fun = fun(FullDocInfo, Acc) ->
- {ok, DocId, SourceDoc} = read_doc(SourceDb, FullDocInfo),
- TargetReply = read_doc(TargetDb, DocId),
- case FilterFun(DocId, SourceDoc) of
- true ->
- ValidReply = {ok, DocId, SourceDoc} == TargetReply,
- {ok, [ValidReply | Acc]};
- false ->
- ValidReply = {not_found, missing} == TargetReply,
- {ok, [ValidReply | Acc]}
- end
- end,
- {ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb),
- {ok, TargetDbInfo, AllReplies}.
-
-read_doc(Db, DocIdOrInfo) ->
- case couch_db:open_doc(Db, DocIdOrInfo) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, [attachments]),
- DocId = couch_util:get_value(<<"_id">>, Props),
- {ok, DocId, {Props}};
- Error ->
- Error
- end.
-
-create_db() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-create_docs(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc1">>}
- ]}
- ),
- Doc2 = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"doc2">>}
- ]}
- ),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2]),
- couch_db:close(Db).
-
-delete_db(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-
-db_url(remote, DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
deleted file mode 100644
index 3b020927d..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
+++ /dev/null
@@ -1,184 +0,0 @@
--module(couch_replicator_small_max_request_size_target).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_replicator_test_helper, [
- db_url/1,
- replicate/1,
- compare_dbs/3
-]).
-
--define(TIMEOUT_EUNIT, 360).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-setup(remote) ->
- {remote, setup()};
-setup({A, B}) ->
- Ctx = test_util:start_couch([couch_replicator]),
- config:set("chttpd", "max_http_request_size", "10000", false),
- Source = setup(A),
- Target = setup(B),
- {Ctx, {Source, Target}}.
-
-teardown({remote, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- teardown(Source),
- teardown(Target),
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
-reduce_max_request_size_test_() ->
- Pairs = [{remote, remote}],
- {
- "Replicate docs when target has a small max_http_request_size",
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {Pair, fun should_replicate_all_docs/2}
- || Pair <- Pairs
- ] ++
- [
- {Pair, fun should_replicate_one/2}
- || Pair <- Pairs
- ] ++
- % Disabled. See issue 574. Sometimes PUTs with a doc and
- % attachment which exceed the maximum request size have their
- % connection closed instead of returning a 413 response. That
- % makes these tests flaky.
- [
- {Pair, fun should_replicate_one_with_attachment/2}
- || Pair <- Pairs
- ]
- }
- }.
-
-% Test documents which are individually below max_http_request_size but whose
-% combined batch size exceeds max_http_request_size. The replicator should
-% automatically split the batch into smaller batches and POST those separately.
-should_replicate_all_docs({From, To}, {_Ctx, {Source, Target}}) ->
- {
- lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source(Source),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target, [])
- ]}
- }.
-
-% If a document is too large to post as a single request, that document is
-% skipped but replication overall will make progress and not crash.
-should_replicate_one({From, To}, {_Ctx, {Source, Target}}) ->
- {
- lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source_one_large_one_small(Source),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target, [<<"doc0">>])
- ]}
- }.
-
-% If a document has an attachment > 64 * 1024 bytes, the replicator will switch
-% to POST-ing individual documents directly and skip _bulk_docs. Test that case
-% separately.
-% See the note in the main test function for why this was disabled.
-should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
- {
- lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source_one_large_attachment(Source),
- should_populate_source(Source),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target, [<<"doc0">>])
- ]}
- }.
-
-should_populate_source({remote, Source}) ->
- should_populate_source(Source);
-should_populate_source(Source) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(add_docs(Source, 5, 3000, 0))}.
-
-should_populate_source_one_large_one_small({remote, Source}) ->
- should_populate_source_one_large_one_small(Source);
-should_populate_source_one_large_one_small(Source) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}.
-
-should_populate_source_one_large_attachment({remote, Source}) ->
- should_populate_source_one_large_attachment(Source);
-should_populate_source_one_large_attachment(Source) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
-
-should_replicate({remote, Source}, Target) ->
- should_replicate(db_url(Source), Target);
-should_replicate(Source, {remote, Target}) ->
- should_replicate(Source, db_url(Target));
-should_replicate(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
-
-should_compare_databases({remote, Source}, Target, ExceptIds) ->
- should_compare_databases(Source, Target, ExceptIds);
-should_compare_databases(Source, {remote, Target}, ExceptIds) ->
- should_compare_databases(Source, Target, ExceptIds);
-should_compare_databases(Source, Target, ExceptIds) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target, ExceptIds))}.
-
-binary_chunk(Size) when is_integer(Size), Size > 0 ->
- <<<<"x">> || _ <- lists:seq(1, Size)>>.
-
-add_docs(DbName, DocCount, DocSize, AttSize) ->
- [
- begin
- DocId = iolist_to_binary(["doc", integer_to_list(Id)]),
- add_doc(DbName, DocId, DocSize, AttSize)
- end
- || Id <- lists:seq(1, DocCount)
- ],
- ok.
-
-one_large_one_small(DbName, Large, Small) ->
- add_doc(DbName, <<"doc0">>, Large, 0),
- add_doc(DbName, <<"doc1">>, Small, 0).
-
-one_large_attachment(DbName, Size, AttSize) ->
- add_doc(DbName, <<"doc0">>, Size, AttSize).
-
-add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Doc0 = #doc{id = DocId, body = {[{<<"x">>, binary_chunk(Size)}]}},
- Doc = Doc0#doc{atts = atts(AttSize)},
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- couch_db:close(Db).
-
-atts(0) ->
- [];
-atts(Size) ->
- [
- couch_att:new([
- {name, <<"att1">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, fun(Bytes) -> binary_chunk(Bytes) end}
- ])
- ].
-
-replicate(Source, Target) ->
- replicate(
- {[
- {<<"source">>, Source},
- {<<"target">>, Target},
- % This makes batch_size predictable
- {<<"worker_processes">>, "1"}
- ]}
- ).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_test.hrl b/src/couch_replicator/test/eunit/couch_replicator_test.hrl
deleted file mode 100644
index 6db97ec2b..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_test.hrl
+++ /dev/null
@@ -1,35 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-% Borrowed from fabric2_test.hrl
-
-% Some test modules do not use with/1, so squash the unused function compiler warning
--compile([{nowarn_unused_function, [{with, 1}]}]).
-
-
--define(TDEF(Name), {atom_to_list(Name), fun Name/1}).
--define(TDEF(Name, Timeout), {atom_to_list(Name), Timeout, fun Name/1}).
-
--define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
--define(TDEF_FE(Name, Timeout), fun(Arg) -> {atom_to_list(Name), {timeout, Timeout, ?_test(Name(Arg))}} end).
-
-
-with(Tests) ->
- fun(ArgsTuple) ->
- lists:map(fun
- ({Name, Fun}) ->
- {Name, ?_test(Fun(ArgsTuple))};
- ({Name, Timeout, Fun}) ->
- {Name, {timeout, Timeout, ?_test(Fun(ArgsTuple))}}
- end, Tests)
- end.
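These macros and with/1 are shared test plumbing; a hedged sketch of how they are typically consumed in an EUnit foreach fixture (the module, setup/teardown, and test functions below are hypothetical, not taken from the source):

    example_test_() ->
        {
            foreach,
            fun setup/0,
            fun teardown/1,
            [
                ?TDEF_FE(t_basic),
                ?TDEF_FE(t_slow_case, 15)
            ]
        }.

    t_basic(Db) ->
        ?assertEqual(ok, couch_db:close(Db)).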
diff --git a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
deleted file mode 100644
index 4044e7c72..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
+++ /dev/null
@@ -1,147 +0,0 @@
--module(couch_replicator_test_helper).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_replicator/src/couch_replicator.hrl").
-
--export([
- compare_dbs/2,
- compare_dbs/3,
- db_url/1,
- replicate/1,
- get_pid/1,
- replicate/2
-]).
-
-compare_dbs(Source, Target) ->
- compare_dbs(Source, Target, []).
-
-compare_dbs(Source, Target, ExceptIds) ->
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, TargetDb} = couch_db:open_int(Target, []),
-
- Fun = fun(FullDocInfo, Acc) ->
- {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
- Id = DocSource#doc.id,
- case lists:member(Id, ExceptIds) of
- true ->
- ?assertEqual(not_found, couch_db:get_doc_info(TargetDb, Id));
- false ->
- {ok, TDoc} = couch_db:open_doc(TargetDb, Id),
- compare_docs(DocSource, TDoc)
- end,
- {ok, Acc}
- end,
-
- {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb).
-
-compare_docs(Doc1, Doc2) ->
- ?assertEqual(Doc1#doc.body, Doc2#doc.body),
- #doc{atts = Atts1} = Doc1,
- #doc{atts = Atts2} = Doc2,
- ?assertEqual(
- lists:sort([couch_att:fetch(name, Att) || Att <- Atts1]),
- lists:sort([couch_att:fetch(name, Att) || Att <- Atts2])
- ),
- FunCompareAtts = fun(Att) ->
- AttName = couch_att:fetch(name, Att),
- {ok, AttTarget} = find_att(Atts2, AttName),
- SourceMd5 = att_md5(Att),
- TargetMd5 = att_md5(AttTarget),
- case AttName of
- <<"att1">> ->
- ?assertEqual(gzip, couch_att:fetch(encoding, Att)),
- ?assertEqual(gzip, couch_att:fetch(encoding, AttTarget)),
- DecSourceMd5 = att_decoded_md5(Att),
- DecTargetMd5 = att_decoded_md5(AttTarget),
- ?assertEqual(DecSourceMd5, DecTargetMd5);
- _ ->
- ?assertEqual(identity, couch_att:fetch(encoding, Att)),
- ?assertEqual(identity, couch_att:fetch(encoding, AttTarget))
- end,
- ?assertEqual(SourceMd5, TargetMd5),
- ?assert(is_integer(couch_att:fetch(disk_len, Att))),
- ?assert(is_integer(couch_att:fetch(att_len, Att))),
- ?assert(is_integer(couch_att:fetch(disk_len, AttTarget))),
- ?assert(is_integer(couch_att:fetch(att_len, AttTarget))),
- ?assertEqual(
- couch_att:fetch(disk_len, Att),
- couch_att:fetch(disk_len, AttTarget)
- ),
- ?assertEqual(
- couch_att:fetch(att_len, Att),
- couch_att:fetch(att_len, AttTarget)
- ),
- ?assertEqual(
- couch_att:fetch(type, Att),
- couch_att:fetch(type, AttTarget)
- ),
- ?assertEqual(
- couch_att:fetch(md5, Att),
- couch_att:fetch(md5, AttTarget)
- )
- end,
- lists:foreach(FunCompareAtts, Atts1).
-
-find_att([], _Name) ->
- nil;
-find_att([Att | Rest], Name) ->
- case couch_att:fetch(name, Att) of
- Name ->
- {ok, Att};
- _ ->
- find_att(Rest, Name)
- end.
-
-att_md5(Att) ->
- Md50 = couch_att:foldl(
- Att,
- fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end,
- couch_hash:md5_hash_init()
- ),
- couch_hash:md5_hash_final(Md50).
-
-att_decoded_md5(Att) ->
- Md50 = couch_att:foldl_decode(
- Att,
- fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end,
- couch_hash:md5_hash_init()
- ),
- couch_hash:md5_hash_final(Md50).
-
-db_url(DbName) ->
- iolist_to_binary([
- "http://",
- config:get("httpd", "bind_address", "127.0.0.1"),
- ":",
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "/",
- DbName
- ]).
-
-get_pid(RepId) ->
- Pid = global:whereis_name({couch_replicator_scheduler_job, RepId}),
- ?assert(is_pid(Pid)),
- Pid.
-
-replicate(Source, Target) ->
- replicate(
- {[
- {<<"source">>, Source},
- {<<"target">>, Target}
- ]}
- ).
-
-replicate({[_ | _]} = RepObject) ->
- {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
- ok = couch_replicator_scheduler:add_job(Rep),
- couch_replicator_scheduler:reschedule(),
- Pid = get_pid(Rep#rep.id),
- MonRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MonRef, process, Pid, _} ->
- ok
- end,
- ok = couch_replicator_scheduler:remove_job(Rep#rep.id).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
deleted file mode 100644
index a23f415c0..000000000
--- a/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
+++ /dev/null
@@ -1,201 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_use_checkpoints_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_replicator_test_helper, [
- db_url/1,
- replicate/1
-]).
-
--define(DOCS_COUNT, 100).
--define(TIMEOUT_EUNIT, 30).
--define(i2l(I), integer_to_list(I)).
--define(io2b(Io), iolist_to_binary(Io)).
-
-start(false) ->
- fun
- ({finished, _, {CheckpointHistory}}) ->
- ?assertEqual([{<<"use_checkpoints">>, false}], CheckpointHistory);
- (_) ->
- ok
- end;
-start(true) ->
- fun
- ({finished, _, {CheckpointHistory}}) ->
- ?assertNotEqual(
- false,
- lists:keyfind(
- <<"session_id">>,
- 1,
- CheckpointHistory
- )
- );
- (_) ->
- ok
- end.
-
-stop(_, _) ->
- ok.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-setup(remote) ->
- {remote, setup()};
-setup({_, Fun, {A, B}}) ->
- Ctx = test_util:start_couch([couch_replicator]),
- {ok, Listener} = couch_replicator_notifier:start_link(Fun),
- Source = setup(A),
- Target = setup(B),
- {Ctx, {Source, Target, Listener}}.
-
-teardown({remote, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown(_, {Ctx, {Source, Target, Listener}}) ->
- teardown(Source),
- teardown(Target),
-
- couch_replicator_notifier:stop(Listener),
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
-use_checkpoints_test_() ->
- {
- "Replication use_checkpoints feature tests",
- {
- foreachx,
- fun start/1,
- fun stop/2,
- [
- {UseCheckpoints, fun use_checkpoints_tests/2}
- || UseCheckpoints <- [false, true]
- ]
- }
- }.
-
-use_checkpoints_tests(UseCheckpoints, Fun) ->
- Pairs = [{remote, remote}],
- {
- "use_checkpoints: " ++ atom_to_list(UseCheckpoints),
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {{UseCheckpoints, Fun, Pair}, fun should_test_checkpoints/2}
- || Pair <- Pairs
- ]
- }
- }.
-
-should_test_checkpoints({UseCheckpoints, _, {From, To}}, {_Ctx, {Source, Target, _}}) ->
- should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}).
-should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}) ->
- {
- lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source(Source, ?DOCS_COUNT),
- should_replicate(Source, Target, UseCheckpoints),
- should_compare_databases(Source, Target)
- ]}
- }.
-
-should_populate_source({remote, Source}, DocCount) ->
- should_populate_source(Source, DocCount);
-should_populate_source(Source, DocCount) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, DocCount))}.
-
-should_replicate({remote, Source}, Target, UseCheckpoints) ->
- should_replicate(db_url(Source), Target, UseCheckpoints);
-should_replicate(Source, {remote, Target}, UseCheckpoints) ->
- should_replicate(Source, db_url(Target), UseCheckpoints);
-should_replicate(Source, Target, UseCheckpoints) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target, UseCheckpoints))}.
-
-should_compare_databases({remote, Source}, Target) ->
- should_compare_databases(Source, Target);
-should_compare_databases(Source, {remote, Target}) ->
- should_compare_databases(Source, Target);
-should_compare_databases(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
-
-populate_db(DbName, DocCount) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Docs = lists:foldl(
- fun(DocIdCounter, Acc) ->
- Id = ?io2b(["doc", ?i2l(DocIdCounter)]),
- Value = ?io2b(["val", ?i2l(DocIdCounter)]),
- Doc = #doc{
- id = Id,
- body = {[{<<"value">>, Value}]}
- },
- [Doc | Acc]
- end,
- [],
- lists:seq(1, DocCount)
- ),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- ok = couch_db:close(Db).
-
-compare_dbs(Source, Target) ->
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, TargetDb} = couch_db:open_int(Target, []),
- Fun = fun(FullDocInfo, Acc) ->
- {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
- {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
- DocId = couch_util:get_value(<<"_id">>, Props),
- DocTarget =
- case couch_db:open_doc(TargetDb, DocId) of
- {ok, DocT} ->
- DocT;
- Error ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason,
- lists:concat([
- "Error opening document '",
- ?b2l(DocId),
- "' from target: ",
- couch_util:to_list(Error)
- ])}
- ]}
- )
- end,
- DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
- ?assertEqual(DocJson, DocTargetJson),
- {ok, Acc}
- end,
- {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb).
-
-replicate(Source, Target, UseCheckpoints) ->
- replicate(
- {[
- {<<"source">>, Source},
- {<<"target">>, Target},
- {<<"use_checkpoints">>, UseCheckpoints}
- ]}
- ).
diff --git a/src/couch_stats/.gitignore b/src/couch_stats/.gitignore
deleted file mode 100644
index 093e7e05b..000000000
--- a/src/couch_stats/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-*~
-*.beam
-deps
-ebin
-doc
-.project
diff --git a/src/couch_stats/LICENSE b/src/couch_stats/LICENSE
deleted file mode 100644
index 11069edd7..000000000
--- a/src/couch_stats/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/src/couch_stats/README.md b/src/couch_stats/README.md
deleted file mode 100644
index 53c9ea4f4..000000000
--- a/src/couch_stats/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# couch_stats
-
-couch_stats is a simple statistics collection app for Erlang applications. Its
-core API is a thin wrapper around a stat storage library (currently Folsom), but
-abstracting over that library provides several benefits:
-
-* All references to stat storage are in one place, so it's easy to swap
- the module out.
-
-* Some common patterns, such as tying a process's lifetime to a counter value,
- are straightforward to support.
-
-* Configuration can be managed in a single place - for example, it's much easier
- to ensure that all histogram metrics use a 10-second sliding window if those
- metrics are instantiated/configured centrally.
-
-## Adding a metric
-
-1. Write a stat description file. See `priv/descriptions.cfg` for an example.
- * The metric name should be of type `[atom()]`.
- * The type should be one of `counter`, `gauge`, or `histogram`.
-
- If you don't add your metric to a description file, your metric will be
- accessible via `couch_stats:sample/1`, but it won't be read by the stats
- collector and therefore won't be available to HTTP `_stats` requests, etc.
-
-2. Tell couch_stats to use your description file via application configuration.
-
-3. Instrument your code with the helper functions in `couch_stats.erl`.
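A hedged sketch of step 3, using the helpers exported by couch_stats.erl later in this diff; the metric names mirror priv/sample_descriptions.cfg, and do_work/0 is a hypothetical placeholder:

    couch_stats:increment_counter([couch_stats, sample_counter]),
    couch_stats:update_gauge([couch_stats, sample_gauge], 42),
    %% update_histogram/2 with a fun times the call in milliseconds and
    %% returns the fun's result.
    Result = couch_stats:update_histogram([couch_stats, sample_histogram], fun() ->
        do_work()
    end).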
diff --git a/src/couch_stats/priv/sample_descriptions.cfg b/src/couch_stats/priv/sample_descriptions.cfg
deleted file mode 100644
index 1947ad489..000000000
--- a/src/couch_stats/priv/sample_descriptions.cfg
+++ /dev/null
@@ -1,15 +0,0 @@
-%% -*- mode: erlang -*-
-
-%% Example stat descriptions.
-{[couch_stats, sample_counter], [
- {type, counter},
- {desc, <<"counters counted by couch_stats">>}
-]}.
-{[couch_stats, sample_histogram], [
- {type, histogram},
- {desc, <<"histograms histogrammed by couch_stats">>}
-]}.
-{[couch_stats, sample_gauge], [
- {type, gauge},
- {desc, <<"gauges gauged by couch_stats">>}
-]}.
diff --git a/src/couch_stats/src/couch_stats.app.src b/src/couch_stats/src/couch_stats.app.src
deleted file mode 100644
index 990f8de62..000000000
--- a/src/couch_stats/src/couch_stats.app.src
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_stats, [
- {description, "Simple statistics collection"},
- {vsn, git},
- {registered, [couch_stats_aggregator, couch_stats_process_tracker]},
- {applications, [kernel, stdlib, folsom]},
- {mod, {couch_stats_app, []}},
- {env, []}
-]}.
diff --git a/src/couch_stats/src/couch_stats.erl b/src/couch_stats/src/couch_stats.erl
deleted file mode 100644
index e0303fc0f..000000000
--- a/src/couch_stats/src/couch_stats.erl
+++ /dev/null
@@ -1,130 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stats).
-
--export([
- start/0,
- stop/0,
- fetch/0,
- reload/0,
- sample/1,
- new/2,
- delete/1,
- list/0,
- increment_counter/1,
- increment_counter/2,
- decrement_counter/1,
- decrement_counter/2,
- update_histogram/2,
- update_gauge/2
-]).
-
--include("couch_stats.hrl").
-
--type response() :: ok | {error, unknown_metric}.
--type stat() :: {any(), [{atom(), any()}]}.
-
-start() ->
- application:start(couch_stats).
-
-stop() ->
- application:stop(couch_stats).
-
-fetch() ->
- couch_stats_aggregator:fetch().
-
-reload() ->
- couch_stats_aggregator:reload().
-
--spec sample(any()) -> stat().
-sample(Name) ->
- [{Name, Info}] = folsom_metrics:get_metric_info(Name),
- sample_type(Name, proplists:get_value(type, Info)).
-
--spec new(atom(), any()) -> ok | {error, metric_exists | unsupported_type}.
-new(counter, Name) ->
- case folsom_metrics:new_counter(Name) of
- ok -> ok;
- {error, Name, metric_already_exists} -> {error, metric_exists}
- end;
-new(histogram, Name) ->
- Time = config:get_integer("stats", "interval", ?DEFAULT_INTERVAL),
- case folsom_metrics:new_histogram(Name, slide_uniform, {Time, 1024}) of
- ok -> ok;
- {error, Name, metric_already_exists} -> {error, metric_exists}
- end;
-new(gauge, Name) ->
- case folsom_metrics:new_gauge(Name) of
- ok -> ok;
- {error, Name, metric_already_exists} -> {error, metric_exists}
- end;
-new(_, _) ->
- {error, unsupported_type}.
-
-delete(Name) ->
- folsom_metrics:delete_metric(Name).
-
-list() ->
- folsom_metrics:get_metrics_info().
-
--spec increment_counter(any()) -> response().
-increment_counter(Name) ->
- notify_existing_metric(Name, {inc, 1}, counter).
-
--spec increment_counter(any(), pos_integer()) -> response().
-increment_counter(Name, Value) ->
- notify_existing_metric(Name, {inc, Value}, counter).
-
--spec decrement_counter(any()) -> response().
-decrement_counter(Name) ->
- notify_existing_metric(Name, {dec, 1}, counter).
-
--spec decrement_counter(any(), pos_integer()) -> response().
-decrement_counter(Name, Value) ->
- notify_existing_metric(Name, {dec, Value}, counter).
-
--spec update_histogram
- (any(), number()) -> response();
- (any(), function()) -> any().
-update_histogram(Name, Fun) when is_function(Fun, 0) ->
- Begin = os:timestamp(),
- Result = Fun(),
- Duration = timer:now_diff(os:timestamp(), Begin) div 1000,
- case notify_existing_metric(Name, Duration, histogram) of
- ok ->
- Result;
- {error, unknown_metric} ->
- throw({unknown_metric, Name})
- end;
-update_histogram(Name, Value) when is_number(Value) ->
- notify_existing_metric(Name, Value, histogram).
-
--spec update_gauge(any(), number()) -> response().
-update_gauge(Name, Value) ->
- notify_existing_metric(Name, Value, gauge).
-
--spec notify_existing_metric(any(), any(), any()) -> response().
-notify_existing_metric(Name, Op, Type) ->
- try
- ok = folsom_metrics:notify_existing_metric(Name, Op, Type)
- catch
- _:_ ->
- error_logger:error_msg("unknown metric: ~p", [Name]),
- {error, unknown_metric}
- end.
-
--spec sample_type(any(), atom()) -> stat().
-sample_type(Name, histogram) ->
- folsom_metrics:get_histogram_statistics(Name);
-sample_type(Name, _) ->
- folsom_metrics:get_metric_value(Name).
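Metrics can also be created and read ad hoc, outside the description files; as the README notes, such metrics are visible to sample/1 but not to the stats collector. A small sketch under that assumption (the metric name is hypothetical):

    ok = couch_stats:new(counter, [myapp, adhoc_counter]),
    ok = couch_stats:increment_counter([myapp, adhoc_counter]),
    Count = couch_stats:sample([myapp, adhoc_counter]).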
diff --git a/src/couch_stats/src/couch_stats.hrl b/src/couch_stats/src/couch_stats.hrl
deleted file mode 100644
index 3cffe99f1..000000000
--- a/src/couch_stats/src/couch_stats.hrl
+++ /dev/null
@@ -1,14 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(DEFAULT_INTERVAL, 10).
--define(RELOAD_INTERVAL, 600).
diff --git a/src/couch_stats/src/couch_stats_aggregator.erl b/src/couch_stats/src/couch_stats_aggregator.erl
deleted file mode 100644
index 34b28bfd6..000000000
--- a/src/couch_stats/src/couch_stats_aggregator.erl
+++ /dev/null
@@ -1,162 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stats_aggregator).
-
--behaviour(gen_server).
-
--export([
- fetch/0,
- flush/0,
- reload/0
-]).
-
--export([
- start_link/0,
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- terminate/2
-]).
-
--include("couch_stats.hrl").
-
--record(st, {
- descriptions,
- stats,
- collect_timer,
- reload_timer
-}).
-
-fetch() ->
- {ok, Stats} = gen_server:call(?MODULE, fetch),
- Stats.
-
-flush() ->
- gen_server:call(?MODULE, flush).
-
-reload() ->
- gen_server:call(?MODULE, reload).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
- {ok, Descs} = reload_metrics(),
- CT = erlang:send_after(get_interval(collect), self(), collect),
- RT = erlang:send_after(get_interval(reload), self(), reload),
- {ok, #st{descriptions = Descs, stats = [], collect_timer = CT, reload_timer = RT}}.
-
-handle_call(fetch, _from, #st{stats = Stats} = State) ->
- {reply, {ok, Stats}, State};
-handle_call(flush, _From, State) ->
- {reply, ok, collect(State)};
-handle_call(reload, _from, #st{reload_timer = OldRT} = State) ->
- timer:cancel(OldRT),
- {ok, Descriptions} = reload_metrics(),
- RT = update_timer(reload),
- {reply, ok, State#st{descriptions = Descriptions, reload_timer = RT}};
-handle_call(Msg, _From, State) ->
- {stop, {unknown_call, Msg}, error, State}.
-
-handle_cast(Msg, State) ->
- {stop, {unknown_cast, Msg}, State}.
-
-handle_info(collect, State) ->
- {noreply, collect(State)};
-handle_info(reload, State) ->
- {ok, Descriptions} = reload_metrics(),
- {noreply, State#st{descriptions = Descriptions}};
-handle_info(Msg, State) ->
- {stop, {unknown_info, Msg}, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-comparison_set(Metrics) ->
- sets:from_list(
- [{Name, proplists:get_value(type, Props)} || {Name, Props} <- Metrics]
- ).
-
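-% Diff the metric descriptions declared by currently loaded applications
-% against the metrics already registered in folsom: stale metrics are
-% deleted and newly described ones are created.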
-reload_metrics() ->
- Current = load_metrics_for_applications(),
- CurrentSet = comparison_set(Current),
- Existing = couch_stats:list(),
- ExistingSet = comparison_set(Existing),
- ToDelete = sets:subtract(ExistingSet, CurrentSet),
- ToCreate = sets:subtract(CurrentSet, ExistingSet),
- sets:fold(
- fun({Name, _}, _) ->
- couch_stats:delete(Name),
- nil
- end,
- nil,
- ToDelete
- ),
- sets:fold(
- fun({Name, Type}, _) ->
- couch_stats:new(Type, Name),
- nil
- end,
- nil,
- ToCreate
- ),
- {ok, Current}.
-
-load_metrics_for_applications() ->
- Apps = [element(1, A) || A <- application:loaded_applications()],
- lists:foldl(
- fun(AppName, Acc) ->
- case load_metrics_for_application(AppName) of
- error -> Acc;
- Descriptions -> Descriptions ++ Acc
- end
- end,
- [],
- Apps
- ).
-
-load_metrics_for_application(AppName) ->
- case code:priv_dir(AppName) of
- {error, _Error} ->
- error;
- Dir ->
- case file:consult(Dir ++ "/stats_descriptions.cfg") of
- {ok, Descriptions} ->
- Descriptions;
- {error, _Error} ->
- error
- end
- end.
-
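-% Sample every described metric, cache the results in the server state for
-% fetch/0, and re-arm the collection timer.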
-collect(#st{collect_timer = OldCT} = State) ->
- timer:cancel(OldCT),
- Stats = lists:map(
- fun({Name, Props}) ->
- {Name, [{value, couch_stats:sample(Name)} | Props]}
- end,
- State#st.descriptions
- ),
- CT = update_timer(collect),
- State#st{stats = Stats, collect_timer = CT}.
-
-update_timer(Type) ->
- Interval = get_interval(Type),
- erlang:send_after(Interval, self(), Type).
-
-get_interval(reload) -> 1000 * ?RELOAD_INTERVAL;
-get_interval(collect) -> 1000 * config:get_integer("stats", "interval", ?DEFAULT_INTERVAL).
diff --git a/src/couch_stats/src/couch_stats_app.erl b/src/couch_stats/src/couch_stats_app.erl
deleted file mode 100644
index 78880e383..000000000
--- a/src/couch_stats/src/couch_stats_app.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stats_app).
-
--behaviour(application).
-
--export([start/2, stop/1]).
-
-start(_StartType, _StartArgs) ->
- couch_stats_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/couch_stats/src/couch_stats_httpd.erl b/src/couch_stats/src/couch_stats_httpd.erl
deleted file mode 100644
index b40ba6094..000000000
--- a/src/couch_stats/src/couch_stats_httpd.erl
+++ /dev/null
@@ -1,115 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stats_httpd).
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_stats_req/1]).
-
-%% exported for use by chttpd_misc
--export([transform_stats/1, nest/1, to_ejson/1, extract_path/2]).
-
-handle_stats_req(#httpd{method = 'GET', path_parts = [_ | Path]} = Req) ->
- flush(Req),
- Stats0 = couch_stats:fetch(),
- Stats = transform_stats(Stats0),
- Nested = nest(Stats),
- EJSON0 = to_ejson(Nested),
- EJSON1 = extract_path(Path, EJSON0),
- couch_httpd:send_json(Req, EJSON1).
-
-transform_stats(Stats) ->
- transform_stats(Stats, []).
-
-transform_stats([], Acc) ->
- Acc;
-transform_stats([{Key, Props} | Rest], Acc) ->
- {_, Type} = proplists:lookup(type, Props),
- transform_stats(Rest, [{Key, transform_stat(Type, Props)} | Acc]).
-
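-% Counters and gauges pass through unchanged; histogram statistics contain
-% {Key, Value} tuples (percentiles and histogram buckets) which are turned
-% into two-element lists so they can be encoded as JSON arrays.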
-transform_stat(counter, Props) ->
- Props;
-transform_stat(gauge, Props) ->
- Props;
-transform_stat(histogram, Props) ->
- lists:map(
- fun
- ({value, Value}) ->
- {value,
- lists:map(
- fun
- ({Key, List}) when Key == percentile; Key == histogram ->
- {Key, [tuple_to_list(Item) || Item <- List]};
- (Else) ->
- Else
- end,
- Value
- )};
- (Else) ->
- Else
- end,
- Props
- ).
-
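-% nest/1 turns a flat proplist whose keys are lists of path segments (for
-% example [{[a, b, c], Value}]) into a proplist nested one level per
-% segment, ready to be rendered as nested JSON objects by to_ejson/1.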
-nest(Proplist) ->
- nest(Proplist, []).
-
-nest([], Acc) ->
- Acc;
-nest([{[Key | Keys], Value} | Rest], Acc) ->
- Acc1 =
- case proplists:lookup(Key, Acc) of
- {Key, Old} ->
- [{Key, nest([{Keys, Value}], Old)} | proplists:delete(Key, Acc)];
- none ->
- Term = lists:foldr(fun(K, A) -> [{K, A}] end, Value, Keys),
- [{Key, Term} | Acc]
- end,
- nest(Rest, Acc1).
-
-to_ejson([{_, _} | _] = Proplist) ->
- EJSONProps = lists:map(
- fun({Key, Value}) -> {maybe_format_key(Key), to_ejson(Value)} end,
- Proplist
- ),
- {EJSONProps};
-to_ejson(NotAProplist) ->
- NotAProplist.
-
-extract_path([], EJSON) ->
- EJSON;
-extract_path([Key | Rest], {Props}) ->
- case proplists:lookup(Key, Props) of
- {Key, SubEJSON} ->
- extract_path(Rest, SubEJSON);
- none ->
- null
- end;
-extract_path([_ | _], _NotAnObject) ->
- null.
-
-maybe_format_key(Key) when is_list(Key) ->
- list_to_binary(Key);
-maybe_format_key(Key) when is_atom(Key) ->
- list_to_binary(atom_to_list(Key));
-maybe_format_key(Key) when is_integer(Key) ->
- list_to_binary(integer_to_list(Key));
-maybe_format_key(Key) when is_binary(Key) ->
- Key.
-
-flush(Req) ->
- case couch_util:get_value("flush", chttpd:qs(Req)) of
- "true" ->
- couch_stats_aggregator:flush();
- _Else ->
- ok
- end.
diff --git a/src/couch_stats/src/couch_stats_process_tracker.erl b/src/couch_stats/src/couch_stats_process_tracker.erl
deleted file mode 100644
index c53f0f887..000000000
--- a/src/couch_stats/src/couch_stats_process_tracker.erl
+++ /dev/null
@@ -1,80 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stats_process_tracker).
--behaviour(gen_server).
-
--export([
- track/1,
- track/2
-]).
-
--export([
- start_link/0,
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- terminate/2
-]).
-
--record(st, {}).
-
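-% Tracking a pid increments the named counter and monitors the process; the
-% corresponding 'DOWN' message decrements the counter again, so the metric
-% reflects the number of live tracked processes.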
--spec track(any()) -> ok.
-track(Name) ->
- track(self(), Name).
-
--spec track(pid(), any()) -> ok.
-track(Pid, Name) ->
- gen_server:cast(?MODULE, {track, Pid, Name}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
- ets:new(?MODULE, [named_table, public, set]),
- {ok, #st{}}.
-
-handle_call(Msg, _From, State) ->
- error_logger:error_msg("~p received unknown call ~p", [?MODULE, Msg]),
- {noreply, State}.
-
-handle_cast({track, Pid, Name}, State) ->
- couch_stats:increment_counter(Name),
- Ref = erlang:monitor(process, Pid),
- ets:insert(?MODULE, {Ref, Name}),
- {noreply, State};
-handle_cast(Msg, State) ->
- error_logger:error_msg("~p received unknown cast ~p", [?MODULE, Msg]),
- {noreply, State}.
-
-handle_info({'DOWN', Ref, _, _, _} = Msg, State) ->
- case ets:lookup(?MODULE, Ref) of
- [] ->
- error_logger:error_msg(
- "~p received unknown exit; message was ~p", [?MODULE, Msg]
- );
- [{Ref, Name}] ->
- couch_stats:decrement_counter(Name),
- ets:delete(?MODULE, Ref)
- end,
- {noreply, State};
-handle_info(Msg, State) ->
- error_logger:error_msg("~p received unknown message ~p", [?MODULE, Msg]),
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch_stats/src/couch_stats_sup.erl b/src/couch_stats/src/couch_stats_sup.erl
deleted file mode 100644
index 2a92ac69c..000000000
--- a/src/couch_stats/src/couch_stats_sup.erl
+++ /dev/null
@@ -1,34 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stats_sup).
-
--behaviour(supervisor).
-
--export([
- start_link/0,
- init/1
-]).
-
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
- {ok,
- {
- {one_for_one, 5, 10}, [
- ?CHILD(couch_stats_aggregator, worker),
- ?CHILD(couch_stats_process_tracker, worker)
- ]
- }}.
diff --git a/src/couch_tests/.gitignore b/src/couch_tests/.gitignore
deleted file mode 100644
index 083179d49..000000000
--- a/src/couch_tests/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-*.o
-*.so
-ebin/
-
-.rebar/
-.eunit
diff --git a/src/couch_tests/include/couch_tests.hrl b/src/couch_tests/include/couch_tests.hrl
deleted file mode 100644
index 41d7e8d70..000000000
--- a/src/couch_tests/include/couch_tests.hrl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(couch_tests_ctx, {
- chain = [],
- args = [],
- opts = [],
- started_apps = [],
- stopped_apps = [],
- dict = dict:new()
-}).
-
--record(couch_tests_fixture, {
- module,
- id,
- setup,
- teardown,
- apps = []
-}).
diff --git a/src/couch_tests/rebar.config b/src/couch_tests/rebar.config
deleted file mode 100644
index a08b22f76..000000000
--- a/src/couch_tests/rebar.config
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{erl_opts, [debug_info,
- {src_dirs, ["src", "setups"]}]}.
-
-{eunit_opts, [verbose]}.
-
-{cover_enabled, true}.
-
-{cover_print_enabled, true}.
diff --git a/src/couch_tests/setups/couch_epi_dispatch.erl b/src/couch_tests/setups/couch_epi_dispatch.erl
deleted file mode 100644
index 0094780d4..000000000
--- a/src/couch_tests/setups/couch_epi_dispatch.erl
+++ /dev/null
@@ -1,98 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_epi_dispatch).
-
--export([
- dispatch/2
-]).
-
-%% Exports needed for tests
--export([
- app/0,
- providers/0,
- services/0,
- data_providers/0,
- data_subscriptions/0,
- processes/0,
- notify/3
-]).
-
-%% ------------------------------------------------------------------
-%% API functions definitions
-%% ------------------------------------------------------------------
-
-dispatch(ServiceId, CallbackModule) ->
- couch_tests:new(
- ?MODULE,
- dispatch,
- setup_dispatch(ServiceId, CallbackModule),
- teardown_dispatch()
- ).
-
-%% ------------------------------------------------------------------
-%% setups and teardowns
-%% ------------------------------------------------------------------
-
-setup_dispatch(ServiceId, CallbackModule) ->
- fun(Fixture, Ctx0) ->
- Plugins = application:get_env(couch_epi, plugins, []),
- Ctx1 = start_epi(Ctx0, [CallbackModule]),
- couch_tests:set_state(Fixture, Ctx1, {ServiceId, CallbackModule, Plugins})
- end.
-
-teardown_dispatch() ->
- fun(Fixture, Ctx0) ->
- {ServiceId, _Module, Plugins} = couch_tests:get_state(Fixture, Ctx0),
- stop_epi(Ctx0, ServiceId, Plugins)
- end.
-
-%% ------------------------------------------------------------------
-%% Helper functions definitions
-%% ------------------------------------------------------------------
-
-start_epi(Ctx0, Plugins) ->
-    %% Stop couch_epi in case it was started by other tests.
- Ctx1 = couch_tests:stop_applications([couch_epi], Ctx0),
- application:unload(couch_epi),
- ok = application:load(couch_epi),
- ok = application:set_env(couch_epi, plugins, Plugins),
- couch_tests:start_applications([couch_epi], Ctx1).
-
-stop_epi(Ctx0, ServiceId, Plugins) ->
- ok = application:set_env(couch_epi, plugins, Plugins),
- Handle = couch_epi:get_handle(ServiceId),
- catch couch_epi_module_keeper:reload(Handle),
- Ctx1 = couch_tests:stop_applications([couch_epi], Ctx0),
- application:unload(couch_epi),
- Ctx1.
-
-%% ------------------------------------------------------------------
-%% Tests
-%% ------------------------------------------------------------------
-
-%% EPI behaviour callbacks
-app() -> test_app.
-providers() -> [].
-services() -> [].
-data_providers() -> [].
-data_subscriptions() -> [].
-processes() -> [].
-notify(_, _, _) -> ok.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-dispatch_test() ->
- ?assert(couch_tests:validate_fixture(dispatch(test_service, ?MODULE))).
-
--endif.
diff --git a/src/couch_tests/src/couch_tests.app.src b/src/couch_tests/src/couch_tests.app.src
deleted file mode 100644
index ea243eba0..000000000
--- a/src/couch_tests/src/couch_tests.app.src
+++ /dev/null
@@ -1,18 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_tests, [
- {description, "Testing infrastructure for Apache CouchDB"},
- {vsn, git},
- {registered, []},
- {applications, [kernel, stdlib]}
-]}.
diff --git a/src/couch_tests/src/couch_tests.erl b/src/couch_tests/src/couch_tests.erl
deleted file mode 100644
index de80addf5..000000000
--- a/src/couch_tests/src/couch_tests.erl
+++ /dev/null
@@ -1,233 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_tests).
-
--export([
- new/4,
- setup/1,
- setup/3,
- teardown/1
-]).
-
--export([
- start_applications/2,
- stop_applications/2
-]).
-
--export([
- get/2,
- get_state/2,
- set_state/3
-]).
-
--export([
- validate/1,
- validate_and_report/1
-]).
-
--export([
- validate_fixture/1,
- validate_fixture/3
-]).
-
--include_lib("couch_tests/include/couch_tests.hrl").
-
-%% ------------------------------------------------------------------
-%% API functions definitions
-%% ------------------------------------------------------------------
-
-new(Module, FixtureId, Setup, Teardown) ->
- #couch_tests_fixture{
- module = Module,
- id = FixtureId,
- setup = Setup,
- teardown = Teardown
- }.
-
-setup(Chain) ->
- setup(Chain, [], []).
-
-setup(Chain, Args, Opts) ->
- Ctx = #couch_tests_ctx{chain = Chain, args = Args, opts = Opts},
- do_setup(Chain, Ctx, []).
-
-teardown(#couch_tests_ctx{chain = Chain} = Ctx0) ->
- Ctx1 = lists:foldl(fun do_teardown/2, Ctx0, lists:reverse(Chain)),
- ToStop = lists:reverse(Ctx1#couch_tests_ctx.started_apps),
- stop_applications(ToStop, Ctx1).
-
-start_applications(Apps, Ctx) when is_list(Apps) ->
- #couch_tests_ctx{
- started_apps = Running
- } = Ctx,
- Started = start_applications(Apps),
- Ctx#couch_tests_ctx{started_apps = Running ++ Started}.
-
-stop_applications(Apps, Ctx) when is_list(Apps) ->
- #couch_tests_ctx{
- started_apps = Started,
- stopped_apps = Stopped
- } = Ctx,
- JustStopped = stop_applications(Apps -- Stopped),
- Ctx#couch_tests_ctx{
- started_apps = Started -- JustStopped,
- stopped_apps = remove_duplicates(Stopped ++ JustStopped)
- }.
-
-get_state(#couch_tests_fixture{module = Module, id = Id}, Ctx) ->
- dict:fetch({Module, Id}, Ctx#couch_tests_ctx.dict).
-
-set_state(Fixture, Ctx, State) ->
- #couch_tests_fixture{
- module = Module,
- id = Id
- } = Fixture,
- Dict = dict:store({Module, Id}, State, Ctx#couch_tests_ctx.dict),
- Ctx#couch_tests_ctx{dict = Dict}.
-
-get(started_apps, #couch_tests_ctx{started_apps = Started}) ->
- Started;
-get(stopped_apps, #couch_tests_ctx{stopped_apps = Stopped}) ->
- Stopped.
-
-validate_fixture(#couch_tests_fixture{} = Fixture) ->
- validate_fixture(Fixture, [], []).
-
-validate_fixture(#couch_tests_fixture{} = Fixture0, Args, Opts) ->
- AppsBefore = applications(),
- #couch_tests_ctx{chain = [Fixture1]} = Ctx0 = setup([Fixture0], Args, Opts),
- AppsWhile = applications(),
- Ctx1 = teardown(Ctx0),
- AppsAfter = applications(),
- AppsStarted = lists:usort(AppsWhile -- AppsBefore),
- FixtureApps = lists:usort(Fixture1#couch_tests_fixture.apps),
- StartedAppsBeforeTeardown = lists:usort(Ctx0#couch_tests_ctx.started_apps),
- StoppedAppsAfterTeardown = lists:usort(Ctx1#couch_tests_ctx.stopped_apps),
- StartedAppsAfterTeardown = Ctx1#couch_tests_ctx.started_apps,
-
- validate_and_report([
- {equal,
- "Expected applications before calling fixture (~p) "
- "to be equal to applications after its calling", AppsBefore, AppsAfter},
- {equal,
- "Expected list of started applications (~p) "
- "to be equal to #couch_tests_fixture.apps (~p)", AppsStarted, FixtureApps},
- {equal,
- "Expected list of started applications (~p) "
- "to be equal to #couch_tests_ctx.started_apps (~p)", AppsStarted,
- StartedAppsBeforeTeardown},
- {equal,
- "Expected list of stopped applications (~p) "
- "to be equal to #couch_tests_ctx.stopped_apps (~p)", AppsStarted,
- StoppedAppsAfterTeardown},
- {equal,
- "Expected empty list ~i of #couch_tests_ctx.started_apps (~p) "
- "after teardown", [], StartedAppsAfterTeardown}
- ]).
-
-validate(Sheet) ->
- case lists:foldl(fun do_validate/2, [], Sheet) of
- [] -> true;
- Errors -> Errors
- end.
-
-validate_and_report(Sheet) ->
- case validate(Sheet) of
- true ->
- true;
- Errors ->
- [io:format(user, " ~s~n", [Err]) || Err <- Errors],
- false
- end.
-
-%% ------------------------------------------------------------------
-%% Helper functions definitions
-%% ------------------------------------------------------------------
-
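-% Each fixture's setup callback runs with an empty started_apps list so the
-% applications it starts are recorded on that fixture; do_teardown/2 later
-% stops exactly those applications in reverse order.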
-do_setup([#couch_tests_fixture{setup = Setup} = Fixture | Rest], Ctx0, Acc) ->
- Ctx1 = Ctx0#couch_tests_ctx{started_apps = []},
- #couch_tests_ctx{started_apps = Apps} = Ctx2 = Setup(Fixture, Ctx1),
- Ctx3 = Ctx2#couch_tests_ctx{started_apps = []},
- do_setup(Rest, Ctx3, [Fixture#couch_tests_fixture{apps = Apps} | Acc]);
-do_setup([], Ctx, Acc) ->
- Apps = lists:foldl(
- fun(#couch_tests_fixture{apps = A}, AppsAcc) ->
- A ++ AppsAcc
- end,
- [],
- Acc
- ),
- Ctx#couch_tests_ctx{chain = lists:reverse(Acc), started_apps = Apps}.
-
-do_teardown(Fixture, Ctx0) ->
- #couch_tests_fixture{teardown = Teardown, apps = Apps} = Fixture,
- #couch_tests_ctx{} = Ctx1 = Teardown(Fixture, Ctx0),
- stop_applications(lists:reverse(Apps), Ctx1).
-
-start_applications(Apps) ->
- do_start_applications(Apps, []).
-
-do_start_applications([], Acc) ->
- lists:reverse(Acc);
-do_start_applications([App | Apps], Acc) ->
- case application:start(App) of
- {error, {already_started, _}} ->
- do_start_applications(Apps, Acc);
- {error, {not_started, Dep}} ->
- do_start_applications([Dep, App | Apps], Acc);
- {error, {not_running, Dep}} ->
- do_start_applications([Dep, App | Apps], Acc);
- ok ->
- do_start_applications(Apps, [App | Acc])
- end.
-
-stop_applications(Apps) ->
- do_stop_applications(Apps, []).
-
-do_stop_applications([], Acc) ->
- lists:reverse(Acc);
-do_stop_applications([App | Apps], Acc) ->
- case application:stop(App) of
- {error, _} ->
- do_stop_applications(Apps, Acc);
- ok ->
- do_stop_applications(Apps, [App | Acc])
- end.
-
-remove_duplicates([]) ->
- [];
-remove_duplicates([H | T]) ->
- [H | [X || X <- remove_duplicates(T), X /= H]].
-
-applications() ->
- lists:usort([App || {App, _, _} <- application:which_applications()]).
-
-do_validate({equal, _Message, Arg, Arg}, Acc) ->
- Acc;
-do_validate({equal, Message, Arg1, Arg2}, Acc) ->
- [io_lib:format(Message, [Arg1, Arg2]) | Acc].
-
-%% ------------------------------------------------------------------
-%% Tests
-%% ------------------------------------------------------------------
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-validate_test() ->
- ?assertMatch("1 == 2", lists:flatten(validate([{equal, "~w == ~w", 1, 2}]))),
- ?assertMatch("2", lists:flatten(validate([{equal, "~i~w", 1, 2}]))),
- ?assert(validate([{equal, "~w == ~w", 1, 1}])),
- ok.
-
--endif.
diff --git a/src/couch_tests/src/couch_tests_combinatorics.erl b/src/couch_tests/src/couch_tests_combinatorics.erl
deleted file mode 100644
index f1ee6dd2e..000000000
--- a/src/couch_tests/src/couch_tests_combinatorics.erl
+++ /dev/null
@@ -1,136 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_tests_combinatorics).
-
--export([
- powerset/1,
- permutations/1,
- product/1,
- binary_combinations/1,
- n_combinations/2
-]).
-
-%% @doc powerset(Items)
-%% Generate powerset for a given list of Items
-%% By Hynek - Pichi - Vychodil
-%% For example:
-%% 1> powerset([foo, bar, baz]).
-%% [
-%% [foo],
-%% [foo,baz],
-%% [foo,bar,baz],
-%% [foo,bar],
-%% [bar],
-%% [bar,baz],
-%% [baz],
-%% []
-%% ]
--spec powerset(Elements :: list()) -> [list()].
-
-powerset([]) ->
- [[]];
-powerset([H | T]) ->
- PT = powerset(T),
- powerset(H, PT, PT).
-
-powerset(_, [], Acc) ->
- Acc;
-powerset(X, [H | T], Acc) ->
- powerset(X, T, [[X | H] | Acc]).
-
-%% @doc permutations(Items)
-%% Return all permutations of a given list of Items.
-%% from http://erlang.org/doc/programming_examples/list_comprehensions.html
-%% For example:
-%% 1> permutations([foo, bar, baz]).
-%% [
-%% [foo, bar, baz],
-%% [foo, baz, bar],
-%% [bar, foo, baz],
-%% [bar, baz, foo],
-%% [baz, foo, bar],
-%% [baz, bar, foo]
-%% ]
--spec permutations(Elements :: list()) -> [list()].
-
-permutations([]) ->
- [[]];
-permutations(L) ->
- [[H | T] || H <- L, T <- permutations(L -- [H])].
-
-%% @doc product([Items1, Items2, ..., ItemsN])
-%% Return the cartesian product of multiple sets represented as a list of lists
-%% From: http://stackoverflow.com/a/23886680
-%% For example:
-%% 1> product([[foo, bar], [1,2,3]]).
-%% [
-%% [foo, 1],
-%% [foo, 2],
-%% [foo, 3],
-%% [bar, 1],
-%% [bar, 2],
-%% [bar, 3]
-%% ]
--spec product(Elements :: list()) -> [list()].
-
-product([H]) ->
- [[A] || A <- H];
-product([H | T]) ->
- [[A | B] || A <- H, B <- product(T)].
-
-%% @doc binary_combinations(NBits).
-%% Generate all combinations of true and false for specified number of bits.
-%% For example:
-%% 1> binary_combinations(3).
-%% [
-%% [ false , false , false ],
-%% [ false , false , true ],
-%% [ false , true , false ],
-%% [ false , true , true ],
-%% [ true , false , false ],
-%% [ true , false , true ],
-%% [ true , true , false ],
-%% [ true , true , true ]
-%% ]
-%% 2> length(binary_combinations(3))
-%% 8
--spec binary_combinations(NBits :: pos_integer()) -> [list(boolean())].
-
-binary_combinations(NBits) ->
- product(lists:duplicate(NBits, [true, false])).
-
-%% @doc n_combinations(N, Items).
-%% Generate all combinations by choosing N values from a given list of Items
-%% in sorted order. Each combination is sorted and the entire table is sorted.
-%% For example:
-%% 1> couch_tests_combinatorics:n_combinations(2, [mon, tue, wed, thu, fri]).
-%% [
-%% [mon, tue],
-%% [mon, wed],
-%% [mon, thu],
-%% [mon, fri],
-%% [tue, wed],
-%% [tue, thu],
-%% [tue, fri],
-%% [wed, thu],
-%% [wed, fri],
-%% [thu, fri]
-%% ]
--spec n_combinations(Size :: pos_integer(), Elements :: list()) -> [list()].
-
-n_combinations(0, _) ->
- [[]];
-n_combinations(_, []) ->
- [];
-n_combinations(N, [H | T]) ->
- [[H | L] || L <- n_combinations(N - 1, T)] ++ n_combinations(N, T).
diff --git a/src/couch_tests/test/couch_tests_app_tests.erl b/src/couch_tests/test/couch_tests_app_tests.erl
deleted file mode 100644
index 97f5c1750..000000000
--- a/src/couch_tests/test/couch_tests_app_tests.erl
+++ /dev/null
@@ -1,117 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_tests_app_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-setup() ->
- [mock(application)].
-
-teardown(Mocks) ->
- [unmock(Mock) || Mock <- Mocks].
-
-%% ------------------------------------------------------------------
-%% Test callbacks definitions
-%% ------------------------------------------------------------------
-
-dummy_setup() ->
- couch_tests:new(
- ?MODULE,
- dummy_setup,
- fun(_Fixture, Ctx) -> Ctx end,
- fun(_Fixture, Ctx) -> Ctx end
- ).
-
-setup1(Arg1) ->
- couch_tests:new(
- ?MODULE,
- setup1,
- fun(Fixture, Ctx0) ->
- Ctx1 = couch_tests:start_applications([asn1], Ctx0),
- couch_tests:set_state(Fixture, Ctx1, {Arg1})
- end,
- fun(_Fixture, Ctx) ->
- couch_tests:stop_applications([asn1], Ctx)
- end
- ).
-
-setup2(Arg1, Arg2) ->
- couch_tests:new(
- ?MODULE,
- setup2,
- fun(Fixture, Ctx0) ->
- Ctx1 = couch_tests:start_applications([public_key], Ctx0),
- couch_tests:set_state(Fixture, Ctx1, {Arg1, Arg2})
- end,
- fun(_Fixture, Ctx) ->
- Ctx
- end
- ).
-
-couch_tests_test_() ->
- {
- "couch_tests tests",
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- {"chained setup", fun chained_setup/0}
- ]
- }
- }.
-
-chained_setup() ->
- ?assert(meck:validate(application)),
- ?assertEqual([], history(application, start)),
- Ctx0 = couch_tests:setup(
- [
- setup1(foo),
- dummy_setup(),
- setup2(bar, baz)
- ],
- [],
- []
- ),
-
- ?assertEqual([asn1, public_key], history(application, start)),
- ?assertEqual([asn1, public_key], couch_tests:get(started_apps, Ctx0)),
- ?assertEqual([], couch_tests:get(stopped_apps, Ctx0)),
-
- Ctx1 = couch_tests:teardown(Ctx0),
-
- ?assertEqual([public_key, asn1], history(application, stop)),
- ?assertEqual([], couch_tests:get(started_apps, Ctx1)),
- ?assertEqual([public_key, asn1], couch_tests:get(stopped_apps, Ctx1)),
-
- ok.
-
-mock(application) ->
- ok = meck:new(application, [unstick, passthrough]),
- ok = meck:expect(application, start, fun(_) -> ok end),
- ok = meck:expect(application, stop, fun(_) -> ok end),
- meck:validate(application),
- application.
-
-unmock(application) ->
- catch meck:unload(application).
-
-history(Module, Function) ->
- Self = self(),
- [
- A
- || {Pid, {M, F, [A]}, _Result} <- meck:history(Module),
- Pid =:= Self,
- M =:= Module,
- F =:= Function
- ].
diff --git a/src/custodian/README b/src/custodian/README
deleted file mode 100644
index ff88373c5..000000000
--- a/src/custodian/README
+++ /dev/null
@@ -1,8 +0,0 @@
-Custodian is responsible for the data stored in CouchDB databases.
-
-Custodian scans the shards database, which details the location of
-every shard of every database, and ensures that operators are aware of
-any shard that is under-replicated (has fewer than N copies).
-
-Custodian accounts for data in transit (as indicated by the
-mem3.redirects section) as well as nodes not recently known to be up.
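-
-For example, operators can call custodian:summary() from a remote shell for
-aggregate counts of under-replicated or conflicted shard ranges, or
-custodian:report() for a per-database breakdown.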
diff --git a/src/custodian/rebar.config.script b/src/custodian/rebar.config.script
deleted file mode 100644
index f32db974c..000000000
--- a/src/custodian/rebar.config.script
+++ /dev/null
@@ -1,35 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-CouchConfig = case filelib:is_file(os:getenv("COUCHDB_CONFIG")) of
- true ->
- {ok, Result} = file:consult(os:getenv("COUCHDB_CONFIG")),
- Result;
- false ->
- []
-end,
-
-CustodianMonitor = case lists:keyfind(custodian_monitor, 1, CouchConfig) of
- {custodian_monitor, Module} when Module /= "" ->
- list_to_atom(Module);
- _ ->
- custodian_noop_monitor
-end,
-
-CurrentOpts = case lists:keyfind(erl_opts, 1, CONFIG) of
- {erl_opts, Opts} -> Opts;
- false -> []
-end,
-
-CustodianOpts = {d, 'CUSTODIAN_MONITOR', CustodianMonitor},
-lists:keystore(erl_opts, 1, CONFIG, {erl_opts, [CustodianOpts | CurrentOpts]}).
diff --git a/src/custodian/src/custodian.app.src.script b/src/custodian/src/custodian.app.src.script
deleted file mode 100644
index 551b9c2c3..000000000
--- a/src/custodian/src/custodian.app.src.script
+++ /dev/null
@@ -1,48 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-CouchConfig = case filelib:is_file(os:getenv("COUCHDB_CONFIG")) of
- true ->
- {ok, Result} = file:consult(os:getenv("COUCHDB_CONFIG")),
- Result;
- false ->
- []
-end.
-
-CustodianMonitorApp = case lists:keyfind(custodian_monitor_app, 1, CouchConfig) of
- {custodian_monitor_app, AppName} when AppName /= "" ->
- [list_to_atom(AppName)];
- _ ->
- []
-end.
-
-BaseApplications = [
- kernel,
- stdlib,
- couch_log,
- config,
- couch_event,
- couch,
- mem3
-].
-
-Applications = CustodianMonitorApp ++ BaseApplications.
-
-{application, custodian,
- [
- {description, "in your cluster, looking after your stuff"},
- {vsn, git},
- {registered, []},
- {applications, Applications},
- {mod, { custodian_app, []}},
- {env, []}
- ]}.
diff --git a/src/custodian/src/custodian.erl b/src/custodian/src/custodian.erl
deleted file mode 100644
index 5cb7d930c..000000000
--- a/src/custodian/src/custodian.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(custodian).
-
--export([report/0, summary/0]).
-
-report() ->
- custodian_util:report().
-
-summary() ->
- custodian_util:summary().
diff --git a/src/custodian/src/custodian_app.erl b/src/custodian/src/custodian_app.erl
deleted file mode 100644
index 91afe139f..000000000
--- a/src/custodian/src/custodian_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(custodian_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
- custodian_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/custodian/src/custodian_db_checker.erl b/src/custodian/src/custodian_db_checker.erl
deleted file mode 100644
index 96cf24a30..000000000
--- a/src/custodian/src/custodian_db_checker.erl
+++ /dev/null
@@ -1,139 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(custodian_db_checker).
--behaviour(gen_server).
--vsn(1).
-
--export([start_link/0]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--export([
- check_dbs/0
-]).
-
--record(st, {
- checker
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init(_) ->
- process_flag(trap_exit, true),
- net_kernel:monitor_nodes(true),
- {ok, restart_checker(#st{})}.
-
-terminate(_Reason, St) ->
- couch_util:shutdown_sync(St#st.checker),
- ok.
-
-handle_call(Msg, _From, St) ->
- {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-handle_cast(refresh, St) ->
- {noreply, restart_checker(St)};
-handle_cast(Msg, St) ->
- {stop, {invalid_cast, Msg}, St}.
-
-handle_info({nodeup, _}, St) ->
- {noreply, restart_checker(St)};
-handle_info({nodedown, _}, St) ->
- {noreply, restart_checker(St)};
-handle_info({'EXIT', Pid, normal}, #st{checker = Pid} = St) ->
- {noreply, St#st{checker = undefined}};
-handle_info({'EXIT', Pid, Reason}, #st{checker = Pid} = St) ->
- couch_log:notice("custodian db checker died ~p", [Reason]),
- {noreply, restart_checker(St#st{checker = undefined})};
-handle_info(Msg, St) ->
- {stop, {invalid_info, Msg}, St}.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-restart_checker(#st{checker = undefined} = St) ->
- Pid = spawn_link(fun ?MODULE:check_dbs/0),
- St#st{checker = Pid};
-restart_checker(#st{checker = Pid} = St) when is_pid(Pid) ->
- St.
-
-check_dbs() ->
- {ok, DbsDb} = custodian_util:ensure_dbs_exists(),
- try
- Missing = lists:foldl(
- fun(DbName, Count) ->
- case check_db(DbsDb, DbName) of
- ok -> Count;
- missing -> Count + 1
- end
- end,
- 0,
- get_dbs()
- ),
- case Missing == 0 of
- true -> clear_missing_dbs_alert();
- false -> ok
- end
- after
- couch_db:close(DbsDb)
- end.
-
-check_db(DbsDb, DbName) when is_binary(DbName) ->
- try
- case couch_db:open_doc(DbsDb, DbName, []) of
- {ok, _} ->
- ok;
- _ ->
- send_missing_db_alert(DbName),
- missing
- end
- catch
- _:_ ->
- send_missing_db_alert(DbName),
- missing
- end.
-
-get_dbs() ->
- lists:flatten([
- get_users_db(),
- get_stats_db()
- ]).
-
-get_users_db() ->
- UsersDb = chttpd_auth_cache:dbname(),
- [list_to_binary(UsersDb)].
-
-get_stats_db() ->
- case application:get_env(ioq, stats_db) of
- {ok, DbName} when is_binary(DbName) ->
- [DbName];
- {ok, DbName} when is_list(DbName) ->
- [iolist_to_binary(DbName)];
- _ ->
- []
- end.
-
-send_missing_db_alert(DbName) ->
- couch_log:notice("Missing system database ~s", [DbName]),
- ?CUSTODIAN_MONITOR:send_missing_db_alert(DbName).
-
-clear_missing_dbs_alert() ->
- couch_log:notice("All system databases exist.", []),
- ?CUSTODIAN_MONITOR:clear_missing_dbs_alert().
diff --git a/src/custodian/src/custodian_monitor.erl b/src/custodian/src/custodian_monitor.erl
deleted file mode 100644
index 29a347374..000000000
--- a/src/custodian/src/custodian_monitor.erl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(custodian_monitor).
-
-% N.B. that callback return values are ignored
-
--callback send_missing_db_alert(DbName :: binary()) ->
- Ignored :: any().
-
--callback clear_missing_dbs_alert() ->
- Ignored :: any().
-
--callback send_event(
- Name :: string(), Count :: non_neg_integer(), Description :: string()
-) ->
- Ignored :: any().
diff --git a/src/custodian/src/custodian_noop_monitor.erl b/src/custodian/src/custodian_noop_monitor.erl
deleted file mode 100644
index 4cdd6d1d3..000000000
--- a/src/custodian/src/custodian_noop_monitor.erl
+++ /dev/null
@@ -1,30 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(custodian_noop_monitor).
-
--behaviour(custodian_monitor).
-
--export([
- send_missing_db_alert/1,
- clear_missing_dbs_alert/0,
- send_event/3
-]).
-
-send_missing_db_alert(_DbName) ->
- false.
-
-clear_missing_dbs_alert() ->
- false.
-
-send_event(_Name, _Count, _Description) ->
- false.
diff --git a/src/custodian/src/custodian_server.erl b/src/custodian/src/custodian_server.erl
deleted file mode 100644
index e8bdc13c9..000000000
--- a/src/custodian/src/custodian_server.erl
+++ /dev/null
@@ -1,244 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(custodian_server).
--behaviour(gen_server).
--vsn(3).
--behaviour(config_listener).
-
-% public api.
--export([start_link/0]).
-
-% gen_server api.
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- terminate/2
-]).
-
-% exported for callback.
--export([
- check_shards/0,
- handle_db_event/3
-]).
-
-% config_listener callback
--export([handle_config_change/5, handle_config_terminate/3]).
-
-% private records.
--record(state, {
- event_listener,
- shard_checker,
- rescan = false
-}).
-
--define(VSN_0_2_7, 184129240591641721395874905059581858099).
-
--ifdef(TEST).
--define(RELISTEN_DELAY, 50).
--else.
--define(RELISTEN_DELAY, 5000).
--endif.
-
-% public functions.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-handle_config_change("couchdb", "maintenance_mode", _, _, S) ->
- ok = gen_server:cast(?MODULE, refresh),
- {ok, S};
-handle_config_change(_, _, _, _, S) ->
- {ok, S}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-% gen_server functions.
-init(_) ->
- process_flag(trap_exit, true),
- net_kernel:monitor_nodes(true),
- ok = config:listen_for_changes(?MODULE, nil),
- {ok, LisPid} = start_event_listener(),
- {ok,
- start_shard_checker(#state{
- event_listener = LisPid
- })}.
-
-handle_call(_Msg, _From, State) ->
- {noreply, State}.
-
-handle_cast(refresh, State) ->
- {noreply, start_shard_checker(State)}.
-
-handle_info({nodeup, _}, State) ->
- {noreply, start_shard_checker(State)};
-handle_info({nodedown, _}, State) ->
- {noreply, start_shard_checker(State)};
-handle_info({'EXIT', Pid, normal}, #state{shard_checker = Pid} = State) ->
- NewState = State#state{shard_checker = undefined},
- case State#state.rescan of
- true ->
- {noreply, start_shard_checker(NewState)};
- false ->
- {noreply, NewState}
- end;
-handle_info({'EXIT', Pid, Reason}, #state{shard_checker = Pid} = State) ->
- couch_log:notice("custodian shard checker died ~p", [Reason]),
- NewState = State#state{shard_checker = undefined},
- {noreply, start_shard_checker(NewState)};
-handle_info({'EXIT', Pid, Reason}, #state{event_listener = Pid} = State) ->
- couch_log:notice("custodian update notifier died ~p", [Reason]),
- {ok, Pid1} = start_event_listener(),
- {noreply, State#state{event_listener = Pid1}};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State}.
-
-terminate(_Reason, State) ->
- couch_event:stop_listener(State#state.event_listener),
- couch_util:shutdown_sync(State#state.shard_checker),
- ok.
-
-code_change(?VSN_0_2_7, State, _Extra) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {ok, State};
-code_change(_OldVsn, #state{} = State, _Extra) ->
- {ok, State}.
-
-% private functions
-
-start_shard_checker(#state{shard_checker = undefined} = State) ->
- State#state{
- shard_checker = spawn_link(fun ?MODULE:check_shards/0),
- rescan = false
- };
-start_shard_checker(#state{shard_checker = Pid} = State) when is_pid(Pid) ->
- State#state{rescan = true}.
-
-start_event_listener() ->
- DbName = mem3_sync:shards_db(),
- couch_event:link_listener(
- ?MODULE, handle_db_event, nil, [{dbname, DbName}]
- ).
-
-handle_db_event(_DbName, updated, _St) ->
- gen_server:cast(?MODULE, refresh),
- {ok, nil};
-handle_db_event(_DbName, _Event, _St) ->
- {ok, nil}.
-
-check_shards() ->
- [send_event(Item) || Item <- custodian:summary()].
-
-send_event({_, Count} = Item) ->
- Description = describe(Item),
- Name = check_name(Item),
- case Count of
- 0 ->
- ok;
- 1 ->
- couch_log:critical("~s", [Description]);
- _ ->
- couch_log:warning("~s", [Description])
- end,
- ?CUSTODIAN_MONITOR:send_event(Name, Count, Description).
-
-describe({{safe, N}, Count}) ->
- lists:concat([
- Count,
- " ",
- shards(Count),
- " in cluster with only ",
- N,
- " ",
- copies(N),
- " on nodes that are currently up"
- ]);
-describe({{live, N}, Count}) ->
- lists:concat([
- Count,
- " ",
- shards(Count),
- " in cluster with only ",
- N,
- " ",
- copies(N),
- " on nodes not in maintenance mode"
- ]);
-describe({conflicted, Count}) ->
- lists:concat([Count, " conflicted ", shards(Count), " in cluster"]).
-
-check_name({{Type, N}, _}) ->
- lists:concat(["custodian-", N, "-", Type, "-shards-check"]);
-check_name({Type, _}) ->
- lists:concat(["custodian-", Type, "-shards-check"]).
-
-shards(1) ->
- "shard";
-shards(_) ->
- "shards".
-
-copies(1) ->
- "copy";
-copies(_) ->
- "copies".
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-config_update_test_() ->
- {
- "Test config updates",
- {
- foreach,
- fun() -> test_util:start_couch([custodian]) end,
- fun test_util:stop_couch/1,
- [
- fun t_restart_config_listener/1
- ]
- }
- }.
-
-t_restart_config_listener(_) ->
- ?_test(begin
- ConfigMonitor = config_listener_mon(),
- ?assert(is_process_alive(ConfigMonitor)),
- test_util:stop_sync(ConfigMonitor),
- ?assertNot(is_process_alive(ConfigMonitor)),
- NewConfigMonitor = test_util:wait(fun() ->
- case config_listener_mon() of
- undefined -> wait;
- Pid -> Pid
- end
- end),
- ?assertNotEqual(ConfigMonitor, NewConfigMonitor),
- ?assert(is_process_alive(NewConfigMonitor))
- end).
-
-config_listener_mon() ->
- IsConfigMonitor = fun(P) ->
- [M | _] = string:tokens(couch_debug:process_name(P), ":"),
- M =:= "config_listener_mon"
- end,
- [{_, MonitoredBy}] = process_info(whereis(?MODULE), [monitored_by]),
- case lists:filter(IsConfigMonitor, MonitoredBy) of
- [Pid] -> Pid;
- [] -> undefined
- end.
-
--endif.
diff --git a/src/custodian/src/custodian_sup.erl b/src/custodian/src/custodian_sup.erl
deleted file mode 100644
index c2be7c861..000000000
--- a/src/custodian/src/custodian_sup.erl
+++ /dev/null
@@ -1,45 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(custodian_sup).
-
--behaviour(supervisor).
-
-%% API
--export([start_link/0]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-%% ===================================================================
-%% API functions
-%% ===================================================================
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% ===================================================================
-%% Supervisor callbacks
-%% ===================================================================
-
-init([]) ->
- {ok,
- {
- {one_for_one, 5, 10},
- [
- ?CHILD(custodian_server, worker),
- ?CHILD(custodian_db_checker, worker)
- ]
- }}.
diff --git a/src/custodian/src/custodian_util.erl b/src/custodian/src/custodian_util.erl
deleted file mode 100644
index 866bcacb1..000000000
--- a/src/custodian/src/custodian_util.erl
+++ /dev/null
@@ -1,284 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(custodian_util).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([summary/0, report/0]).
--export([ensure_dbs_exists/0]).
-
-% Old design doc which should be cleaned up
--define(CUSTODIAN_ID, <<"_design/custodian">>).
-
--record(state, {live, safe, callback, db, acc}).
-
-%% public functions.
-
-summary() ->
- Dict0 = dict:from_list(
- [{conflicted, 0}] ++
- [{{live, N}, 0} || N <- lists:seq(0, cluster_n() - 1)] ++
- [{{safe, N}, 0} || N <- lists:seq(0, cluster_n() - 1)]
- ),
- Fun = fun
- (_Id, _Range, {conflicted, _N}, Dict) ->
- dict:update_counter(conflicted, 1, Dict);
- (_Id, _Range, Item, Dict) ->
- dict:update_counter(Item, 1, Dict)
- end,
- dict:to_list(fold_dbs(Dict0, Fun)).
-
-report() ->
- Fun = fun
- (Id, _Range, {conflicted, N}, Acc) ->
- [{Id, {conflicted, N}} | Acc];
- (Id, Range, Item, Acc) ->
- [{Id, Range, Item} | Acc]
- end,
- fold_dbs([], Fun).
-
-ensure_dbs_exists() ->
- DbName = mem3_sync:shards_db(),
- {ok, Db} = mem3_util:ensure_exists(DbName),
- ensure_custodian_ddoc_is_deleted(Db),
- {ok, Db}.
-
-%% private functions.
-
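-% "Safe" nodes are all known cluster nodes after applying any mem3.redirects
-% entries; "live" nodes are the safe nodes that are not in maintenance mode.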
-fold_dbs(Acc, Fun) ->
- Safe = maybe_redirect([node() | nodes()]),
- Live = Safe -- maintenance_nodes(Safe),
- {ok, Db} = ensure_dbs_exists(),
- try
- State0 = #state{live = Live, safe = Safe, callback = Fun, db = Db, acc = Acc},
- {ok, State1} = couch_db:fold_docs(Db, fun fold_dbs1/2, State0, []),
- State1#state.acc
- after
- couch_db:close(Db)
- end.
-
-fold_dbs1(#full_doc_info{id = <<"_design/", _/binary>>}, Acc) ->
- {ok, Acc};
-fold_dbs1(#full_doc_info{deleted = true}, Acc) ->
- {ok, Acc};
-fold_dbs1(#full_doc_info{id = Id} = FDI, State) ->
- InternalAcc =
- case count_conflicts(FDI) of
- 0 ->
- State#state.acc;
- ConflictCount ->
- (State#state.callback)(Id, null, {conflicted, ConflictCount}, State#state.acc)
- end,
- fold_dbs(Id, load_shards(State#state.db, FDI), State#state{acc = InternalAcc}).
-
-fold_dbs(Id, Shards, State) ->
- IsSafe = fun(#shard{node = N}) -> lists:member(N, State#state.safe) end,
- IsLive = fun(#shard{node = N}) -> lists:member(N, State#state.live) end,
- LiveShards = lists:filter(IsLive, Shards),
- SafeShards = lists:filter(IsSafe, Shards),
- TargetN = mem3_util:calculate_max_n(Shards),
- Acc0 = State#state.acc,
- Acc1 =
- case mem3_util:calculate_max_n(LiveShards) of
- LiveN when LiveN < TargetN ->
- LiveRanges = get_range_counts(LiveN, LiveShards, Shards),
- lists:foldl(
- fun({Range, N}, FAcc) ->
- (State#state.callback)(Id, Range, {live, N}, FAcc)
- end,
- Acc0,
- LiveRanges
- );
- _ ->
- Acc0
- end,
- Acc2 =
- case mem3_util:calculate_max_n(SafeShards) of
- SafeN when SafeN < TargetN ->
- SafeRanges = get_range_counts(SafeN, SafeShards, Shards),
- lists:foldl(
- fun({Range, N}, FAcc) ->
- (State#state.callback)(Id, Range, {safe, N}, FAcc)
- end,
- Acc1,
- SafeRanges
- );
- _ ->
- Acc1
- end,
- {ok, State#state{acc = Acc2}}.
-
-get_range_counts(MaxN, Shards, AllShards) ->
- Ranges = ranges(Shards),
- AllRanges = ranges(AllShards),
-
- % Get a list of ranges that were used to fill the MaxN rings. Also return
- % whatever was left (not part of the rings).
- {UnusedRanges, UsedRanges} = get_n_rings(MaxN, Ranges, []),
-
- % All the ranges that participated in filling the N rings will get
- % their number of copies set to MaxN.
- UsedCounts = update_counts(UsedRanges, #{}, 1, fun(_) -> MaxN end),
-
- % Add ranges that were present but didn't get picked in the rings
- PresentCounts = update_counts(UnusedRanges, UsedCounts, 1, fun(N) ->
- max(N + 1, MaxN)
- end),
-
- % Handle shards that are not present at all. Mark these ranges as missing.
- Missing = [R || R <- AllRanges, not lists:member(R, Ranges)],
- RangeCounts = update_counts(Missing, PresentCounts, 0, fun(_) -> 0 end),
-
- % Report only shards with counts =< MaxN
- RangeCounts1 = maps:filter(fun(_, N) -> N =< MaxN end, RangeCounts),
- lists:sort(maps:to_list(RangeCounts1)).
-
-update_counts(Ranges, Acc0, Init, UpdateFun) ->
- lists:foldl(
- fun({B, E}, Acc) ->
- maps:update_with({B, E}, UpdateFun, Init, Acc)
- end,
- Acc0,
- Ranges
- ).
-
-ranges(Shards) ->
- lists:map(
- fun(S) ->
- [B, E] = mem3:range(S),
- {B, E}
- end,
- Shards
- ).
-
-get_n_rings(N, Ranges, Rings) when N =< 0 ->
- {Ranges, Rings};
-get_n_rings(N, Ranges, Rings) ->
- Ring = mem3_util:get_ring(Ranges),
- get_n_rings(N - 1, Ranges -- Ring, Rings ++ Ring).
-
-cluster_n() ->
- config:get_integer("cluster", "n", 3).
-
-maintenance_nodes(Nodes) ->
- {Modes, _} = rpc:multicall(Nodes, config, get, ["couchdb", "maintenance_mode"]),
- [N || {N, Mode} <- lists:zip(Nodes, Modes), Mode =:= "true"].
-
-load_shards(Db, #full_doc_info{id = Id} = FDI) ->
- case couch_db:open_doc(Db, FDI, [ejson_body]) of
- {ok, #doc{body = {Props}}} ->
- mem3_util:build_shards(Id, Props);
- {not_found, _} ->
- erlang:error(database_does_not_exist, ?b2l(Id))
- end.
-
-maybe_redirect(Nodes) ->
- maybe_redirect(Nodes, []).
-
-maybe_redirect([], Acc) ->
- Acc;
-maybe_redirect([Node | Rest], Acc) ->
- case config:get("mem3.redirects", atom_to_list(Node)) of
- undefined ->
- maybe_redirect(Rest, [Node | Acc]);
- Redirect ->
- maybe_redirect(Rest, [list_to_atom(Redirect) | Acc])
- end.
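% A hedged illustration of maybe_redirect/1 above; the node names are
% hypothetical and not taken from this repository. With a "mem3.redirects"
% entry mapping one node to another, the affected node is substituted and
% the result is accumulated in reverse order:
%
%   config:set("mem3.redirects", "node1@127.0.0.1", "node2@127.0.0.1", false),
%   ['node3@127.0.0.1', 'node2@127.0.0.1'] =
%       maybe_redirect(['node1@127.0.0.1', 'node3@127.0.0.1']).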
-
-count_conflicts(#full_doc_info{rev_tree = T}) ->
- Leafs = [1 || {#leaf{deleted = false}, _} <- couch_key_tree:get_all_leafs(T)],
- length(Leafs) - 1.
-
-% Ensure the design doc which was added in 3.2.0 is deleted, as we switched to using a BDU
-% function instead. After a few releases this function could be removed as well.
-%
-ensure_custodian_ddoc_is_deleted(Db) ->
- case couch_db:open_doc(Db, ?CUSTODIAN_ID, [ejson_body]) of
- {not_found, _Reason} ->
- ok;
- {ok, Doc} ->
- DeletedDoc = Doc#doc{deleted = true, body = {[]}},
- try couch_db:update_doc(Db, DeletedDoc, [?ADMIN_CTX]) of
- {ok, _} ->
- LogMsg = "~p : deleted custodian ddoc ~s",
- couch_log:notice(LogMsg, [?MODULE, ?CUSTODIAN_ID]),
- ok
- catch
- conflict ->
- {ok, NewDb} = couch_db:reopen(Db),
- ensure_custodian_ddoc_is_deleted(NewDb)
- end
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-get_range_counts_test_() ->
- [
- ?_assertEqual(Res, get_range_counts(N, Shards, AllShards))
- || {N, Shards, AllShards, Res} <- [
- % No shards are present. There is a full range shard that would
- % fit. Report that range as missing.
- {0, [], [full()], [{{0, ?RING_END}, 0}]},
-
- % Can't complete the ring. But would complete it if it had the
- % {2, ?RING_END} interval available.
- {0, [sh(0, 1)], [sh(0, 1), sh(2, ?RING_END)], [{{2, ?RING_END}, 0}]},
-
- % Can complete the ring only 1 time. Report that range as the
- % one available with a count of 1
- {1, [full()], [full(), full()], [{{0, ?RING_END}, 1}]},
-
- % Can complete the ring only 1 time with a full range shard, but
- % there is also {2, ?RING_END} that would complete another
- % ring as well if {0, 1} were present.
- {1, [sh(2, ?RING_END), full()], [sh(0, 1), sh(2, ?RING_END), full()], [
- {{0, 1}, 0},
- {{0, ?RING_END}, 1},
- {{2, ?RING_END}, 1}
- ]},
-
- % Can complete the ring 2 times [{0, 2}, {3, ?RING_END}] and full(),
- % and there is a remnant of a {5, 9} range that would complete the ring
- % as well if {0, 4} and {10, ?RING_END} were present. So report
- % those missing ranges with a count of 0.
- {2, [sh(0, 2), sh(3, ?RING_END), sh(5, 9), full()],
- [
- sh(0, 2),
- sh(
- 3,
- ?RING_END
- ),
- full(),
- sh(0, 4),
- sh(5, 9),
- sh(10, ?RING_END)
- ],
- [
- {{0, 2}, 1},
- {{0, 4}, 0},
- {{0, ?RING_END}, 1},
- {{3, ?RING_END}, 1},
- {{5, 9}, 1},
- {{10, ?RING_END}, 0}
- ]}
- ]
- ].
-
-full() ->
- #shard{range = [0, ?RING_END]}.
-
-sh(B, E) ->
- #shard{range = [B, E]}.
-
--endif.
diff --git a/src/ddoc_cache/LICENSE b/src/ddoc_cache/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/ddoc_cache/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/ddoc_cache/README.md b/src/ddoc_cache/README.md
deleted file mode 100644
index 81d600b12..000000000
--- a/src/ddoc_cache/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Design Doc Cache
-================
-
-Pretty much covers it.
diff --git a/src/ddoc_cache/priv/stats_descriptions.cfg b/src/ddoc_cache/priv/stats_descriptions.cfg
deleted file mode 100644
index f769a979f..000000000
--- a/src/ddoc_cache/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-{[ddoc_cache, hit], [
- {type, counter},
- {desc, <<"number of design doc cache hits">>}
-]}.
-{[ddoc_cache, miss], [
- {type, counter},
- {desc, <<"number of design doc cache misses">>}
-]}.
-{[ddoc_cache, recovery], [
- {type, counter},
- {desc, <<"number of design doc cache recoveries">>}
-]}.
diff --git a/src/ddoc_cache/src/ddoc_cache.app.src b/src/ddoc_cache/src/ddoc_cache.app.src
deleted file mode 100644
index 0132fe108..000000000
--- a/src/ddoc_cache/src/ddoc_cache.app.src
+++ /dev/null
@@ -1,31 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, ddoc_cache, [
- {description, "Design Document Cache"},
- {vsn, git},
- {registered, [
- ddoc_cache_lru
- ]},
- {applications, [
- kernel,
- stdlib,
- crypto,
- config,
- couch_event,
- couch_log,
- couch_stats,
- mem3,
- fabric
- ]},
- {mod, {ddoc_cache_app, []}}
-]}.
diff --git a/src/ddoc_cache/src/ddoc_cache.erl b/src/ddoc_cache/src/ddoc_cache.erl
deleted file mode 100644
index 747abc753..000000000
--- a/src/ddoc_cache/src/ddoc_cache.erl
+++ /dev/null
@@ -1,53 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache).
-
--export([
- open_doc/2,
- open_doc/3,
- open_validation_funs/1,
- open_custom/2,
- refresh/2,
-
- %% deprecated
- open/2
-]).
-
-open_doc(DbName, DocId) ->
- Key = {ddoc_cache_entry_ddocid, {DbName, DocId}},
- ddoc_cache_lru:open(Key).
-
-open_doc(DbName, DocId, RevId) ->
- Key = {ddoc_cache_entry_ddocid_rev, {DbName, DocId, RevId}},
- ddoc_cache_lru:open(Key).
-
-open_validation_funs(DbName) ->
- Key = {ddoc_cache_entry_validation_funs, DbName},
- ddoc_cache_lru:open(Key).
-
-open_custom(DbName, Mod) ->
- Key = {ddoc_cache_entry_custom, {DbName, Mod}},
- ddoc_cache_lru:open(Key).
-
-refresh(ShardDbName, DDocIds) when is_list(DDocIds) ->
- DbName = mem3:dbname(ShardDbName),
- ddoc_cache_lru:refresh(DbName, DDocIds).
-
-open(DbName, validation_funs) ->
- open_validation_funs(DbName);
-open(DbName, Module) when is_atom(Module) ->
- open_custom(DbName, Module);
-open(DbName, <<"_design/", _/binary>> = DDocId) when is_binary(DbName) ->
- open_doc(DbName, DDocId);
-open(DbName, DDocId) when is_binary(DDocId) ->
- open_doc(DbName, <<"_design/", DDocId/binary>>).
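% A minimal usage sketch of the API above; the database name, design
% document id, and shard name are hypothetical. open_doc/2 caches the
% winning revision, open_doc/3 caches a specific revision under its own
% key, and refresh/2 takes a shard database name and asks the LRU to
% refresh entries for the listed design documents:
%
%   {ok, #doc{revs = {Pos, [RevId | _]}}} =
%       ddoc_cache:open_doc(<<"db">>, <<"_design/app">>),
%   {ok, _} = ddoc_cache:open_doc(<<"db">>, <<"_design/app">>, {Pos, RevId}),
%   ok = ddoc_cache:refresh(<<"shards/00000000-1fffffff/db.1638000000">>,
%       [<<"_design/app">>]).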
diff --git a/src/ddoc_cache/src/ddoc_cache.hrl b/src/ddoc_cache/src/ddoc_cache.hrl
deleted file mode 100644
index dba0d37b2..000000000
--- a/src/ddoc_cache/src/ddoc_cache.hrl
+++ /dev/null
@@ -1,40 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--type dbname() :: iodata().
--type docid() :: iodata().
--type doc_hash() :: <<_:128>>.
--type revision() :: {pos_integer(), doc_hash()}.
-
--define(CACHE, ddoc_cache_entries).
--define(LRU, ddoc_cache_lru).
--define(REFRESH_TIMEOUT, 67000).
--define(SHUTDOWN_TIMEOUT, 1000).
-
--record(entry, {
- key,
- val,
- pid
-}).
-
--record(opener, {
- key,
- pid,
- clients
-}).
-
-
--ifdef(TEST).
--define(EVENT(Name, Arg), ddoc_cache_ev:event(Name, Arg)).
--else.
--define(EVENT(Name, Arg), ignore).
--endif.
diff --git a/src/ddoc_cache/src/ddoc_cache_app.erl b/src/ddoc_cache/src/ddoc_cache_app.erl
deleted file mode 100644
index 3f2f02d5d..000000000
--- a/src/ddoc_cache/src/ddoc_cache_app.erl
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_app).
--behaviour(application).
-
--export([start/2, stop/1]).
-
-start(_StartType, _StartArgs) ->
- ddoc_cache_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
deleted file mode 100644
index 5a1711dd8..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ /dev/null
@@ -1,327 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry).
--behaviour(gen_server).
--vsn(1).
-
-% for the stacktrace macro only so far
--include_lib("couch/include/couch_db.hrl").
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2,
-
- start_link/2,
- shutdown/1,
- open/2,
- accessed/1,
- refresh/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--export([
- do_open/1
-]).
-
--include("ddoc_cache.hrl").
-
--ifndef(TEST).
--define(ENTRY_SHUTDOWN_TIMEOUT, 5000).
--else.
--define(ENTRY_SHUTDOWN_TIMEOUT, 500).
--endif.
-
--record(st, {
- key,
- val,
- opener,
- waiters,
- ts,
- accessed
-}).
-
-dbname({Mod, Arg}) ->
- Mod:dbname(Arg).
-
-ddocid({Mod, Arg}) ->
- Mod:ddocid(Arg).
-
-recover({Mod, Arg}) ->
- Mod:recover(Arg).
-
-insert({Mod, Arg}, Value) ->
- Mod:insert(Arg, Value).
-
-start_link(Key, Default) ->
- Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
- {ok, Pid}.
-
-shutdown(Pid) ->
- Ref = erlang:monitor(process, Pid),
- ok = gen_server:cast(Pid, shutdown),
- receive
- {'DOWN', Ref, process, Pid, normal} ->
- ok;
- {'DOWN', Ref, process, Pid, Reason} ->
- erlang:exit(Reason)
- after ?ENTRY_SHUTDOWN_TIMEOUT ->
- erlang:demonitor(Ref, [flush]),
- erlang:exit({timeout, {entry_shutdown, Pid}})
- end.
-
-open(Pid, Key) ->
- try
- Resp = gen_server:call(Pid, open),
- case Resp of
- {open_ok, Val} ->
- Val;
- {open_error, {T, R, S}} ->
- erlang:raise(T, R, S)
- end
- catch
- error:database_does_not_exist ->
- erlang:error(database_does_not_exist);
- exit:_ ->
- % It's possible that this process was evicted just
- % before we tried talking to it. Just fall back
- % to a standard recovery.
- recover(Key)
- end.
-
-accessed(Pid) ->
- gen_server:cast(Pid, accessed).
-
-refresh(Pid) ->
- gen_server:cast(Pid, force_refresh).
-
-init({Key, undefined}) ->
- true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
- St = #st{
- key = Key,
- opener = spawn_opener(Key),
- waiters = [],
- accessed = 1
- },
- ?EVENT(started, Key),
- gen_server:enter_loop(?MODULE, [], St);
-init({Key, Wrapped}) ->
- Default = ddoc_cache_value:unwrap(Wrapped),
- Updates = [
- {#entry.val, Default},
- {#entry.pid, self()}
- ],
- NewTs = os:timestamp(),
- true = ets:update_element(?CACHE, Key, Updates),
- true = ets:insert(?LRU, {{NewTs, Key, self()}}),
- St = #st{
- key = Key,
- val = {open_ok, {ok, Default}},
- opener = start_timer(),
- waiters = [],
- ts = NewTs,
- accessed = 1
- },
- ?EVENT(default_started, Key),
- gen_server:enter_loop(?MODULE, [], St, hibernate).
-
-terminate(_Reason, St) ->
- #st{
- key = Key,
- opener = Pid,
- ts = Ts
- } = St,
- % We may have already deleted our cache entry
- % during shutdown
- Pattern = #entry{key = Key, pid = self(), _ = '_'},
- CacheMSpec = [{Pattern, [], [true]}],
- true = ets:select_delete(?CACHE, CacheMSpec) < 2,
- % We may have already deleted our LRU entry
- % during shutdown
- if
- Ts == undefined ->
- ok;
- true ->
- LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
- true = ets:select_delete(?LRU, LruMSpec) < 2
- end,
- % Blow away any current opener if it exists
- if
- not is_pid(Pid) -> ok;
- true -> catch exit(Pid, kill)
- end,
- ok.
-
-handle_call(open, From, #st{opener = Pid} = St) when is_pid(Pid) ->
- NewSt = St#st{
- waiters = [From | St#st.waiters]
- },
- {noreply, NewSt};
-handle_call(open, _From, St) ->
- {reply, St#st.val, St};
-handle_call(Msg, _From, St) ->
- {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
-handle_cast(accessed, St) ->
- ?EVENT(accessed, St#st.key),
- drain_accessed(),
- NewSt = St#st{
- accessed = St#st.accessed + 1
- },
- {noreply, update_lru(NewSt)};
-handle_cast(force_refresh, St) ->
- % If we had frequent design document updates,
- % they could end up racing accessed events and
- % end up prematurely evicting this entry from
- % the cache. To prevent this, we just make sure that
- % accessed is set to at least 1 before we
- % execute a refresh.
- NewSt =
- if
- St#st.accessed > 0 -> St;
- true -> St#st{accessed = 1}
- end,
- % We remove the cache entry value so that any
- % new client comes to us for the refreshed
- % value.
- true = ets:update_element(?CACHE, St#st.key, {#entry.val, undefined}),
- handle_cast(refresh, NewSt);
-handle_cast(refresh, #st{accessed = 0} = St) ->
- {stop, normal, St};
-handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
- #st{
- key = Key
- } = St,
- erlang:cancel_timer(Ref),
- NewSt = St#st{
- opener = spawn_opener(Key),
- accessed = 0
- },
- {noreply, NewSt};
-handle_cast(refresh, #st{opener = Pid} = St) when is_pid(Pid) ->
- catch exit(Pid, kill),
- receive
- {'DOWN', _, _, Pid, _} -> ok
- end,
- NewSt = St#st{
- opener = spawn_opener(St#st.key),
- accessed = 0
- },
- {noreply, NewSt};
-handle_cast(shutdown, St) ->
- remove_from_cache(St),
- {stop, normal, St};
-handle_cast(Msg, St) ->
- {stop, {bad_cast, Msg}, St}.
-
-handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
- case Resp of
- {open_ok, Key, {ok, Val}} ->
- update_cache(St, Val),
- NewSt1 = St#st{
- val = {open_ok, {ok, Val}},
- opener = start_timer(),
- waiters = []
- },
- NewSt2 = update_lru(NewSt1),
- respond(St#st.waiters, {open_ok, {ok, Val}}),
- {noreply, NewSt2};
- {Status, Key, Other} ->
- NewSt = St#st{
- val = {Status, Other},
- opener = undefined,
- waiters = undefined
- },
- remove_from_cache(NewSt),
- respond(St#st.waiters, {Status, Other}),
- {stop, normal, NewSt}
- end;
-handle_info(Msg, St) ->
- {stop, {bad_info, Msg}, St}.
-
-code_change(_, St, _) ->
- {ok, St}.
-
-spawn_opener(Key) ->
- {Pid, _} = erlang:spawn_monitor(?MODULE, do_open, [Key]),
- Pid.
-
-start_timer() ->
- TimeOut = config:get_integer(
- "ddoc_cache", "refresh_timeout", ?REFRESH_TIMEOUT
- ),
- erlang:send_after(TimeOut, self(), {'$gen_cast', refresh}).
-
-do_open(Key) ->
- try recover(Key) of
- Resp ->
- erlang:exit({open_ok, Key, Resp})
- catch ?STACKTRACE(T, R, S)
- erlang:exit({open_error, Key, {T, R, S}})
- end.
-
-update_lru(#st{key = Key, ts = Ts} = St) ->
- remove_from_lru(Ts, Key),
- NewTs = os:timestamp(),
- true = ets:insert(?LRU, {{NewTs, Key, self()}}),
- St#st{ts = NewTs}.
-
-update_cache(#st{val = undefined} = St, Val) ->
- true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
- ?EVENT(inserted, St#st.key);
-update_cache(#st{val = V1} = _St, V2) when {open_ok, {ok, V2}} == V1 ->
- ?EVENT(update_noop, _St#st.key);
-update_cache(St, Val) ->
- true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
- ?EVENT(updated, {St#st.key, Val}).
-
-remove_from_cache(St) ->
- #st{
- key = Key,
- ts = Ts
- } = St,
- Pattern = #entry{key = Key, pid = self(), _ = '_'},
- CacheMSpec = [{Pattern, [], [true]}],
- 1 = ets:select_delete(?CACHE, CacheMSpec),
- remove_from_lru(Ts, Key),
- ?EVENT(removed, St#st.key),
- ok.
-
-remove_from_lru(Ts, Key) ->
- if
- Ts == undefined ->
- ok;
- true ->
- LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
- 1 = ets:select_delete(?LRU, LruMSpec)
- end.
-
-drain_accessed() ->
- receive
- {'$gen_cast', accessed} ->
- drain_accessed()
- after 0 ->
- ok
- end.
-
-respond(Waiters, Resp) ->
- [gen_server:reply(W, Resp) || W <- Waiters].
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_custom.erl b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
deleted file mode 100644
index 8747b46bc..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
+++ /dev/null
@@ -1,32 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_custom).
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
-dbname({DbName, _}) ->
- DbName.
-
-ddocid(_) ->
- no_ddocid.
-
-recover({DbName, Mod}) ->
- Mod:recover(DbName).
-
-insert(_, _) ->
- ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
deleted file mode 100644
index cf40725e4..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
+++ /dev/null
@@ -1,39 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_ddocid).
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
-dbname({DbName, _}) ->
- DbName.
-
-ddocid({_, DDocId}) ->
- DDocId.
-
-recover({DbName, DDocId}) ->
- fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
-
-insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) ->
- {Depth, [RevId | _]} = Revs,
- Rev = {Depth, RevId},
- Key = {ddoc_cache_entry_ddocid_rev, {DbName, DDocId, Rev}},
- spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
-insert(_, _) ->
- ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
deleted file mode 100644
index 5126f5210..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
+++ /dev/null
@@ -1,39 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_ddocid_rev).
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
-dbname({DbName, _, _}) ->
- DbName.
-
-ddocid({_, DDocId, _}) ->
- DDocId.
-
-recover({DbName, DDocId, Rev}) ->
- Opts = [ejson_body, ?ADMIN_CTX],
- {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts),
- Resp.
-
-insert({DbName, DDocId, _Rev}, {ok, #doc{} = DDoc}) ->
- Key = {ddoc_cache_entry_ddocid, {DbName, DDocId}},
- spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
-insert(_, _) ->
- ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
deleted file mode 100644
index bcd122252..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
+++ /dev/null
@@ -1,42 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_validation_funs).
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
-dbname(DbName) ->
- DbName.
-
-ddocid(_) ->
- no_ddocid.
-
-recover(DbName) ->
- {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
- Funs = lists:flatmap(
- fun(DDoc) ->
- case couch_doc:get_validate_doc_fun(DDoc) of
- nil -> [];
- Fun -> [Fun]
- end
- end,
- DDocs
- ),
- {ok, Funs}.
-
-insert(_, _) ->
- ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
deleted file mode 100644
index 7381e6c28..000000000
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ /dev/null
@@ -1,337 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_lru).
--behaviour(gen_server).
--vsn(1).
-
--export([
- start_link/0,
- open/1,
- insert/2,
- refresh/2
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--export([
- handle_db_event/3
-]).
-
--include("ddoc_cache.hrl").
-
--record(st, {
- % pid -> key
- pids,
- % dbname -> docid -> key -> pid
- dbs,
- evictor
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-open(Key) ->
- try ets:lookup(?CACHE, Key) of
- [] ->
- lru_start(Key, true);
- [#entry{pid = undefined}] ->
- lru_start(Key, false);
- [#entry{val = undefined, pid = Pid}] ->
- couch_stats:increment_counter([ddoc_cache, miss]),
- ddoc_cache_entry:open(Pid, Key);
- [#entry{val = Val, pid = Pid}] ->
- couch_stats:increment_counter([ddoc_cache, hit]),
- ddoc_cache_entry:accessed(Pid),
- {ok, Val}
- catch
- _:_ ->
- couch_stats:increment_counter([ddoc_cache, recovery]),
- ddoc_cache_entry:recover(Key)
- end.
-
-insert(Key, Value) ->
- case ets:lookup(?CACHE, Key) of
- [] ->
- Wrapped = ddoc_cache_value:wrap(Value),
- gen_server:call(?MODULE, {start, Key, Wrapped}, infinity);
- [#entry{}] ->
- ok
- end.
-
-refresh(DbName, DDocIds) ->
- gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
-
-init(_) ->
- couch_util:set_mqd_off_heap(?MODULE),
- process_flag(trap_exit, true),
- BaseOpts = [public, named_table],
- CacheOpts =
- [
- set,
- {read_concurrency, true},
- {keypos, #entry.key}
- ] ++ BaseOpts,
- ets:new(?CACHE, CacheOpts),
- ets:new(?LRU, [ordered_set, {write_concurrency, true}] ++ BaseOpts),
- {ok, Pids} = khash:new(),
- {ok, Dbs} = khash:new(),
- {ok, Evictor} = couch_event:link_listener(
- ?MODULE, handle_db_event, nil, [all_dbs]
- ),
- ?EVENT(lru_init, nil),
- {ok, #st{
- pids = Pids,
- dbs = Dbs,
- evictor = Evictor
- }}.
-
-terminate(_Reason, St) ->
- case is_pid(St#st.evictor) of
- true -> exit(St#st.evictor, kill);
- false -> ok
- end,
- ok.
-
-handle_call({start, Key, Default}, _From, St) ->
- #st{
- pids = Pids,
- dbs = Dbs
- } = St,
- case ets:lookup(?CACHE, Key) of
- [] ->
- MaxSize = config:get_integer("ddoc_cache", "max_size", 104857600),
- case trim(St, max(0, MaxSize)) of
- ok ->
- true = ets:insert_new(?CACHE, #entry{key = Key}),
- {ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
- true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
- ok = khash:put(Pids, Pid, Key),
- store_key(Dbs, Key, Pid),
- {reply, {ok, Pid}, St};
- full ->
- ?EVENT(full, Key),
- {reply, full, St}
- end;
- [#entry{pid = Pid}] ->
- {reply, {ok, Pid}, St}
- end;
-handle_call(Msg, _From, St) ->
- {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-handle_cast({evict, DbName}, St) ->
- gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
- {noreply, St};
-handle_cast({refresh, DbName, DDocIds}, St) ->
- gen_server:abcast(mem3:nodes(), ?MODULE, {do_refresh, DbName, DDocIds}),
- {noreply, St};
-handle_cast({do_evict, DbName}, St) ->
- #st{
- dbs = Dbs
- } = St,
- ToRem =
- case khash:lookup(Dbs, DbName) of
- {value, DDocIds} ->
- AccOut = khash:fold(
- DDocIds,
- fun(_, Keys, Acc1) ->
- khash:to_list(Keys) ++ Acc1
- end,
- []
- ),
- ?EVENT(evicted, DbName),
- AccOut;
- not_found ->
- ?EVENT(evict_noop, DbName),
- []
- end,
- lists:foreach(
- fun({Key, Pid}) ->
- remove_entry(St, Key, Pid)
- end,
- ToRem
- ),
- khash:del(Dbs, DbName),
- {noreply, St};
-handle_cast({do_refresh, DbName, DDocIdList}, St) ->
- #st{
- dbs = Dbs
- } = St,
- % We prepend no_ddocid to the DDocIdList below
- % so that we refresh all custom and validation
- % function entries which load data from all
- % design documents.
- case khash:lookup(Dbs, DbName) of
- {value, DDocIds} ->
- lists:foreach(
- fun(DDocId) ->
- case khash:lookup(DDocIds, DDocId) of
- {value, Keys} ->
- khash:fold(
- Keys,
- fun(_, Pid, _) ->
- ddoc_cache_entry:refresh(Pid)
- end,
- nil
- );
- not_found ->
- ok
- end
- end,
- [no_ddocid | DDocIdList]
- );
- not_found ->
- ok
- end,
- {noreply, St};
-handle_cast(Msg, St) ->
- {stop, {invalid_cast, Msg}, St}.
-
-handle_info({'EXIT', Pid, Reason}, #st{evictor = Pid} = St) ->
- {stop, Reason, St};
-handle_info({'EXIT', Pid, normal}, St) ->
- % This clause handles when an entry starts
- % up but encounters an error or uncacheable
- % response from its recover call.
- #st{
- pids = Pids
- } = St,
- {value, Key} = khash:lookup(Pids, Pid),
- khash:del(Pids, Pid),
- remove_key(St, Key),
- {noreply, St};
-handle_info(Msg, St) ->
- {stop, {invalid_info, Msg}, St}.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-handle_db_event(ShardDbName, created, St) ->
- gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
- {ok, St};
-handle_db_event(ShardDbName, deleted, St) ->
- gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
- {ok, St};
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
-
-lru_start(Key, DoInsert) ->
- case gen_server:call(?MODULE, {start, Key, undefined}, infinity) of
- {ok, Pid} ->
- couch_stats:increment_counter([ddoc_cache, miss]),
- Resp = ddoc_cache_entry:open(Pid, Key),
- if
- not DoInsert -> ok;
- true -> ddoc_cache_entry:insert(Key, Resp)
- end,
- Resp;
- full ->
- couch_stats:increment_counter([ddoc_cache, recovery]),
- ddoc_cache_entry:recover(Key)
- end.
-
-trim(_, 0) ->
- full;
-trim(St, MaxSize) ->
- CurSize = ets:info(?CACHE, memory) * erlang:system_info(wordsize),
- if
- CurSize =< MaxSize ->
- ok;
- true ->
- case ets:first(?LRU) of
- {_Ts, Key, Pid} ->
- remove_entry(St, Key, Pid),
- trim(St, MaxSize);
- '$end_of_table' ->
- full
- end
- end.
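% A rough sizing note for trim/2 above: the cache's memory use is measured
% as ets:info(?CACHE, memory) words times the VM word size, so with the
% default "max_size" of 104857600 bytes on a 64-bit VM the table may grow
% to roughly 104857600 div 8 = 13107200 words before the oldest LRU
% entries start being evicted.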
-
-remove_entry(St, Key, Pid) ->
- #st{
- pids = Pids
- } = St,
- unlink_and_flush(Pid),
- ddoc_cache_entry:shutdown(Pid),
- khash:del(Pids, Pid),
- remove_key(St, Key).
-
-store_key(Dbs, Key, Pid) ->
- DbName = ddoc_cache_entry:dbname(Key),
- DDocId = ddoc_cache_entry:ddocid(Key),
- case khash:lookup(Dbs, DbName) of
- {value, DDocIds} ->
- case khash:lookup(DDocIds, DDocId) of
- {value, Keys} ->
- khash:put(Keys, Key, Pid);
- not_found ->
- {ok, Keys} = khash:from_list([{Key, Pid}]),
- khash:put(DDocIds, DDocId, Keys)
- end;
- not_found ->
- {ok, Keys} = khash:from_list([{Key, Pid}]),
- {ok, DDocIds} = khash:from_list([{DDocId, Keys}]),
- khash:put(Dbs, DbName, DDocIds)
- end.
-
-remove_key(St, Key) ->
- #st{
- dbs = Dbs
- } = St,
- DbName = ddoc_cache_entry:dbname(Key),
- DDocId = ddoc_cache_entry:ddocid(Key),
-
- % For non-existent ddocs, a new ddoc_cache_entry is spawned for
- % each call to ddoc_cache:open. Multiple calls to open the same
- % non-existent ddoc will create multiple cache entries with the
- % same Key but different PIDs. This can result in the following
- % khash lookups returning not_found, so handle those corner cases.
- case khash:lookup(Dbs, DbName) of
- {value, DDocIds} ->
- case khash:lookup(DDocIds, DDocId) of
- {value, Keys} ->
- ok = khash:del(Keys, Key),
- case khash:size(Keys) of
- 0 -> khash:del(DDocIds, DDocId);
- _ -> ok
- end,
- case khash:size(DDocIds) of
- 0 -> khash:del(Dbs, DbName);
- _ -> ok
- end;
- not_found ->
- ok
- end;
- not_found ->
- ok
- end.
-
-unlink_and_flush(Pid) ->
- erlang:unlink(Pid),
- % It's possible that the entry process has already exited before
- % we unlink it, so we have to flush out a possible 'EXIT'
- % message sitting in our message queue. Notice that we're
- % maintaining the assertion that all entry processes only
- % ever exit normally.
- receive
- {'EXIT', Pid, normal} ->
- ok
- after 0 ->
- ok
- end.
diff --git a/src/ddoc_cache/src/ddoc_cache_sup.erl b/src/ddoc_cache/src/ddoc_cache_sup.erl
deleted file mode 100644
index e94b542bd..000000000
--- a/src/ddoc_cache/src/ddoc_cache_sup.erl
+++ /dev/null
@@ -1,35 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_sup).
--behaviour(supervisor).
-
--export([
- start_link/0,
- init/1
-]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
- Children = [
- {
- ddoc_cache_lru,
- {ddoc_cache_lru, start_link, []},
- permanent,
- 5000,
- worker,
- [ddoc_cache_lru]
- }
- ],
- {ok, {{one_for_one, 25, 1}, Children}}.
diff --git a/src/ddoc_cache/src/ddoc_cache_value.erl b/src/ddoc_cache/src/ddoc_cache_value.erl
deleted file mode 100644
index 59585ee58..000000000
--- a/src/ddoc_cache/src/ddoc_cache_value.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_value).
-
--export([
- wrap/1,
- unwrap/1
-]).
-
-wrap(Value) ->
- {?MODULE, term_to_binary(Value)}.
-
-unwrap({?MODULE, Bin}) when is_binary(Bin) ->
- binary_to_term(Bin).
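% A small round-trip sketch of the wrapper above. Values are carried as
% term_to_binary/1 blobs, presumably so large design documents are not
% copied term-by-term when passed through the LRU gen_server; the module
% tag makes unwrapping anything else fail fast:
%
%   Wrapped = ddoc_cache_value:wrap({ok, <<"big value">>}),
%   {ddoc_cache_value, Bin} = Wrapped,
%   true = is_binary(Bin),
%   {ok, <<"big value">>} = ddoc_cache_value:unwrap(Wrapped).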
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
deleted file mode 100644
index 54c8c585b..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_basic_test).
-
--export([
- recover/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-recover(DbName) ->
- {ok, {DbName, totes_custom}}.
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-check_basic_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"cache_ddoc", fun cache_ddoc/1},
- {"cache_ddoc_rev", fun cache_ddoc_rev/1},
- {"cache_vdu", fun cache_vdu/1},
- {"cache_custom", fun cache_custom/1},
- {"cache_ddoc_refresher_unchanged", fun cache_ddoc_refresher_unchanged/1},
- {"dont_cache_not_found", fun dont_cache_not_found/1},
- {"deprecated_api_works", fun deprecated_api_works/1}
- ])
- }.
-
-check_no_vdu_test_() ->
- {
- setup,
- fun() -> ddoc_cache_tutil:start_couch([{write_ddocs, false}]) end,
- fun ddoc_cache_tutil:stop_couch/1,
- ddoc_cache_tutil:with([
- {"cache_no_vdu_no_ddoc", fun cache_no_vdu_no_ddoc/1},
- {"cache_no_vdu_empty_ddoc", fun cache_no_vdu_empty_ddoc/1}
- ])
- }.
-
-cache_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- ?assertEqual(0, ets:info(?CACHE, size)),
- Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
- ?assertEqual(2, ets:info(?CACHE, size)),
- Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertEqual(Resp1, Resp2),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-cache_ddoc_rev({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- ?assertEqual(0, ets:info(?CACHE, size)),
- Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
- ?assertEqual(2, ets:info(?CACHE, size)),
- Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- ?assertEqual(Resp1, Resp2),
- ?assertEqual(2, ets:info(?CACHE, size)),
-
- % Assert that the non-rev cache entry is separate
- Resp3 = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp3),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-cache_vdu({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- ?assertEqual(0, ets:info(?CACHE, size)),
- Resp1 = ddoc_cache:open_validation_funs(DbName),
- ?assertMatch({ok, [_]}, Resp1),
- ?assertEqual(1, ets:info(?CACHE, size)),
- Resp2 = ddoc_cache:open_validation_funs(DbName),
- ?assertEqual(Resp1, Resp2),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-cache_custom({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- ?assertEqual(0, ets:info(?CACHE, size)),
- Resp1 = ddoc_cache:open_custom(DbName, ?MODULE),
- ?assertMatch({ok, {DbName, totes_custom}}, Resp1),
- ?assertEqual(1, ets:info(?CACHE, size)),
- Resp2 = ddoc_cache:open_custom(DbName, ?MODULE),
- ?assertEqual(Resp1, Resp2),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-cache_ddoc_refresher_unchanged({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ddoc_cache:open_doc(DbName, ?FOOBAR),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
- Tab1 = [_, _] = lists:sort(ets:tab2list(?CACHE)),
- ddoc_cache:open_doc(DbName, ?FOOBAR),
- meck:wait(ddoc_cache_ev, event, [accessed, '_'], 1000),
- Tab2 = lists:sort(ets:tab2list(?CACHE)),
- ?assertEqual(Tab2, Tab1).
-
-dont_cache_not_found({DbName, _}) ->
- DDocId = <<"_design/not_found">>,
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_doc(DbName, DDocId),
- ?assertEqual({not_found, missing}, Resp),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ?assertEqual(0, ets:info(?LRU, size)).
-
-deprecated_api_works({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- {ok, _} = ddoc_cache:open(DbName, ?FOOBAR),
- {ok, _} = ddoc_cache:open(DbName, <<"foobar">>),
- {ok, _} = ddoc_cache:open(DbName, ?MODULE),
- {ok, _} = ddoc_cache:open(DbName, validation_funs).
-
-cache_no_vdu_no_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_validation_funs(DbName),
- ?assertEqual({ok, []}, Resp),
- ?assertEqual(1, ets:info(?CACHE, size)),
- ?assertEqual(1, ets:info(?LRU, size)).
-
-cache_no_vdu_empty_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- DDoc = #doc{
- id = <<"_design/no_vdu">>,
- body = {[]}
- },
- {ok, _} = fabric:update_docs(DbName, [DDoc], [?ADMIN_CTX]),
- Resp = ddoc_cache:open_validation_funs(DbName),
- ?assertEqual({ok, []}, Resp),
- ?assertEqual(1, ets:info(?CACHE, size)),
- ?assertEqual(1, ets:info(?LRU, size)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
deleted file mode 100644
index d2d0559c6..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
+++ /dev/null
@@ -1,71 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_coverage_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-coverage_test_() ->
- {
- setup,
- fun ddoc_cache_tutil:start_couch/0,
- fun ddoc_cache_tutil:stop_couch/1,
- [
- fun restart_lru/0,
- fun stop_on_evictor_death/0
- ]
- }.
-
-restart_lru() ->
- send_bad_messages(ddoc_cache_lru),
- ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})),
- ?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])).
-
-stop_on_evictor_death() ->
- meck:new(ddoc_cache_ev, [passthrough]),
- try
- Lru = whereis(ddoc_cache_lru),
- State = sys:get_state(Lru),
- Evictor = element(4, State),
- Ref = erlang:monitor(process, Lru),
- exit(Evictor, shutdown),
- receive
- {'DOWN', Ref, _, _, Reason} ->
- ?assertEqual(shutdown, Reason)
- end,
- meck:wait(ddoc_cache_ev, event, [lru_init, '_'], 1000),
- ?assert(whereis(ddoc_cache_lru) /= Lru)
- after
- meck:unload()
- end.
-
-send_bad_messages(Name) ->
- wait_for_restart(Name, fun() ->
- ?assertEqual({invalid_call, foo}, gen_server:call(Name, foo))
- end),
- wait_for_restart(Name, fun() ->
- gen_server:cast(Name, foo)
- end),
- wait_for_restart(Name, fun() ->
- whereis(Name) ! foo
- end).
-
-wait_for_restart(Server, Fun) ->
- Ref = erlang:monitor(process, whereis(Server)),
- Fun(),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- end,
- ?assert(is_pid(test_util:wait_process(Server))).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
deleted file mode 100644
index d6538e4a3..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_disabled_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- config:set("ddoc_cache", "max_size", "0", false),
- Ctx.
-
-check_disabled_test_() ->
- {
- setup,
- fun start_couch/0,
- fun ddoc_cache_tutil:stop_couch/1,
- ddoc_cache_tutil:with([
- {"resp_ok", fun resp_ok/1},
- {"resp_not_found", fun resp_not_found/1},
- {"check_effectively_disabled", fun check_effectively_disabled/1}
- ])
- }.
-
-resp_ok({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ?assertEqual(0, ets:info(?LRU, size)).
-
-resp_not_found({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_doc(DbName, <<"_design/not_found">>),
- ?assertEqual({not_found, missing}, Resp),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ?assertEqual(0, ets:info(?LRU, size)).
-
-check_effectively_disabled({DbName, _}) ->
- config:set("ddoc_cache", "max_size", "1", false),
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ?assertEqual(0, ets:info(?LRU, size)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
deleted file mode 100644
index fdba0f030..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
+++ /dev/null
@@ -1,156 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_test).
-
--export([
- recover/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-recover(<<"foo">>) ->
- timer:sleep(30000);
-recover(DbName) ->
- {ok, {DbName, such_custom}}.
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-check_entry_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"cancel_and_replace_opener", fun cancel_and_replace_opener/1},
- {"condenses_access_messages", fun condenses_access_messages/1},
- {"kill_opener_on_terminate", fun kill_opener_on_terminate/1},
- {"evict_when_not_accessed", fun evict_when_not_accessed/1},
- {"open_dead_entry", fun open_dead_entry/1},
- {"handles_bad_messages", fun handles_bad_messages/1},
- {"handles_code_change", fun handles_code_change/1}
- ])
- }.
-
-cancel_and_replace_opener(_) ->
- Key = {ddoc_cache_entry_custom, {<<"foo">>, ?MODULE}},
- true = ets:insert_new(?CACHE, #entry{key = Key}),
- {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
- Opener1 = element(4, sys:get_state(Entry)),
- Ref1 = erlang:monitor(process, Opener1),
- gen_server:cast(Entry, force_refresh),
- receive
- {'DOWN', Ref1, _, _, _} -> ok
- end,
- Opener2 = element(4, sys:get_state(Entry)),
- ?assert(Opener2 /= Opener1),
- ?assert(is_process_alive(Opener2)),
- % Clean up after ourselves
- unlink(Entry),
- ddoc_cache_entry:shutdown(Entry).
-
-condenses_access_messages({DbName, _}) ->
- meck:reset(ddoc_cache_ev),
- Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
- true = ets:insert(?CACHE, #entry{key = Key}),
- {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
- erlang:suspend_process(Entry),
- lists:foreach(
- fun(_) ->
- gen_server:cast(Entry, accessed)
- end,
- lists:seq(1, 100)
- ),
- erlang:resume_process(Entry),
- meck:wait(1, ddoc_cache_ev, event, [accessed, Key], 1000),
- ?assertError(
- timeout,
- meck:wait(2, ddoc_cache_ev, event, [accessed, Key], 100)
- ),
- unlink(Entry),
- ddoc_cache_entry:shutdown(Entry).
-
-kill_opener_on_terminate(_) ->
- Pid = spawn(fun() ->
- receive
- _ -> ok
- end
- end),
- ?assert(is_process_alive(Pid)),
- St = {st, key, val, Pid, waiters, ts, accessed},
- ?assertEqual(ok, ddoc_cache_entry:terminate(normal, St)),
- ?assert(not is_process_alive(Pid)).
-
-evict_when_not_accessed(_) ->
- meck:reset(ddoc_cache_ev),
- Key = {ddoc_cache_entry_custom, {<<"bar">>, ?MODULE}},
- true = ets:insert_new(?CACHE, #entry{key = Key}),
- {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
- Ref = erlang:monitor(process, Entry),
- AccessCount1 = element(7, sys:get_state(Entry)),
- ?assertEqual(1, AccessCount1),
- ok = gen_server:cast(Entry, refresh),
-
- meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
-
- AccessCount2 = element(7, sys:get_state(Entry)),
- ?assertEqual(0, AccessCount2),
- ok = gen_server:cast(Entry, refresh),
- receive
- {'DOWN', Ref, _, _, Reason} -> Reason
- end,
- ?assertEqual(normal, Reason),
- ?assertEqual(0, ets:info(?CACHE, size)).
-
-open_dead_entry({DbName, _}) ->
- Pid = spawn(fun() -> ok end),
- Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
- ?assertEqual(recover(DbName), ddoc_cache_entry:open(Pid, Key)).
-
-handles_bad_messages(_) ->
- CallExpect = {stop, {bad_call, foo}, {bad_call, foo}, baz},
- CastExpect = {stop, {bad_cast, foo}, bar},
- InfoExpect = {stop, {bad_info, foo}, bar},
- ?assertEqual(CallExpect, ddoc_cache_entry:handle_call(foo, bar, baz)),
- ?assertEqual(CastExpect, ddoc_cache_entry:handle_cast(foo, bar)),
- ?assertEqual(InfoExpect, ddoc_cache_entry:handle_info(foo, bar)).
-
-handles_code_change(_) ->
- CCExpect = {ok, bar},
- ?assertEqual(CCExpect, ddoc_cache_entry:code_change(foo, bar, baz)).
-
-handles_bad_shutdown_test_() ->
- {timeout, 10,
- ?_test(begin
- ErrorPid = spawn(fun() ->
- receive
- _ -> exit(bad_shutdown)
- end
- end),
- ?assertExit(bad_shutdown, ddoc_cache_entry:shutdown(ErrorPid)),
- NotDeadYetPid = spawn(fun() ->
- timer:sleep(infinity)
- end),
- ?assertExit(
- {timeout, {entry_shutdown, NotDeadYetPid}},
- ddoc_cache_entry:shutdown(NotDeadYetPid)
- )
- end)}.
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl b/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
deleted file mode 100644
index ded2469a4..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_ev).
-
--export([
- event/2
-]).
-
-event(Name, Arg) ->
- couch_log:error("~s :: ~s :: ~p", [?MODULE, Name, Arg]).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
deleted file mode 100644
index fffb3ed47..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_eviction_test).
-
--export([
- recover/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include_lib("mem3/include/mem3.hrl").
--include("ddoc_cache_test.hrl").
-
-recover(DbName) ->
- {ok, {DbName, totes_custom}}.
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-check_eviction_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"evict_all", fun evict_all/1},
- {"dont_evict_all_unrelated", fun dont_evict_all_unrelated/1}
- ])
- }.
-
-evict_all({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- #shard{name = ShardName} = hd(mem3:shards(DbName)),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- {ok, _} = ddoc_cache:open_validation_funs(DbName),
- {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
- ?assertEqual(4, ets:info(?CACHE, size)),
- {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
- meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
- meck:wait(4, ddoc_cache_ev, event, [removed, '_'], 1000),
- ?assertEqual(0, ets:info(?CACHE, size)).
-
-dont_evict_all_unrelated({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- {ok, _} = ddoc_cache:open_validation_funs(DbName),
- {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
- ?assertEqual(4, ets:info(?CACHE, size)),
- ShardName = <<"shards/00000000-ffffffff/test.1384769918">>,
- {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
- meck:wait(ddoc_cache_ev, event, [evict_noop, <<"test">>], 1000),
- ?assertEqual(4, ets:info(?CACHE, size)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
deleted file mode 100644
index d1dac869a..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
+++ /dev/null
@@ -1,245 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_lru_test).
-
--export([
- recover/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-recover(<<"pause", _/binary>>) ->
- receive
- go -> ok
- end,
- {ok, paused};
-recover(<<"big", _/binary>>) ->
- {ok, [couch_rand:uniform() || _ <- lists:seq(1, 8192)]};
-recover(DbName) ->
- {ok, DbName}.
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-check_not_started_test() ->
- % Starting couch, but not ddoc_cache
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- [
- fun(_) ->
- Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
- ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key))
- end
- ]
- }.
-
-check_lru_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"check_multi_start", fun check_multi_start/1},
- {"check_multi_open", fun check_multi_open/1},
- {"check_capped_size", fun check_capped_size/1},
- {"check_cache_refill", fun check_cache_refill/1},
- {"check_evict_and_exit", fun check_evict_and_exit/1}
- ])
- }.
-
-check_multi_start(_) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
- % These will all get sent through ddoc_cache_lru
- Clients = lists:map(
- fun(_) ->
- spawn_monitor(fun() ->
- ddoc_cache_lru:open(Key)
- end)
- end,
- lists:seq(1, 10)
- ),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
- lists:foreach(
- fun({Pid, _Ref}) ->
- ?assert(is_process_alive(Pid))
- end,
- Clients
- ),
- [#entry{pid = Pid}] = ets:tab2list(?CACHE),
- Opener = element(4, sys:get_state(Pid)),
- OpenerRef = erlang:monitor(process, Opener),
- ?assert(is_process_alive(Opener)),
- Opener ! go,
- receive
- {'DOWN', OpenerRef, _, _, _} -> ok
- end,
- lists:foreach(
- fun({_, Ref}) ->
- receive
- {'DOWN', Ref, _, _, normal} -> ok
- end
- end,
- Clients
- ).
-
-check_multi_open(_) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
- % We wait after the first client so that
- % the rest of the clients go directly to
- % ddoc_cache_entry bypassing ddoc_cache_lru
- Client1 = spawn_monitor(fun() ->
- ddoc_cache_lru:open(Key)
- end),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
- Clients =
- [Client1] ++
- lists:map(
- fun(_) ->
- spawn_monitor(fun() ->
- ddoc_cache_lru:open(Key)
- end)
- end,
- lists:seq(1, 9)
- ),
- lists:foreach(
- fun({Pid, _Ref}) ->
- ?assert(is_process_alive(Pid))
- end,
- Clients
- ),
- [#entry{pid = Pid}] = ets:tab2list(?CACHE),
- Opener = element(4, sys:get_state(Pid)),
- OpenerRef = erlang:monitor(process, Opener),
- ?assert(is_process_alive(Opener)),
- Opener ! go,
- receive
- {'DOWN', OpenerRef, _, _, _} -> ok
- end,
- lists:foreach(
- fun({_, Ref}) ->
- receive
- {'DOWN', Ref, _, _, normal} -> ok
- end
- end,
- Clients
- ).
-
-check_capped_size(_) ->
- % The extra factor of two in the size checks is
- % a fudge factor. We don't reject entries from
- % the cache if they would put us over the limit
- % as we don't have the size information a
- % priori.
- config:set("ddoc_cache", "max_size", "1048576", false),
- MaxSize = 1048576,
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- lists:foreach(
- fun(I) ->
- DbName = list_to_binary("big_" ++ integer_to_list(I)),
- ddoc_cache:open_custom(DbName, ?MODULE),
- meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
- ?assert(cache_size() < MaxSize * 2)
- end,
- lists:seq(1, 25)
- ),
- lists:foreach(
- fun(I) ->
- DbName = list_to_binary("big_" ++ integer_to_list(I)),
- ddoc_cache:open_custom(DbName, ?MODULE),
- meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
- ?assert(cache_size() < MaxSize * 2)
- end,
- lists:seq(26, 100)
- ).
-
-check_cache_refill({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
-
- InitDDoc = fun(I) ->
- NumBin = list_to_binary(integer_to_list(I)),
- DDocId = <<"_design/", NumBin/binary>>,
- Doc = #doc{id = DDocId, body = {[]}},
- {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]),
- {ok, _} = ddoc_cache:open_doc(DbName, DDocId),
- {ddoc_cache_entry_ddocid, {DbName, DDocId}}
- end,
-
- lists:foreach(
- fun(I) ->
- Key = InitDDoc(I),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
- end,
- lists:seq(1, 5)
- ),
-
- ShardName = mem3:name(hd(mem3:shards(DbName))),
- {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
- meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
- meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000),
- ?assertEqual(0, ets:info(?CACHE, size)),
-
- lists:foreach(
- fun(I) ->
- Key = InitDDoc(I),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
- end,
- lists:seq(6, 10)
- ).
-
-check_evict_and_exit(_) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
-
- Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
- ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key)),
- [#entry{key = Key, pid = Pid}] = ets:tab2list(?CACHE),
-
- erlang:monitor(process, whereis(ddoc_cache_lru)),
-
- % Pause the LRU so we can queue multiple messages
- erlang:suspend_process(whereis(ddoc_cache_lru)),
-
- gen_server:cast(ddoc_cache_lru, {do_evict, <<"dbname">>}),
- whereis(ddoc_cache_lru) ! {'EXIT', Pid, normal},
-
- % Resume the LRU and ensure that it doesn't die
- erlang:resume_process(whereis(ddoc_cache_lru)),
-
- meck:wait(ddoc_cache_ev, event, [evicted, <<"dbname">>], 1000),
-
- % Make sure it can handle another message
- OtherKey = {ddoc_cache_entry_custom, {<<"otherdb">>, ?MODULE}},
- ?assertEqual({ok, <<"otherdb">>}, ddoc_cache_lru:open(OtherKey)),
-
- % And verify our monitor doesn't fire
- timer:sleep(500),
- ?assertEqual({messages, []}, process_info(self(), messages)).
-
-cache_size() ->
- ets:info(?CACHE, memory) * erlang:system_info(wordsize).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
deleted file mode 100644
index 8da535294..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
+++ /dev/null
@@ -1,81 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_no_cache_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-ddoc(DDocId) ->
- {ok, #doc{
- id = DDocId,
- revs = {1, [<<"deadbeefdeadbeef">>]},
- body =
- {[
- {<<"ohai">>, null}
- ]}
- }}.
-
-not_found(_DDocId) ->
- {not_found, missing}.
-
-return_error(_DDocId) ->
- {error, timeout}.
-
-no_cache_test_() ->
- {
- "ddoc_cache no cache test",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {fun ddoc/1, fun no_cache_open_ok_test/2},
- {fun not_found/1, fun no_cache_open_not_found_test/2},
- {fun return_error/1, fun no_cache_open_error_test/2}
- ]
- }
- }
- }.
-
-setup_all() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(fabric),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-setup(Resp) ->
- meck:expect(fabric, open_doc, fun(_, DDocId, _) ->
- Resp(DDocId)
- end).
-
-teardown(_, _) ->
- meck:unload().
-
-no_cache_open_ok_test(_, _) ->
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
- ?_assertEqual(ddoc(<<"bar">>), Resp).
-
-no_cache_open_not_found_test(_, _) ->
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"baz">>),
- ?_assertEqual(not_found(<<"baz">>), Resp).
-
-no_cache_open_error_test(_, _) ->
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"bif">>),
- ?_assertEqual(return_error(<<"bif">>), Resp).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
deleted file mode 100644
index 8e71b1270..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
+++ /dev/null
@@ -1,41 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_open_error_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:expect(fabric, open_doc, fun(_, ?FOOBAR, _) ->
- erlang:error(test_kaboom)
- end),
- Ctx.
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-check_open_error_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"handle_open_error", fun handle_open_error/1}
- ])
- }.
-
-handle_open_error({DbName, _}) ->
- ?assertError(test_kaboom, ddoc_cache:open_doc(DbName, ?FOOBAR)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
deleted file mode 100644
index f9a9460e7..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
+++ /dev/null
@@ -1,102 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_open_test).
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-%% behaviour callbacks
-dbname(DbName) ->
- DbName.
-
-ddocid(_) ->
- no_ddocid.
-
-recover({deleted, _DbName}) ->
- erlang:error(database_does_not_exist);
-recover(DbName) ->
- ddoc_cache_entry_validation_funs:recover(DbName).
-
-insert(_, _) ->
- ok.
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_entry_validation_funs, [passthrough]),
- meck:expect(
- ddoc_cache_entry_validation_funs,
- recover,
- ['_'],
- meck:passthrough()
- ),
- Ctx.
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-check_open_error_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"should_return_database_does_not_exist", fun should_return_database_does_not_exist/1},
- {"should_not_call_recover_when_database_does_not_exist",
- fun should_not_call_recover_when_database_does_not_exist/1},
- {"should_call_recover_when_needed", fun should_call_recover_when_needed/1},
- {"should_not_crash_lru_process", fun should_not_crash_lru_process/1}
- ])
- }.
-
-should_return_database_does_not_exist({DbName, _}) ->
- ?assertError(
- database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})
- ).
-
-should_not_call_recover_when_database_does_not_exist({DbName, _}) ->
- meck:reset(ddoc_cache_entry_validation_funs),
- ?assertError(
- database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})
- ),
- ?assertError(
- timeout,
- meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 100)
- ).
-
-should_call_recover_when_needed({DbName, _}) ->
- meck:reset(ddoc_cache_entry_validation_funs),
- ddoc_cache_lru:open({?MODULE, DbName}),
- ?assertEqual(
- ok,
- meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 500)
- ).
-
-should_not_crash_lru_process({DbName, _}) ->
- LRUPid = whereis(ddoc_cache_lru),
- ?assert(is_process_alive(LRUPid)),
- ?assertError(
- database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})
- ),
- ?assert(is_process_alive(LRUPid)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
deleted file mode 100644
index e690f8235..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
+++ /dev/null
@@ -1,150 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_refresh_test).
-
--export([
- recover/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-recover(DbName) ->
- {ok, {DbName, rand_string()}}.
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-check_refresh_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"refresh_ddoc", fun refresh_ddoc/1},
- {"refresh_ddoc_rev", fun refresh_ddoc_rev/1},
- {"refresh_vdu", fun refresh_vdu/1},
- {"refresh_custom", fun refresh_custom/1},
- {"refresh_multiple", fun refresh_multiple/1}
- ])
- }.
-
-refresh_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
-
- ?assertEqual(2, ets:info(?CACHE, size)),
- [#entry{key = Key, val = DDoc}, _] = lists:sort(ets:tab2list(?CACHE)),
- NewDDoc = DDoc#doc{
- body = {[{<<"foo">>, <<"baz">>}]}
- },
- {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- Expect = NewDDoc#doc{
- revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
- },
- meck:wait(ddoc_cache_ev, event, [updated, {Key, Expect}], 1000),
- ?assertMatch({ok, Expect}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-refresh_ddoc_rev({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- {ok, RevDDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
-
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
-
- [_, #entry{key = Key, val = DDoc}] = lists:sort(ets:tab2list(?CACHE)),
- NewDDoc = DDoc#doc{
- body = {[{<<"foo">>, <<"kazam">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- % We pass the rev explicitly so we assert that we're
- % getting the same original response from the cache
- meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
- ?assertMatch({ok, RevDDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-refresh_vdu({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- {ok, [_]} = ddoc_cache:open_validation_funs(DbName),
- [#entry{key = Key}] = ets:tab2list(?CACHE),
- {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [updated, {Key, []}], 1000),
- ?assertMatch({ok, []}, ddoc_cache:open_validation_funs(DbName)),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-refresh_custom({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- {ok, Resp1} = ddoc_cache:open_custom(DbName, ?MODULE),
- {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?CUSTOM]),
- {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
- ?assertNotEqual({ok, Resp1}, ddoc_cache:open_custom(DbName, ?MODULE)),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-refresh_multiple({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- ?assertEqual(2, ets:info(?CACHE, size)),
- % Relying on the sort order of entry keys to make
- % sure our entries line up for this test
- [
- #entry{key = NoRevKey, val = DDoc},
- #entry{key = RevKey, val = DDoc}
- ] = lists:sort(ets:tab2list(?CACHE)),
- NewDDoc = DDoc#doc{
- body = {[{<<"foo">>, <<"kalamazoo">>}]}
- },
- {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- Updated = NewDDoc#doc{
- revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
- },
- meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
- meck:wait(ddoc_cache_ev, event, [updated, {NoRevKey, Updated}], 1000),
- % We pass the rev explicitly so we assert that we're
- % getting the same original response from the cache
- ?assertEqual({ok, Updated}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
- ?assertEqual({ok, DDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-rand_string() ->
- Bin = crypto:strong_rand_bytes(8),
- to_hex(Bin, []).
-
-to_hex(<<>>, Acc) ->
- list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
- to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-hexdig(C) when C >= 0, C =< 9 ->
- C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
- C + $A - 10.
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
deleted file mode 100644
index f974dd804..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
+++ /dev/null
@@ -1,221 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_remove_test).
-
--export([
- recover/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-recover(DbName) ->
- {ok, #doc{body = {Body}}} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
- case couch_util:get_value(<<"status">>, Body) of
- <<"ok">> ->
- {ok, yay};
- <<"not_ok">> ->
- {ruh, roh};
- <<"error">> ->
- erlang:error(thpppt)
- end.
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-check_refresh_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"remove_ddoc", fun remove_ddoc/1},
- {"remove_ddoc_rev", fun remove_ddoc_rev/1},
- {"remove_ddoc_rev_only", fun remove_ddoc_rev_only/1},
- {"remove_custom_not_ok", fun remove_custom_not_ok/1},
- {"remove_custom_error", fun remove_custom_error/1}
- ])
- }.
-
-remove_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- ?assertEqual(0, ets:info(?CACHE, size)),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
-
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
-
- [#entry{val = DDoc}, #entry{val = DDoc}] = ets:tab2list(?CACHE),
- {Depth, [RevId | _]} = DDoc#doc.revs,
- NewDDoc = DDoc#doc{
- deleted = true,
- body = {[]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
-
- DDocIdKey = {ddoc_cache_entry_ddocid, {DbName, ?FOOBAR}},
- Rev = {Depth, RevId},
- DDocIdRevKey = {ddoc_cache_entry_ddocid_rev, {DbName, ?FOOBAR, Rev}},
- meck:wait(ddoc_cache_ev, event, [removed, DDocIdKey], 1000),
- meck:wait(ddoc_cache_ev, event, [update_noop, DDocIdRevKey], 1000),
-
- ?assertMatch({not_found, deleted}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-remove_ddoc_rev({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
- {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
-
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
-
- % Notice the sort so that we know we're getting the
- % revid version second.
- [_, #entry{key = Key, val = DDoc, pid = Pid}] =
- lists:sort(ets:tab2list(?CACHE)),
-
- NewDDoc = DDoc#doc{
- body = {[{<<"an">>, <<"update">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
- % Compact the database so that the old rev is removed
- lists:foreach(
- fun(Shard) ->
- do_compact(Shard#shard.name)
- end,
- mem3:local_shards(DbName)
- ),
- % Trigger a refresh rather than wait for the timeout
- ddoc_cache_entry:refresh(Pid),
- meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
- ?assertMatch(
- {{not_found, missing}, _},
- ddoc_cache:open_doc(DbName, ?VDU, Rev)
- ),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-remove_ddoc_rev_only({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
- {ok, _} = ddoc_cache:open_doc(DbName, ?VDU),
- {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
- % Relying on the sort order of keys to keep
- % these lined up for testing
- [
- #entry{key = NoRevKey, val = DDoc, pid = NoRevPid},
- #entry{key = RevKey, val = DDoc, pid = RevPid}
- ] = lists:sort(ets:tab2list(?CACHE)),
- NewDDoc = DDoc#doc{
- body = {[{<<"new">>, <<"awesomeness">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
- % Compact the database so that the old rev is removed
- lists:foreach(
- fun(Shard) ->
- do_compact(Shard#shard.name)
- end,
- mem3:local_shards(DbName)
- ),
- % Trigger a refresh rather than wait for the timeout
- ddoc_cache_entry:refresh(NoRevPid),
- ddoc_cache_entry:refresh(RevPid),
- meck:wait(ddoc_cache_ev, event, [update_noop, NoRevKey], 1000),
- meck:wait(ddoc_cache_ev, event, [removed, RevKey], 1000),
- ?assertMatch({ok, _}, ddoc_cache:open_doc(DbName, ?VDU)),
- ?assertMatch(
- {{not_found, missing}, _},
- ddoc_cache:open_doc(DbName, ?VDU, Rev)
- ),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-remove_custom_not_ok({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- init_custom_ddoc(DbName),
- {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
- [#entry{key = Key}] = ets:tab2list(?CACHE),
- {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
- NewDDoc = DDoc#doc{
- body = {[{<<"status">>, <<"not_ok">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
- ?assertEqual({ruh, roh}, ddoc_cache:open_custom(DbName, ?MODULE)),
- ?assertEqual(0, ets:info(?CACHE, size)).
-
-remove_custom_error({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- init_custom_ddoc(DbName),
- {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
- [#entry{key = Key}] = ets:tab2list(?CACHE),
- {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
- NewDDoc = DDoc#doc{
- body = {[{<<"status">>, <<"error">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
- ?assertError(thpppt, ddoc_cache:open_custom(DbName, ?MODULE)),
- ?assertEqual(0, ets:info(?CACHE, size)).
-
-init_custom_ddoc(DbName) ->
- Body = {[{<<"status">>, <<"ok">>}]},
- {ok, Doc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
- NewDoc = Doc#doc{body = Body},
- {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]).
-
-do_compact(ShardName) ->
- {ok, Db} = couch_db:open_int(ShardName, []),
- try
- {ok, Pid} = couch_db:start_compact(Db),
- Ref = erlang:monitor(process, Pid),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- end
- after
- couch_db:close(Db)
- end,
- wait_for_compaction(ShardName).
-
-wait_for_compaction(ShardName) ->
- {ok, Db} = couch_db:open_int(ShardName, []),
- CompactRunning =
- try
- {ok, Info} = couch_db:get_db_info(Db),
- couch_util:get_value(compact_running, Info)
- after
- couch_db:close(Db)
- end,
- if
- not CompactRunning ->
- ok;
- true ->
- timer:sleep(100),
- wait_for_compaction(ShardName)
- end.
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl b/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl
deleted file mode 100644
index 73f7bc217..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--define(CACHE, ddoc_cache_entries).
--define(LRU, ddoc_cache_lru).
--define(OPENERS, ddoc_cache_openers).
-
--define(FOOBAR, <<"_design/foobar">>).
--define(VDU, <<"_design/vdu">>).
--define(CUSTOM, <<"_design/custom">>).
-
--record(entry, {
- key,
- val,
- pid
-}).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl b/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
deleted file mode 100644
index ced5c9f99..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
+++ /dev/null
@@ -1,110 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_tutil).
-
--export([
- start_couch/0,
- start_couch/1,
- stop_couch/1,
- clear/0,
- get_rev/2,
- ddocs/0,
- purge_modules/0,
- with/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
-start_couch() ->
- start_couch([{write_ddocs, true}]).
-
-start_couch(Options) ->
- WriteDDocs = couch_util:get_value(write_ddocs, Options, true),
- purge_modules(),
- Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]),
- TmpDb = ?tempdb(),
- ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]),
- if
- not WriteDDocs -> ok;
- true -> {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX])
- end,
- {TmpDb, Ctx}.
-
-stop_couch({TmpDb, Ctx}) ->
- fabric:delete_db(TmpDb),
- test_util:stop_couch(Ctx).
-
-clear() ->
- application:stop(ddoc_cache),
- application:start(ddoc_cache).
-
-get_rev(DbName, DDocId) ->
- {_, Ref} = erlang:spawn_monitor(fun() ->
- {ok, #doc{revs = Revs}} = fabric:open_doc(DbName, DDocId, [?ADMIN_CTX]),
- {Depth, [RevId | _]} = Revs,
- exit({Depth, RevId})
- end),
- receive
- {'DOWN', Ref, _, _, Rev} -> Rev
- end.
-
-ddocs() ->
- FooBar = #doc{
- id = <<"_design/foobar">>,
- body =
- {[
- {<<"foo">>, <<"bar">>}
- ]}
- },
- VDU = #doc{
- id = <<"_design/vdu">>,
- body =
- {[
- {<<"validate_doc_update">>, <<"function(doc) {return;}">>}
- ]}
- },
- Custom = #doc{
- id = <<"_design/custom">>,
- body =
- {[
- {<<"status">>, <<"ok">>},
- {<<"custom">>, <<"hotrod">>}
- ]}
- },
- [FooBar, VDU, Custom].
-
-purge_modules() ->
- case application:get_key(ddoc_cache, modules) of
- {ok, Mods} ->
- lists:foreach(
- fun(Mod) ->
- case code:which(Mod) of
- cover_compiled ->
- ok;
- _ ->
- code:delete(Mod),
- code:purge(Mod)
- end
- end,
- Mods
- );
- undefined ->
- ok
- end.
-
-%% eunit implementation of {with, Tests} doesn't detect test name correctly
-with(Tests) ->
- fun(ArgsTuple) ->
- [{Name, ?_test(Fun(ArgsTuple))} || {Name, Fun} <- Tests]
- end.
diff --git a/src/dreyfus/.gitignore b/src/dreyfus/.gitignore
deleted file mode 100644
index 16fd00698..000000000
--- a/src/dreyfus/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-ebin/
-.*.sw?
-test/elixir/_build
-test/elixir/deps
diff --git a/src/dreyfus/LICENSE.txt b/src/dreyfus/LICENSE.txt
deleted file mode 100644
index 1561dafac..000000000
--- a/src/dreyfus/LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2015 IBM Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/dreyfus/README.md b/src/dreyfus/README.md
deleted file mode 100644
index d653432d0..000000000
--- a/src/dreyfus/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
-What is dreyfus?
--------------
-Dreyfus manages Clouseau nodes to deliver full-text search features.
-
-Dreyfus consists of the following files:
-
-- **dreyfus.app.src** - the application resource file. As this file shows, the application callback module is dreyfus_app, and the two registered processes started in this application are dreyfus_index_manager and dreyfus_sup.
-- **dreyfus_app.erl** - the application callback module, which starts the top supervisor via dreyfus_sup:start_link().
-- **dreyfus_sup.erl** - the top supervisor that starts dreyfus_index_manager as its child worker process.
-- **dreyfus_index_manager.erl** - manages multiple processes of dreyfus_index.
-- **dreyfus_index.erl** - contains the main callback functions for operating on an index. One process is created for every index (a distinct index function in a design document).
-- **dreyfus_index_updater.erl** - contains callback functions for index update.
-- **dreyfus_httpd.erl** - handles http requests.
-- **dreyfus_fabric.erl**, dreyfus_fabric_cleanup.erl, dreyfus_fabric_group1.erl, dreyfus_fabric_group2.erl, dreyfus_fabric_info.erl, dreyfus_fabric_search.erl - a collection of proxy functions for operations in a cluster with shards.
-- **dreyfus_rpc.erl** - proxy functions executed for every shard.
-- **clouseau_rpc.erl** - contains remote procedure call functions to Clouseau nodes.
-- **dreyfus_bookmark.erl** - utility functions for managing bookmarks for retrieving the next set of results.
-- **dreyfus_util.erl** - various utility functions.
-
-
-
-Life of an HTTP request
--------------
-HTTP requests have the following life cycle:
-
-![Dreyfus](https://cloud.githubusercontent.com/assets/5738841/7590919/cbaf1c50-f898-11e4-8a4c-462a1a680135.png)
-
-1. A request from chttpd goes to dreyfus_httpd.
-2. dreyfus_httpd:
- - parses and validates the request in the functions `parse_index_params` & `validate_index_query`.
- - depending on the type of the request, invokes one of the fabric functions: dreyfus_fabric_search, dreyfus_fabric_group1, dreyfus_fabric_group2, dreyfus_fabric_info, or dreyfus_fabric_cleanup.
-3. dreyfus_fabric:
- - Gets the shards and the workers to be executed on every shard:
- `Shards = dreyfus_util:get_shards(DbName, QueryArgs)`,
- `Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
- [DDoc, IndexName, dreyfus_util:export(QueryArgs)])`
- - spawns processes to execute jobs on every shard using the RPC server rexi: `rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, State, infinity, 1000 * 60 * 60)
-`
-4. dreyfus_rpc:
- - is executed on every shard of every node at the same time.
- - calls `dreyfus_index_manager:get_index(DbName, Index)` to get an index. dreyfus_index_manager will spawn a process to create the index if it doesn't exist.
- - the index of every shard will be updated if necessary via `dreyfus_index:await(Pid, MinSeq)`.
- - calls `dreyfus_index:Fun(Pid, QueryArgs)` with a corresponding search request.
-
-5. dreyfus_index:
- - synchronously calls `clouseau_rpc:search`.
-6. clouseau_rpc:
- - calls `ioq:call(Ref, Msg, erlang:get(io_priority))` to run the search on Clouseau nodes using Lucene.
-7. top_docs are returned from Lucene.
-8. top_docs are passed to dreyfus_index.
-9. top_docs are passed to dreyfus_rpc.
-10. dreyfus_rpc processes pass their individual top_docs as a reply `rexi:reply(Result)` to the initial dreyfus_fabric process that spawned them.
-11. dreyfus_fabric merges documents from all shards: `MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort)` and returns the results to dreyfus_httpd.
-12. dreyfus_httpd returns the formatted results to chttpd through send_json(..)
-
-
-Indexing
--------------
-
-### Indexing triggered by a search request
-During a search request, before dreyfus_rpc calls dreyfus_index:search, dreyfus_rpc first initiates an update of the Lucene indexes. It does so in the following way:
-
-![DreyfusIndexing.png](https://cloud.githubusercontent.com/assets/5738841/7590923/d12303fe-f898-11e4-833d-b1387b7048a6.png)
-
-1. The last sequence number (signifying the number of the last change in the database) is calculated: `{_LastSeq, MinSeq} = calculate_seqs(Db, Stale)`. For stale queries (queries that don't need to reflect recent changes in the database), MinSeq will be 0, meaning that they don't need to initiate an update of the index before returning query results. The meaning of 0 is 'wait until the index is at least at update_seq 0', which is true even for empty indexes.
-
-2. Function call `dreyfus_index:design_doc_to_index(DDoc, IndexName)` returns a record representation of an index: `#index{
- analyzer=Analyzer,
- ddoc_id=Id,
- def=Def,
- def_lang=Language,
- name=IndexName,
- sig=Sig}`. `Sig` here is a hash of the index function and the analyzer defined in the design document. `Sig` is used to check whether the index definition has changed and the index needs to be reconstructed.
-
-
-3. The function call `dreyfus_index_manager:get_index(DbName, Index)` will return the Pid of the dreyfus_index process corresponding to this index. dreyfus_index_manager stores the dreyfus_index processes for all indexes in an ets table: `ets:new(?BY_SIG, [set, private, named_table])`. If the dreyfus_index process of the given index exists in the ets ?BY_SIG, it will be returned. If it doesn't exist, a new dreyfus_index process will be spawned. To do this, dreyfus_index_manager in `handle_call({get_index,..)` will return `{noreply, State};` so as not to block the gen_server, and will hand off the creation of the new index process to a spawned process - `spawn_link(fun() -> new_index(DbName, Index) end)` - remembering the Pid of the caller in the ets ?BY_SIG. `new_index` will create a new index process and send an `open_ok` message to the dreyfus_index_manager gen_server. `handle_call({open_ok,..)` will retrieve the Pid (`From`) of the original caller and send that caller a reply, a message containing the Pid of the newly created index process - NewPid. Calling `add_to_ets(NewPid, DbName, Sig)` will update both ets tables, ?BY_SIG and ?BY_PID.
-
-4. `dreyfus_index:await(Pid, MinSeq)` will initiate an update of the index if the requested MinSeq is bigger than the current Seq stored in the index. It will do this by calling `dreyfus_index_updater:update(IndexPid, Index)`. dreyfus_index_updater will load all documents modified since the last seq stored in the dreyfus index, and for every document will call `clouseau_rpc:delete` to delete the document from the Java Lucene index, or `clouseau_rpc:update` to update it in the Java Lucene index.
diff --git a/src/dreyfus/include/dreyfus.hrl b/src/dreyfus/include/dreyfus.hrl
deleted file mode 100644
index 7c6a36945..000000000
--- a/src/dreyfus/include/dreyfus.hrl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(index, {
- current_seq=0,
- dbname,
- ddoc_id,
- analyzer,
- def,
- def_lang,
- name,
- sig=nil
-}).
-
--record(grouping, {
- by=nil,
- groups=[],
- offset=0,
- limit=10,
- sort=relevance,
- new_api=true
-}).
-
--record(index_query_args, {
- q,
- partition=nil,
- limit=25,
- stale=false,
- include_docs=false,
- bookmark=nil,
- sort=relevance,
- grouping=#grouping{},
- stable=false,
- counts=nil,
- ranges=nil,
- drilldown=[],
- include_fields=nil,
- highlight_fields=nil,
- highlight_pre_tag = <<"<em>">>,
- highlight_post_tag = <<"</em>">>,
- highlight_number=1,
- highlight_size=0,
- raw_bookmark=false
-}).
-
--record(sortable, {
- order, % sort order
- shard, % originating shard
- item % the item itself
-}).
-
-% Our local representation of top_docs, not equal to wire format.
--record(top_docs, {
- update_seq,
- total_hits,
- hits,
- counts,
- ranges
-}).
-
-%% These must match the case classes in clouseau.
--record(hit, {
- order,
- fields
-}).
diff --git a/src/dreyfus/priv/stats_descriptions.cfg b/src/dreyfus/priv/stats_descriptions.cfg
deleted file mode 100644
index 7f93ee26a..000000000
--- a/src/dreyfus/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,65 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-
-{[dreyfus, httpd, search], [
- {type, histogram},
- {desc, <<"Distribution of overall search request latency as experienced by the end user">>}
-]}.
-{[dreyfus, rpc, search], [
- {type, histogram},
- {desc, <<"length of a search RPC worker">>}
-]}.
-{[dreyfus, rpc, group1], [
- {type, histogram},
- {desc, <<"length of a group1 RPC worker">>}
-]}.
-{[dreyfus, rpc, group2], [
- {type, histogram},
- {desc, <<"length of a group2 RPC worker">>}
-]}.
-{[dreyfus, rpc, info], [
- {type, histogram},
- {desc, <<"length of an info RPC worker">>}
-]}.
-{[dreyfus, index, await], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index await request">>}
-]}.
-{[dreyfus, index, search], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index search request">>}
-]}.
-{[dreyfus, index, group1], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index group1 request">>}
-]}.
-{[dreyfus, index, group2], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index group2 request">>}
-]}.
-{[dreyfus, index, info], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index info request">>}
-]}.
-
-%% Declare IOQ search channel metrics
-{[couchdb, io_queue, search], [
- {type, counter},
- {desc, <<"Search IO directly triggered by client requests">>}
-]}.
-
-%% Declare IOQ2 search channel metrics
-{[couchdb, io_queue2, search, count], [
- {type, counter},
- {desc, <<"Search IO directly triggered by client requests">>}
-]}.
diff --git a/src/dreyfus/src/clouseau_rpc.erl b/src/dreyfus/src/clouseau_rpc.erl
deleted file mode 100644
index 908182793..000000000
--- a/src/dreyfus/src/clouseau_rpc.erl
+++ /dev/null
@@ -1,108 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(clouseau_rpc).
-
--include("dreyfus.hrl").
-
--export([open_index/3]).
--export([await/2, commit/2, get_update_seq/1, info/1, search/2]).
--export([group1/7, group2/2]).
--export([delete/2, update/3, cleanup/1, cleanup/2, rename/1]).
--export([analyze/2, version/0, disk_size/1]).
--export([set_purge_seq/2, get_purge_seq/1, get_root_dir/0]).
--export([connected/0]).
-
-open_index(Peer, Path, Analyzer) ->
- rpc({main, clouseau()}, {open, Peer, Path, Analyzer}).
-
-disk_size(Path) ->
- rpc({main, clouseau()}, {disk_size, Path}).
-
-get_root_dir() ->
- rpc({main, clouseau()}, {get_root_dir}).
-
-await(Ref, MinSeq) ->
- rpc(Ref, {await, MinSeq}).
-
-commit(Ref, NewCommitSeq) ->
- rpc(Ref, {commit, NewCommitSeq}).
-
-info(Ref) ->
- rpc(Ref, info).
-
-get_update_seq(Ref) ->
- rpc(Ref, get_update_seq).
-
-set_purge_seq(Ref, Seq) ->
- rpc(Ref, {set_purge_seq, Seq}).
-
-get_purge_seq(Ref) ->
- rpc(Ref, get_purge_seq).
-
-search(Ref, Args) ->
- case rpc(Ref, {search, Args}) of
- {ok, Response} when is_list(Response) ->
- {ok, #top_docs{
- update_seq = couch_util:get_value(update_seq, Response),
- total_hits = couch_util:get_value(total_hits, Response),
- hits = couch_util:get_value(hits, Response),
- counts = couch_util:get_value(counts, Response),
- ranges = couch_util:get_value(ranges, Response)
- }};
- Else ->
- Else
- end.
-
-group1(Ref, Query, GroupBy, Refresh, Sort, Offset, Limit) ->
- rpc(Ref, {group1, Query, GroupBy, Refresh, Sort, Offset, Limit}).
-
-group2(Ref, Args) ->
- rpc(Ref, {group2, Args}).
-
-delete(Ref, Id) ->
- rpc(Ref, {delete, couch_util:to_binary(Id)}).
-
-update(Ref, Id, Fields) ->
- rpc(Ref, {update, Id, Fields}).
-
-cleanup(DbName) ->
- gen_server:cast({cleanup, clouseau()}, {cleanup, DbName}).
-
-rename(DbName) ->
- gen_server:cast({cleanup, clouseau()}, {rename, DbName}).
-
-cleanup(DbName, ActiveSigs) ->
- gen_server:cast({cleanup, clouseau()}, {cleanup, DbName, ActiveSigs}).
-
-analyze(Analyzer, Text) ->
- rpc({analyzer, clouseau()}, {analyze, Analyzer, Text}).
-
-version() ->
- rpc({main, clouseau()}, version).
-
-connected() ->
- HiddenNodes = erlang:nodes(hidden),
- case lists:member(clouseau(), HiddenNodes) of
- true ->
- true;
- false ->
- % We might have just booted up, so let's ping
- pong == net_adm:ping(clouseau())
- end.
-
-rpc(Ref, Msg) ->
- ioq:call(Ref, Msg, erlang:get(io_priority)).
-
-clouseau() ->
- list_to_atom(config:get("dreyfus", "name", "clouseau@127.0.0.1")).
diff --git a/src/dreyfus/src/dreyfus.app.src b/src/dreyfus/src/dreyfus.app.src
deleted file mode 100644
index be6595222..000000000
--- a/src/dreyfus/src/dreyfus.app.src
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
-{application, dreyfus, [
- {description, "Clouseau index manager"},
- {vsn, git},
- {mod, {dreyfus_app, []}},
- {registered, [dreyfus_index_manager, dreyfus_sup]},
- {applications, [kernel, stdlib, couch_log, config, couch_event, mem3, ioq, couch_epi]}
-]}.
diff --git a/src/dreyfus/src/dreyfus.erl b/src/dreyfus/src/dreyfus.erl
deleted file mode 100644
index 0fed3e60e..000000000
--- a/src/dreyfus/src/dreyfus.erl
+++ /dev/null
@@ -1,31 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus).
-
--export([available/0]).
-
-available() ->
- case application:get_env(dreyfus, available) of
- {ok, Val} ->
- Val;
- undefined ->
- case clouseau_rpc:connected() of
- true ->
- ok = application:set_env(dreyfus, available, true),
- true;
- false ->
- false
- end
- end.
diff --git a/src/dreyfus/src/dreyfus_app.erl b/src/dreyfus/src/dreyfus_app.erl
deleted file mode 100644
index 3f2fc5086..000000000
--- a/src/dreyfus/src/dreyfus_app.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, []) ->
- dreyfus_sup:start_link().
-
-stop([]) ->
- ok.
diff --git a/src/dreyfus/src/dreyfus_bookmark.erl b/src/dreyfus/src/dreyfus_bookmark.erl
deleted file mode 100644
index e33087a27..000000000
--- a/src/dreyfus/src/dreyfus_bookmark.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_bookmark).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--export([
- update/3,
- unpack/2,
- pack/1,
- add_missing_shards/2
-]).
-
-update(_Sort, Bookmark, []) ->
- Bookmark;
-update(relevance, Bookmark, [#sortable{} = Sortable | Rest]) ->
- #sortable{
- order = [Score, Doc],
- shard = Shard
- } = Sortable,
- B1 = fabric_dict:store(Shard, {Score, Doc}, Bookmark),
- B2 = fabric_view:remove_overlapping_shards(Shard, B1),
- update(relevance, B2, Rest);
-update(Sort, Bookmark, [#sortable{} = Sortable | Rest]) ->
- #sortable{
- order = Order,
- shard = Shard
- } = Sortable,
- B1 = fabric_dict:store(Shard, Order, Bookmark),
- B2 = fabric_view:remove_overlapping_shards(Shard, B1),
- update(Sort, B2, Rest).
-
-unpack(DbName, #index_query_args{bookmark = nil} = Args) ->
- fabric_dict:init(dreyfus_util:get_shards(DbName, Args), nil);
-unpack(DbName, #index_query_args{} = Args) ->
- unpack(DbName, Args#index_query_args.bookmark);
-unpack(DbName, Packed) when is_binary(Packed) ->
- lists:map(
- fun({Node, Range, After}) ->
- case mem3:get_shard(DbName, Node, Range) of
- {ok, Shard} ->
- {Shard, After};
- {error, not_found} ->
- PlaceHolder = #shard{
- node = Node,
- range = Range,
- dbname = DbName,
- _ = '_'
- },
- {PlaceHolder, After}
- end
- end,
- binary_to_term(couch_util:decodeBase64Url(Packed))
- ).
-
-pack(nil) ->
- null;
-pack(Workers) ->
- Workers1 = [{N, R, A} || {#shard{node = N, range = R}, A} <- Workers, A =/= nil],
- Bin = term_to_binary(Workers1, [compressed, {minor_version, 1}]),
- couch_util:encodeBase64Url(Bin).
-
-add_missing_shards(Bookmark, LiveShards) ->
- {BookmarkShards, _} = lists:unzip(Bookmark),
- add_missing_shards(Bookmark, BookmarkShards, LiveShards).
-
-add_missing_shards(Bookmark, _, []) ->
- Bookmark;
-add_missing_shards(Bookmark, BMShards, [H | T]) ->
- Bookmark1 =
- case lists:keymember(H#shard.range, #shard.range, BMShards) of
- true -> Bookmark;
- false -> fabric_dict:store(H, nil, Bookmark)
- end,
- add_missing_shards(Bookmark1, BMShards, T).
diff --git a/src/dreyfus/src/dreyfus_config.erl b/src/dreyfus/src/dreyfus_config.erl
deleted file mode 100644
index df138f35d..000000000
--- a/src/dreyfus/src/dreyfus_config.erl
+++ /dev/null
@@ -1,16 +0,0 @@
--module(dreyfus_config).
-
--export([data/0, get/1]).
-
-data() ->
- try
- config:get("dreyfus_blacklist")
- catch
- error:badarg ->
- % workaround: couch_epi may call this before the config app is ready, so fall back to an empty blacklist
- []
- end.
-
-get(Key) ->
- Handle = couch_epi:get_handle({dreyfus, black_list}),
- couch_epi:get_value(Handle, dreyfus, Key).
diff --git a/src/dreyfus/src/dreyfus_epi.erl b/src/dreyfus/src/dreyfus_epi.erl
deleted file mode 100644
index 22c2c90a8..000000000
--- a/src/dreyfus/src/dreyfus_epi.erl
+++ /dev/null
@@ -1,47 +0,0 @@
--module(dreyfus_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
--define(DATA_INTERVAL, 1000).
-
-app() ->
- dreyfus.
-
-providers() ->
- [
- {couch_db, dreyfus_plugin_couch_db},
- {chttpd_handlers, dreyfus_httpd_handlers}
- ].
-
-services() ->
- [].
-
-data_subscriptions() ->
- [{dreyfus, black_list}].
-
-data_providers() ->
- [
- {{dreyfus, black_list}, {callback_module, dreyfus_config}, [{interval, ?DATA_INTERVAL}]}
- ].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- Listeners = application:get_env(dreyfus, config_listeners, []),
- lists:foreach(
- fun(L) ->
- L ! dreyfus_config_change_finished
- end,
- Listeners
- ).
diff --git a/src/dreyfus/src/dreyfus_fabric.erl b/src/dreyfus/src/dreyfus_fabric.erl
deleted file mode 100644
index 5689c1d4e..000000000
--- a/src/dreyfus/src/dreyfus_fabric.erl
+++ /dev/null
@@ -1,270 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric).
--export([get_json_docs/2, handle_error_message/7]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include("dreyfus.hrl").
-
-get_json_docs(DbName, DocIds) ->
- fabric:all_docs(DbName, fun callback/2, [], [{keys, DocIds}, {include_docs, true}]).
-
-callback({meta, _}, Acc) ->
- {ok, Acc};
-callback({error, Reason}, _Acc) ->
- {error, Reason};
-callback({row, Row}, Acc) ->
- {id, Id} = lists:keyfind(id, 1, Row),
- {ok, [{Id, lists:keyfind(doc, 1, Row)} | Acc]};
-callback(complete, Acc) ->
- {ok, lists:reverse(Acc)};
-callback(timeout, _Acc) ->
- {error, timeout}.
-
-handle_error_message(
- {rexi_DOWN, _, {_, NodeRef}, _},
- _Worker,
- Counters,
- _Replacements,
- _StartFun,
- _StartArgs,
- RingOpts
-) ->
- case fabric_util:remove_down_workers(Counters, NodeRef, RingOpts) of
- {ok, NewCounters} ->
- {ok, NewCounters};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_error_message(
- {rexi_EXIT, {maintenance_mode, _}},
- Worker,
- Counters,
- Replacements,
- StartFun,
- StartArgs,
- RingOpts
-) ->
- handle_replacement(
- Worker,
- Counters,
- Replacements,
- StartFun,
- StartArgs,
- RingOpts
- );
-handle_error_message(
- {rexi_EXIT, Reason},
- Worker,
- Counters,
- _Replacements,
- _StartFun,
- _StartArgs,
- RingOpts
-) ->
- handle_error(Reason, Worker, Counters, RingOpts);
-handle_error_message(
- {error, Reason},
- Worker,
- Counters,
- _Replacements,
- _StartFun,
- _StartArgs,
- RingOpts
-) ->
- handle_error(Reason, Worker, Counters, RingOpts);
-handle_error_message(
- {'EXIT', Reason},
- Worker,
- Counters,
- _Replacements,
- _StartFun,
- _StartArgs,
- RingOpts
-) ->
- handle_error({exit, Reason}, Worker, Counters, RingOpts);
-handle_error_message(
- Reason,
- Worker,
- Counters,
- _Replacements,
- _StartFun,
- _StartArgs,
- RingOpts
-) ->
- couch_log:error("Unexpected error during request: ~p", [Reason]),
- handle_error(Reason, Worker, Counters, RingOpts).
-
-handle_error(Reason, Worker, Counters0, RingOpts) ->
- Counters = fabric_dict:erase(Worker, Counters0),
- case fabric_ring:is_progress_possible(Counters, RingOpts) of
- true ->
- {ok, Counters};
- false ->
- {error, Reason}
- end.
-
-handle_replacement(
- Worker,
- OldCntrs0,
- OldReplacements,
- StartFun,
- StartArgs,
- RingOpts
-) ->
- OldCounters = lists:filter(
- fun({#shard{ref = R}, _}) ->
- R /= Worker#shard.ref
- end,
- OldCntrs0
- ),
- case lists:keytake(Worker#shard.range, 1, OldReplacements) of
- {value, {_Range, Replacements}, NewReplacements} ->
- NewCounters = lists:foldl(
- fun(Repl, CounterAcc) ->
- NewCounter = start_replacement(StartFun, StartArgs, Repl),
- fabric_dict:store(NewCounter, nil, CounterAcc)
- end,
- OldCounters,
- Replacements
- ),
- true = fabric_ring:is_progress_possible(NewCounters, RingOpts),
- NewRefs = fabric_dict:fetch_keys(NewCounters),
- {new_refs, NewRefs, NewCounters, NewReplacements};
- false ->
- handle_error(
- {nodedown, <<"progress not possible">>},
- Worker,
- OldCounters,
- RingOpts
- )
- end.
-
-start_replacement(StartFun, StartArgs, Shard) ->
- [DDoc, IndexName, QueryArgs] = StartArgs,
- After =
- case QueryArgs#index_query_args.bookmark of
- Bookmark when is_list(Bookmark) ->
- lists:foldl(
- fun({#shard{range = R0}, After0}, Acc) ->
- case R0 == Shard#shard.range of
- true -> After0;
- false -> Acc
- end
- end,
- nil,
- Bookmark
- );
- _ ->
- nil
- end,
- QueryArgs1 = QueryArgs#index_query_args{bookmark = After},
- StartArgs1 = [DDoc, IndexName, QueryArgs1],
- Ref = rexi:cast(
- Shard#shard.node,
- {dreyfus_rpc, StartFun, [Shard#shard.name | StartArgs1]}
- ),
- Shard#shard{ref = Ref}.
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-node_down_test() ->
- [S1, S2, S3] = [
- mk_shard("n1", [0, 4]),
- mk_shard("n1", [5, ?RING_END]),
- mk_shard("n2", [0, ?RING_END])
- ],
- [W1, W2, W3] = [
- S1#shard{ref = make_ref()},
- S2#shard{ref = make_ref()},
- S3#shard{ref = make_ref()}
- ],
- Counters1 = fabric_dict:init([W1, W2, W3], nil),
-
- N1 = S1#shard.node,
- Msg1 = {rexi_DOWN, nil, {nil, N1}, nil},
- Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, []),
- ?assertEqual({ok, [{W3, nil}]}, Res1),
-
- {ok, Counters2} = Res1,
- N2 = S3#shard.node,
- Msg2 = {rexi_DOWN, nil, {nil, N2}, nil},
- Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, []),
- ?assertEqual({error, {nodedown, <<"progress not possible">>}}, Res2).
-
-worker_error_test() ->
- [S1, S2] = [
- mk_shard("n1", [0, ?RING_END]),
- mk_shard("n2", [0, ?RING_END])
- ],
- [W1, W2] = [S1#shard{ref = make_ref()}, S2#shard{ref = make_ref()}],
- Counters1 = fabric_dict:init([W1, W2], nil),
-
- Res1 = handle_error(bam, W1, Counters1, []),
- ?assertEqual({ok, [{W2, nil}]}, Res1),
-
- {ok, Counters2} = Res1,
- ?assertEqual({error, boom}, handle_error(boom, W2, Counters2, [])).
-
-node_down_with_partitions_test() ->
- [S1, S2] = [
- mk_shard("n1", [0, 4]),
- mk_shard("n2", [0, 8])
- ],
- [W1, W2] = [
- S1#shard{ref = make_ref()},
- S2#shard{ref = make_ref()}
- ],
- Counters1 = fabric_dict:init([W1, W2], nil),
- RingOpts = [{any, [S1, S2]}],
-
- N1 = S1#shard.node,
- Msg1 = {rexi_DOWN, nil, {nil, N1}, nil},
- Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, RingOpts),
- ?assertEqual({ok, [{W2, nil}]}, Res1),
-
- {ok, Counters2} = Res1,
- N2 = S2#shard.node,
- Msg2 = {rexi_DOWN, nil, {nil, N2}, nil},
- Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, RingOpts),
- ?assertEqual({error, {nodedown, <<"progress not possible">>}}, Res2).
-
-worker_error_with_partitions_test() ->
- [S1, S2] = [
- mk_shard("n1", [0, 4]),
- mk_shard("n2", [0, 8])
- ],
- [W1, W2] = [
- S1#shard{ref = make_ref()},
- S2#shard{ref = make_ref()}
- ],
- Counters1 = fabric_dict:init([W1, W2], nil),
- RingOpts = [{any, [S1, S2]}],
-
- Res1 = handle_error(bam, W1, Counters1, RingOpts),
- ?assertEqual({ok, [{W2, nil}]}, Res1),
-
- {ok, Counters2} = Res1,
- ?assertEqual({error, boom}, handle_error(boom, W2, Counters2, RingOpts)).
-
-mk_shard(Name, Range) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = Range}.
-
--endif.
diff --git a/src/dreyfus/src/dreyfus_fabric_cleanup.erl b/src/dreyfus/src/dreyfus_fabric_cleanup.erl
deleted file mode 100644
index e2710744d..000000000
--- a/src/dreyfus/src/dreyfus_fabric_cleanup.erl
+++ /dev/null
@@ -1,105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_cleanup).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/1]).
-
-go(DbName) ->
- {ok, DesignDocs} = fabric:design_docs(DbName),
- ActiveSigs = lists:usort(
- lists:flatmap(
- fun active_sigs/1,
- [couch_doc:from_json_obj(DD) || DD <- DesignDocs]
- )
- ),
- cleanup_local_purge_doc(DbName, ActiveSigs),
- clouseau_rpc:cleanup(DbName, ActiveSigs),
- ok.
-
-active_sigs(#doc{body = {Fields}} = Doc) ->
- try
- {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- {IndexNames, _} = lists:unzip(RawIndexes),
- [
- begin
- {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
- Index#index.sig
- end
- || IndexName <- IndexNames
- ]
- catch
- error:{badmatch, _Error} ->
- []
- end.
-
-cleanup_local_purge_doc(DbName, ActiveSigs) ->
- {ok, BaseDir} = clouseau_rpc:get_root_dir(),
- DbNamePattern = <<DbName/binary, ".*">>,
- Pattern0 = filename:join([BaseDir, "shards", "*", DbNamePattern, "*"]),
- Pattern = binary_to_list(iolist_to_binary(Pattern0)),
- DirListStrs = filelib:wildcard(Pattern),
- DirList = [iolist_to_binary(DL) || DL <- DirListStrs],
- LocalShards = mem3:local_shards(DbName),
- ActiveDirs = lists:foldl(
- fun(LS, AccOuter) ->
- lists:foldl(
- fun(Sig, AccInner) ->
- DirName = filename:join([BaseDir, LS#shard.name, Sig]),
- [DirName | AccInner]
- end,
- AccOuter,
- ActiveSigs
- )
- end,
- [],
- LocalShards
- ),
-
- DeadDirs = DirList -- ActiveDirs,
- lists:foreach(
- fun(IdxDir) ->
- Sig = dreyfus_util:get_signature_from_idxdir(IdxDir),
- case Sig of
- undefined ->
- ok;
- _ ->
- DocId = dreyfus_util:get_local_purge_doc_id(Sig),
- LocalShards = mem3:local_shards(DbName),
- lists:foreach(
- fun(LS) ->
- ShardDbName = LS#shard.name,
- {ok, ShardDb} = couch_db:open_int(ShardDbName, []),
- case couch_db:open_doc(ShardDb, DocId, []) of
- {ok, LocalPurgeDoc} ->
- couch_db:update_doc(
- ShardDb,
- LocalPurgeDoc#doc{deleted = true},
- [?ADMIN_CTX]
- );
- {not_found, _} ->
- ok
- end,
- couch_db:close(ShardDb)
- end,
- LocalShards
- )
- end
- end,
- DeadDirs
- ).
diff --git a/src/dreyfus/src/dreyfus_fabric_group1.erl b/src/dreyfus/src/dreyfus_fabric_group1.erl
deleted file mode 100644
index 1edfd653f..000000000
--- a/src/dreyfus/src/dreyfus_fabric_group1.erl
+++ /dev/null
@@ -1,153 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_group1).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/4]).
-
--record(state, {
- limit,
- sort,
- top_groups,
- counters,
- start_args,
- replacements,
- ring_opts
-}).
-
-go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
- go(DbName, DDoc, IndexName, QueryArgs);
-go(DbName, DDoc, IndexName, #index_query_args{} = QueryArgs) ->
- DesignName = dreyfus_util:get_design_docid(DDoc),
- dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
- Shards = dreyfus_util:get_shards(DbName, QueryArgs),
- RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group1, [
- DDoc,
- IndexName,
- dreyfus_util:export(QueryArgs)
- ]),
- Replacements = fabric_view:get_shard_replacements(DbName, Workers),
- Counters = fabric_dict:init(Workers, nil),
- RexiMon = fabric_util:create_monitors(Workers),
- State = #state{
- limit = QueryArgs#index_query_args.grouping#grouping.limit,
- sort = QueryArgs#index_query_args.grouping#grouping.sort,
- top_groups = [],
- counters = Counters,
- start_args = [DDoc, IndexName, QueryArgs],
- replacements = Replacements,
- ring_opts = RingOpts
- },
- try
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- State,
- infinity,
- 1000 * 60 * 60
- )
- after
- rexi_monitor:stop(RexiMon),
- fabric_util:cleanup(Workers)
- end;
-go(DbName, DDoc, IndexName, OldArgs) ->
- go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
-
-handle_message({ok, NewTopGroups}, Shard, State0) ->
- State = upgrade_state(State0),
- #state{top_groups = TopGroups, limit = Limit, sort = Sort} = State,
- case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- MergedTopGroups = merge_top_groups(
- TopGroups, make_sortable(Shard, NewTopGroups), Limit, Sort
- ),
- State1 = State#state{
- counters = C2,
- top_groups = MergedTopGroups
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop, remove_sortable(MergedTopGroups)}
- end
- end;
-handle_message(Error, Worker, State0) ->
- State = upgrade_state(State0),
- case
- dreyfus_fabric:handle_error_message(
- Error,
- Worker,
- State#state.counters,
- State#state.replacements,
- group1,
- State#state.start_args,
- State#state.ring_opts
- )
- of
- {ok, Counters} ->
- {ok, State#state{counters = Counters}};
- {new_refs, NewRefs, NewCounters, NewReplacements} ->
- NewState = State#state{
- counters = NewCounters,
- replacements = NewReplacements
- },
- {new_refs, NewRefs, NewState};
- Else ->
- Else
- end.
-
-merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) ->
- MergedGroups0 = TopGroupsA ++ TopGroupsB,
- GNs = lists:usort([N || #sortable{item = {N, _}} <- MergedGroups0]),
- MergedGroups = [
- merge_top_group(Sort, [S || #sortable{item = {N, _}} = S <- MergedGroups0, N =:= GN])
- || GN <- GNs
- ],
- lists:sublist(dreyfus_util:sort(Sort, MergedGroups), Limit).
-
-merge_top_group(_Sort, [Group]) ->
- Group;
-merge_top_group(Sort, [_, _] = Groups) ->
- hd(dreyfus_util:sort(Sort, Groups)).
-
-make_sortable(Shard, TopGroups) ->
- [#sortable{item = G, order = Order, shard = Shard} || {_Name, Order} = G <- TopGroups].
-
-remove_sortable(Sortables) ->
- [Item || #sortable{item = Item} <- Sortables].
-
-upgrade_state({state, Limit, Sort, TopGroups, Counters}) ->
- #state{
- limit = Limit,
- sort = Sort,
- top_groups = TopGroups,
- counters = Counters,
- replacements = []
- };
-upgrade_state(#state{} = State) ->
- State.
diff --git a/src/dreyfus/src/dreyfus_fabric_group2.erl b/src/dreyfus/src/dreyfus_fabric_group2.erl
deleted file mode 100644
index e962c7e4a..000000000
--- a/src/dreyfus/src/dreyfus_fabric_group2.erl
+++ /dev/null
@@ -1,185 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_group2).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/4]).
-
--record(state, {
- limit,
- sort,
- total_hits,
- total_grouped_hits,
- top_groups,
- counters,
- start_args,
- replacements,
- ring_opts
-}).
-
-go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
- go(DbName, DDoc, IndexName, QueryArgs);
-go(DbName, DDoc, IndexName, #index_query_args{} = QueryArgs) ->
- DesignName = dreyfus_util:get_design_docid(DDoc),
- dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
- Shards = dreyfus_util:get_shards(DbName, QueryArgs),
- RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(
- Shards,
- dreyfus_rpc,
- group2,
- [DDoc, IndexName, dreyfus_util:export(QueryArgs)]
- ),
- Replacements = fabric_view:get_shard_replacements(DbName, Workers),
- Counters = fabric_dict:init(Workers, nil),
- RexiMon = fabric_util:create_monitors(Workers),
- State = #state{
- limit = QueryArgs#index_query_args.limit,
- sort = QueryArgs#index_query_args.sort,
- total_hits = 0,
- total_grouped_hits = 0,
- top_groups = [],
- counters = Counters,
- start_args = [DDoc, IndexName, QueryArgs],
- replacements = Replacements,
- ring_opts = RingOpts
- },
- try
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- State,
- infinity,
- 1000 * 60 * 60
- )
- after
- rexi_monitor:stop(RexiMon),
- fabric_util:cleanup(Workers)
- end;
-go(DbName, DDoc, IndexName, OldArgs) ->
- go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
-
-handle_message(
- {ok, NewTotalHits, NewTotalGroupedHits, NewTopGroups},
- Shard,
- State0
-) ->
- State = upgrade_state(State0),
- #state{
- total_hits = TotalHits,
- total_grouped_hits = TotalGroupedHits,
- top_groups = TopGroups,
- limit = Limit,
- sort = Sort
- } = State,
- case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- MergedTotalHits = NewTotalHits + TotalHits,
- MergedTotalGroupedHits = NewTotalGroupedHits + TotalGroupedHits,
- Sortable = make_sortable(Shard, NewTopGroups),
- MergedTopGroups = merge_top_groups(TopGroups, Sortable, Limit, Sort),
- State1 = State#state{
- counters = C2,
- total_hits = MergedTotalHits,
- total_grouped_hits = MergedTotalGroupedHits,
- top_groups = MergedTopGroups
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop,
- {MergedTotalHits, MergedTotalGroupedHits, remove_sortable(MergedTopGroups)}}
- end
- end;
-handle_message(Error, Worker, State0) ->
- State = upgrade_state(State0),
- case
- dreyfus_fabric:handle_error_message(
- Error,
- Worker,
- State#state.counters,
- State#state.replacements,
- group2,
- State#state.start_args,
- State#state.ring_opts
- )
- of
- {ok, Counters} ->
- {ok, State#state{counters = Counters}};
- {new_refs, NewRefs, NewCounters, NewReplacements} ->
- NewState = State#state{
- counters = NewCounters,
- replacements = NewReplacements
- },
- {new_refs, NewRefs, NewState};
- Else ->
- Else
- end.
-
-merge_top_groups([], TopGroups, _Limit, _Sort) ->
- TopGroups;
-merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) ->
- lists:zipwith(
- fun(A, B) -> merge_top_group(A, B, Limit, Sort) end,
- TopGroupsA,
- TopGroupsB
- ).
-
-merge_top_group({Name, TotalA, HitsA}, {Name, TotalB, HitsB}, Limit, Sort) ->
- MergedHits = lists:sublist(dreyfus_util:sort(Sort, HitsA ++ HitsB), Limit),
- {Name, TotalA + TotalB, MergedHits}.
-
-make_sortable(Shard, TopGroups) ->
- [make_sortable_group(Shard, TopGroup) || TopGroup <- TopGroups].
-
-make_sortable_group(Shard, {Name, TotalHits, Hits}) ->
- {Name, TotalHits, [make_sortable_hit(Shard, Hit) || Hit <- Hits]}.
-
-make_sortable_hit(Shard, Hit) ->
- #sortable{item = Hit, order = Hit#hit.order, shard = Shard}.
-
-remove_sortable(SortableGroups) ->
- [remove_sortable_group(G) || G <- SortableGroups].
-
-remove_sortable_group({Name, TotalHits, SortableHits}) ->
- {Name, TotalHits, [remove_sortable_hit(H) || H <- SortableHits]}.
-
-remove_sortable_hit(SortableHit) ->
- SortableHit#sortable.item.
-
-upgrade_state({state, Limit, Sort, TotalHits, TotalGroupedHits, TopGroups, Counters}) ->
- #state{
- limit = Limit,
- sort = Sort,
- total_hits = TotalHits,
- total_grouped_hits = TotalGroupedHits,
- top_groups = TopGroups,
- counters = Counters,
- replacements = []
- };
-upgrade_state(#state{} = State) ->
- State.
diff --git a/src/dreyfus/src/dreyfus_fabric_info.erl b/src/dreyfus/src/dreyfus_fabric_info.erl
deleted file mode 100644
index 8c7ae70c1..000000000
--- a/src/dreyfus/src/dreyfus_fabric_info.erl
+++ /dev/null
@@ -1,112 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_info).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/4]).
-
-go(DbName, DDocId, IndexName, InfoLevel) when is_binary(DDocId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", DDocId/binary>>, []),
- dreyfus_util:maybe_deny_index(DbName, DDocId, IndexName),
- go(DbName, DDoc, IndexName, InfoLevel);
-go(DbName, DDoc, IndexName, InfoLevel) ->
- DesignName = dreyfus_util:get_design_docid(DDoc),
- dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, InfoLevel, [DDoc, IndexName]),
- RexiMon = fabric_util:create_monitors(Shards),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- try
- fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker, {Counters, Acc}) ->
- case fabric_util:remove_down_workers(Counters, NodeRef) of
- {ok, NewCounters} ->
- {ok, {NewCounters, Acc}};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_message({rexi_EXIT, Reason}, Worker, {Counters, Acc}) ->
- NewCounters = fabric_dict:erase(Worker, Counters),
- case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, Reason}
- end;
-handle_message({ok, Info}, Worker, {Counters, Acc}) ->
- case fabric_dict:lookup_element(Worker, Counters) of
- undefined ->
- % already heard from someone else in this range
- {ok, {Counters, Acc}};
- nil ->
- C1 = fabric_dict:store(Worker, ok, Counters),
- C2 = fabric_view:remove_overlapping_shards(Worker, C1),
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, {C2, [Info | Acc]}};
- false ->
- {stop, merge_results(lists:flatten([Info | Acc]))}
- end
- end;
-handle_message({error, Reason}, Worker, {Counters, Acc}) ->
- NewCounters = fabric_dict:erase(Worker, Counters),
- case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, Reason}
- end;
-handle_message({'EXIT', _}, Worker, {Counters, Acc}) ->
- NewCounters = fabric_dict:erase(Worker, Counters),
- case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, {nodedown, <<"progress not possible">>}}
- end.
-
-merge_results(Info) ->
- Dict = lists:foldl(
- fun({K, V}, D0) -> orddict:append(K, V, D0) end,
- orddict:new(),
- Info
- ),
- orddict:fold(
- fun
- (disk_size, X, Acc) ->
- [{disk_size, lists:sum(X)} | Acc];
- (doc_count, X, Acc) ->
- [{doc_count, lists:sum(X)} | Acc];
- (doc_del_count, X, Acc) ->
- [{doc_del_count, lists:sum(X)} | Acc];
- (committed_seq, X, Acc) ->
- [{committed_seq, lists:sum(X)} | Acc];
- (pending_seq, X, Acc) ->
- [{pending_seq, lists:sum(X)} | Acc];
- (signature, [X | _], Acc) ->
- [{signature, X} | Acc];
- (_, _, Acc) ->
- Acc
- end,
- [],
- Dict
- ).
diff --git a/src/dreyfus/src/dreyfus_fabric_search.erl b/src/dreyfus/src/dreyfus_fabric_search.erl
deleted file mode 100644
index 7e78e5fc3..000000000
--- a/src/dreyfus/src/dreyfus_fabric_search.erl
+++ /dev/null
@@ -1,334 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_search).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/4]).
-
--record(state, {
- limit,
- sort,
- top_docs,
- counters,
- start_args,
- replacements,
- ring_opts
-}).
-
-go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(
- DbName,
- <<"_design/", GroupId/binary>>,
- [ejson_body]
- ),
- dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
- go(DbName, DDoc, IndexName, QueryArgs);
-go(DbName, DDoc, IndexName, #index_query_args{bookmark = nil} = QueryArgs) ->
- DesignName = dreyfus_util:get_design_docid(DDoc),
- dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
- Shards = dreyfus_util:get_shards(DbName, QueryArgs),
- RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(
- Shards,
- dreyfus_rpc,
- search,
- [DDoc, IndexName, dreyfus_util:export(QueryArgs)]
- ),
- Counters = fabric_dict:init(Workers, nil),
- go(DbName, DDoc, IndexName, QueryArgs, Counters, Counters, RingOpts);
-go(DbName, DDoc, IndexName, #index_query_args{} = QueryArgs) ->
- Bookmark0 =
- try
- dreyfus_bookmark:unpack(DbName, QueryArgs)
- catch
- _:_ ->
- throw({bad_request, "Invalid bookmark parameter supplied"})
- end,
- Shards = dreyfus_util:get_shards(DbName, QueryArgs),
- LiveNodes = [node() | nodes()],
- LiveShards = [S || #shard{node = Node} = S <- Shards, lists:member(Node, LiveNodes)],
- Bookmark1 = dreyfus_bookmark:add_missing_shards(Bookmark0, LiveShards),
- Counters0 = lists:flatmap(
- fun({#shard{name = Name, node = N} = Shard, After}) ->
- QueryArgs1 = dreyfus_util:export(QueryArgs#index_query_args{
- bookmark = After
- }),
- case lists:member(Shard, LiveShards) of
- true ->
- Ref = rexi:cast(N, {dreyfus_rpc, search, [Name, DDoc, IndexName, QueryArgs1]}),
- [Shard#shard{ref = Ref}];
- false ->
- lists:map(
- fun(#shard{name = Name2, node = N2} = NewShard) ->
- Ref = rexi:cast(
- N2, {dreyfus_rpc, search, [Name2, DDoc, IndexName, QueryArgs1]}
- ),
- NewShard#shard{ref = Ref}
- end,
- find_replacement_shards(Shard, LiveShards)
- )
- end
- end,
- Bookmark1
- ),
- Counters = fabric_dict:init(Counters0, nil),
- WorkerShards = fabric_dict:fetch_keys(Counters),
- RingOpts = dreyfus_util:get_ring_opts(QueryArgs, WorkerShards),
- QueryArgs2 = QueryArgs#index_query_args{
- bookmark = Bookmark1
- },
- go(DbName, DDoc, IndexName, QueryArgs2, Counters, Bookmark1, RingOpts);
-go(DbName, DDoc, IndexName, OldArgs) ->
- go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
-
-go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark, RingOpts) ->
- {Workers, _} = lists:unzip(Counters),
- #index_query_args{
- limit = Limit,
- sort = Sort,
- raw_bookmark = RawBookmark
- } = QueryArgs,
- Replacements = fabric_view:get_shard_replacements(DbName, Workers),
- State = #state{
- limit = Limit,
- sort = Sort,
- top_docs = #top_docs{total_hits = 0, hits = []},
- counters = Counters,
- start_args = [DDoc, IndexName, QueryArgs],
- replacements = Replacements,
- ring_opts = RingOpts
- },
- RexiMon = fabric_util:create_monitors(Workers),
- try
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- State,
- infinity,
- 1000 * 60 * 60
- )
- of
- {ok, Result} ->
- #state{top_docs = TopDocs} = Result,
- #top_docs{
- total_hits = TotalHits,
- hits = Hits,
- counts = Counts,
- ranges = Ranges
- } = TopDocs,
- case RawBookmark of
- true ->
- {ok, Bookmark, TotalHits, Hits, Counts, Ranges};
- false ->
- Bookmark1 = dreyfus_bookmark:update(Sort, Bookmark, Hits),
- Hits1 = remove_sortable(Hits),
- {ok, Bookmark1, TotalHits, Hits1, Counts, Ranges}
- end;
- {error, Reason} ->
- {error, Reason}
- after
- rexi_monitor:stop(RexiMon),
- fabric_util:cleanup(Workers)
- end.
-
-handle_message({ok, #top_docs{} = NewTopDocs}, Shard, State0) ->
- State = upgrade_state(State0),
- #state{top_docs = TopDocs, limit = Limit, sort = Sort} = State,
- case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- Sortable = make_sortable(Shard, NewTopDocs),
- MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort),
- State1 = State#state{
- counters = C2,
- top_docs = MergedTopDocs
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop, State1}
- end
- end;
-% upgrade clause
-handle_message({ok, {top_docs, UpdateSeq, TotalHits, Hits}}, Shard, State) ->
- TopDocs = #top_docs{
- update_seq = UpdateSeq,
- total_hits = TotalHits,
- hits = Hits
- },
- handle_message({ok, TopDocs}, Shard, State);
-handle_message(Error, Worker, State0) ->
- State = upgrade_state(State0),
- case
- dreyfus_fabric:handle_error_message(
- Error,
- Worker,
- State#state.counters,
- State#state.replacements,
- search,
- State#state.start_args,
- State#state.ring_opts
- )
- of
- {ok, Counters} ->
- {ok, State#state{counters = Counters}};
- {new_refs, NewRefs, NewCounters, NewReplacements} ->
- NewState = State#state{
- counters = NewCounters,
- replacements = NewReplacements
- },
- {new_refs, NewRefs, NewState};
- Else ->
- Else
- end.
-
-find_replacement_shards(#shard{range = Range}, AllShards) ->
- [Shard || Shard <- AllShards, Shard#shard.range =:= Range].
-
-make_sortable(Shard, #top_docs{} = TopDocs) ->
- Hits = make_sortable(Shard, TopDocs#top_docs.hits),
- TopDocs#top_docs{hits = Hits};
-make_sortable(Shard, List) when is_list(List) ->
- make_sortable(Shard, List, []).
-
-make_sortable(_, [], Acc) ->
- lists:reverse(Acc);
-make_sortable(Shard, [#hit{} = Hit | Rest], Acc) ->
- make_sortable(Shard, Rest, [#sortable{item = Hit, order = Hit#hit.order, shard = Shard} | Acc]).
-
-remove_sortable(List) ->
- remove_sortable(List, []).
-
-remove_sortable([], Acc) ->
- lists:reverse(Acc);
-remove_sortable([#sortable{item = Item} | Rest], Acc) ->
- remove_sortable(Rest, [Item | Acc]).
-
-merge_top_docs(#top_docs{} = TopDocsA, #top_docs{} = TopDocsB, Limit, Sort) ->
- MergedTotal = sum_element(#top_docs.total_hits, TopDocsA, TopDocsB),
- MergedHits = lists:sublist(
- dreyfus_util:sort(
- Sort,
- TopDocsA#top_docs.hits ++ TopDocsB#top_docs.hits
- ),
- Limit
- ),
- MergedCounts = merge_facets(TopDocsA#top_docs.counts, TopDocsB#top_docs.counts),
- MergedRanges = merge_facets(TopDocsA#top_docs.ranges, TopDocsB#top_docs.ranges),
- #top_docs{
- total_hits = MergedTotal,
- hits = MergedHits,
- counts = MergedCounts,
- ranges = MergedRanges
- }.
-
-merge_facets(undefined, undefined) ->
- undefined;
-merge_facets(undefined, Facets) ->
- sort_facets(Facets);
-merge_facets(Facets, undefined) ->
- sort_facets(Facets);
-merge_facets(FacetsA, FacetsB) ->
- merge_facets_int(sort_facets(FacetsA), sort_facets(FacetsB)).
-
-merge_facets_int([], []) ->
- [];
-merge_facets_int(FacetsA, []) ->
- FacetsA;
-merge_facets_int([], FacetsB) ->
- FacetsB;
-merge_facets_int([{KA, _, _} = A | RA], [{KB, _, _} | _] = FB) when KA < KB ->
- [A | merge_facets_int(RA, FB)];
-merge_facets_int([{KA, VA, CA} | RA], [{KB, VB, CB} | RB]) when KA =:= KB ->
- [{KA, VA + VB, merge_facets_int(CA, CB)} | merge_facets_int(RA, RB)];
-merge_facets_int([{KA, _, _} | _] = FA, [{KB, _, _} = B | RB]) when KA > KB ->
- [B | merge_facets_int(FA, RB)].
-
-sort_facets([]) ->
- [];
-sort_facets(Facets) ->
- lists:sort(
- lists:map(
- fun({K, V, C}) -> {K, V, sort_facets(C)} end,
- Facets
- )
- ).
-
-sum_element(N, T1, T2) ->
- element(N, T1) + element(N, T2).
-
-upgrade_state({state, Limit, Sort, TopDocs, Counters}) ->
- #state{
- limit = Limit,
- sort = Sort,
- top_docs = TopDocs,
- counters = Counters,
- replacements = []
- };
-upgrade_state(#state{} = State) ->
- State.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-merge_facets_test() ->
- % empty list is a no-op
- ?assertEqual([{foo, 1.0, []}], merge_facets([{foo, 1.0, []}], [])),
-
- % one level, one key
- ?assertEqual(
- [{foo, 3.0, []}],
- merge_facets(
- [{foo, 1.0, []}],
- [{foo, 2.0, []}]
- )
- ),
-
- % one level, two keys
- ?assertEqual(
- [{bar, 6.0, []}, {foo, 9.0, []}],
- merge_facets(
- [{foo, 1.0, []}, {bar, 2.0, []}],
- [{bar, 4.0, []}, {foo, 8.0, []}]
- )
- ),
-
- % multi level, multi keys
- ?assertEqual(
- [{foo, 2.0, [{bar, 2.0, []}]}],
- merge_facets(
- [{foo, 1.0, [{bar, 1.0, []}]}],
- [{foo, 1.0, [{bar, 1.0, []}]}]
- )
- ),
-
- ?assertEqual(
- [{foo, 5.0, [{bar, 7.0, [{bar, 1.0, []}, {baz, 3.0, []}, {foo, 6.5, []}]}]}],
- merge_facets(
- [{foo, 1.0, [{bar, 2.0, [{baz, 3.0, []}, {foo, 0.5, []}]}]}],
- [{foo, 4.0, [{bar, 5.0, [{foo, 6.0, []}, {bar, 1.0, []}]}]}]
- )
- ).
-
--endif.
diff --git a/src/dreyfus/src/dreyfus_httpd.erl b/src/dreyfus/src/dreyfus_httpd.erl
deleted file mode 100644
index 39d205b95..000000000
--- a/src/dreyfus/src/dreyfus_httpd.erl
+++ /dev/null
@@ -1,709 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_httpd).
-
--export([
- handle_search_req/3,
- handle_info_req/3,
- handle_disk_size_req/3,
- handle_cleanup_req/2,
- handle_analyze_req/1
-]).
-
--include("dreyfus.hrl").
--include_lib("couch/include/couch_db.hrl").
--import(chttpd, [
- send_method_not_allowed/2,
- send_json/2, send_json/3,
- send_error/2
-]).
-
-handle_search_req(Req, Db, DDoc) ->
- handle_search_req(Req, Db, DDoc, 0, 500).
-
-handle_search_req(
- #httpd{method = Method, path_parts = [_, _, _, _, IndexName]} = Req,
- Db,
- DDoc,
- RetryCount,
- RetryPause
-) when
- Method == 'GET'; Method == 'POST'
-->
- DbName = couch_db:name(Db),
- Start = os:timestamp(),
- QueryArgs =
- #index_query_args{
- include_docs = IncludeDocs,
- grouping = Grouping
- } = parse_index_params(Req, Db),
- validate_search_restrictions(Db, DDoc, QueryArgs),
- Response =
- case Grouping#grouping.by of
- nil ->
- case dreyfus_fabric_search:go(DbName, DDoc, IndexName, QueryArgs) of
- % legacy clause
- {ok, Bookmark0, TotalHits, Hits0} ->
- Hits = hits_to_json(DbName, IncludeDocs, Hits0),
- Bookmark = dreyfus_bookmark:pack(Bookmark0),
- send_json(
- Req,
- 200,
- {[
- {total_rows, TotalHits},
- {bookmark, Bookmark},
- {rows, Hits}
- ]}
- );
- {ok, Bookmark0, TotalHits, Hits0, Counts0, Ranges0} ->
- Hits = hits_to_json(DbName, IncludeDocs, Hits0),
- Bookmark = dreyfus_bookmark:pack(Bookmark0),
- Counts =
- case Counts0 of
- undefined ->
- [];
- _ ->
- [{counts, facets_to_json(Counts0)}]
- end,
- Ranges =
- case Ranges0 of
- undefined ->
- [];
- _ ->
- [{ranges, facets_to_json(Ranges0)}]
- end,
- send_json(Req, 200, {
- [
- {total_rows, TotalHits},
- {bookmark, Bookmark},
- {rows, Hits}
- ] ++ Counts ++ Ranges
- });
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end;
- _ ->
- % ensure the limit in a group query is > 0
- UseNewApi = Grouping#grouping.new_api,
- case dreyfus_fabric_group1:go(DbName, DDoc, IndexName, QueryArgs) of
- {ok, []} ->
- send_grouped_response(Req, {0, 0, []}, UseNewApi);
- {ok, TopGroups} ->
- QueryArgs1 = QueryArgs#index_query_args{
- grouping = Grouping#grouping{groups = TopGroups}
- },
- case
- dreyfus_fabric_group2:go(
- DbName,
- DDoc,
- IndexName,
- QueryArgs1
- )
- of
- {ok, {TotalHits, TotalGroupedHits, Groups0}} ->
- Groups = [
- group_to_json(DbName, IncludeDocs, Group, UseNewApi)
- || Group <- Groups0
- ],
- send_grouped_response(
- Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi
- );
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end;
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end
- end,
- RequestTime = timer:now_diff(os:timestamp(), Start) div 1000,
- couch_stats:update_histogram([dreyfus, httpd, search], RequestTime),
- Response;
-handle_search_req(#httpd{path_parts = [_, _, _, _, _]} = Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
- send_method_not_allowed(Req, "GET,POST");
-handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
- send_error(Req, {bad_request, "path not recognized"}).
-
-handle_info_req(
- #httpd{method = 'GET', path_parts = [_, _, _, _, IndexName]} = Req,
- Db,
- #doc{id = Id} = DDoc
-) ->
- DbName = couch_db:name(Db),
- case dreyfus_fabric_info:go(DbName, DDoc, IndexName, info) of
- {ok, IndexInfoList} ->
- send_json(
- Req,
- 200,
- {[
- {name, <<Id/binary, "/", IndexName/binary>>},
- {search_index, {IndexInfoList}}
- ]}
- );
- {error, Reason} ->
- send_error(Req, Reason)
- end;
-handle_info_req(#httpd{path_parts = [_, _, _, _, _]} = Req, _Db, _DDoc) ->
- send_method_not_allowed(Req, "GET");
-handle_info_req(Req, _Db, _DDoc) ->
- send_error(Req, {bad_request, "path not recognized"}).
-
-handle_disk_size_req(
- #httpd{method = 'GET', path_parts = [_, _, _, _, IndexName]} = Req, Db, #doc{id = Id} = DDoc
-) ->
- DbName = couch_db:name(Db),
- case dreyfus_fabric_info:go(DbName, DDoc, IndexName, disk_size) of
- {ok, IndexInfoList} ->
- send_json(
- Req,
- 200,
- {[
- {name, <<Id/binary, "/", IndexName/binary>>},
- {search_index, {IndexInfoList}}
- ]}
- );
- {error, Reason} ->
- send_error(Req, Reason)
- end;
-handle_disk_size_req(#httpd{path_parts = [_, _, _, _, _]} = Req, _Db, _DDoc) ->
- send_method_not_allowed(Req, "GET");
-handle_disk_size_req(Req, _Db, _DDoc) ->
- send_error(Req, {bad_request, "path not recognized"}).
-
-handle_cleanup_req(#httpd{method = 'POST'} = Req, Db) ->
- ok = dreyfus_fabric_cleanup:go(couch_db:name(Db)),
- send_json(Req, 202, {[{ok, true}]});
-handle_cleanup_req(Req, _Db) ->
- send_method_not_allowed(Req, "POST").
-
-handle_analyze_req(#httpd{method = 'GET'} = Req) ->
- Analyzer = couch_httpd:qs_value(Req, "analyzer"),
- Text = couch_httpd:qs_value(Req, "text"),
- analyze(Req, Analyzer, Text);
-handle_analyze_req(#httpd{method = 'POST'} = Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {Fields} = chttpd:json_body_obj(Req),
- Analyzer = couch_util:get_value(<<"analyzer">>, Fields),
- Text = couch_util:get_value(<<"text">>, Fields),
- analyze(Req, Analyzer, Text);
-handle_analyze_req(Req) ->
- send_method_not_allowed(Req, "GET,POST").
-
-analyze(Req, Analyzer, Text) ->
- case Analyzer of
- undefined ->
- throw({bad_request, "analyzer parameter is mandatory"});
- _ when is_list(Analyzer) ->
- ok;
- _ when is_binary(Analyzer) ->
- ok;
- {[_ | _]} ->
- ok;
- _ ->
- throw({bad_request, "analyzer parameter must be a string or an object"})
- end,
- case Text of
- undefined ->
- throw({bad_request, "text parameter is mandatory"});
- _ when is_list(Text) ->
- ok;
- _ when is_binary(Text) ->
- ok;
- _ ->
- throw({bad_request, "text parameter must be a string"})
- end,
- case
- clouseau_rpc:analyze(
- couch_util:to_binary(Analyzer),
- couch_util:to_binary(Text)
- )
- of
- {ok, Tokens} ->
- send_json(Req, 200, {[{tokens, Tokens}]});
- {error, Reason} ->
- send_error(Req, Reason)
- end.
-
-parse_index_params(#httpd{method = 'GET'} = Req, Db) ->
- IndexParams = lists:flatmap(
- fun({K, V}) -> parse_index_param(K, V) end,
- chttpd:qs(Req)
- ),
- parse_index_params(IndexParams, Db);
-parse_index_params(#httpd{method = 'POST'} = Req, Db) ->
- {JsonBody} = chttpd:json_body_obj(Req),
- QSEntry =
- case chttpd:qs_value(Req, "partition") of
- undefined -> [];
- StrVal -> [{<<"partition">>, ?l2b(StrVal)}]
- end,
- IndexParams = lists:flatmap(
- fun({K, V}) ->
- parse_json_index_param(K, V)
- end,
- QSEntry ++ JsonBody
- ),
- ensure_unique_partition(IndexParams),
- parse_index_params(IndexParams, Db);
-parse_index_params(IndexParams, Db) ->
- DefaultLimit =
- case fabric_util:is_partitioned(Db) of
- true ->
- list_to_integer(config:get("dreyfus", "limit_partitions", "2000"));
- false ->
- list_to_integer(config:get("dreyfus", "limit", "25"))
- end,
- Args = #index_query_args{limit = DefaultLimit},
- lists:foldl(
- fun({K, V}, Args2) ->
- validate_index_query(K, V, Args2)
- end,
- Args,
- IndexParams
- ).
-
-validate_index_query(q, Value, Args) ->
- Args#index_query_args{q = Value};
-validate_index_query(partition, Value, Args) ->
- Args#index_query_args{partition = Value};
-validate_index_query(stale, Value, Args) ->
- Args#index_query_args{stale = Value};
-validate_index_query(limit, Value, Args) ->
- Args#index_query_args{limit = Value};
-validate_index_query(include_docs, Value, Args) ->
- Args#index_query_args{include_docs = Value};
-validate_index_query(include_fields, Value, Args) ->
- Args#index_query_args{include_fields = Value};
-validate_index_query(bookmark, Value, Args) ->
- Args#index_query_args{bookmark = Value};
-validate_index_query(sort, Value, Args) ->
- Args#index_query_args{sort = Value};
-validate_index_query(group_by, Value, #index_query_args{grouping = Grouping} = Args) ->
- Args#index_query_args{grouping = Grouping#grouping{by = Value, new_api = false}};
-validate_index_query(group_field, Value, #index_query_args{grouping = Grouping} = Args) ->
- Args#index_query_args{grouping = Grouping#grouping{by = Value, new_api = true}};
-validate_index_query(group_sort, Value, #index_query_args{grouping = Grouping} = Args) ->
- Args#index_query_args{grouping = Grouping#grouping{sort = Value}};
-validate_index_query(group_limit, Value, #index_query_args{grouping = Grouping} = Args) ->
- Args#index_query_args{grouping = Grouping#grouping{limit = Value}};
-validate_index_query(stable, Value, Args) ->
- Args#index_query_args{stable = Value};
-validate_index_query(counts, Value, Args) ->
- Args#index_query_args{counts = Value};
-validate_index_query(ranges, Value, Args) ->
- Args#index_query_args{ranges = Value};
-validate_index_query(drilldown, [[_ | _] | _] = Value, Args) ->
- Args#index_query_args{drilldown = Value};
-validate_index_query(drilldown, Value, Args) ->
- DrillDown = Args#index_query_args.drilldown,
- Args#index_query_args{drilldown = [Value | DrillDown]};
-validate_index_query(highlight_fields, Value, Args) ->
- Args#index_query_args{highlight_fields = Value};
-validate_index_query(highlight_pre_tag, Value, Args) ->
- Args#index_query_args{highlight_pre_tag = Value};
-validate_index_query(highlight_post_tag, Value, Args) ->
- Args#index_query_args{highlight_post_tag = Value};
-validate_index_query(highlight_number, Value, Args) ->
- Args#index_query_args{highlight_number = Value};
-validate_index_query(highlight_size, Value, Args) ->
- Args#index_query_args{highlight_size = Value};
-validate_index_query(extra, _Value, Args) ->
- Args.
-
-parse_index_param("", _) ->
- [];
-parse_index_param("q", Value) ->
- [{q, ?l2b(Value)}];
-parse_index_param("query", Value) ->
- [{q, ?l2b(Value)}];
-parse_index_param("partition", Value) ->
- [{partition, ?l2b(Value)}];
-parse_index_param("bookmark", Value) ->
- [{bookmark, ?l2b(Value)}];
-parse_index_param("sort", Value) ->
- [{sort, ?JSON_DECODE(Value)}];
-parse_index_param("limit", Value) ->
- [{limit, ?JSON_DECODE(Value)}];
-parse_index_param("stale", "ok") ->
- [{stale, ok}];
-parse_index_param("stale", _Value) ->
- throw({query_parse_error, <<"stale only available as stale=ok">>});
-parse_index_param("include_docs", Value) ->
- [{include_docs, parse_bool_param("include_docs", Value)}];
-parse_index_param("group_by", Value) ->
- [{group_by, ?l2b(Value)}];
-parse_index_param("group_field", Value) ->
- [{group_field, ?l2b(Value)}];
-parse_index_param("group_sort", Value) ->
- [{group_sort, ?JSON_DECODE(Value)}];
-parse_index_param("group_limit", Value) ->
- [{group_limit, parse_positive_int_param("group_limit", Value, "max_group_limit", "200")}];
-parse_index_param("stable", Value) ->
- [{stable, parse_bool_param("stable", Value)}];
-parse_index_param("include_fields", Value) ->
- [{include_fields, ?JSON_DECODE(Value)}];
-parse_index_param("counts", Value) ->
- [{counts, ?JSON_DECODE(Value)}];
-parse_index_param("ranges", Value) ->
- [{ranges, ?JSON_DECODE(Value)}];
-parse_index_param("drilldown", Value) ->
- [{drilldown, ?JSON_DECODE(Value)}];
-parse_index_param("highlight_fields", Value) ->
- [{highlight_fields, ?JSON_DECODE(Value)}];
-parse_index_param("highlight_pre_tag", Value) ->
- [{highlight_pre_tag, ?JSON_DECODE(Value)}];
-parse_index_param("highlight_post_tag", Value) ->
- [{highlight_post_tag, ?JSON_DECODE(Value)}];
-parse_index_param("highlight_number", Value) ->
- [{highlight_number, parse_positive_int_param2("highlight_number", Value)}];
-parse_index_param("highlight_size", Value) ->
- [{highlight_size, parse_positive_int_param2("highlight_size", Value)}];
-parse_index_param(Key, Value) ->
- [{extra, {Key, Value}}].
-
-parse_json_index_param(<<"q">>, Value) ->
- [{q, Value}];
-parse_json_index_param(<<"query">>, Value) ->
- [{q, Value}];
-parse_json_index_param(<<"partition">>, Value) ->
- [{partition, Value}];
-parse_json_index_param(<<"bookmark">>, Value) ->
- [{bookmark, Value}];
-parse_json_index_param(<<"sort">>, Value) ->
- [{sort, Value}];
-parse_json_index_param(<<"limit">>, Value) ->
- [{limit, Value}];
-parse_json_index_param(<<"stale">>, <<"ok">>) ->
- [{stale, ok}];
-parse_json_index_param(<<"include_docs">>, Value) when is_boolean(Value) ->
- [{include_docs, Value}];
-parse_json_index_param(<<"group_by">>, Value) ->
- [{group_by, Value}];
-parse_json_index_param(<<"group_field">>, Value) ->
- [{group_field, Value}];
-parse_json_index_param(<<"group_sort">>, Value) ->
- [{group_sort, Value}];
-parse_json_index_param(<<"group_limit">>, Value) ->
- [{group_limit, parse_positive_int_param("group_limit", Value, "max_group_limit", "200")}];
-parse_json_index_param(<<"stable">>, Value) ->
- [{stable, parse_bool_param("stable", Value)}];
-parse_json_index_param(<<"include_fields">>, Value) ->
- [{include_fields, Value}];
-parse_json_index_param(<<"counts">>, Value) ->
- [{counts, Value}];
-parse_json_index_param(<<"ranges">>, Value) ->
- [{ranges, Value}];
-parse_json_index_param(<<"drilldown">>, Value) ->
- [{drilldown, Value}];
-parse_json_index_param(<<"highlight_fields">>, Value) ->
- [{highlight_fields, Value}];
-parse_json_index_param(<<"highlight_pre_tag">>, Value) ->
- [{highlight_pre_tag, Value}];
-parse_json_index_param(<<"highlight_post_tag">>, Value) ->
- [{highlight_post_tag, Value}];
-parse_json_index_param(<<"highlight_number">>, Value) ->
- [{highlight_number, parse_positive_int_param2("highlight_number", Value)}];
-parse_json_index_param(<<"highlight_size">>, Value) ->
- [{highlight_size, parse_positive_int_param2("highlight_size", Value)}];
-parse_json_index_param(Key, Value) ->
- [{extra, {Key, Value}}].
-
-%% The helpers below are copied from chttpd_view.erl
-
-parse_bool_param(_, Val) when is_boolean(Val) ->
- Val;
-parse_bool_param(_, "true") ->
- true;
-parse_bool_param(_, "false") ->
- false;
-parse_bool_param(Name, Val) ->
- Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)}).
-
-parse_int_param(_, Val) when is_integer(Val) ->
- Val;
-parse_int_param(Name, Val) ->
- case (catch list_to_integer(Val)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_positive_int_param(Name, Val, Prop, Default) ->
- MaximumVal = list_to_integer(
- config:get("dreyfus", Prop, Default)
- ),
- case parse_int_param(Name, Val) of
- IntVal when IntVal > MaximumVal ->
- Fmt = "Value for ~s is too large, must not exceed ~p",
- Msg = io_lib:format(Fmt, [Name, MaximumVal]),
- throw({query_parse_error, ?l2b(Msg)});
- IntVal when IntVal > 0 ->
- IntVal;
- IntVal when IntVal =< 0 ->
- Fmt = "~s must be greater than zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_positive_int_param2(Name, Val) ->
- case parse_int_param(Name, Val) of
- IntVal when IntVal > 0 ->
- IntVal;
- IntVal when IntVal =< 0 ->
- Fmt = "~s must be greater than zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_non_negative_int_param(Name, Val, Prop, Default) ->
- MaximumVal = list_to_integer(
- config:get("dreyfus", Prop, Default)
- ),
- case parse_int_param(Name, Val) of
- IntVal when IntVal > MaximumVal ->
- Fmt = "Value for ~s is too large, must not exceed ~p",
- Msg = io_lib:format(Fmt, [Name, MaximumVal]),
- throw({query_parse_error, ?l2b(Msg)});
- IntVal when IntVal >= 0 ->
- IntVal;
- IntVal when IntVal < 0 ->
- Fmt = "~s must be greater than or equal to zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-ensure_unique_partition(IndexParams) ->
- Partitions = lists:filter(
- fun({Key, _Val}) ->
- Key == partition
- end,
- IndexParams
- ),
- case length(lists:usort(Partitions)) > 1 of
- true ->
- Msg = <<"Multiple conflicting values for `partition` provided">>,
- throw({bad_request, Msg});
- false ->
- ok
- end.
-
-validate_search_restrictions(Db, DDoc, Args) ->
- #index_query_args{
- q = Query,
- partition = Partition,
- grouping = Grouping,
- limit = Limit,
- counts = Counts,
- drilldown = Drilldown,
- ranges = Ranges
- } = Args,
- #grouping{
- by = GroupBy,
- limit = GroupLimit,
- sort = GroupSort
- } = Grouping,
-
- case Query of
- undefined ->
- Msg1 = <<"Query must include a 'q' or 'query' argument">>,
- throw({query_parse_error, Msg1});
- _ ->
- ok
- end,
-
- DbPartitioned = fabric_util:is_partitioned(Db),
- ViewPartitioned = get_view_partition_option(DDoc, DbPartitioned),
-
- case not DbPartitioned andalso is_binary(Partition) of
- true ->
- Msg2 = <<"`partition` not supported on this index">>,
- throw({bad_request, Msg2});
- false ->
- ok
- end,
-
- case {ViewPartitioned, is_binary(Partition)} of
- {false, false} ->
- ok;
- {true, true} ->
- ok;
- {true, false} ->
- Msg3 = <<
- "`partition` parameter is mandatory "
- "for queries to this index."
- >>,
- throw({bad_request, Msg3});
- {false, true} ->
- Msg4 = <<"`partition` not supported on this index">>,
- throw({bad_request, Msg4})
- end,
-
- case DbPartitioned of
- true ->
- MaxLimit = config:get("dreyfus", "max_limit", "2000"),
- parse_non_negative_int_param(
- "limit", Limit, "max_limit_partitions", MaxLimit
- );
- false ->
- MaxLimit = config:get("dreyfus", "max_limit", "200"),
- parse_non_negative_int_param("limit", Limit, "max_limit", MaxLimit)
- end,
-
- DefaultArgs = #index_query_args{},
-
- case
- is_binary(Partition) andalso
- (Counts /= DefaultArgs#index_query_args.counts orelse
- Drilldown /= DefaultArgs#index_query_args.drilldown orelse
- Ranges /= DefaultArgs#index_query_args.ranges orelse
- GroupSort /= DefaultArgs#index_query_args.grouping#grouping.sort orelse
- GroupBy /= DefaultArgs#index_query_args.grouping#grouping.by orelse
- GroupLimit /= DefaultArgs#index_query_args.grouping#grouping.limit)
- of
- true ->
- Msg5 =
- <<"`partition` and any of `drilldown`, `ranges`, `group_field`, `group_sort`, `group_limit` or `group_by` are incompatible">>,
- throw({bad_request, Msg5});
- false ->
- ok
- end.
-
-get_view_partition_option(#doc{body = {Props}}, Default) ->
- {Options} = couch_util:get_value(<<"options">>, Props, {[]}),
- couch_util:get_value(<<"partitioned">>, Options, Default).
-
-hits_to_json(DbName, IncludeDocs, Hits) ->
- {Ids, HitData} = lists:unzip(lists:map(fun get_hit_data/1, Hits)),
- chttpd_stats:incr_rows(length(Hits)),
- if
- IncludeDocs ->
- chttpd_stats:incr_reads(length(Hits)),
- {ok, JsonDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
- lists:zipwith(
- fun(Hit, {Id, Doc}) ->
- case Hit of
- {Id, Order, Fields} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}}, Doc]};
- {Id, Order, Fields, Highlights} ->
- {[
- {id, Id},
- {order, Order},
- {fields, {Fields}},
- {highlights, {Highlights}},
- Doc
- ]}
- end
- end,
- HitData,
- JsonDocs
- );
- true ->
- lists:map(
- fun(Hit) ->
- case Hit of
- {Id, Order, Fields} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}}]};
- {Id, Order, Fields, Highlights} ->
- {[
- {id, Id},
- {order, Order},
- {fields, {Fields}},
- {highlights, {Highlights}}
- ]}
- end
- end,
- HitData
- )
- end.
-
-get_hit_data(Hit) ->
- Id = couch_util:get_value(<<"_id">>, Hit#hit.fields),
- Fields = lists:keydelete(<<"_id">>, 1, Hit#hit.fields),
- case couch_util:get_value(<<"_highlights">>, Hit#hit.fields) of
- undefined ->
- {Id, {Id, Hit#hit.order, Fields}};
- Highlights ->
- Fields0 = lists:keydelete(<<"_highlights">>, 1, Fields),
- {Id, {Id, Hit#hit.order, Fields0, Highlights}}
- end.
-
-group_to_json(DbName, IncludeDocs, {Name, TotalHits, Hits}, UseNewApi) ->
- {TotalHitsKey, HitsKey} =
- case UseNewApi of
- true -> {total_rows, rows};
- false -> {total_hits, hits}
- end,
- {[
- {by, Name},
- {TotalHitsKey, TotalHits},
- {HitsKey, hits_to_json(DbName, IncludeDocs, Hits)}
- ]}.
-
-facets_to_json(Facets) ->
- {[facet_to_json(F) || F <- Facets]}.
-
-facet_to_json({K, V, []}) ->
- {hd(K), V};
-facet_to_json({K0, _V0, C0}) ->
- C2 = [{tl(K1), V1, C1} || {K1, V1, C1} <- C0],
- {hd(K0), facets_to_json(C2)}.
-
-send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi) ->
- GroupResponsePairs =
- case UseNewApi of
- true ->
- [{total_rows, TotalHits}, {groups, Groups}];
- false ->
- [{total_hits, TotalHits}, {total_grouped_hits, TotalGroupedHits}, {groups, Groups}]
- end,
- send_json(Req, 200, {GroupResponsePairs}).
-
-handle_error(Req, Db, DDoc, RetryCount, RetryPause, {exit, _} = Err) ->
- backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Err);
-handle_error(Req, Db, DDoc, RetryCount, RetryPause, {{normal, _}, _} = Err) ->
- backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Err);
-handle_error(Req, _Db, _DDoc, _RetryCount, _RetryPause, Reason) ->
- send_error(Req, Reason).
-
-backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Error) ->
- RetryLimit = list_to_integer(config:get("dreyfus", "retry_limit", "5")),
- case RetryCount > RetryLimit of
- true ->
- case Error of
- {exit, noconnection} ->
- SvcName = config:get("dreyfus", "name", "clouseau@127.0.0.1"),
- ErrMsg = "Could not connect to the Clouseau Java service at " ++ SvcName,
- send_error(Req, {ou_est_clouseau, ErrMsg});
- _ ->
- send_error(Req, timeout)
- end;
- false ->
- timer:sleep(RetryPause),
- handle_search_req(Req, Db, DDoc, RetryCount + 1, RetryPause * 2)
- end.
diff --git a/src/dreyfus/src/dreyfus_httpd_handlers.erl b/src/dreyfus/src/dreyfus_httpd_handlers.erl
deleted file mode 100644
index 05188c252..000000000
--- a/src/dreyfus/src/dreyfus_httpd_handlers.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1]).
-
-url_handler(<<"_search_analyze">>) -> fun dreyfus_httpd:handle_analyze_req/1;
-url_handler(_) -> no_match.
-
-db_handler(<<"_search_cleanup">>) -> fun dreyfus_httpd:handle_cleanup_req/2;
-db_handler(_) -> no_match.
-
-design_handler(<<"_search">>) -> fun dreyfus_httpd:handle_search_req/3;
-design_handler(<<"_search_info">>) -> fun dreyfus_httpd:handle_info_req/3;
-design_handler(<<"_search_disk_size">>) -> fun dreyfus_httpd:handle_disk_size_req/3;
-design_handler(_) -> no_match.
diff --git a/src/dreyfus/src/dreyfus_index.erl b/src/dreyfus/src/dreyfus_index.erl
deleted file mode 100644
index df3e68f84..000000000
--- a/src/dreyfus/src/dreyfus_index.erl
+++ /dev/null
@@ -1,432 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
-%% A dreyfus_index gen_server is linked to its clouseau twin.
-
--module(dreyfus_index).
--behaviour(gen_server).
--vsn(1).
--include_lib("couch/include/couch_db.hrl").
--include("dreyfus.hrl").
-
-% public api.
--export([
- start_link/2,
- design_doc_to_index/2,
- await/2,
- search/2,
- info/1,
- group1/2,
- group2/2,
- design_doc_to_indexes/1
-]).
-
-% gen_server api.
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
-% private definitions.
--record(state, {
- dbname,
- index,
- updater_pid = nil,
- index_pid = nil,
- waiting_list = []
-}).
-
-% exported for callback.
--export([search_int/2, group1_int/2, group2_int/2, info_int/1]).
-
-% public functions.
-start_link(DbName, Index) ->
- proc_lib:start_link(?MODULE, init, [{DbName, Index}]).
-
-await(Pid, MinSeq) ->
- MFA = {gen_server, call, [Pid, {await, MinSeq}, infinity]},
- dreyfus_util:time([index, await], MFA).
-
-search(Pid0, QueryArgs) ->
- Pid = to_index_pid(Pid0),
- MFA = {?MODULE, search_int, [Pid, QueryArgs]},
- dreyfus_util:time([index, search], MFA).
-
-group1(Pid0, QueryArgs) ->
- Pid = to_index_pid(Pid0),
- MFA = {?MODULE, group1_int, [Pid, QueryArgs]},
- dreyfus_util:time([index, group1], MFA).
-
-group2(Pid0, QueryArgs) ->
- Pid = to_index_pid(Pid0),
- MFA = {?MODULE, group2_int, [Pid, QueryArgs]},
- dreyfus_util:time([index, group2], MFA).
-
-info(Pid0) ->
- Pid = to_index_pid(Pid0),
- MFA = {?MODULE, info_int, [Pid]},
- dreyfus_util:time([index, info], MFA).
-
-%% We either have a dreyfus_index gen_server pid or the remote
-%% clouseau pid.
-to_index_pid(Pid) ->
- case node(Pid) == node() of
- true -> gen_server:call(Pid, get_index_pid, infinity);
- false -> Pid
- end.
-
-design_doc_to_indexes(#doc{body = {Fields}} = Doc) ->
- RawIndexes = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- case RawIndexes of
- {IndexList} when is_list(IndexList) ->
- {IndexNames, _} = lists:unzip(IndexList),
- lists:flatmap(
- fun(IndexName) ->
- case (catch design_doc_to_index(Doc, IndexName)) of
- {ok, #index{} = Index} -> [Index];
- _ -> []
- end
- end,
- IndexNames
- );
- _ ->
- []
- end.
-
-% gen_server functions.
-
-init({DbName, Index}) ->
- process_flag(trap_exit, true),
- case open_index(DbName, Index) of
- {ok, Pid, Seq} ->
- State = #state{
- dbname = DbName,
- index = Index#index{current_seq = Seq, dbname = DbName},
- index_pid = Pid
- },
- case couch_db:open_int(DbName, []) of
- {ok, Db} ->
- try
- couch_db:monitor(Db)
- after
- couch_db:close(Db)
- end,
- dreyfus_util:maybe_create_local_purge_doc(Db, Pid, Index),
- proc_lib:init_ack({ok, self()}),
- gen_server:enter_loop(?MODULE, [], State);
- Error ->
- proc_lib:init_ack(Error)
- end;
- Error ->
- proc_lib:init_ack(Error)
- end.
-
-handle_call(
- {await, RequestSeq},
- From,
- #state{
- index =
- #index{dbname = DbName, name = IdxName, ddoc_id = DDocId, current_seq = Seq} = Index,
- index_pid = IndexPid,
- updater_pid = nil,
- waiting_list = WaitList
- } = State
-) when RequestSeq > Seq ->
- DbName2 = mem3:dbname(DbName),
- <<"_design/", GroupId/binary>> = DDocId,
- NewState =
- case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
- false ->
- UpPid = spawn_link(fun() ->
- dreyfus_index_updater:update(IndexPid, Index)
- end),
- State#state{
- updater_pid = UpPid,
- waiting_list = [{From, RequestSeq} | WaitList]
- };
- _ ->
- couch_log:notice(
- "Index Blocked from Updating - db: ~p,"
- " ddocid: ~p name: ~p",
- [DbName, DDocId, IdxName]
- ),
- State
- end,
- {noreply, NewState};
-handle_call(
- {await, RequestSeq},
- _From,
- #state{index = #index{current_seq = Seq}} = State
-) when RequestSeq =< Seq ->
- {reply, {ok, State#state.index_pid, Seq}, State};
-handle_call({await, RequestSeq}, From, #state{waiting_list = WaitList} = State) ->
- {noreply, State#state{
- waiting_list = [{From, RequestSeq} | WaitList]
- }};
-% upgrade
-handle_call(get_index_pid, _From, State) ->
- {reply, State#state.index_pid, State};
-% obsolete
-handle_call({search, QueryArgs0}, _From, State) ->
- Reply = search_int(State#state.index_pid, QueryArgs0),
- {reply, Reply, State};
-% obsolete
-handle_call({group1, QueryArgs0}, _From, State) ->
- Reply = group1_int(State#state.index_pid, QueryArgs0),
- {reply, Reply, State};
-% obsolete
-handle_call({group2, QueryArgs0}, _From, State) ->
- Reply = group2_int(State#state.index_pid, QueryArgs0),
- {reply, Reply, State};
-% obsolete
-handle_call(info, _From, State) ->
- Reply = info_int(State#state.index_pid),
- {reply, Reply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(
- {'EXIT', FromPid, {updated, NewSeq}},
- #state{
- index = #index{dbname = DbName, name = IdxName, ddoc_id = DDocId} = Index0,
- index_pid = IndexPid,
- updater_pid = UpPid,
- waiting_list = WaitList
- } = State
-) when UpPid == FromPid ->
- Index = Index0#index{current_seq = NewSeq},
- case reply_with_index(IndexPid, Index, WaitList) of
- [] ->
- {noreply, State#state{
- index = Index,
- updater_pid = nil,
- waiting_list = []
- }};
- StillWaiting ->
- DbName2 = mem3:dbname(DbName),
- <<"_design/", GroupId/binary>> = DDocId,
- Pid =
- case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
- true ->
- couch_log:notice(
- "Index Blocked from Updating - db: ~p, ddocid: ~p"
- " name: ~p",
- [DbName, GroupId, IdxName]
- ),
- nil;
- false ->
- spawn_link(fun() ->
- dreyfus_index_updater:update(IndexPid, Index)
- end)
- end,
- {noreply, State#state{
- index = Index,
- updater_pid = Pid,
- waiting_list = StillWaiting
- }}
- end;
-handle_info({'EXIT', _, {updated, _}}, State) ->
- {noreply, State};
-handle_info(
- {'EXIT', FromPid, Reason},
- #state{
- index = Index,
- index_pid = IndexPid,
- waiting_list = WaitList
- } = State
-) when FromPid == IndexPid ->
- couch_log:notice(
- "index for ~p closed with reason ~p", [index_name(Index), Reason]
- ),
- [gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList],
- {stop, normal, State};
-handle_info(
- {'EXIT', FromPid, Reason},
- #state{
- index = Index,
- updater_pid = UpPid,
- waiting_list = WaitList
- } = State
-) when FromPid == UpPid ->
- couch_log:info(
- "Shutting down index server ~p, updater ~p closing w/ reason ~w",
- [index_name(Index), UpPid, Reason]
- ),
- [gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList],
- {stop, normal, State};
-handle_info({'EXIT', Pid, Reason}, State) ->
- % probably dreyfus_index_manager.
- couch_log:notice("Unknown pid ~p closed with reason ~p", [Pid, Reason]),
- {stop, normal, State};
-handle_info(
- {'DOWN', _, _, Pid, Reason},
- #state{
- index = Index,
- waiting_list = WaitList
- } = State
-) ->
- couch_log:info(
- "Shutting down index server ~p, db ~p closing w/ reason ~w",
- [index_name(Index), Pid, Reason]
- ),
- [gen_server:reply(P, {error, Reason}) || {P, _} <- WaitList],
- {stop, normal, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-% private functions.
-
-open_index(DbName, #index{analyzer = Analyzer, sig = Sig}) ->
- Path = <<DbName/binary, "/", Sig/binary>>,
- case clouseau_rpc:open_index(self(), Path, Analyzer) of
- {ok, Pid} ->
- case clouseau_rpc:get_update_seq(Pid) of
- {ok, Seq} ->
- {ok, Pid, Seq};
- Error ->
- Error
- end;
- Error ->
- Error
- end.
-
-design_doc_to_index(#doc{id = Id, body = {Fields}}, IndexName) ->
- Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
- {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- InvalidDDocError =
- {invalid_design_doc, <<"index `", IndexName/binary, "` must have parameter `index`">>},
- case lists:keyfind(IndexName, 1, RawIndexes) of
- false ->
- {error, {not_found, <<IndexName/binary, " not found.">>}};
- {IndexName, {Index}} ->
- Analyzer = couch_util:get_value(<<"analyzer">>, Index, <<"standard">>),
- case couch_util:get_value(<<"index">>, Index) of
- undefined ->
- {error, InvalidDDocError};
- Def ->
- Sig = ?l2b(
- couch_util:to_hex(
- couch_hash:md5_hash(
- term_to_binary({Analyzer, Def})
- )
- )
- ),
- {ok, #index{
- analyzer = Analyzer,
- ddoc_id = Id,
- def = Def,
- def_lang = Language,
- name = IndexName,
- sig = Sig
- }}
- end;
- _ ->
- {error, InvalidDDocError}
- end.
-
-reply_with_index(IndexPid, Index, WaitList) ->
- reply_with_index(IndexPid, Index, WaitList, []).
-
-reply_with_index(_IndexPid, _Index, [], Acc) ->
- Acc;
-reply_with_index(IndexPid, #index{current_seq = IndexSeq} = Index, [{Pid, Seq} | Rest], Acc) when
- Seq =< IndexSeq
-->
- gen_server:reply(Pid, {ok, IndexPid, IndexSeq}),
- reply_with_index(IndexPid, Index, Rest, Acc);
-reply_with_index(IndexPid, Index, [{Pid, Seq} | Rest], Acc) ->
- reply_with_index(IndexPid, Index, Rest, [{Pid, Seq} | Acc]).
-
-index_name(#index{dbname = DbName, ddoc_id = DDocId, name = IndexName}) ->
- <<DbName/binary, " ", DDocId/binary, " ", IndexName/binary>>.
-
-args_to_proplist(#index_query_args{} = Args) ->
- [
- {'query', Args#index_query_args.q},
- {partition, Args#index_query_args.partition},
- {limit, Args#index_query_args.limit},
- {refresh, Args#index_query_args.stale =:= false},
- {'after', Args#index_query_args.bookmark},
- {sort, Args#index_query_args.sort},
- {include_fields, Args#index_query_args.include_fields},
- {counts, Args#index_query_args.counts},
- {ranges, Args#index_query_args.ranges},
- {drilldown, Args#index_query_args.drilldown},
- {highlight_fields, Args#index_query_args.highlight_fields},
- {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
- {highlight_post_tag, Args#index_query_args.highlight_post_tag},
- {highlight_number, Args#index_query_args.highlight_number},
- {highlight_size, Args#index_query_args.highlight_size}
- ].
-
-args_to_proplist2(#index_query_args{} = Args) ->
- [
- {'query', Args#index_query_args.q},
- {field, Args#index_query_args.grouping#grouping.by},
- {refresh, Args#index_query_args.stale =:= false},
- {groups, Args#index_query_args.grouping#grouping.groups},
- {group_sort, Args#index_query_args.grouping#grouping.sort},
- {sort, Args#index_query_args.sort},
- {limit, Args#index_query_args.limit},
- {include_fields, Args#index_query_args.include_fields},
- {highlight_fields, Args#index_query_args.highlight_fields},
- {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
- {highlight_post_tag, Args#index_query_args.highlight_post_tag},
- {highlight_number, Args#index_query_args.highlight_number},
- {highlight_size, Args#index_query_args.highlight_size}
- ].
-
-search_int(Pid, QueryArgs0) ->
- QueryArgs = dreyfus_util:upgrade(QueryArgs0),
- Props = args_to_proplist(QueryArgs),
- clouseau_rpc:search(Pid, Props).
-
-group1_int(Pid, QueryArgs0) ->
- QueryArgs = dreyfus_util:upgrade(QueryArgs0),
- #index_query_args{
- q = Query,
- stale = Stale,
- grouping = #grouping{
- by = GroupBy,
- offset = Offset,
- limit = Limit,
- sort = Sort
- }
- } = QueryArgs,
- clouseau_rpc:group1(
- Pid,
- Query,
- GroupBy,
- Stale =:= false,
- Sort,
- Offset,
- Limit
- ).
-
-group2_int(Pid, QueryArgs0) ->
- QueryArgs = dreyfus_util:upgrade(QueryArgs0),
- Props = args_to_proplist2(QueryArgs),
- clouseau_rpc:group2(Pid, Props).
-
-info_int(Pid) ->
- clouseau_rpc:info(Pid).
diff --git a/src/dreyfus/src/dreyfus_index_manager.erl b/src/dreyfus/src/dreyfus_index_manager.erl
deleted file mode 100644
index f0dbbec64..000000000
--- a/src/dreyfus/src/dreyfus_index_manager.erl
+++ /dev/null
@@ -1,160 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_index_manager).
--behaviour(gen_server).
--vsn(1).
--include_lib("couch/include/couch_db.hrl").
--include("dreyfus.hrl").
-
--define(BY_SIG, dreyfus_by_sig).
--define(BY_PID, dreyfus_by_pid).
-
-% public api.
--export([start_link/0, get_index/2, get_disk_size/2]).
-
-% gen_server api.
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
--export([handle_db_event/3]).
-
-% public functions.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_index(DbName, Index) ->
- gen_server:call(?MODULE, {get_index, DbName, Index}, infinity).
-
-get_disk_size(DbName, #index{sig = Sig}) ->
- Path = <<DbName/binary, "/", Sig/binary>>,
- clouseau_rpc:disk_size(Path).
-
-% gen_server functions.
-
-init([]) ->
- couch_util:set_mqd_off_heap(?MODULE),
- ets:new(?BY_SIG, [set, private, named_table]),
- ets:new(?BY_PID, [set, private, named_table]),
- couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]),
- process_flag(trap_exit, true),
- {ok, nil}.
-
-handle_call({get_index, DbName, #index{sig = Sig} = Index}, From, State) ->
- case ets:lookup(?BY_SIG, {DbName, Sig}) of
- [] ->
- Pid = spawn_link(fun() -> new_index(DbName, Index) end),
- ets:insert(?BY_PID, {Pid, opening, {DbName, Sig}}),
- ets:insert(?BY_SIG, {{DbName, Sig}, [From]}),
- {noreply, State};
- [{_, WaitList}] when is_list(WaitList) ->
- ets:insert(?BY_SIG, {{DbName, Sig}, [From | WaitList]}),
- {noreply, State};
- [{_, ExistingPid}] ->
- {reply, {ok, ExistingPid}, State}
- end;
-handle_call({open_ok, DbName, Sig, NewPid}, {OpenerPid, _}, State) ->
- link(NewPid),
- [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),
- [gen_server:reply(From, {ok, NewPid}) || From <- WaitList],
- ets:delete(?BY_PID, OpenerPid),
- add_to_ets(NewPid, DbName, Sig),
- {reply, ok, State};
-handle_call({open_error, DbName, Sig, Error}, {OpenerPid, _}, State) ->
- [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),
- [gen_server:reply(From, Error) || From <- WaitList],
- ets:delete(?BY_PID, OpenerPid),
- ets:delete(?BY_SIG, {DbName, Sig}),
- {reply, ok, State}.
-
-handle_cast({cleanup, DbName}, State) ->
- clouseau_rpc:cleanup(DbName),
- {noreply, State};
-handle_cast({rename, DbName}, State) ->
- clouseau_rpc:rename(DbName),
- {noreply, State}.
-
-handle_info({'EXIT', FromPid, Reason}, State) ->
- case ets:lookup(?BY_PID, FromPid) of
- [] ->
- if
- Reason =/= normal ->
- couch_log:error("Exit on non-updater process: ~p", [Reason]),
- exit(Reason);
- true ->
- ok
- end;
- % Using Reason /= normal to force a match error
- % if we didn't delete the Pid in a handle_call
- % message for some reason.
- [{_, opening, {DbName, Sig}}] when Reason /= normal ->
- Msg = {open_error, DbName, Sig, Reason},
- {reply, ok, _} = handle_call(Msg, {FromPid, nil}, State);
- [{_, {DbName, Sig}}] ->
- delete_from_ets(FromPid, DbName, Sig)
- end,
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, nil, _Extra) ->
- {ok, nil}.
-
-% private functions
-
-handle_db_event(DbName, created, _St) ->
- gen_server:cast(?MODULE, {cleanup, DbName}),
- {ok, nil};
-handle_db_event(DbName, deleted, _St) ->
- RecoveryEnabled = config:get_boolean(
- "couchdb",
- "enable_database_recovery",
- false
- ),
- case RecoveryEnabled of
- true ->
- gen_server:cast(?MODULE, {rename, DbName});
- false ->
- gen_server:cast(?MODULE, {cleanup, DbName})
- end,
-
- {ok, nil};
-handle_db_event(_DbName, _Event, _St) ->
- {ok, nil}.
-
-new_index(DbName, #index{sig = Sig} = Index) ->
- case (catch dreyfus_index:start_link(DbName, Index)) of
- {ok, NewPid} ->
- Msg = {open_ok, DbName, Sig, NewPid},
- ok = gen_server:call(?MODULE, Msg, infinity),
- unlink(NewPid);
- Error ->
- Msg = {open_error, DbName, Sig, Error},
- ok = gen_server:call(?MODULE, Msg, infinity)
- end.
-
-add_to_ets(Pid, DbName, Sig) ->
- true = ets:insert(?BY_PID, {Pid, {DbName, Sig}}),
- true = ets:insert(?BY_SIG, {{DbName, Sig}, Pid}).
-
-delete_from_ets(Pid, DbName, Sig) ->
- true = ets:delete(?BY_PID, Pid),
- true = ets:delete(?BY_SIG, {DbName, Sig}).
diff --git a/src/dreyfus/src/dreyfus_index_updater.erl b/src/dreyfus/src/dreyfus_index_updater.erl
deleted file mode 100644
index 6edc5a257..000000000
--- a/src/dreyfus/src/dreyfus_index_updater.erl
+++ /dev/null
@@ -1,184 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_index_updater).
--include_lib("couch/include/couch_db.hrl").
--include("dreyfus.hrl").
-
--export([update/2, load_docs/2]).
-
--import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]).
-
-update(IndexPid, Index) ->
- #index{
- current_seq = CurSeq,
- dbname = DbName,
- ddoc_id = DDocId,
- name = IndexName
- } = Index,
- erlang:put(io_priority, {search, DbName, IndexName}),
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- TotalUpdateChanges = couch_db:count_changes_since(Db, CurSeq),
- TotalPurgeChanges = count_pending_purged_docs_since(Db, IndexPid),
- TotalChanges = TotalUpdateChanges + TotalPurgeChanges,
-
- couch_task_status:add_task([
- {type, search_indexer},
- {database, DbName},
- {design_document, DDocId},
- {index, IndexName},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ]),
-
- %% update status every half second
- couch_task_status:set_update_frequency(500),
-
- % ExcludeIdRevs is [{Id1, Rev1}, {Id2, Rev2}, ...]
- % The Rev is the final Rev, not the purged Rev.
- {ok, ExcludeIdRevs} = purge_index(Db, IndexPid, Index),
- %% compute on all docs modified since we last computed.
-
- NewCurSeq = couch_db:get_update_seq(Db),
- Proc = get_os_process(Index#index.def_lang),
- try
- true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]),
- EnumFun = fun ?MODULE:load_docs/2,
- [Changes] = couch_task_status:get([changes_done]),
- Acc0 = {Changes, IndexPid, Db, Proc, TotalChanges, erlang:timestamp(), ExcludeIdRevs},
- {ok, _} = couch_db:fold_changes(Db, CurSeq, EnumFun, Acc0, []),
- ok = clouseau_rpc:commit(IndexPid, NewCurSeq)
- after
- ret_os_process(Proc)
- end,
- exit({updated, NewCurSeq})
- after
- couch_db:close(Db)
- end.
-
-load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime, ExcludeIdRevs} = Acc) ->
- couch_task_status:update([{changes_done, I}, {progress, (I * 100) div Total}]),
- DI = couch_doc:to_doc_info(FDI),
- #doc_info{id = Id, high_seq = Seq, revs = [#rev_info{rev = Rev} | _]} = DI,
- % Check if this doc was already processed in purge_index, to avoid updating the index again.
- case lists:member({Id, Rev}, ExcludeIdRevs) of
- true -> ok;
- false -> update_or_delete_index(IndexPid, Db, DI, Proc)
- end,
- %% Force a commit every minute
- case timer:now_diff(Now = erlang:timestamp(), LastCommitTime) >= 60000000 of
- true ->
- ok = clouseau_rpc:commit(IndexPid, Seq),
- {ok, {I + 1, IndexPid, Db, Proc, Total, Now, ExcludeIdRevs}};
- false ->
- {ok, setelement(1, Acc, I + 1)}
- end.
-
-purge_index(Db, IndexPid, Index) ->
- {ok, IdxPurgeSeq} = clouseau_rpc:get_purge_seq(IndexPid),
- Proc = get_os_process(Index#index.def_lang),
- try
- true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]),
- FoldFun = fun({PurgeSeq, _UUID, Id, _Revs}, {Acc, _}) ->
- Acc0 =
- case couch_db:get_full_doc_info(Db, Id) of
- not_found ->
- ok = clouseau_rpc:delete(IndexPid, Id),
- Acc;
- FDI ->
- DI = couch_doc:to_doc_info(FDI),
- #doc_info{id = Id, revs = [#rev_info{rev = Rev} | _]} = DI,
- case lists:member({Id, Rev}, Acc) of
- true ->
- Acc;
- false ->
- update_or_delete_index(IndexPid, Db, DI, Proc),
- [{Id, Rev} | Acc]
- end
- end,
- update_task(1),
- {ok, {Acc0, PurgeSeq}}
- end,
-
- {ok, {ExcludeList, NewPurgeSeq}} = couch_db:fold_purge_infos(
- Db, IdxPurgeSeq, FoldFun, {[], 0}, []
- ),
- clouseau_rpc:set_purge_seq(IndexPid, NewPurgeSeq),
- update_local_doc(Db, Index, NewPurgeSeq),
- {ok, ExcludeList}
- after
- ret_os_process(Proc)
- end.
-
-count_pending_purged_docs_since(Db, IndexPid) ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- {ok, IdxPurgeSeq} = clouseau_rpc:get_purge_seq(IndexPid),
- DbPurgeSeq - IdxPurgeSeq.
-
-update_or_delete_index(IndexPid, Db, DI, Proc) ->
- #doc_info{id = Id, revs = [#rev_info{deleted = Del} | _]} = DI,
- case Del of
- true ->
- ok = clouseau_rpc:delete(IndexPid, Id);
- false ->
- case maybe_skip_doc(Db, Id) of
- true ->
- ok;
- false ->
- {ok, Doc} = couch_db:open_doc(Db, DI, []),
- Json = couch_doc:to_json_obj(Doc, []),
- [Fields | _] = proc_prompt(Proc, [<<"index_doc">>, Json]),
- Fields1 = [list_to_tuple(Field) || Field <- Fields],
- Fields2 = maybe_add_partition(Db, Id, Fields1),
- case Fields2 of
- [] -> ok = clouseau_rpc:delete(IndexPid, Id);
- _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields2)
- end
- end
- end.
-
-update_local_doc(Db, Index, PurgeSeq) ->
- DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
- DocContent = dreyfus_util:get_local_purge_doc_body(Db, DocId, PurgeSeq, Index),
- couch_db:update_doc(Db, DocContent, []).
-
-update_task(NumChanges) ->
- [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
- Changes2 = Changes + NumChanges,
- Progress =
- case Total of
- 0 ->
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
- couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]).
-
-maybe_skip_doc(Db, <<"_design/", _/binary>>) ->
- couch_db:is_partitioned(Db);
-maybe_skip_doc(_Db, _Id) ->
- false.
-
-maybe_add_partition(_Db, _Id, []) ->
- [];
-maybe_add_partition(Db, Id, Fields) ->
- case couch_db:is_partitioned(Db) of
- true ->
- Partition = couch_partition:from_docid(Id),
- [{<<"_partition">>, Partition, {[]}} | Fields];
- false ->
- Fields
- end.
diff --git a/src/dreyfus/src/dreyfus_plugin_couch_db.erl b/src/dreyfus/src/dreyfus_plugin_couch_db.erl
deleted file mode 100644
index a55c26373..000000000
--- a/src/dreyfus/src/dreyfus_plugin_couch_db.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_plugin_couch_db).
-
--export([
- is_valid_purge_client/2,
- on_compact/2
-]).
-
-is_valid_purge_client(DbName, Props) ->
- dreyfus_util:verify_index_exists(DbName, Props).
-
-on_compact(DbName, DDocs) ->
- dreyfus_util:ensure_local_purge_docs(DbName, DDocs).
diff --git a/src/dreyfus/src/dreyfus_rpc.erl b/src/dreyfus/src/dreyfus_rpc.erl
deleted file mode 100644
index 2ebc5ffe5..000000000
--- a/src/dreyfus/src/dreyfus_rpc.erl
+++ /dev/null
@@ -1,134 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_rpc).
--include_lib("couch/include/couch_db.hrl").
--include("dreyfus.hrl").
--import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]).
-
-% public api.
--export([search/4, group1/4, group2/4, info/3, disk_size/3]).
-
-% private callback
--export([call/5, info_int/3]).
-
-search(DbName, DDoc, IndexName, QueryArgs) ->
- MFA = {?MODULE, call, [search, DbName, DDoc, IndexName, QueryArgs]},
- dreyfus_util:time([rpc, search], MFA).
-
-group1(DbName, DDoc, IndexName, QueryArgs) ->
- MFA = {?MODULE, call, [group1, DbName, DDoc, IndexName, QueryArgs]},
- dreyfus_util:time([rpc, group1], MFA).
-
-group2(DbName, DDoc, IndexName, QueryArgs) ->
- MFA = {?MODULE, call, [group2, DbName, DDoc, IndexName, QueryArgs]},
- dreyfus_util:time([rpc, group2], MFA).
-
-call(Fun, DbName, DDoc, IndexName, QueryArgs0) ->
- QueryArgs = dreyfus_util:upgrade(QueryArgs0),
- erlang:put(io_priority, {search, DbName}),
- check_interactive_mode(),
- {ok, Db} = get_or_create_db(DbName, []),
- #index_query_args{
- stale = Stale
- } = QueryArgs,
- {_LastSeq, MinSeq} = calculate_seqs(Db, Stale),
- case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
- {ok, Index} ->
- case dreyfus_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case dreyfus_index:await(Pid, MinSeq) of
- {ok, IndexPid, _Seq} ->
- Result = dreyfus_index:Fun(IndexPid, QueryArgs),
- rexi:reply(Result);
- % obsolete clauses, remove after upgrade
- ok ->
- Result = dreyfus_index:Fun(Pid, QueryArgs),
- rexi:reply(Result);
- {ok, _Seq} ->
- Result = dreyfus_index:Fun(Pid, QueryArgs),
- rexi:reply(Result);
- Error ->
- rexi:reply(Error)
- end;
- Error ->
- rexi:reply(Error)
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-info(DbName, DDoc, IndexName) ->
- MFA = {?MODULE, info_int, [DbName, DDoc, IndexName]},
- dreyfus_util:time([rpc, info], MFA).
-
-info_int(DbName, DDoc, IndexName) ->
- erlang:put(io_priority, {search, DbName}),
- check_interactive_mode(),
- case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
- {ok, Index} ->
- case dreyfus_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case dreyfus_index:info(Pid) of
- {ok, Fields} ->
- Info = [{signature, Index#index.sig} | Fields],
- rexi:reply({ok, Info});
- Else ->
- rexi:reply(Else)
- end;
- Error ->
- rexi:reply(Error)
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-disk_size(DbName, DDoc, IndexName) ->
- erlang:put(io_priority, {search, DbName}),
- check_interactive_mode(),
- case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
- {ok, Index} ->
- Result = dreyfus_index_manager:get_disk_size(DbName, Index),
- rexi:reply(Result);
- Error ->
- rexi:reply(Error)
- end.
-
-get_or_create_db(DbName, Options) ->
- case couch_db:open_int(DbName, Options) of
- {not_found, no_db_file} ->
- couch_log:warning("~p creating ~s", [?MODULE, DbName]),
- mem3_util:get_or_create_db(DbName, Options);
- Else ->
- Else
- end.
-
-calculate_seqs(Db, Stale) ->
- LastSeq = couch_db:get_update_seq(Db),
- if
- Stale == ok orelse Stale == update_after ->
- {LastSeq, 0};
- true ->
- {LastSeq, LastSeq}
- end.
-
-check_interactive_mode() ->
- case config:get("couchdb", "maintenance_mode", "false") of
- "true" ->
- % Do this to avoid log spam from rexi_server
- rexi:reply({rexi_EXIT, {maintenance_mode, node()}}),
- exit(normal);
- _ ->
- ok
- end.
diff --git a/src/dreyfus/src/dreyfus_sup.erl b/src/dreyfus/src/dreyfus_sup.erl
deleted file mode 100644
index e19203af2..000000000
--- a/src/dreyfus/src/dreyfus_sup.erl
+++ /dev/null
@@ -1,30 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_sup).
--behaviour(supervisor).
-
--export([start_link/0, init/1]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
- Children = [
- child(dreyfus_index_manager)
- ],
- {ok, {{one_for_one, 10, 1}, couch_epi:register_service(dreyfus_epi, Children)}}.
-
-child(Child) ->
- {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.
diff --git a/src/dreyfus/src/dreyfus_util.erl b/src/dreyfus/src/dreyfus_util.erl
deleted file mode 100644
index 301d3887a..000000000
--- a/src/dreyfus/src/dreyfus_util.erl
+++ /dev/null
@@ -1,486 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_util).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([get_shards/2, get_ring_opts/2, sort/2, upgrade/1, export/1, time/2]).
--export([in_black_list/1, in_black_list/3, maybe_deny_index/3]).
--export([get_design_docid/1]).
--export([
- ensure_local_purge_docs/2,
- get_value_from_options/2,
- get_local_purge_doc_id/1,
- get_local_purge_doc_body/4,
- maybe_create_local_purge_doc/2,
- maybe_create_local_purge_doc/3,
- get_signature_from_idxdir/1,
- verify_index_exists/2
-]).
-
-get_shards(DbName, #index_query_args{partition = nil} = Args) ->
- case use_ushards(Args) of
- true ->
- mem3:ushards(DbName);
- false ->
- mem3:shards(DbName)
- end;
-get_shards(DbName, #index_query_args{partition = Partition} = Args) ->
- PartitionId = couch_partition:shard_key(Partition),
- case use_ushards(Args) of
- true ->
- mem3:ushards(DbName, PartitionId);
- false ->
- mem3:shards(DbName, PartitionId)
- end;
-get_shards(DbName, Args) ->
- get_shards(DbName, upgrade(Args)).
-
-use_ushards(#index_query_args{stale = ok}) ->
- true;
-use_ushards(#index_query_args{stable = true}) ->
- true;
-use_ushards(#index_query_args{}) ->
- false.
-
-get_ring_opts(#index_query_args{partition = nil}, _Shards) ->
- [];
-get_ring_opts(#index_query_args{}, Shards) ->
- Shards1 = lists:map(
- fun(#shard{} = S) ->
- S#shard{ref = undefined}
- end,
- Shards
- ),
- [{any, Shards1}].
-
--spec sort(Order :: relevance | [any()], [#sortable{}]) -> [#sortable{}].
-sort(Sort, List0) ->
- {List1, Stash} = stash_items(List0),
- List2 = lists:sort(fun(A, B) -> sort(Sort, A, B) end, List1),
- unstash_items(List2, Stash).
-
-stash_items(List) ->
- lists:unzip([stash_item(Item) || Item <- List]).
-
-stash_item(Item) ->
- Ref = make_ref(),
- {Item#sortable{item = Ref}, {Ref, Item#sortable.item}}.
-
-unstash_items(List, Stash) ->
- [unstash_item(Item, Stash) || Item <- List].
-
-unstash_item(Stashed, Stash) ->
- {_, Item} = lists:keyfind(Stashed#sortable.item, 1, Stash),
- Stashed#sortable{item = Item}.
-
--spec sort(Order :: relevance | [any()], #sortable{}, #sortable{}) -> boolean().
-sort(relevance, #sortable{} = A, #sortable{} = B) ->
- sort2(pad([<<"-">>], <<"">>, length(A#sortable.order)), A, B);
-sort(Sort, #sortable{} = A, #sortable{} = B) when is_binary(Sort) ->
- sort2(pad([Sort], <<"">>, length(A#sortable.order)), A, B);
-sort(Sort, #sortable{} = A, #sortable{} = B) when is_list(Sort) ->
- sort2(pad(Sort, <<"">>, length(A#sortable.order)), A, B).
-
--spec sort2([any()], #sortable{}, #sortable{}) -> boolean().
-sort2([<<"-", _/binary>> | _], #sortable{order = [A | _]}, #sortable{order = [B | _]}) when
- A =/= B
-->
- A > B;
-sort2([_ | _], #sortable{order = [A | _]}, #sortable{order = [B | _]}) when A =/= B ->
- A < B;
-sort2([], #sortable{shard = #shard{range = A}}, #sortable{shard = #shard{range = B}}) ->
- % arbitrary tie-breaker
- A =< B;
-sort2(
- [_ | Rest],
- #sortable{order = [_ | RestA]} = SortableA,
- #sortable{order = [_ | RestB]} = SortableB
-) ->
- sort2(Rest, SortableA#sortable{order = RestA}, SortableB#sortable{order = RestB}).
-
-pad(List, _Padding, Length) when length(List) >= Length ->
- List;
-pad(List, Padding, Length) ->
- pad(List ++ [Padding], Padding, Length).
-
-upgrade(#index_query_args{} = Args) ->
- Args;
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, Sort, Grouping, Stable}) ->
- #index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable
- };
-upgrade(
- {index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, Sort, Grouping, Stable, Counts,
- Ranges, Drilldown}
-) ->
- #index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable,
- counts = Counts,
- ranges = Ranges,
- drilldown = Drilldown
- };
-upgrade(
- {index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, Sort, Grouping, Stable, Counts,
- Ranges, Drilldown, IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
- HighlightNumber, HighlightSize}
-) ->
- #index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable,
- counts = Counts,
- ranges = Ranges,
- drilldown = Drilldown,
- include_fields = IncludeFields,
- highlight_fields = HighlightFields,
- highlight_pre_tag = HighlightPreTag,
- highlight_post_tag = HighlightPostTag,
- highlight_number = HighlightNumber,
- highlight_size = HighlightSize
- };
-upgrade(
- {index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, Sort, Grouping, Stable, Counts,
- Ranges, Drilldown, IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
- HighlightNumber, HighlightSize, RawBookmark}
-) ->
- #index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable,
- counts = Counts,
- ranges = Ranges,
- drilldown = Drilldown,
- include_fields = IncludeFields,
- highlight_fields = HighlightFields,
- highlight_pre_tag = HighlightPreTag,
- highlight_post_tag = HighlightPostTag,
- highlight_number = HighlightNumber,
- highlight_size = HighlightSize,
- raw_bookmark = RawBookmark
- }.
-
-export(
- #index_query_args{
- partition = nil,
- counts = nil,
- ranges = nil,
- drilldown = [],
- include_fields = nil,
- highlight_fields = nil
- } = Args
-) ->
- % Ensure existing searches work during the upgrade by creating an
- % #index_query_args record in the old format
- {index_query_args, Args#index_query_args.q, Args#index_query_args.limit,
- Args#index_query_args.stale, Args#index_query_args.include_docs,
- Args#index_query_args.bookmark, Args#index_query_args.sort, Args#index_query_args.grouping,
- Args#index_query_args.stable};
-export(
- #index_query_args{
- partition = nil,
- include_fields = nil,
- highlight_fields = nil
- } = Args
-) ->
- {index_query_args, Args#index_query_args.q, Args#index_query_args.limit,
- Args#index_query_args.stale, Args#index_query_args.include_docs,
- Args#index_query_args.bookmark, Args#index_query_args.sort, Args#index_query_args.grouping,
- Args#index_query_args.stable, Args#index_query_args.counts, Args#index_query_args.ranges,
- Args#index_query_args.drilldown};
-export(#index_query_args{partition = nil} = Args) ->
- {index_query_args, Args#index_query_args.q, Args#index_query_args.limit,
- Args#index_query_args.stale, Args#index_query_args.include_docs,
- Args#index_query_args.bookmark, Args#index_query_args.sort, Args#index_query_args.grouping,
- Args#index_query_args.stable, Args#index_query_args.counts, Args#index_query_args.ranges,
- Args#index_query_args.drilldown, Args#index_query_args.include_fields,
- Args#index_query_args.highlight_fields, Args#index_query_args.highlight_pre_tag,
- Args#index_query_args.highlight_post_tag, Args#index_query_args.highlight_number,
- Args#index_query_args.highlight_size, Args#index_query_args.raw_bookmark};
-export(QueryArgs) ->
- QueryArgs.
-
-time(Metric, {M, F, A}) when is_list(Metric) ->
- Start = os:timestamp(),
- try
- erlang:apply(M, F, A)
- after
- Length = timer:now_diff(os:timestamp(), Start) / 1000,
- couch_stats:update_histogram([dreyfus | Metric], Length)
- end.
-
-in_black_list(DbName, GroupId, IndexName) when
- is_binary(DbName),
- is_binary(GroupId),
- is_binary(IndexName)
-->
- in_black_list(?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName));
-in_black_list(DbName, GroupId, IndexName) when
- is_list(DbName),
- is_list(GroupId),
- is_list(IndexName)
-->
- in_black_list(lists:flatten([DbName, ".", GroupId, ".", IndexName]));
-in_black_list(_DbName, _GroupId, _IndexName) ->
- false.
-
-in_black_list(IndexEntry) when is_list(IndexEntry) ->
- case dreyfus_config:get(IndexEntry) of
- undefined -> false;
- _ -> true
- end;
-in_black_list(_IndexEntry) ->
- false.
-
-maybe_deny_index(DbName, GroupId, IndexName) ->
- case in_black_list(DbName, GroupId, IndexName) of
- true ->
- Reason = ?l2b(
- io_lib:format(
- "Index <~s, ~s, ~s>, is BlackListed",
- [?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName)]
- )
- ),
- throw({bad_request, Reason});
- _ ->
- ok
- end.
-
-get_design_docid(#doc{id = <<"_design/", DesignName/binary>>}) ->
- DesignName.
-
-get_value_from_options(Key, Options) ->
- case couch_util:get_value(Key, Options) of
- undefined ->
- Reason = binary_to_list(Key) ++ " must exist in Options.",
- throw({bad_request, Reason});
- Value ->
- Value
- end.
-
-ensure_local_purge_docs(DbName, DDocs) ->
- couch_util:with_db(DbName, fun(Db) ->
- lists:foreach(
- fun(DDoc) ->
- #doc{body = {Props}} = DDoc,
- case couch_util:get_value(<<"indexes">>, Props) of
- undefined ->
- false;
- _ ->
- try dreyfus_index:design_doc_to_indexes(DDoc) of
- SIndexes -> ensure_local_purge_doc(Db, SIndexes)
- catch
- _:_ ->
- ok
- end
- end
- end,
- DDocs
- )
- end).
-
-ensure_local_purge_doc(Db, SIndexes) ->
- if
- SIndexes =/= [] ->
- lists:map(
- fun(SIndex) ->
- maybe_create_local_purge_doc(Db, SIndex)
- end,
- SIndexes
- );
- true ->
- ok
- end.
-
-maybe_create_local_purge_doc(Db, Index) ->
- DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
- case couch_db:open_doc(Db, DocId) of
- {not_found, _} ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- DocContent = dreyfus_util:get_local_purge_doc_body(
- Db, DocId, DbPurgeSeq, Index
- ),
- couch_db:update_doc(Db, DocContent, []);
- _ ->
- ok
- end.
-
-maybe_create_local_purge_doc(Db, IndexPid, Index) ->
- DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
- case couch_db:open_doc(Db, DocId) of
- {not_found, _} ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- clouseau_rpc:set_purge_seq(IndexPid, DbPurgeSeq),
- DocContent = dreyfus_util:get_local_purge_doc_body(
- Db, DocId, DbPurgeSeq, Index
- ),
- couch_db:update_doc(Db, DocContent, []);
- _ ->
- ok
- end.
-
-get_local_purge_doc_id(Sig) ->
- ?l2b(?LOCAL_DOC_PREFIX ++ "purge-" ++ "dreyfus-" ++ Sig).
-
-get_signature_from_idxdir(IdxDir) ->
- IdxDirList = filename:split(IdxDir),
- Sig = lists:last(IdxDirList),
- Sig2 =
- if
- not is_binary(Sig) -> Sig;
- true -> binary_to_list(Sig)
- end,
- case
- [
- Ch
- || Ch <- Sig2,
- not (((Ch >= $0) and (Ch =< $9)) orelse
- ((Ch >= $a) and (Ch =< $f)) orelse
- ((Ch >= $A) and (Ch =< $F)))
- ] == []
- of
- true -> Sig;
- false -> undefined
- end.
-
-get_local_purge_doc_body(_, LocalDocId, PurgeSeq, Index) ->
- #index{
- name = IdxName,
- ddoc_id = DDocId,
- sig = Sig
- } = Index,
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- JsonList =
- {[
- {<<"_id">>, LocalDocId},
- {<<"purge_seq">>, PurgeSeq},
- {<<"updated_on">>, NowSecs},
- {<<"indexname">>, IdxName},
- {<<"ddoc_id">>, DDocId},
- {<<"signature">>, Sig},
- {<<"type">>, <<"dreyfus">>}
- ]},
- couch_doc:from_json_obj(JsonList).
-
-verify_index_exists(DbName, Props) ->
- try
- Type = couch_util:get_value(<<"type">>, Props),
- if
- Type =/= <<"dreyfus">> ->
- false;
- true ->
- DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
- IndexName = couch_util:get_value(<<"indexname">>, Props),
- Sig = couch_util:get_value(<<"signature">>, Props),
- couch_util:with_db(DbName, fun(Db) ->
- case couch_db:get_design_doc(Db, DDocId) of
- {ok, #doc{} = DDoc} ->
- {ok, IdxState} = dreyfus_index:design_doc_to_index(
- DDoc, IndexName
- ),
- IdxState#index.sig == Sig;
- {not_found, _} ->
- false
- end
- end)
- end
- catch
- _:_ ->
- false
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
--define(SORT(T, L), lists:sort(fun(A, B) -> sort(T, A, B) end, L)).
--define(ASC, <<"">>).
--define(DESC, <<"-">>).
-
-%% use proper for this...
-
-empty_test() ->
- ?assertEqual([], ?SORT([], [])).
-
-primary_asc_test() ->
- ?assertMatch(
- [#sortable{order = [1]}, #sortable{order = [2]}],
- ?SORT([?ASC], [#sortable{order = [2]}, #sortable{order = [1]}])
- ).
-
-primary_desc_test() ->
- ?assertMatch(
- [#sortable{order = [2]}, #sortable{order = [1]}],
- ?SORT([?DESC], [#sortable{order = [1]}, #sortable{order = [2]}])
- ).
-
-secondary_asc_test() ->
- ?assertMatch(
- [#sortable{order = [1, 1]}, #sortable{order = [1, 2]}],
- ?SORT([?ASC, ?ASC], [#sortable{order = [1, 2]}, #sortable{order = [1, 1]}])
- ).
-
-secondary_desc_test() ->
- ?assertMatch(
- [#sortable{order = [1, 2]}, #sortable{order = [1, 1]}],
- ?SORT([?DESC, ?DESC], [#sortable{order = [1, 1]}, #sortable{order = [1, 2]}])
- ).
-
-stash_test() ->
- {Stashed, Stash} = stash_items([#sortable{order = foo, item = bar}]),
- First = hd(Stashed),
- ?assert(is_reference(First#sortable.item)),
- Unstashed = hd(unstash_items(Stashed, Stash)),
- ?assertEqual(Unstashed#sortable.item, bar).
-
-ring_opts_test() ->
- Shards = [#shard{name = foo, ref = make_ref()}],
-
- QArgs1 = #index_query_args{partition = nil},
- ?assertEqual([], get_ring_opts(QArgs1, Shards)),
-
- QArgs2 = #index_query_args{partition = <<"x">>},
- ?assertMatch(
- [{any, [#shard{name = foo, ref = undefined}]}],
- get_ring_opts(QArgs2, Shards)
- ).
-
--endif.
diff --git a/src/dreyfus/test/dreyfus_blacklist_await_test.erl b/src/dreyfus/test/dreyfus_blacklist_await_test.erl
deleted file mode 100644
index de3d629e5..000000000
--- a/src/dreyfus/test/dreyfus_blacklist_await_test.erl
+++ /dev/null
@@ -1,86 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_blacklist_await_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("dreyfus/include/dreyfus.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(DDOC_ID, <<"_design/black_list_doc">>).
--define(INDEX_NAME, <<"my_index">>).
--define(DBNAME, <<"mydb">>).
--define(TIMEOUT, 1000).
-
-start() ->
- test_util:start_couch([dreyfus]).
-
-stop(_) ->
- test_util:stop_couch([dreyfus]).
-
-setup() ->
- ok = meck:new(couch_log),
- ok = meck:expect(couch_log, notice, fun(_Fmt, _Args) ->
- ?debugFmt(_Fmt, _Args)
- end).
-
-teardown(_) ->
- ok = meck:unload(couch_log).
-
-dreyfus_blacklist_await_test_() ->
- {
- "dreyfus black_list_doc await tests",
- {
- setup,
- fun start/0,
- fun stop/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun do_not_await_1/0
- ]
- }
- }
- }.
-
-do_not_await_1() ->
- ok = meck:new(dreyfus_index, [passthrough]),
- Denied = lists:flatten([
- ?b2l(?DBNAME),
- ".",
- "black_list_doc",
- ".",
- "my_index"
- ]),
- config:set("dreyfus_blacklist", Denied, "true"),
- dreyfus_test_util:wait_config_change(Denied, "true"),
- Index = #index{dbname = ?DBNAME, name = ?INDEX_NAME, ddoc_id = ?DDOC_ID},
- State = create_state(?DBNAME, Index, nil, nil, []),
- Msg = "Index Blocked from Updating - db: ~p, ddocid: ~p name: ~p",
- Return = wait_log_message(Msg, fun() ->
- {noreply, _NewState} = dreyfus_index:handle_call(
- {await, 1},
- self(),
- State
- )
- end),
- ?assertEqual(Return, ok).
-
-wait_log_message(Fmt, Fun) ->
- ok = meck:reset(couch_log),
- Fun(),
- ok = meck:wait(couch_log, '_', [Fmt, '_'], 5000).
-
-create_state(DbName, Index, UPid, IPid, WList) ->
- {state, DbName, Index, UPid, IPid, WList}.
diff --git a/src/dreyfus/test/dreyfus_blacklist_request_test.erl b/src/dreyfus/test/dreyfus_blacklist_request_test.erl
deleted file mode 100644
index 3e466327d..000000000
--- a/src/dreyfus/test/dreyfus_blacklist_request_test.erl
+++ /dev/null
@@ -1,164 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_blacklist_request_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("dreyfus/include/dreyfus.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(TIMEOUT, 1000).
-
-start() ->
- test_util:start_couch([dreyfus]),
- ok = meck:new(fabric, [passthrough]),
- ok = meck:expect(fabric, open_doc, fun(_, _, _) ->
- {ok, ddoc}
- end).
-
-stop(_) ->
- ok = meck:unload(fabric),
- test_util:stop_couch([dreyfus]).
-
-setup() ->
- ok.
-
-teardown(_) ->
- ok.
-
-dreyfus_blacklist_request_test_() ->
- {
- "dreyfus blacklist request tests",
- {
- setup,
- fun start/0,
- fun stop/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun deny_fabric_requests/0,
- fun allow_fabric_request/0
- ]
- }
- }
- }.
-
-deny_fabric_requests() ->
- Reason = <<"Index <mydb, myddocid, myindexname>, is BlackListed">>,
- QueryArgs = #index_query_args{},
- IndexQueryArgs = #index_query_args{},
- DDoc = #doc{id = <<"_design/myddocid">>},
- Denied = "mydb.myddocid.myindexname",
- config:set("dreyfus_blacklist", Denied, "true"),
- dreyfus_test_util:wait_config_change(Denied, "true"),
- ?assertThrow(
- {bad_request, Reason},
- dreyfus_fabric_search:go(
- <<"mydb">>,
- <<"myddocid">>,
- <<"myindexname">>,
- QueryArgs
- )
- ),
- ?assertThrow(
- {bad_request, Reason},
- dreyfus_fabric_group1:go(
- <<"mydb">>,
- <<"myddocid">>,
- <<"myindexname">>,
- QueryArgs
- )
- ),
- ?assertThrow(
- {bad_request, Reason},
- dreyfus_fabric_group2:go(
- <<"mydb">>,
- <<"myddocid">>,
- <<"myindexname">>,
- QueryArgs
- )
- ),
- ?assertThrow(
- {bad_request, Reason},
- dreyfus_fabric_info:go(
- <<"mydb">>,
- <<"myddocid">>,
- <<"myindexname">>,
- QueryArgs
- )
- ),
- ?assertThrow(
- {bad_request, Reason},
- dreyfus_fabric_search:go(
- <<"mydb">>,
- DDoc,
- <<"myindexname">>,
- IndexQueryArgs
- )
- ),
- ?assertThrow(
- {bad_request, Reason},
- dreyfus_fabric_group1:go(
- <<"mydb">>,
- DDoc,
- <<"myindexname">>,
- IndexQueryArgs
- )
- ),
- ?assertThrow(
- {bad_request, Reason},
- dreyfus_fabric_group2:go(
- <<"mydb">>,
- DDoc,
- <<"myindexname">>,
- IndexQueryArgs
- )
- ),
- ?assertThrow(
- {bad_request, Reason},
- dreyfus_fabric_info:go(
- <<"mydb">>,
- DDoc,
- <<"myindexname">>,
- IndexQueryArgs
- )
- ).
-
-allow_fabric_request() ->
-    ok = meck:new(dreyfus_fabric_search, [passthrough]),
-    %combine both expectations into one fun so the second does not overwrite
-    %the first: binary group ids are passed through, anything else returns ok
-    ok = meck:expect(dreyfus_fabric_search, go, fun
-        (A, GroupId, B, C) when is_binary(GroupId) ->
-            meck:passthrough([A, GroupId, B, C]);
-        (_, _, _, _) ->
-            ok
-    end),
- Denied = "mydb2.myddocid2.myindexname2",
- QueryArgs = #index_query_args{},
- config:set("dreyfus_blacklist", Denied, "true"),
- dreyfus_test_util:wait_config_change(Denied, "true"),
- ?assertEqual(
- ok,
- dreyfus_fabric_search:go(
- <<"mydb">>,
- <<"myddocid">>,
- <<"indexnotthere">>,
- QueryArgs
- )
- ),
- ok = meck:unload(dreyfus_fabric_search).
diff --git a/src/dreyfus/test/dreyfus_config_test.erl b/src/dreyfus/test/dreyfus_config_test.erl
deleted file mode 100644
index 9ae0e56e0..000000000
--- a/src/dreyfus/test/dreyfus_config_test.erl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_config_test).
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(TIMEOUT, 1000).
-
-start() ->
- test_util:start_couch([dreyfus]).
-
-setup() ->
- ok.
-
-teardown(_) ->
- ok.
-
-dreyfus_config_test_() ->
- {
- "dreyfus config tests",
- {
- setup,
- fun start/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun check_black_list/0,
- fun check_delete_from_blacklist/0
- ]
- }
- }
- }.
-
-check_black_list() ->
- Index = "mydb.myddocid.myindexname",
- Index2 = "mydb2.myddocid2.myindexname2",
- Index3 = "mydb3.myddocid3.myindexname3",
- ok = config:set("dreyfus_blacklist", Index, "true"),
- ok = config:set("dreyfus_blacklist", Index2, "true"),
- ok = config:set("dreyfus_blacklist", Index3, "true"),
- dreyfus_test_util:wait_config_change(Index3, "true"),
- FinalBl = [Index3, Index2, Index],
- lists:foreach(
- fun(I) ->
- ?assertEqual("true", dreyfus_config:get(I))
- end,
- FinalBl
- ).
-
-check_delete_from_blacklist() ->
- Index = "mydb.myddocid.myindexname",
- Index2 = "mydb2.myddocid2.myindexname2",
- ok = config:set("dreyfus_blacklist", Index, "true"),
- dreyfus_test_util:wait_config_change(Index, "true"),
- ok = config:delete("dreyfus_blacklist", Index),
- dreyfus_test_util:wait_config_change(Index, undefined),
- ok = config:set("dreyfus_blacklist", Index2, "true"),
- dreyfus_test_util:wait_config_change(Index2, "true"),
- ?assertEqual(undefined, dreyfus_config:get(Index)),
- ?assertEqual("true", dreyfus_config:get(Index2)).
diff --git a/src/dreyfus/test/dreyfus_purge_test.erl b/src/dreyfus/test/dreyfus_purge_test.erl
deleted file mode 100644
index e64a046a7..000000000
--- a/src/dreyfus/test/dreyfus_purge_test.erl
+++ /dev/null
@@ -1,1118 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_purge_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("dreyfus/include/dreyfus.hrl").
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--export([
- test_purge_single/0,
- test_purge_multiple/0,
- test_purge_multiple2/0,
- test_purge_conflict/0,
- test_purge_conflict2/0,
- test_purge_conflict3/0,
- test_purge_conflict4/0,
- test_purge_update/0,
- test_purge_update2/0,
- test_delete/0,
- test_delete_purge_conflict/0,
- test_delete_conflict/0,
- test_all/0
-]).
--export([
- test_verify_index_exists1/0,
- test_verify_index_exists2/0,
- test_verify_index_exists_failed/0,
- test_local_doc/0,
- test_delete_local_doc/0,
- test_purge_search/0
-]).
-
--compile(export_all).
--compile(nowarn_export_all).
-
-test_all() ->
- test_purge_single(),
- test_purge_multiple(),
- test_purge_multiple2(),
- test_purge_conflict(),
- test_purge_conflict2(),
- test_purge_conflict3(),
- test_purge_conflict4(),
- test_purge_update(),
- test_purge_update2(),
- test_delete(),
- test_delete_purge_conflict(),
- test_delete_conflict(),
- test_verify_index_exists1(),
- test_verify_index_exists2(),
- test_verify_index_exists_failed(),
- test_delete_local_doc(),
- test_local_doc(),
- test_purge_search(),
- ok.
-
-test_purge_single() ->
- DbName = db_name(),
- create_db_docs(DbName),
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
- purge_docs(DbName, [<<"apple">>]),
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
- delete_db(DbName),
- ok.
-
-test_purge_multiple() ->
- Query = <<"color:red">>,
-
- %create the db and docs
- DbName = db_name(),
- create_db_docs(DbName),
-
- %first search request
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount1, 5),
-
- %purge 5 docs
- purge_docs(DbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
-
- %second search request
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount2, 0),
-
- %delete the db
- delete_db(DbName),
- ok.
-
-test_purge_multiple2() ->
- %create the db and docs
- DbName = db_name(),
- create_db_docs(DbName),
-
- Query = <<"color:red">>,
-
- %first search request
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount1, 5),
-
- %purge 2 docs
- purge_docs(DbName, [<<"apple">>, <<"tomato">>]),
-
- %second search request
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount2, 3),
-
- %purge 2 docs
- purge_docs(DbName, [<<"cherry">>, <<"haw">>]),
-
- %third search request
- {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount3, 1),
-
- %delete the db
- delete_db(DbName),
- ok.
-
-test_purge_conflict() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(
- <<"_replicator">>,
- make_replicate_doc(
- SourceDbName, TargetDbName
- ),
- [?ADMIN_CTX]
- ),
-
- %%check doc version
- wait_for_replicate(
- TargetDbName,
- [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ],
- 2,
- 5
- ),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- purge_docs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName,
- <<"color:red">>
- ),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName,
- <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount3 + GreenHitCount3),
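-    %purging the winning revisions promotes the losing conflict revisions,
-    %so the red and green hit counts are expected to swap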
- ?assertEqual(RedHitCount2, GreenHitCount3),
- ?assertEqual(GreenHitCount2, RedHitCount3),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_purge_conflict2() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName,
- <<"color:red">>
- ),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName,
- <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(
- <<"_replicator">>,
- make_replicate_doc(
- SourceDbName, TargetDbName
- ),
- [?ADMIN_CTX]
- ),
-
- wait_for_replicate(
- TargetDbName,
- [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ],
- 2,
- 5
- ),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- purge_docs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
- purge_docs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
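-    %the first purge removed the winning revisions and the second removed the
-    %newly promoted conflicts, so the index should now be empty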
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(0, RedHitCount3 + GreenHitCount3),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_purge_conflict3() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(
- <<"_replicator">>,
- make_replicate_doc(
- SourceDbName, TargetDbName
- ),
- [?ADMIN_CTX]
- ),
-
- %%check doc version
- wait_for_replicate(
- TargetDbName,
- [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ],
- 2,
- 5
- ),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- purge_docs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount3 + GreenHitCount3),
- ?assertEqual(RedHitCount2, GreenHitCount3),
- ?assertEqual(GreenHitCount2, RedHitCount3),
-
- purge_docs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
- {ok, _, RedHitCount4, _, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount4, _, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(0, RedHitCount4 + GreenHitCount4),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_purge_conflict4() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName, <<"green">>),
- create_db_docs(TargetDbName, <<"red">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(
- <<"_replicator">>,
- make_replicate_doc(
- SourceDbName, TargetDbName
- ),
- [?ADMIN_CTX]
- ),
-
- %%check doc version
- wait_for_replicate(
- TargetDbName,
- [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ],
- 2,
- 5
- ),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- purge_docs_with_all_revs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(0, RedHitCount3 + GreenHitCount3),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_purge_update() ->
- %create the db and docs
- DbName = db_name(),
- create_db_docs(DbName),
-
- QueryRed = <<"color:red">>,
- QueryGreen = <<"color:green">>,
-
- %first search request
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, QueryRed),
-
- ?assertEqual(HitCount1, 5),
-
- %update doc
- Rev = get_rev(DbName, <<"apple">>),
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"apple">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"color">>, <<"green">>},
- {<<"size">>, 8}
- ]}
- ),
- {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
-
- %second search request
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, QueryRed),
- {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, QueryGreen),
-
- % 4 red and 1 green
- ?assertEqual(HitCount2, 4),
- ?assertEqual(HitCount3, 1),
-
- % purge 2 docs, 1 red and 1 green
- purge_docs(DbName, [<<"apple">>, <<"tomato">>]),
-
- % third search request
- {ok, _, HitCount4, _, _, _} = dreyfus_search(DbName, QueryRed),
- {ok, _, HitCount5, _, _, _} = dreyfus_search(DbName, QueryGreen),
-
- % 3 red and 0 green
- ?assertEqual(HitCount4, 3),
- ?assertEqual(HitCount5, 0),
-
- delete_db(DbName),
- ok.
-
-test_purge_update2() ->
- %create the db and docs
- DbName = db_name(),
- create_db_docs(DbName),
-
- Query1 = <<"size:1">>,
- Query1000 = <<"size:1000">>,
-
- %first search request
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query1),
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query1000),
-
- ?assertEqual(HitCount1, 5),
- ?assertEqual(HitCount2, 0),
-
-    %update the doc 999 times; this takes about 30 seconds.
- update_doc(DbName, <<"apple">>, 999),
-
- %second search request
- {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, Query1),
- {ok, _, HitCount4, _, _, _} = dreyfus_search(DbName, Query1000),
-
- % 4 value(1) and 1 value(1000)
- ?assertEqual(HitCount3, 4),
- ?assertEqual(HitCount4, 1),
-
- % purge doc
- purge_docs(DbName, [<<"apple">>]),
-
- % third search request
- {ok, _, HitCount5, _, _, _} = dreyfus_search(DbName, Query1),
- {ok, _, HitCount6, _, _, _} = dreyfus_search(DbName, Query1000),
-
- % 4 value(1) and 0 value(1000)
- ?assertEqual(HitCount5, 4),
- ?assertEqual(HitCount6, 0),
-
- delete_db(DbName),
- ok.
-
-test_delete() ->
- DbName = db_name(),
- create_db_docs(DbName),
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
- ok = delete_docs(DbName, [<<"apple">>]),
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
- delete_db(DbName),
- ok.
-
-test_delete_conflict() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(
- <<"_replicator">>,
- make_replicate_doc(
- SourceDbName, TargetDbName
- ),
- [?ADMIN_CTX]
- ),
-
- wait_for_replicate(
- TargetDbName,
- [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ],
- 2,
- 5
- ),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- %delete docs
- delete_docs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount3 + GreenHitCount3),
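-    %deleting the winning revisions promotes the conflicting revisions,
-    %so the red and green hit counts are expected to swap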
- ?assertEqual(RedHitCount2, GreenHitCount3),
- ?assertEqual(GreenHitCount2, RedHitCount3),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_delete_purge_conflict() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(
- <<"_replicator">>,
- make_replicate_doc(
- SourceDbName, TargetDbName
- ),
- [?ADMIN_CTX]
- ),
-
- wait_for_replicate(
- TargetDbName,
- [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ],
- 2,
- 5
- ),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- %purge docs
- purge_docs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
-
- %delete docs
- delete_docs(TargetDbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"haw">>,
- <<"strawberry">>
- ]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>
- ),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>
- ),
-
-    ?assertEqual(RedHitCount3, 0),
-    ?assertEqual(GreenHitCount3, 0),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_local_doc() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
- purge_docs(DbName, [
- <<"apple">>,
- <<"tomato">>,
- <<"cherry">>,
- <<"strawberry">>
- ]),
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
-
- %get local doc
- [Sig | _] = get_sigs(DbName),
- LocalId = dreyfus_util:get_local_purge_doc_id(Sig),
- LocalShards = mem3:local_shards(DbName),
- PurgeSeqs = lists:map(
- fun(Shard) ->
- {ok, Db} = couch_db:open_int(Shard#shard.name, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db, LocalId, []),
- {Props} = couch_doc:to_json_obj(LDoc, []),
- dreyfus_util:get_value_from_options(<<"updated_on">>, Props),
- PurgeSeq = dreyfus_util:get_value_from_options(<<"purge_seq">>, Props),
- Type = dreyfus_util:get_value_from_options(<<"type">>, Props),
- ?assertEqual(<<"dreyfus">>, Type),
- couch_db:close(Db),
- PurgeSeq
- end,
- LocalShards
- ),
- ?assertEqual(lists:sum(PurgeSeqs), 4),
-
- delete_db(DbName),
- ok.
-
-test_verify_index_exists1() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
-
- ok = purge_docs(DbName, [<<"apple">>]),
-
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
-
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest] = ShardNames,
- {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(
- Db,
- dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>
- ),
- []
- ),
- #doc{body = {Props}} = LDoc,
- ?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)),
- delete_db(DbName),
- ok.
-
-test_verify_index_exists2() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
-
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest] = ShardNames,
- {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(
- Db,
- dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>
- ),
- []
- ),
- #doc{body = {Props}} = LDoc,
- ?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)),
-
- delete_db(DbName),
- ok.
-
-test_verify_index_exists_failed() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
-
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest] = ShardNames,
- {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(
- Db,
- dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>
- ),
- []
- ),
- #doc{body = {Options}} = LDoc,
- OptionsDbErr = [
- {<<"indexname">>, dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>, dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>, dreyfus_util:get_value_from_options(<<"signature">>, Options)}
- ],
- ?assertEqual(
- false,
- dreyfus_util:verify_index_exists(
- ShardDbName, OptionsDbErr
- )
- ),
-
- OptionsIdxErr = [
- {<<"indexname">>, <<"someindex">>},
- {<<"ddoc_id">>, dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>, dreyfus_util:get_value_from_options(<<"signature">>, Options)}
- ],
- ?assertEqual(
- false,
- dreyfus_util:verify_index_exists(
- ShardDbName, OptionsIdxErr
- )
- ),
-
- OptionsDDocErr = [
- {<<"indexname">>, dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>, <<"somedesigndoc">>},
- {<<"signature">>, dreyfus_util:get_value_from_options(<<"signature">>, Options)}
- ],
- ?assertEqual(
- false,
- dreyfus_util:verify_index_exists(
- ShardDbName, OptionsDDocErr
- )
- ),
-
- OptionsSigErr = [
- {<<"indexname">>, dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>, dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>, <<"12345678901234567890123456789012">>}
- ],
- ?assertEqual(
- false,
- dreyfus_util:verify_index_exists(
- ShardDbName, OptionsSigErr
- )
- ),
-
- delete_db(DbName),
- ok.
-
-test_delete_local_doc() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
-
- ok = purge_docs(DbName, [<<"apple">>]),
-
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
-
- LDocId = dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>
- ),
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest] = ShardNames,
- {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, _} = couch_db:open_doc(Db, LDocId, []),
-
- delete_docs(DbName, [<<"_design/search">>]),
- io:format("DbName ~p~n", [DbName]),
- ?debugFmt("Converting ... ~n~p~n", [DbName]),
-
- dreyfus_fabric_cleanup:go(DbName),
- {ok, Db2} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {not_found, _} = couch_db:open_doc(Db2, LDocId, []),
-
- delete_db(DbName),
- ok.
-
-test_purge_search() ->
- DbName = db_name(),
- create_db_docs(DbName),
- purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"haw">>]),
- {ok, _, HitCount, _, _, _} = dreyfus_search(DbName, <<"color:red">>),
- ?assertEqual(HitCount, 2),
- delete_db(DbName),
- ok.
-
-%private API
-db_name() ->
- iolist_to_binary([
- "dreyfus-test-db-",
- [
- integer_to_list(I)
- || I <- [
- erlang:unique_integer([positive]),
- rand:uniform(10000)
- ]
- ]
- ]).
-
-purge_docs(DBName, DocIds) ->
- IdsRevs = [{DocId, [get_rev(DBName, DocId)]} || DocId <- DocIds],
- {ok, _} = fabric:purge_docs(DBName, IdsRevs, []),
- ok.
-
-purge_docs_with_all_revs(DBName, DocIds) ->
- IdsRevs = [{DocId, get_revs(DBName, DocId)} || DocId <- DocIds],
- {ok, _} = fabric:purge_docs(DBName, IdsRevs, []),
- ok.
-
-dreyfus_search(DbName, KeyWord) ->
- QueryArgs = #index_query_args{q = KeyWord},
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/search">>, []),
- dreyfus_fabric_search:go(DbName, DDoc, <<"index">>, QueryArgs).
-
-create_db_docs(DbName) ->
- create_db(DbName),
- create_docs(DbName, 5, <<"red">>).
-
-create_db_docs(DbName, Color) ->
- create_db(DbName),
- create_docs(DbName, 5, Color).
-
-create_docs(DbName, Count, Color) ->
- {ok, _} = fabric:update_docs(DbName, make_docs(Count, Color), [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(DbName, make_design_doc(dreyfus), [?ADMIN_CTX]).
-
-create_db(DbName) ->
- ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]).
-
-delete_db(DbName) ->
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
-
-make_docs(Count, Color) ->
- [make_doc(I, Color) || I <- lists:seq(1, Count)].
-
-make_doc(Id, Color) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, get_value(Id)},
- {<<"color">>, Color},
- {<<"size">>, 1}
- ]}
- ).
-
-get_value(Key) ->
- case Key of
- 1 -> <<"apple">>;
- 2 -> <<"tomato">>;
- 3 -> <<"cherry">>;
- 4 -> <<"strawberry">>;
- 5 -> <<"haw">>;
- 6 -> <<"carrot">>;
- 7 -> <<"pitaya">>;
- 8 -> <<"grape">>;
- 9 -> <<"date">>;
- 10 -> <<"watermelon">>
- end.
-
-make_design_doc(dreyfus) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"_design/search">>},
- {<<"language">>, <<"javascript">>},
- {<<"indexes">>,
- {[
- {<<"index">>,
- {[
- {<<"analyzer">>, <<"standard">>},
- {<<"index">>, <<
- "function (doc) { \n"
- " index(\"default\", doc._id);\n"
- " if(doc.color) {\n"
- " index(\"color\", doc.color);\n"
- " }\n"
- " if(doc.size) {\n"
- " index(\"size\", doc.size);\n"
- " }\n"
- "}"
- >>}
- ]}}
- ]}}
- ]}
- ).
-
-make_replicate_doc(SourceDbName, TargetDbName) ->
- couch_doc:from_json_obj(
- {[
- {<<"_id">>,
- list_to_binary(
- "replicate_fm_" ++
- binary_to_list(SourceDbName) ++ "_to_" ++ binary_to_list(TargetDbName)
- )},
- {<<"source">>, list_to_binary("http://localhost:15984/" ++ SourceDbName)},
- {<<"target">>, list_to_binary("http://localhost:15984/" ++ TargetDbName)}
- ]}
- ).
-
-get_rev(DbName, DocId) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- #doc_info{revs = [#rev_info{} = PrevRev | _]} = couch_doc:to_doc_info(FDI),
- PrevRev#rev_info.rev.
-
-get_revs(DbName, DocId) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- #doc_info{revs = Revs} = couch_doc:to_doc_info(FDI),
- [Rev#rev_info.rev || Rev <- Revs].
-
-update_doc(_, _, 0) ->
- ok;
-update_doc(DbName, DocId, Times) ->
- Rev = get_rev(DbName, DocId),
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, <<"apple">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"size">>, 1001 - Times}
- ]}
- ),
- {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
- update_doc(DbName, DocId, Times - 1).
-
-delete_docs(DbName, DocIds) ->
- lists:foreach(
- fun(DocId) -> ok = delete_doc(DbName, DocId) end,
- DocIds
- ).
-
-delete_doc(DbName, DocId) ->
- Rev = get_rev(DbName, DocId),
- DDoc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, DocId},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}
- ),
- {ok, _} = fabric:update_doc(DbName, DDoc, [?ADMIN_CTX]),
- ok.
-
-wait_for_replicate(_, _, _, 0) ->
- couch_log:notice("[~p] wait time out", [?MODULE]),
- ok;
-wait_for_replicate(DbName, DocIds, ExpectRevCount, TimeOut) when
- is_list(DocIds)
-->
- [wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) || DocId <- DocIds];
-wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- #doc_info{revs = Revs} = couch_doc:to_doc_info(FDI),
- case erlang:length(Revs) of
- ExpectRevCount ->
- couch_log:notice(
- "[~p] wait end by expect, time used:~p, DocId:~p",
- [?MODULE, 5 - TimeOut, DocId]
- ),
- ok;
-        _ ->
- timer:sleep(1000),
- wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut - 1)
- end,
- ok.
-
-get_sigs(DbName) ->
- {ok, DesignDocs} = fabric:design_docs(DbName),
- lists:usort(
- lists:flatmap(
- fun active_sigs/1,
- [couch_doc:from_json_obj(DD) || DD <- DesignDocs]
- )
- ).
-
-active_sigs(#doc{body = {Fields}} = Doc) ->
- {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- {IndexNames, _} = lists:unzip(RawIndexes),
- [
- begin
- {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
- Index#index.sig
- end
- || IndexName <- IndexNames
- ].
diff --git a/src/dreyfus/test/dreyfus_test_util.erl b/src/dreyfus/test/dreyfus_test_util.erl
deleted file mode 100644
index 79fd9b59d..000000000
--- a/src/dreyfus/test/dreyfus_test_util.erl
+++ /dev/null
@@ -1,15 +0,0 @@
--module(dreyfus_test_util).
-
--export([
- wait_config_change/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
-wait_config_change(Key, Value) ->
- test_util:wait(fun() ->
- case dreyfus_config:get(Key) of
- Value -> ok;
- _ -> wait
- end
- end).
diff --git a/src/dreyfus/test/elixir/mix.exs b/src/dreyfus/test/elixir/mix.exs
deleted file mode 100644
index 9b0f642dd..000000000
--- a/src/dreyfus/test/elixir/mix.exs
+++ /dev/null
@@ -1,30 +0,0 @@
-defmodule Foo.Mixfile do
- use Mix.Project
-
- def project do
- [
- app: :foo,
- version: "0.1.0",
- elixir: "~> 1.5",
- start_permanent: Mix.env == :prod,
- deps: deps()
- ]
- end
-
- # Run "mix help compile.app" to learn about applications.
- def application do
- [
- extra_applications: [:logger]
- ]
- end
-
- # Run "mix help deps" to learn about dependencies.
- defp deps do
- [
- # {:dep_from_hexpm, "~> 0.3.0"},
- {:httpotion, "~> 3.0"},
- {:jiffy, "~> 0.14.11"}
- # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"},
- ]
- end
-end
diff --git a/src/dreyfus/test/elixir/mix.lock b/src/dreyfus/test/elixir/mix.lock
deleted file mode 100644
index ed51e5312..000000000
--- a/src/dreyfus/test/elixir/mix.lock
+++ /dev/null
@@ -1,5 +0,0 @@
-%{
- "httpotion": {:hex, :httpotion, "3.1.0", "14d20d9b0ce4e86e253eb91e4af79e469ad949f57a5d23c0a51b2f86559f6589", [:mix], [{:ibrowse, "~> 4.4", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"},
- "ibrowse": {:hex, :ibrowse, "4.4.1", "2b7d0637b0f8b9b4182de4bd0f2e826a4da2c9b04898b6e15659ba921a8d6ec2", [:rebar3], [], "hexpm"},
- "jiffy": {:hex, :jiffy, "0.14.13", "225a9a35e26417832c611526567194b4d3adc4f0dfa5f2f7008f4684076f2a01", [:rebar3], [], "hexpm"},
-}
diff --git a/src/dreyfus/test/elixir/run b/src/dreyfus/test/elixir/run
deleted file mode 100755
index 66a5947b7..000000000
--- a/src/dreyfus/test/elixir/run
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash -e
-cd "$(dirname "$0")"
-mix deps.get
-mix test --trace
diff --git a/src/dreyfus/test/elixir/test/partition_search_test.exs b/src/dreyfus/test/elixir/test/partition_search_test.exs
deleted file mode 100644
index 121995449..000000000
--- a/src/dreyfus/test/elixir/test/partition_search_test.exs
+++ /dev/null
@@ -1,247 +0,0 @@
-defmodule PartitionSearchTest do
- use CouchTestCase
-
- @moduletag :search
-
- @moduledoc """
- Test Partition functionality with search
- """
-
- def create_search_docs(db_name, pk1 \\ "foo", pk2 \\ "bar") do
- docs = for i <- 1..10 do
- id = if rem(i, 2) == 0 do
- "#{pk1}:#{i}"
- else
- "#{pk2}:#{i}"
- end
- %{
- :_id => id,
- :value => i,
- :some => "field"
- }
- end
-
- resp = Couch.post("/#{db_name}/_bulk_docs", headers: ["Content-Type": "application/json"], body: %{:docs => docs}, query: %{w: 3})
- assert resp.status_code in [201, 202]
- end
-
- def create_ddoc(db_name, opts \\ %{}) do
- index_fn = "function(doc) {\n if (doc.some) {\n index('some', doc.some);\n }\n}"
- default_ddoc = %{
- indexes: %{
- books: %{
- analyzer: %{name: "standard"},
- index: index_fn
- }
- }
- }
-
- ddoc = Enum.into(opts, default_ddoc)
-
- resp = Couch.put("/#{db_name}/_design/library", body: ddoc)
- assert resp.status_code in [201, 202]
- assert Map.has_key?(resp.body, "ok") == true
- end
-
-  def get_ids(resp) do
- %{:body => %{"rows" => rows}} = resp
- Enum.map(rows, fn row -> row["id"] end)
- end
-
- @tag :with_partitioned_db
- test "Simple query returns partitioned search results", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:10", "foo:2", "foo:4", "foo:6", "foo:8"]
-
- url = "/#{db_name}/_partition/bar/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["bar:1", "bar:3", "bar:5", "bar:7", "bar:9"]
- end
-
- @tag :with_partitioned_db
- test "Only returns docs in partition not those in shard", context do
- db_name = context[:db_name]
- create_search_docs(db_name, "foo", "bar42")
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:10", "foo:2", "foo:4", "foo:6", "foo:8"]
- end
-
- @tag :with_partitioned_db
- test "Works with bookmarks and limit", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field", limit: 3})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:10", "foo:2", "foo:4"]
-
- %{:body => %{"bookmark" => bookmark}} = resp
-
- resp = Couch.get(url, query: %{q: "some:field", limit: 3, bookmark: bookmark})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:6", "foo:8"]
-
- resp = Couch.get(url, query: %{q: "some:field", limit: 2000, bookmark: bookmark})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:6", "foo:8"]
-
- resp = Couch.get(url, query: %{q: "some:field", limit: 2001, bookmark: bookmark})
- assert resp.status_code == 400
- end
-
- @tag :with_db
- test "Works with limit using POST for on non-partitioned db", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.post(url, body: %{:q => "some:field", :limit => 1})
- assert resp.status_code == 200
- end
-
- @tag :with_partitioned_db
- test "Works with limit using POST for partitioned db", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.post(url, body: %{:q => "some:field", :limit => 1})
- assert resp.status_code == 200
- end
-
- @tag :with_partitioned_db
- test "Cannot do global query with partition view", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/mandatory for queries to this index./, reason)
- end
-
- @tag :with_partitioned_db
- test "Cannot do partition query with global search ddoc", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name, options: %{partitioned: false})
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert reason == "`partition` not supported on this index"
- end
-
- @tag :with_db
- test "normal search on non-partitioned dbs still work", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert Enum.sort(ids) == Enum.sort(["bar:1", "bar:5", "bar:9", "foo:2", "bar:3", "foo:4", "foo:6", "bar:7", "foo:8", "foo:10"])
- end
-
- @tag :with_db
- test "normal search on non-partitioned dbs without limit", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert Enum.sort(ids) == Enum.sort(["bar:1", "bar:5", "bar:9", "foo:2", "bar:3", "foo:4", "foo:6", "bar:7", "foo:8", "foo:10"])
- end
-
- @tag :with_db
- test "normal search on non-partitioned dbs with limit", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field", limit: 3})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert Enum.sort(ids) == Enum.sort(["bar:1", "bar:5", "bar:9"])
- end
-
- @tag :with_db
- test "normal search on non-partitioned dbs with over limit", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field", limit: 201})
- assert resp.status_code == 400
- end
-
- @tag :with_partitioned_db
- test "rejects conflicting partition values", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.post(url, body: %{q: "some:field", partition: "bar"})
- assert resp.status_code == 400
- end
-
- @tag :with_partitioned_db
- test "restricted parameters are not allowed in query or body", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- body = %{q: "some:field", partition: "foo"}
-
- Enum.each(
- [
- {:counts, "[\"type\"]"},
- {:group_field, "some"},
- {:ranges, :jiffy.encode(%{price: %{cheap: "[0 TO 100]"}})},
- {:drilldown, "[\"key\",\"a\"]"},
- ],
- fn {key, value} ->
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- bannedparam = Map.put(body, key, value)
- get_resp = Couch.get(url, query: bannedparam)
- %{:body => %{"reason" => get_reason}} = get_resp
- assert Regex.match?(~r/are incompatible/, get_reason)
- post_resp = Couch.post(url, body: bannedparam)
- %{:body => %{"reason" => post_reason}} = post_resp
- assert Regex.match?(~r/are incompatible/, post_reason)
- end
- )
- end
-end
diff --git a/src/dreyfus/test/elixir/test/search_test.exs b/src/dreyfus/test/elixir/test/search_test.exs
deleted file mode 100644
index 829b3395f..000000000
--- a/src/dreyfus/test/elixir/test/search_test.exs
+++ /dev/null
@@ -1,226 +0,0 @@
-defmodule SearchTest do
- use CouchTestCase
-
- @moduletag :search
-
- @moduledoc """
- Test search
- """
-
- def create_search_docs(db_name) do
- resp = Couch.post("/#{db_name}/_bulk_docs",
- headers: ["Content-Type": "application/json"],
- body: %{:docs => [
- %{"item" => "apple", "place" => "kitchen", "state" => "new"},
- %{"item" => "banana", "place" => "kitchen", "state" => "new"},
- %{"item" => "carrot", "place" => "kitchen", "state" => "old"},
- %{"item" => "date", "place" => "lobby", "state" => "unknown"},
- ]}
- )
- assert resp.status_code in [201, 202]
- end
-
- def create_ddoc(db_name, opts \\ %{}) do
- default_ddoc = %{
- indexes: %{
- fruits: %{
- analyzer: %{name: "standard"},
- index: "function (doc) {\n index(\"item\", doc.item, {facet: true});\n index(\"place\", doc.place, {facet: true});\n index(\"state\", doc.state, {facet: true});\n}"
- }
- }
- }
-
- ddoc = Enum.into(opts, default_ddoc)
-
- resp = Couch.put("/#{db_name}/_design/inventory", body: ddoc)
- assert resp.status_code in [201, 202]
- assert Map.has_key?(resp.body, "ok") == true
- end
-
- def create_invalid_ddoc(db_name, opts \\ %{}) do
- invalid_ddoc = %{
- :indexes => [
- %{"name" => "foo", "ddoc" => "bar", "type" => "text"},
- ]
- }
-
- ddoc = Enum.into(opts, invalid_ddoc)
-
- resp = Couch.put("/#{db_name}/_design/search", body: ddoc)
- assert resp.status_code in [201, 202]
- assert Map.has_key?(resp.body, "ok") == true
- end
-
-  def get_items(resp) do
- %{:body => %{"rows" => rows}} = resp
- Enum.map(rows, fn row -> row["doc"]["item"] end)
- end
-
- @tag :with_db
- test "search returns all items for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.get(url, query: %{q: "*:*", include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "carrot", "date"])
- end
-
- @tag :with_db
- test "drilldown single key single value for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.get(url, query: %{q: "*:*", drilldown: :jiffy.encode(["place", "kitchen"]), include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "carrot"])
- end
-
- @tag :with_db
- test "drilldown single key multiple values for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.get(url, query: %{q: "*:*", drilldown: :jiffy.encode(["state", "new", "unknown"]), include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "date"])
- end
-
- @tag :with_db
- test "drilldown multiple keys single values for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.get(url, query: %{q: "*:*", drilldown: :jiffy.encode([["state", "old"], ["item", "apple"]]), include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == []
- end
-
- @tag :with_db
- test "drilldown multiple query definitions for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits?q=*:*&drilldown=[\"state\",\"old\"]&drilldown=[\"item\",\"apple\"]&include_docs=true"
- resp = Couch.get(url)
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == []
- end
-
-
- @tag :with_db
- test "search returns all items for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{q: "*:*", include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "carrot", "date"])
- end
-
- @tag :with_db
- test "drilldown single key single value for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{query: "*:*", drilldown: ["place", "kitchen"], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "carrot"])
- end
-
- @tag :with_db
- test "drilldown single key multiple values for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{query: "*:*", drilldown: ["state", "new", "unknown"], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "date"])
- end
-
- @tag :with_db
- test "drilldown multiple keys single values for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{q: "*:*", drilldown: [["state", "old"], ["item", "apple"]], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == []
- end
-
- @tag :with_db
- test "drilldown three keys single values for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{q: "*:*", drilldown: [["place", "kitchen"], ["state", "new"], ["item", "apple"]], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == ["apple"]
- end
-
- @tag :with_db
- test "drilldown multiple keys multiple values for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{q: "*:*", drilldown: [["state", "old", "new"], ["item", "apple"]], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == ["apple"]
- end
-
- @tag :with_db
- test "drilldown multiple query definitions for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: "{\"include_docs\": true, \"q\": \"*:*\", \"drilldown\": [\"state\", \"old\"], \"drilldown\": [\"item\", \"apple\"]}")
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == ["apple"]
- end
-
- @tag :with_db
- test "clean up search index with invalid design document", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
- create_invalid_ddoc(db_name)
-
- resp = Couch.post("/#{db_name}/_search_cleanup")
- assert resp.status_code in [201, 202]
- end
-end
diff --git a/src/dreyfus/test/elixir/test/test_helper.exs b/src/dreyfus/test/elixir/test/test_helper.exs
deleted file mode 100644
index 6eb20e242..000000000
--- a/src/dreyfus/test/elixir/test/test_helper.exs
+++ /dev/null
@@ -1,4 +0,0 @@
-Code.require_file "../../../../couchdb/test/elixir/lib/couch.ex", __DIR__
-Code.require_file "../../../../couchdb/test/elixir/test/test_helper.exs", __DIR__
-Code.require_file "../../../../couchdb/test/elixir/test/support/couch_test_case.ex", __DIR__
-Code.require_file "../../../../couchdb/test/elixir/lib/couch/db_test.ex", __DIR__
diff --git a/src/fabric/LICENSE b/src/fabric/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/fabric/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/fabric/README.md b/src/fabric/README.md
deleted file mode 100644
index 421a39063..000000000
--- a/src/fabric/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-## fabric
-
-Fabric is a collection of proxy functions for [CouchDB][1] operations in a cluster. These functions are used in CouchDB as the remote procedure endpoints on each of the cluster nodes.
-
-For example, creating a database is a straightforward task in CouchDB 1.x, but for a clustered CouchDB, each node that will store a shard for the database needs to receive and execute a fabric function. The node handling the request also needs to compile the results from each of the nodes and respond accordingly to the client.
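
As a rough illustration (an editor's sketch, not part of the original README; the database name and the `q`/`n` values are made up), driving such a proxied operation from an Erlang shell on a cluster node might look like this:

```erlang
%% Hypothetical shell session on one cluster node.
%% fabric:create_db/2 fans the request out to every node that will host a
%% shard and collates the replies before answering.
1> fabric:create_db(<<"mydb">>, [{q, "8"}, {n, "3"}]).
ok
2> fabric:get_doc_count(<<"mydb">>).
{ok,0}
```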
-
-Fabric is used in conjunction with 'Rexi', which is also an application within CouchDB.
-
-### Getting Started
-Fabric requires Erlang/OTP R13B03 or higher and can be built with [rebar][3].
-
-### License
-[Apache 2.0][2]
-
-
-[1]: http://couchdb.apache.org
-[2]: http://www.apache.org/licenses/LICENSE-2.0.html
-[3]: http://github.com/basho/rebar
diff --git a/src/fabric/include/fabric.hrl b/src/fabric/include/fabric.hrl
deleted file mode 100644
index 2a4da8bcf..000000000
--- a/src/fabric/include/fabric.hrl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include_lib("eunit/include/eunit.hrl").
-
--record(collector, {
- db_name=nil,
- query_args,
- callback,
- counters,
- buffer_size,
- blocked = [],
- total_rows = 0,
- offset = 0,
- rows = [],
- skip,
- limit,
- keys,
- os_proc,
- reducer,
- collation,
- lang,
- sorted,
- user_acc,
- update_seq
-}).
-
--record(stream_acc, {
- workers,
- ready,
- start_fun,
- replacements,
- ring_opts
-}).
-
--record(view_row, {key, id, value, doc, worker}).
--record(change, {key, id, value, deleted=false, doc, worker}).
diff --git a/src/fabric/priv/stats_descriptions.cfg b/src/fabric/priv/stats_descriptions.cfg
deleted file mode 100644
index d12aa0c84..000000000
--- a/src/fabric/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,28 +0,0 @@
-{[fabric, worker, timeouts], [
- {type, counter},
- {desc, <<"number of worker timeouts">>}
-]}.
-{[fabric, open_shard, timeouts], [
- {type, counter},
- {desc, <<"number of open shard timeouts">>}
-]}.
-{[fabric, read_repairs, success], [
- {type, counter},
- {desc, <<"number of successful read repair operations">>}
-]}.
-{[fabric, read_repairs, failure], [
- {type, counter},
- {desc, <<"number of failed read repair operations">>}
-]}.
-{[fabric, doc_update, errors], [
- {type, counter},
- {desc, <<"number of document update errors">>}
-]}.
-{[fabric, doc_update, mismatched_errors], [
- {type, counter},
- {desc, <<"number of document update errors with multiple error types">>}
-]}.
-{[fabric, doc_update, write_quorum_errors], [
- {type, counter},
- {desc, <<"number of write quorum errors">>}
-]}.
diff --git a/src/fabric/rebar.config b/src/fabric/rebar.config
deleted file mode 100644
index 362c8785e..000000000
--- a/src/fabric/rebar.config
+++ /dev/null
@@ -1,14 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric.app.src
deleted file mode 100644
index d7686ca1a..000000000
--- a/src/fabric/src/fabric.app.src
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, fabric, [
- {description, "Routing and proxying layer for CouchDB cluster"},
- {vsn, git},
- {registered, []},
- {applications, [
- kernel,
- stdlib,
- config,
- couch,
- rexi,
- mem3,
- couch_log,
- couch_stats
- ]}
-]}.
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
deleted file mode 100644
index 6d779d584..000000000
--- a/src/fabric/src/fabric.erl
+++ /dev/null
@@ -1,840 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-% DBs
--export([
- all_dbs/0, all_dbs/1,
- create_db/1, create_db/2,
- delete_db/1,
- delete_db/2,
- get_db_info/1,
- get_doc_count/1, get_doc_count/2,
- set_revs_limit/3,
- set_security/2, set_security/3,
- get_revs_limit/1,
- get_security/1, get_security/2,
- get_all_security/1, get_all_security/2,
- get_purge_infos_limit/1,
- set_purge_infos_limit/3,
- compact/1, compact/2,
- get_partition_info/2
-]).
-
-% Documents
--export([
- open_doc/3,
- open_revs/4,
- get_doc_info/3,
- get_full_doc_info/3,
- get_missing_revs/2, get_missing_revs/3,
- update_doc/3,
- update_docs/3,
- purge_docs/3,
- att_receiver/3
-]).
-
-% Views
--export([
- all_docs/4, all_docs/5,
- changes/4,
- query_view/3, query_view/4, query_view/6, query_view/7,
- get_view_group_info/2,
- end_changes/0
-]).
-
-% miscellany
--export([
- design_docs/1,
- reset_validation_funs/1,
- cleanup_index_files/0,
- cleanup_index_files/1,
- cleanup_index_files_all_nodes/1,
- dbname/1,
- inactive_index_files/1,
- db_uuids/1
-]).
-
--type dbname() :: (iodata() | tuple()).
--type docid() :: iodata().
--type revision() :: {integer(), binary()}.
--type callback() :: fun((any(), any()) -> {ok | stop, any()}).
--type json_obj() :: {[{binary() | atom(), any()}]}.
--type option() :: atom() | {atom(), any()}.
-
-%% db operations
-%% @equiv all_dbs(<<>>)
-all_dbs() ->
- all_dbs(<<>>).
-
-%% @doc returns a list of all database names
--spec all_dbs(Prefix :: iodata()) -> {ok, [binary()]}.
-all_dbs(Prefix) when is_binary(Prefix) ->
- Length = byte_size(Prefix),
- MatchingDbs = mem3:fold_shards(
- fun(#shard{dbname = DbName}, Acc) ->
- case DbName of
- <<Prefix:Length/binary, _/binary>> ->
- [DbName | Acc];
- _ ->
- Acc
- end
- end,
- []
- ),
- {ok, lists:usort(MatchingDbs)};
-%% @equiv all_dbs(list_to_binary(Prefix))
-all_dbs(Prefix) when is_list(Prefix) ->
- all_dbs(list_to_binary(Prefix)).
-
-%% @doc returns a property list of interesting properties
-%% about the database such as `doc_count', `disk_size',
-%% etc.
--spec get_db_info(dbname()) ->
- {ok, [
- {instance_start_time, binary()}
- | {doc_count, non_neg_integer()}
- | {doc_del_count, non_neg_integer()}
- | {purge_seq, non_neg_integer()}
- | {compact_running, boolean()}
- | {disk_size, non_neg_integer()}
- | {disk_format_version, pos_integer()}
- ]}.
-get_db_info(DbName) ->
- fabric_db_info:go(dbname(DbName)).
-
-%% @doc returns information about a given partition, including doc counts and sizes
--spec get_partition_info(dbname(), Partition :: binary()) ->
- {ok, [
- {db_name, binary()}
- | {partition, binary()}
- | {doc_count, non_neg_integer()}
- | {doc_del_count, non_neg_integer()}
- | {sizes, json_obj()}
- ]}.
-get_partition_info(DbName, Partition) ->
- fabric_db_partition_info:go(dbname(DbName), Partition).
-
-%% @doc the number of docs in a database
-%% @equiv get_doc_count(DbName, <<"_all_docs">>)
-get_doc_count(DbName) ->
- get_doc_count(DbName, <<"_all_docs">>).
-
-%% @doc the number of docs in a database for the given namespace
--spec get_doc_count(dbname(), Namespace :: binary()) ->
- {ok, non_neg_integer() | null}
- | {error, atom()}
- | {error, atom(), any()}.
-get_doc_count(DbName, <<"_all_docs">>) ->
- fabric_db_doc_count:go(dbname(DbName));
-get_doc_count(DbName, <<"_design">>) ->
- fabric_design_doc_count:go(dbname(DbName));
-get_doc_count(_DbName, <<"_local">>) ->
- {ok, null}.
-
-%% @equiv create_db(DbName, [])
-create_db(DbName) ->
- create_db(DbName, []).
-
-%% @doc creates a database with the given name.
-%%
-%% Options can include values for q and n,
-%% for example `{q, "8"}' and `{n, "3"}', which
-%% control how many shards to split a database into
-%% and how many nodes each doc is copied to respectively.
-%%
--spec create_db(dbname(), [option()]) -> ok | accepted | {error, atom()}.
-create_db(DbName, Options) ->
- fabric_db_create:go(dbname(DbName), opts(Options)).
-
-%% @equiv delete_db([])
-delete_db(DbName) ->
- delete_db(DbName, []).
-
-%% @doc delete a database
--spec delete_db(dbname(), [option()]) -> ok | accepted | {error, atom()}.
-delete_db(DbName, Options) ->
- fabric_db_delete:go(dbname(DbName), opts(Options)).
-
-%% @doc provide an upper bound for the number of tracked document revisions
--spec set_revs_limit(dbname(), pos_integer(), [option()]) -> ok.
-set_revs_limit(DbName, Limit, Options) when is_integer(Limit), Limit > 0 ->
- fabric_db_meta:set_revs_limit(dbname(DbName), Limit, opts(Options)).
-
-%% @doc retrieves the maximum number of document revisions
--spec get_revs_limit(dbname()) -> pos_integer() | no_return().
-get_revs_limit(DbName) ->
- {ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
- try
- couch_db:get_revs_limit(Db)
- after
- catch couch_db:close(Db)
- end.
-
-%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj :: json_obj()) -> ok.
-set_security(DbName, SecObj) ->
- fabric_db_meta:set_security(dbname(DbName), SecObj, [?ADMIN_CTX]).
-
-%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj :: json_obj(), [option()]) -> ok.
-set_security(DbName, SecObj, Options) ->
- fabric_db_meta:set_security(dbname(DbName), SecObj, opts(Options)).
-
-%% @doc sets the upper bound for the number of stored purge requests
--spec set_purge_infos_limit(dbname(), pos_integer(), [option()]) -> ok.
-set_purge_infos_limit(DbName, Limit, Options) when
- is_integer(Limit), Limit > 0
-->
- fabric_db_meta:set_purge_infos_limit(dbname(DbName), Limit, opts(Options)).
-
-%% @doc retrieves the upper bound for the number of stored purge requests
--spec get_purge_infos_limit(dbname()) -> pos_integer() | no_return().
-get_purge_infos_limit(DbName) ->
- {ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
- try
- couch_db:get_purge_infos_limit(Db)
- after
- catch couch_db:close(Db)
- end.
-
-get_security(DbName) ->
- get_security(DbName, [?ADMIN_CTX]).
-
-%% @doc retrieve the security object for a database
--spec get_security(dbname(), [option()]) -> json_obj() | no_return().
-get_security(DbName, Options) ->
- {ok, Db} = fabric_util:get_db(dbname(DbName), opts(Options)),
- try
- couch_db:get_security(Db)
- after
- catch couch_db:close(Db)
- end.
-
-%% @doc retrieve the security object for all shards of a database
--spec get_all_security(dbname()) ->
- {ok, [{#shard{}, json_obj()}]}
- | {error, no_majority | timeout}
- | {error, atom(), any()}.
-get_all_security(DbName) ->
- get_all_security(DbName, []).
-
-%% @doc retrieve the security object for all shards of a database
--spec get_all_security(dbname(), [option()]) ->
- {ok, [{#shard{}, json_obj()}]}
- | {error, no_majority | timeout}
- | {error, atom(), any()}.
-get_all_security(DbName, Options) ->
- fabric_db_meta:get_all_security(dbname(DbName), opts(Options)).
-
-compact(DbName) ->
- [
- rexi:cast(Node, {fabric_rpc, compact, [Name]})
- || #shard{node = Node, name = Name} <- mem3:shards(dbname(DbName))
- ],
- ok.
-
-compact(DbName, DesignName) ->
- [
- rexi:cast(Node, {fabric_rpc, compact, [Name, DesignName]})
- || #shard{node = Node, name = Name} <- mem3:shards(dbname(DbName))
- ],
- ok.
-
-% doc operations
-
-%% @doc retrieve the doc with a given id
--spec open_doc(dbname(), docid(), [option()]) ->
- {ok, #doc{}}
- | {not_found, missing | deleted}
- | {timeout, any()}
- | {error, any()}
- | {error, any() | any()}.
-open_doc(DbName, Id, Options) ->
- case proplists:get_value(doc_info, Options) of
- undefined ->
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options));
- Else ->
- {error, {invalid_option, {doc_info, Else}}}
- end.
-
-%% @doc retrieve a collection of revisions, possibly all
--spec open_revs(dbname(), docid(), [revision()] | all, [option()]) ->
- {ok, [{ok, #doc{}} | {{not_found, missing}, revision()}]}
- | {timeout, any()}
- | {error, any()}
- | {error, any(), any()}.
-open_revs(DbName, Id, Revs, Options) ->
- fabric_doc_open_revs:go(dbname(DbName), docid(Id), Revs, opts(Options)).
-
-%% @doc Retrieves information about a document with a given id
--spec get_doc_info(dbname(), docid(), [option()]) ->
- {ok, #doc_info{}}
- | {not_found, missing}
- | {timeout, any()}
- | {error, any()}
- | {error, any() | any()}.
-get_doc_info(DbName, Id, Options) ->
- Options1 = [doc_info | Options],
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options1)).
-
-%% @doc Retrieves full information about a document with a given id
--spec get_full_doc_info(dbname(), docid(), [option()]) ->
- {ok, #full_doc_info{}}
- | {not_found, missing | deleted}
- | {timeout, any()}
- | {error, any()}
- | {error, any() | any()}.
-get_full_doc_info(DbName, Id, Options) ->
- Options1 = [{doc_info, full} | Options],
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options1)).
-
-%% @equiv get_missing_revs(DbName, IdsRevs, [])
-get_missing_revs(DbName, IdsRevs) ->
- get_missing_revs(DbName, IdsRevs, []).
-
-%% @doc retrieve missing revisions for a list of `{Id, Revs}'
--spec get_missing_revs(dbname(), [{docid(), [revision()]}], [option()]) ->
- {ok, [{docid(), any(), [any()]}]}.
-get_missing_revs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
- Sanitized = [idrevs(IdR) || IdR <- IdsRevs],
- fabric_doc_missing_revs:go(dbname(DbName), Sanitized, opts(Options)).
-
-%% @doc update a single doc
-%% @equiv update_docs(DbName,[Doc],Options)
--spec update_doc(dbname(), #doc{} | json_obj(), [option()]) ->
- {ok, any()} | any().
-update_doc(DbName, Doc, Options) ->
- case update_docs(DbName, [Doc], opts(Options)) of
- {ok, [{ok, NewRev}]} ->
- {ok, NewRev};
- {accepted, [{accepted, NewRev}]} ->
- {accepted, NewRev};
- {ok, [{{_Id, _Rev}, Error}]} ->
- throw(Error);
- {ok, [Error]} ->
- throw(Error);
- {ok, []} ->
- % replication success
- #doc{revs = {Pos, [RevId | _]}} = doc(DbName, Doc),
- {ok, {Pos, RevId}};
- {error, [Error]} ->
- throw(Error)
- end.
-
-%% @doc update a list of docs
--spec update_docs(dbname(), [#doc{} | json_obj()], [option()]) ->
- {ok, any()} | any().
-update_docs(DbName, Docs0, Options) ->
- try
- Docs1 = docs(DbName, Docs0),
- fabric_doc_update:go(dbname(DbName), Docs1, opts(Options))
- of
- {ok, Results} ->
- {ok, Results};
- {accepted, Results} ->
- {accepted, Results};
- {error, Error} ->
- {error, Error};
- Error ->
- throw(Error)
- catch
- {aborted, PreCommitFailures} ->
- {aborted, PreCommitFailures}
- end.
-
-%% @doc purge revisions for a list of `{Id, Revs}'
-%% returns {ok, {PurgeSeq, Results}}
--spec purge_docs(dbname(), [{docid(), [revision()]}], [option()]) ->
- {ok, [{Health, [revision()]}] | {error, any()}}
-when
- Health :: ok | accepted.
-purge_docs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
- IdsRevs2 = [idrevs(IdRs) || IdRs <- IdsRevs],
- fabric_doc_purge:go(dbname(DbName), IdsRevs2, opts(Options)).
-
-%% @doc spawns a process to upload attachment data and returns either a
-%% fabric attachment receiver context tuple with the spawned middleman
-%% process or an empty binary, or exits with an error tuple {Error, Arg}
--spec att_receiver(
- #httpd{},
- dbname(),
- Length ::
- undefined
- | chunked
- | pos_integer()
- | {unknown_transfer_encoding, any()}
-) ->
- {fabric_attachment_receiver, pid(), chunked | pos_integer()} | binary().
-att_receiver(Req, DbName, Length) ->
- fabric_doc_atts:receiver(Req, DbName, Length).
-
-%% @equiv all_docs(DbName, [], Callback, Acc0, QueryArgs)
-all_docs(DbName, Callback, Acc, QueryArgs) ->
- all_docs(DbName, [], Callback, Acc, QueryArgs).
-
-%% @doc retrieves all docs. Additional query parameters, such as `limit',
-%% `start_key' and `end_key', `descending', and `include_docs', can
-%% also be passed to further constrain the query. See <a href=
-%% "http://wiki.apache.org/couchdb/HTTP_Document_API#All_Documents">
-%% all_docs</a> for details
--spec all_docs(
- dbname(),
- [{atom(), any()}],
- callback(),
- [] | tuple(),
- #mrargs{} | [option()]
-) ->
- {ok, any()} | {error, Reason :: term()}.
-
-all_docs(DbName, Options, Callback, Acc0, #mrargs{} = QueryArgs) when
- is_function(Callback, 2)
-->
- fabric_view_all_docs:go(dbname(DbName), opts(Options), QueryArgs, Callback, Acc0);
-%% @doc convenience function that takes a keylist rather than a record
-%% @equiv all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs))
-all_docs(DbName, Options, Callback, Acc0, QueryArgs) ->
- all_docs(DbName, Options, Callback, Acc0, kl_to_query_args(QueryArgs)).
-
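% Editor's sketch (not part of the original module): a minimal all_docs/5 call.
% The callback shape mirrors the one used by design_docs/1 further down in this
% file; the database name and limit are illustrative only.
%
%   Callback = fun
%       ({meta, _Meta}, Acc) -> {ok, Acc};
%       ({row, Props}, Acc) -> {ok, [couch_util:get_value(id, Props) | Acc]};
%       (complete, Acc) -> {ok, lists:reverse(Acc)};
%       ({error, Reason}, _Acc) -> {error, Reason}
%   end,
%   {ok, Ids} = fabric:all_docs(<<"mydb">>, [], Callback, [], #mrargs{limit = 10}).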
--spec changes(dbname(), callback(), any(), #changes_args{} | [{atom(), any()}]) ->
- {ok, any()}.
-changes(DbName, Callback, Acc0, #changes_args{} = Options) ->
- Feed = Options#changes_args.feed,
- fabric_view_changes:go(dbname(DbName), Feed, Options, Callback, Acc0);
-%% @doc convenience function, takes keylist instead of record
-%% @equiv changes(DbName, Callback, Acc0, kl_to_changes_args(Options))
-changes(DbName, Callback, Acc0, Options) ->
- changes(DbName, Callback, Acc0, kl_to_changes_args(Options)).
-
-%% @equiv query_view(DbName, DesignName, ViewName, #mrargs{})
-query_view(DbName, DesignName, ViewName) ->
- query_view(DbName, DesignName, ViewName, #mrargs{}).
-
-%% @equiv query_view(DbName, DesignName,
-%% ViewName, fun default_callback/2, [], QueryArgs)
-query_view(DbName, DesignName, ViewName, QueryArgs) ->
- Callback = fun default_callback/2,
- query_view(DbName, DesignName, ViewName, Callback, [], QueryArgs).
-
-%% @equiv query_view(DbName, DesignName, [],
-%% ViewName, fun default_callback/2, [], QueryArgs)
-query_view(DbName, DDoc, ViewName, Callback, Acc, QueryArgs) ->
- query_view(DbName, [], DDoc, ViewName, Callback, Acc, QueryArgs).
-
-%% @doc execute a given view.
-%% There are many additional query args that can be passed to a view,
-%% see <a href="http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options">
-%% query args</a> for details.
--spec query_view(
- dbname(),
- [{atom(), any()}] | [],
- #doc{} | binary(),
- iodata(),
- callback(),
- any(),
- #mrargs{}
-) ->
- any().
-query_view(Db, Options, GroupId, ViewName, Callback, Acc0, QueryArgs) when
- is_binary(GroupId)
-->
- DbName = dbname(Db),
- {ok, DDoc} = ddoc_cache:open(DbName, <<"_design/", GroupId/binary>>),
- query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs);
-query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs0) ->
- DbName = dbname(Db),
- View = name(ViewName),
- case fabric_util:is_users_db(DbName) of
- true ->
- FakeDb = fabric_util:open_cluster_db(DbName, Options),
- couch_users_db:after_doc_read(DDoc, FakeDb);
- false ->
- ok
- end,
- {ok, #mrst{views = Views, language = Lang}} =
- couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- QueryArgs1 = couch_mrview_util:set_view_type(QueryArgs0, View, Views),
- QueryArgs2 = fabric_util:validate_args(Db, DDoc, QueryArgs1),
- VInfo = couch_mrview_util:extract_view(Lang, QueryArgs2, View, Views),
- case is_reduce_view(QueryArgs2) of
- true ->
- fabric_view_reduce:go(
- Db,
- DDoc,
- View,
- QueryArgs2,
- Callback,
- Acc0,
- VInfo
- );
- false ->
- fabric_view_map:go(
- Db,
- Options,
- DDoc,
- View,
- QueryArgs2,
- Callback,
- Acc0,
- VInfo
- )
- end.
-
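% Editor's sketch (not part of the original module): querying a view through
% fabric with the default callback; the db, design doc, and view names are
% illustrative only.
%
%   fabric:query_view(<<"mydb">>, <<"ddoc">>, <<"by_author">>, #mrargs{limit = 5}).
%
% query_view/4 accumulates rows with default_callback/2 and hands back the
% collected list once the 'complete' message arrives.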
-%% @doc retrieve info about a view group: disk size, language, whether compaction
-%% is running, and so forth
--spec get_view_group_info(dbname(), #doc{} | docid()) ->
- {ok, [
- {signature, binary()}
- | {language, binary()}
- | {disk_size, non_neg_integer()}
- | {compact_running, boolean()}
- | {updater_running, boolean()}
- | {waiting_commit, boolean()}
- | {waiting_clients, non_neg_integer()}
- | {update_seq, pos_integer()}
- | {purge_seq, non_neg_integer()}
- | {sizes, [
- {active, non_neg_integer()}
- | {external, non_neg_integer()}
- | {file, non_neg_integer()}
- ]}
- | {updates_pending, [
- {minimum, non_neg_integer()}
- | {preferred, non_neg_integer()}
- | {total, non_neg_integer()}
- ]}
- ]}.
-get_view_group_info(DbName, DesignId) ->
- fabric_group_info:go(dbname(DbName), design_doc(DesignId)).
-
--spec end_changes() -> ok.
-end_changes() ->
- fabric_view_changes:increment_changes_epoch().
-
-%% @doc retrieve all the design docs from a database
--spec design_docs(dbname()) -> {ok, [json_obj()]} | {error, Reason :: term()}.
-design_docs(DbName) ->
- Extra =
- case get(io_priority) of
- undefined -> [];
- Else -> [{io_priority, Else}]
- end,
- QueryArgs0 = #mrargs{
- include_docs = true,
- extra = Extra
- },
- QueryArgs = set_namespace(<<"_design">>, QueryArgs0),
- Callback = fun
- ({meta, _}, []) ->
- {ok, []};
- ({row, Props}, Acc) ->
- {ok, [couch_util:get_value(doc, Props) | Acc]};
- (complete, Acc) ->
- {ok, lists:reverse(Acc)};
- ({error, Reason}, _Acc) ->
- {error, Reason}
- end,
- fabric:all_docs(dbname(DbName), [?ADMIN_CTX], Callback, [], QueryArgs).
-
-%% @doc forces a reload of validation functions; this is performed after
-%% design docs are updated
-%% NOTE: This function probably doesn't belong here as part of the API
--spec reset_validation_funs(dbname()) -> [reference()].
-reset_validation_funs(DbName) ->
- [
- rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]})
- || #shard{node = Node, name = Name} <- mem3:shards(DbName)
- ].
-
-%% @doc clean up index files for all Dbs
--spec cleanup_index_files() -> [ok].
-cleanup_index_files() ->
- {ok, Dbs} = fabric:all_dbs(),
- [cleanup_index_files(Db) || Db <- Dbs].
-
-%% @doc clean up index files for a specific db
--spec cleanup_index_files(dbname()) -> ok.
-cleanup_index_files(DbName) ->
- try
- lists:foreach(
- fun(File) ->
- file:delete(File)
- end,
- inactive_index_files(DbName)
- )
- catch
- error:Error ->
- couch_log:error(
- "~p:cleanup_index_files. Error: ~p",
- [?MODULE, Error]
- ),
- ok
- end.
-
-%% @doc lists inactive index files for a specific db
--spec inactive_index_files(dbname()) -> [string()].
-inactive_index_files(DbName) ->
- {ok, DesignDocs} = fabric:design_docs(DbName),
-
- ActiveSigs = maps:from_list(
- lists:map(
- fun(#doc{id = GroupId}) ->
- {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
- {binary_to_list(couch_util:get_value(signature, Info)), nil}
- end,
- [couch_doc:from_json_obj(DD) || DD <- DesignDocs]
- )
- ),
-
- FileList = lists:flatmap(
- fun(#shard{name = ShardName}) ->
- IndexDir = couch_index_util:index_dir(mrview, ShardName),
- filelib:wildcard([IndexDir, "/*"])
- end,
- mem3:local_shards(dbname(DbName))
- ),
-
- if
- ActiveSigs =:= [] ->
- FileList;
- true ->
- %% <sig>.view and <sig>.compact.view where <sig> is in ActiveSigs
- %% will be excluded from FileList because they are active view
- %% files and should not be deleted.
- lists:filter(
- fun(FilePath) ->
- not maps:is_key(get_view_sig_from_filename(FilePath), ActiveSigs)
- end,
- FileList
- )
- end.
-
-%% @doc clean up index files for a specific db on all nodes
--spec cleanup_index_files_all_nodes(dbname()) -> [reference()].
-cleanup_index_files_all_nodes(DbName) ->
- lists:foreach(
- fun(Node) ->
- rexi:cast(Node, {?MODULE, cleanup_index_files, [DbName]})
- end,
- mem3:nodes()
- ).
-
-%% some simple type validation and transcoding
-dbname(DbName) when is_list(DbName) ->
- list_to_binary(DbName);
-dbname(DbName) when is_binary(DbName) ->
- DbName;
-dbname(Db) ->
- try
- couch_db:name(Db)
- catch
- error:badarg ->
- erlang:error({illegal_database_name, Db})
- end.
-
-%% @doc get db shard uuids
--spec db_uuids(dbname()) -> map().
-db_uuids(DbName) ->
- fabric_db_uuids:go(dbname(DbName)).
-
-name(Thing) ->
- couch_util:to_binary(Thing).
-
-docid(DocId) when is_list(DocId) ->
- list_to_binary(DocId);
-docid(DocId) ->
- DocId.
-
-docs(Db, Docs) when is_list(Docs) ->
- [doc(Db, D) || D <- Docs];
-docs(_Db, Docs) ->
- erlang:error({illegal_docs_list, Docs}).
-
-doc(_Db, #doc{} = Doc) ->
- Doc;
-doc(Db0, {_} = Doc) ->
- Db =
- case couch_db:is_db(Db0) of
- true ->
- Db0;
- false ->
- Shard = hd(mem3:shards(Db0)),
- Props = couch_util:get_value(props, Shard#shard.opts, []),
- {ok, Db1} = couch_db:clustered_db(Db0, [{props, Props}]),
- Db1
- end,
- couch_db:doc_from_json_obj_validate(Db, Doc);
-doc(_Db, Doc) ->
- erlang:error({illegal_doc_format, Doc}).
-
-design_doc(#doc{} = DDoc) ->
- DDoc;
-design_doc(DocId) when is_list(DocId) ->
- design_doc(list_to_binary(DocId));
-design_doc(<<"_design/", _/binary>> = DocId) ->
- DocId;
-design_doc(GroupName) ->
- <<"_design/", GroupName/binary>>.
-
-idrevs({Id, Revs}) when is_list(Revs) ->
- {docid(Id), [rev(R) || R <- Revs]}.
-
-rev(Rev) when is_list(Rev); is_binary(Rev) ->
- couch_doc:parse_rev(Rev);
-rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
- Rev.
-
-%% @doc convenience method, useful when testing or calling fabric from the shell
-opts(Options) ->
- add_option(user_ctx, add_option(io_priority, Options)).
-
-add_option(Key, Options) ->
- case couch_util:get_value(Key, Options) of
- undefined ->
- case erlang:get(Key) of
- undefined ->
- Options;
- Value ->
- [{Key, Value} | Options]
- end;
- _ ->
- Options
- end.
-
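% Editor's sketch (not part of the original module): opts/1 only fills in keys
% that are missing from Options, pulling them from the caller's process
% dictionary; the io_priority value below is illustrative only.
%
%   erlang:put(io_priority, {interactive, <<"mydb">>}),
%   opts([]).
%   %% => [{io_priority, {interactive, <<"mydb">>}}]
%
% A key already present in Options is left untouched.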
-default_callback(complete, Acc) ->
- {ok, lists:reverse(Acc)};
-default_callback(Row, Acc) ->
- {ok, [Row | Acc]}.
-
-is_reduce_view(#mrargs{view_type = ViewType}) ->
- ViewType =:= red;
-is_reduce_view({Reduce, _, _}) ->
- Reduce =:= red.
-
-%% @doc convenience method for use in the shell, converts a keylist
-%% to a `changes_args' record
-kl_to_changes_args(KeyList) ->
- kl_to_record(KeyList, changes_args).
-
-%% @doc convenience method for use in the shell, converts a keylist
-%% to a `mrargs' record
-kl_to_query_args(KeyList) ->
- kl_to_record(KeyList, mrargs).
-
-%% @doc finds the index of the given Key in the record.
-%% note that record_info is only known at compile time
-%% so the code must be written in this way. For each new
-%% record type add a case clause
-lookup_index(Key, RecName) ->
- Indexes =
- case RecName of
- changes_args ->
- lists:zip(
- record_info(fields, changes_args),
- lists:seq(2, record_info(size, changes_args))
- );
- mrargs ->
- lists:zip(
- record_info(fields, mrargs),
- lists:seq(2, record_info(size, mrargs))
- )
- end,
- couch_util:get_value(Key, Indexes).
-
-%% @doc convert a keylist to record with given `RecName'
-%% @see lookup_index
-kl_to_record(KeyList, RecName) ->
- Acc0 =
- case RecName of
- changes_args -> #changes_args{};
- mrargs -> #mrargs{}
- end,
- lists:foldl(
- fun({Key, Value}, Acc) ->
- Index = lookup_index(couch_util:to_existing_atom(Key), RecName),
- setelement(Index, Acc, Value)
- end,
- Acc0,
- KeyList
- ).
-
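% Editor's sketch (not part of the original module): kl_to_record/2 looks up
% each key's field position with lookup_index/2 and writes it with
% setelement/3, so a shell call such as
%
%   kl_to_query_args([{limit, 10}, {descending, true}])
%
% yields #mrargs{limit = 10, descending = true} with every other field left at
% its record default. Keys must already exist as atoms, since
% couch_util:to_existing_atom/1 is used for the conversion.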
-set_namespace(NS, #mrargs{extra = Extra} = Args) ->
- Args#mrargs{extra = [{namespace, NS} | Extra]}.
-
-get_view_sig_from_filename(FilePath) ->
- filename:basename(filename:basename(FilePath, ".view"), ".compact").
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-update_doc_test_() ->
- {
- "Update doc tests",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- fun(Ctx) ->
- [
- should_throw_conflict(Ctx)
- ]
- end
- }
- }.
-
-should_throw_conflict(Doc) ->
- ?_test(begin
- ?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
- end).
-
-setup() ->
- Doc = #doc{
- id = <<"test_doc">>,
- revs = {3, [<<5, 68, 252, 180, 43, 161, 216, 223, 26, 119, 71, 219, 212, 229, 159, 113>>]},
- body = {[{<<"foo">>, <<"asdf">>}, {<<"author">>, <<"tom">>}]},
- atts = [],
- deleted = false,
- meta = []
- },
- ok = application:ensure_started(config),
- ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
- ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
- ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
- ok = meck:expect(
- rexi_utils,
- recv,
- fun(_, _, _, _, _, _) ->
- {ok, {error, [{Doc, conflict}]}}
- end
- ),
- ok = meck:expect(
- couch_util,
- reorder_results,
- fun(_, [{_, Res}], _) ->
- [Res]
- end
- ),
- ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
- ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
- Doc.
-
-teardown(_) ->
- meck:unload(),
- ok = application:stop(config).
-
--endif.
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
deleted file mode 100644
index 38770aea4..000000000
--- a/src/fabric/src/fabric_db_create.erl
+++ /dev/null
@@ -1,237 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_create).
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% @doc Create a new database and all its partition files across the cluster.
-%% Options is a proplist with user_ctx, n, q, validate_name.
-go(DbName, Options) ->
- case validate_dbname(DbName, Options) of
- ok ->
- couch_partition:validate_dbname(DbName, Options),
- case db_exists(DbName) of
- true ->
- {error, file_exists};
- false ->
- {Shards, Doc} = generate_shard_map(DbName, Options),
- CreateShardResult = create_shard_files(Shards, Options),
- case CreateShardResult of
- enametoolong ->
- {error, {database_name_too_long, DbName}};
- _ ->
- case {CreateShardResult, create_shard_db_doc(Doc)} of
- {ok, {ok, Status}} ->
- Status;
- {ok, {error, conflict} = ShardDocError} ->
- % Check if it is just a race to create the shard doc
- case db_exists(DbName) of
- true -> {error, file_exists};
- false -> ShardDocError
- end;
- {file_exists, {ok, _}} ->
- {error, file_exists};
- {_, Error} ->
- Error
- end
- end
- end;
- Error ->
- Error
- end.
-
-validate_dbname(DbName, Options) ->
- case couch_util:get_value(validate_name, Options, true) of
- false ->
- ok;
- true ->
- couch_db:validate_dbname(DbName)
- end.
-
-generate_shard_map(DbName, Options) ->
- {MegaSecs, Secs, _} = os:timestamp(),
- Suffix = "." ++ integer_to_list(MegaSecs * 1000000 + Secs),
- Shards = mem3:choose_shards(DbName, [{shard_suffix, Suffix} | Options]),
- case mem3_util:open_db_doc(DbName) of
- {ok, Doc} ->
- % the DB already exists, and may have a different Suffix
- ok;
- {not_found, _} ->
- Doc = make_document(Shards, Suffix, Options)
- end,
- {Shards, Doc}.
-
-create_shard_files(Shards, Options) ->
- Workers = fabric_util:submit_jobs(Shards, create_db, [Options]),
- RexiMon = fabric_util:create_monitors(Shards),
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Workers) of
- {error, file_exists} ->
- file_exists;
- {error, enametoolong} ->
- enametoolong;
- {timeout, DefunctWorkers} ->
- fabric_util:log_timeout(DefunctWorkers, "create_db"),
- {error, timeout};
- _ ->
- ok
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({error, enametoolong}, _, _) ->
- {error, enametoolong};
-handle_message(file_exists, _, _) ->
- {error, file_exists};
-handle_message({rexi_DOWN, _, {_, Node}, _}, _, Workers) ->
- case lists:filter(fun(S) -> S#shard.node =/= Node end, Workers) of
- [] ->
- {stop, ok};
- RemainingWorkers ->
- {ok, RemainingWorkers}
- end;
-handle_message(_, Worker, Workers) ->
- case lists:delete(Worker, Workers) of
- [] ->
- {stop, ok};
- RemainingWorkers ->
- {ok, RemainingWorkers}
- end.
-
-create_shard_db_doc(Doc) ->
- Shards = [#shard{node = N} || N <- mem3:nodes()],
- RexiMon = fabric_util:create_monitors(Shards),
- Workers = fabric_util:submit_jobs(Shards, create_shard_db_doc, [Doc]),
- Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
- try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
- {timeout, {_, WorkersDict}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(
- DefunctWorkers,
- "create_shard_db_doc"
- ),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
- New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
- maybe_stop(W, New);
-handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
- maybe_stop(W, fabric_dict:erase(Worker, Counters));
-handle_db_update(conflict, _, _) ->
- % just fail when we get any conflicts
- {error, conflict};
-handle_db_update(Msg, Worker, {W, Counters}) ->
- maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
-
-maybe_stop(W, Counters) ->
- case fabric_dict:any(nil, Counters) of
- true ->
- {ok, {W, Counters}};
- false ->
- case lists:sum([1 || {_, ok} <- Counters]) of
- NumOk when NumOk >= (W div 2 + 1) ->
- {stop, ok};
- NumOk when NumOk > 0 ->
- {stop, accepted};
- _ ->
- {error, internal_server_error}
- end
- end.
-
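% Editor's note (not part of the original module): W is the number of cluster
% nodes the shard-map doc was sent to, so this is a simple majority check.
% Worked example with W = 3 (majority = 3 div 2 + 1 = 2):
%
%   3 or 2 'ok' replies -> {stop, ok}
%   1 'ok' reply        -> {stop, accepted}
%   0 'ok' replies      -> {error, internal_server_error}
%
% While any worker has not replied (a nil counter remains), the fold keeps
% waiting by returning {ok, {W, Counters}}.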
-make_document([#shard{dbname = DbName} | _] = Shards, Suffix, Options) ->
- {RawOut, ByNodeOut, ByRangeOut} =
- lists:foldl(
- fun(#shard{node = N, range = [B, E]}, {Raw, ByNode, ByRange}) ->
- Range = ?l2b([
- couch_util:to_hex(<<B:32/integer>>),
- "-",
- couch_util:to_hex(<<E:32/integer>>)
- ]),
- Node = couch_util:to_binary(N),
- {
- [[<<"add">>, Range, Node] | Raw],
- orddict:append(Node, Range, ByNode),
- orddict:append(Range, Node, ByRange)
- }
- end,
- {[], [], []},
- Shards
- ),
- EngineProp =
- case couch_util:get_value(engine, Options) of
- E when is_binary(E) -> [{<<"engine">>, E}];
- _ -> []
- end,
- DbProps =
- case couch_util:get_value(props, Options) of
- Props when is_list(Props) -> [{<<"props">>, {Props}}];
- _ -> []
- end,
- #doc{
- id = DbName,
- body = {
- [
- {<<"shard_suffix">>, Suffix},
- {<<"changelog">>, lists:sort(RawOut)},
- {<<"by_node">>, {[{K, lists:sort(V)} || {K, V} <- ByNodeOut]}},
- {<<"by_range">>, {[{K, lists:sort(V)} || {K, V} <- ByRangeOut]}}
- ] ++ EngineProp ++ DbProps
- }
- }.
-
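% Editor's note (not part of the original module): each shard range is encoded
% as two 32-bit bounds rendered as lowercase hex and joined with a dash. For a
% range of [0, 16#1fffffff] (the first of eight equal ranges) this produces
%
%   <<"00000000-1fffffff">>
%
% and, for a hypothetical node 'node1@127.0.0.1', the changelog entry
% [<<"add">>, <<"00000000-1fffffff">>, <<"node1@127.0.0.1">>].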
-db_exists(DbName) -> is_list(catch mem3:shards(DbName)).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-db_exists_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- fun db_exists_for_existing_db/0,
- fun db_exists_for_missing_db/0
- ]
- }.
-
-setup_all() ->
- meck:new(mem3).
-
-teardown_all(_) ->
- meck:unload().
-
-db_exists_for_existing_db() ->
- Mock = fun(DbName) when is_binary(DbName) ->
- [#shard{dbname = DbName, range = [0, 100]}]
- end,
- ok = meck:expect(mem3, shards, Mock),
- ?assertEqual(true, db_exists(<<"foobar">>)),
- ?assertEqual(true, meck:validate(mem3)).
-
-db_exists_for_missing_db() ->
- Mock = fun(DbName) ->
- erlang:error(database_does_not_exist, DbName)
- end,
- ok = meck:expect(mem3, shards, Mock),
- ?assertEqual(false, db_exists(<<"foobar">>)),
- ?assertEqual(false, meck:validate(mem3)).
-
--endif.
diff --git a/src/fabric/src/fabric_db_delete.erl b/src/fabric/src/fabric_db_delete.erl
deleted file mode 100644
index a257b0d6e..000000000
--- a/src/fabric/src/fabric_db_delete.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_delete).
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-%% @doc Options aren't currently used by couch on delete, but are left here
-%% to be consistent with fabric_db_create for possible future use
-%% @see couch_server:delete/2
-%%
-go(DbName, _Options) ->
- Shards = mem3:shards(DbName),
- % delete doc from shard_db
- try delete_shard_db_doc(DbName) of
- {ok, ok} ->
- ok;
- {ok, accepted} ->
- accepted;
- {ok, not_found} ->
- erlang:error(database_does_not_exist, DbName);
- Error ->
- Error
- after
- % delete the shard files
- fabric_util:submit_jobs(Shards, delete_db, [])
- end.
-
-delete_shard_db_doc(Doc) ->
- Shards = [#shard{node = N} || N <- mem3:nodes()],
- RexiMon = fabric_util:create_monitors(Shards),
- Workers = fabric_util:submit_jobs(Shards, delete_shard_db_doc, [Doc]),
- Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
- try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
- {timeout, {_, WorkersDict}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(
- DefunctWorkers,
- "delete_shard_db_doc"
- ),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
- New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
- maybe_stop(W, New);
-handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
- maybe_stop(W, fabric_dict:erase(Worker, Counters));
-handle_db_update(conflict, _, _) ->
- % just fail when we get any conflicts
- {error, conflict};
-handle_db_update(Msg, Worker, {W, Counters}) ->
- maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
-
-maybe_stop(W, Counters) ->
- case fabric_dict:any(nil, Counters) of
- true ->
- {ok, {W, Counters}};
- false ->
- {Ok, NotFound} = fabric_dict:fold(fun count_replies/3, {0, 0}, Counters),
- case {Ok + NotFound, Ok, NotFound} of
- {W, 0, W} ->
- {#shard{dbname = Name}, _} = hd(Counters),
- couch_log:warning("~p not_found ~p", [?MODULE, Name]),
- {stop, not_found};
- {W, _, _} ->
- {stop, ok};
- {_, M, _} when M > 0 ->
- {stop, accepted};
- _ ->
- {error, internal_server_error}
- end
- end.
-
-count_replies(_, ok, {Ok, NotFound}) ->
- {Ok + 1, NotFound};
-count_replies(_, not_found, {Ok, NotFound}) ->
- {Ok, NotFound + 1};
-count_replies(_, _, Acc) ->
- Acc.
diff --git a/src/fabric/src/fabric_db_doc_count.erl b/src/fabric/src/fabric_db_doc_count.erl
deleted file mode 100644
index b2ab35b81..000000000
--- a/src/fabric/src/fabric_db_doc_count.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_doc_count).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_doc_count, []),
- RexiMon = fabric_util:create_monitors(Shards),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {WorkersDict, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "get_doc_count"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, {Counters, Resps}) ->
- case fabric_ring:node_down(NodeRef, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, Reason}
- end;
-handle_message({ok, Count}, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_response(Shard, Count, Counters, Resps) of
- {ok, {Counters1, Resps1}} ->
- {ok, {Counters1, Resps1}};
- {stop, Resps1} ->
- Total = fabric_dict:fold(fun(_, C, A) -> A + C end, 0, Resps1),
- {stop, Total}
- end;
-handle_message(Reason, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, Reason}
- end.
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl
deleted file mode 100644
index 5461404c5..000000000
--- a/src/fabric/src/fabric_db_info.erl
+++ /dev/null
@@ -1,191 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_info).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName) ->
- Shards = mem3:shards(DbName),
- CreationTime = mem3:shard_creation_time(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_db_info, []),
- RexiMon = fabric_util:create_monitors(Shards),
- Fun = fun handle_message/3,
- {ok, ClusterInfo} = get_cluster_info(Shards),
- CInfo = [{cluster, ClusterInfo}],
- Acc0 = {fabric_dict:init(Workers, nil), [], CInfo},
- try
- case fabric_util:recv(Workers, #shard.ref, Fun, Acc0) of
- {ok, Acc} ->
- {ok, [{instance_start_time, CreationTime} | Acc]};
- {timeout, {WorkersDict, _, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- WorkersDict,
- nil
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_db_info"
- ),
- {error, timeout};
- {error, Error} ->
- throw(Error)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, {Counters, Resps, CInfo}) ->
- case fabric_ring:node_down(NodeRef, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps, CInfo}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
- error -> {error, Reason}
- end;
-handle_message({ok, Info}, Shard, {Counters, Resps, CInfo}) ->
- case fabric_ring:handle_response(Shard, Info, Counters, Resps) of
- {ok, {Counters1, Resps1}} ->
- {ok, {Counters1, Resps1, CInfo}};
- {stop, Resps1} ->
- {stop, build_final_response(CInfo, Shard#shard.dbname, Resps1)}
- end;
-handle_message(Reason, Shard, {Counters, Resps, CInfo}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
- error -> {error, Reason}
- end.
-
-build_final_response(CInfo, DbName, Responses) ->
- AccF = fabric_dict:fold(
- fun(Shard, Info, {Seqs, PSeqs, Infos}) ->
- Seq = build_seq(Shard, Info),
- PSeq = couch_util:get_value(purge_seq, Info),
- {[{Shard, Seq} | Seqs], [{Shard, PSeq} | PSeqs], [Info | Infos]}
- end,
- {[], [], []},
- Responses
- ),
- {Seqs, PSeqs, Infos} = AccF,
- PackedSeq = fabric_view_changes:pack_seqs(Seqs),
- PackedPSeq = fabric_view_changes:pack_seqs(PSeqs),
- MergedInfos = merge_results(lists:flatten([CInfo | Infos])),
- Sequences = [{purge_seq, PackedPSeq}, {update_seq, PackedSeq}],
- [{db_name, DbName}] ++ Sequences ++ MergedInfos.
-
-build_seq(#shard{node = Node}, Info) when is_list(Info) ->
- Seq = couch_util:get_value(update_seq, Info),
- Uuid = couch_util:get_value(uuid, Info),
- PrefixLen = fabric_util:get_uuid_prefix_len(),
- {Seq, binary:part(Uuid, {0, PrefixLen}), Node}.
-
-merge_results(Info) ->
- Dict = lists:foldl(
- fun({K, V}, D0) -> orddict:append(K, V, D0) end,
- orddict:new(),
- Info
- ),
- orddict:fold(
- fun
- (doc_count, X, Acc) ->
- [{doc_count, lists:sum(X)} | Acc];
- (doc_del_count, X, Acc) ->
- [{doc_del_count, lists:sum(X)} | Acc];
- (compact_running, X, Acc) ->
- [{compact_running, lists:member(true, X)} | Acc];
- (sizes, X, Acc) ->
- [{sizes, {merge_object(X)}} | Acc];
- (disk_format_version, X, Acc) ->
- [{disk_format_version, lists:max(X)} | Acc];
- (cluster, [X], Acc) ->
- [{cluster, {X}} | Acc];
- (props, Xs, Acc) ->
- [{props, {merge_object(Xs)}} | Acc];
- (_K, _V, Acc) ->
- Acc
- end,
- [],
- Dict
- ).
-
-merge_object(Objects) ->
- Dict = lists:foldl(
- fun({Props}, D) ->
- lists:foldl(fun({K, V}, D0) -> orddict:append(K, V, D0) end, D, Props)
- end,
- orddict:new(),
- Objects
- ),
- orddict:fold(
- fun
- (Key, [X | _] = Xs, Acc) when is_integer(X) ->
- [{Key, lists:sum(Xs)} | Acc];
- (Key, [X | _] = Xs, Acc) when is_boolean(X) ->
- [{Key, lists:all(fun all_true/1, Xs)} | Acc];
- (_Key, _Xs, Acc) ->
- Acc
- end,
- [],
- Dict
- ).
-
-all_true(true) -> true;
-all_true(_) -> false.
-
-get_cluster_info(Shards) ->
- Dict = lists:foldl(
- fun(#shard{range = R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end,
- dict:new(),
- Shards
- ),
- Q = dict:size(Dict),
- N = dict:fold(fun(_, X, Acc) -> max(X, Acc) end, 0, Dict),
- %% defaults as per mem3:quorum/1
- WR = N div 2 + 1,
- {ok, [{q, Q}, {n, N}, {w, WR}, {r, WR}]}.
-
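% Editor's note (not part of the original module): Q is the number of distinct
% shard ranges and N is the largest copy count seen for any single range. For a
% database with 8 ranges replicated to 3 nodes each this returns
%
%   {ok, [{q, 8}, {n, 3}, {w, 2}, {r, 2}]}
%
% since w = r = n div 2 + 1 = 2, matching the defaults noted above.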
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-get_cluster_info_test_() ->
- {
- setup,
- fun setup/0,
- fun get_cluster_info_test_generator/1
- }.
-
-setup() ->
- Quorums = [1, 2, 3],
- Shards = [1, 3, 5, 8, 12, 24],
- [{N, Q} || N <- Quorums, Q <- Shards].
-
-get_cluster_info_test_generator([]) ->
- [];
-get_cluster_info_test_generator([{N, Q} | Rest]) ->
- {generator, fun() ->
- Nodes = lists:seq(1, 8),
- Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
- {ok, Info} = get_cluster_info(Shards),
- [
- ?_assertEqual(N, couch_util:get_value(n, Info)),
- ?_assertEqual(Q, couch_util:get_value(q, Info))
- ] ++ get_cluster_info_test_generator(Rest)
- end}.
-
--endif.
diff --git a/src/fabric/src/fabric_db_meta.erl b/src/fabric/src/fabric_db_meta.erl
deleted file mode 100644
index 1013b958d..000000000
--- a/src/fabric/src/fabric_db_meta.erl
+++ /dev/null
@@ -1,200 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_meta).
-
--export([
- set_revs_limit/3,
- set_security/3,
- get_all_security/2,
- set_purge_infos_limit/3
-]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(acc, {
- workers,
- finished,
- num_workers
-}).
-
-set_revs_limit(DbName, Limit, Options) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, set_revs_limit, [Limit, Options]),
- Handler = fun handle_revs_message/3,
- Acc0 = {Workers, length(Workers) - 1},
- case fabric_util:recv(Workers, #shard.ref, Handler, Acc0) of
- {ok, ok} ->
- ok;
- {timeout, {DefunctWorkers, _}} ->
- fabric_util:log_timeout(DefunctWorkers, "set_revs_limit"),
- {error, timeout};
- Error ->
- Error
- end.
-
-handle_revs_message(ok, _, {_Workers, 0}) ->
- {stop, ok};
-handle_revs_message(ok, Worker, {Workers, Waiting}) ->
- {ok, {lists:delete(Worker, Workers), Waiting - 1}};
-handle_revs_message(Error, _, _Acc) ->
- {error, Error}.
-
-set_purge_infos_limit(DbName, Limit, Options) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, set_purge_infos_limit, [Limit, Options]),
- Handler = fun handle_purge_message/3,
- Acc0 = {Workers, length(Workers) - 1},
- case fabric_util:recv(Workers, #shard.ref, Handler, Acc0) of
- {ok, ok} ->
- ok;
- {timeout, {DefunctWorkers, _}} ->
- fabric_util:log_timeout(DefunctWorkers, "set_purged_docs_limit"),
- {error, timeout};
- Error ->
- Error
- end.
-
-handle_purge_message(ok, _, {_Workers, 0}) ->
- {stop, ok};
-handle_purge_message(ok, Worker, {Workers, Waiting}) ->
- {ok, {lists:delete(Worker, Workers), Waiting - 1}};
-handle_purge_message(Error, _, _Acc) ->
- {error, Error}.
-
-set_security(DbName, SecObj, Options) ->
- Shards = mem3:shards(DbName),
- RexiMon = fabric_util:create_monitors(Shards),
- Workers = fabric_util:submit_jobs(Shards, set_security, [SecObj, Options]),
- Handler = fun handle_set_message/3,
- Acc = #acc{
- workers = Workers,
- finished = [],
- num_workers = length(Workers)
- },
- try fabric_util:recv(Workers, #shard.ref, Handler, Acc) of
- {ok, #acc{finished = Finished}} ->
- case check_sec_set(length(Workers), Finished) of
- ok -> ok;
- Error -> Error
- end;
- {timeout, #acc{workers = DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, "set_security"),
- {error, timeout};
- Error ->
- Error
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_set_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers = Wrkrs} = Acc) ->
- RemWorkers = lists:filter(fun(S) -> S#shard.node =/= Node end, Wrkrs),
- maybe_finish_set(Acc#acc{workers = RemWorkers});
-handle_set_message(ok, W, Acc) ->
- NewAcc = Acc#acc{
- workers = (Acc#acc.workers -- [W]),
- finished = [W | Acc#acc.finished]
- },
- maybe_finish_set(NewAcc);
-handle_set_message({rexi_EXIT, {maintenance_mode, _}}, W, Acc) ->
- NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
- maybe_finish_set(NewAcc);
-handle_set_message(Error, W, Acc) ->
- Dst = {W#shard.node, W#shard.name},
- couch_log:error("Failed to set security object on ~p :: ~p", [Dst, Error]),
- NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
- maybe_finish_set(NewAcc).
-
-maybe_finish_set(#acc{workers = []} = Acc) ->
- {stop, Acc};
-maybe_finish_set(#acc{finished = Finished, num_workers = NumWorkers} = Acc) ->
- case check_sec_set(NumWorkers, Finished) of
- ok -> {stop, Acc};
- _ -> {ok, Acc}
- end.
-
-check_sec_set(NumWorkers, SetWorkers) ->
- try
- check_sec_set_int(NumWorkers, SetWorkers)
- catch
- throw:Reason ->
- {error, Reason}
- end.
-
-check_sec_set_int(NumWorkers, SetWorkers) ->
- case length(SetWorkers) < ((NumWorkers div 2) + 1) of
- true -> throw(no_majority);
- false -> ok
- end,
- % Hack to reuse fabric_ring:is_progress_possible/1
- FakeCounters = [{S, 0} || S <- SetWorkers],
- case fabric_ring:is_progress_possible(FakeCounters) of
- false -> throw(no_ring);
- true -> ok
- end,
- ok.
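-
-% A minimal sketch of the hack above (illustrative shard ranges, not taken
-% from a real cluster): fabric_ring:is_progress_possible/1 only inspects
-% ring coverage, so the 0 counter values are irrelevant. With
-%   S1 = #shard{range = [16#00000000, 16#7fffffff]},
-%   S2 = #shard{range = [16#80000000, 16#ffffffff]},
-% is_progress_possible([{S1, 0}, {S2, 0}]) returns true, whereas workers
-% covering only one half of the ring return false and check_sec_set_int/2
-% throws no_ring. This is why a numeric majority alone is not sufficient:
-% the responders must also cover every shard range between them.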
-
-get_all_security(DbName, Options) ->
- Shards =
- case proplists:get_value(shards, Options) of
- Shards0 when is_list(Shards0) -> Shards0;
- _ -> mem3:shards(DbName)
- end,
- RexiMon = fabric_util:create_monitors(Shards),
- Workers = fabric_util:submit_jobs(Shards, get_all_security, [[?ADMIN_CTX]]),
- Handler = fun handle_get_message/3,
- Acc = #acc{
- workers = Workers,
- finished = [],
- num_workers = length(Workers)
- },
- try fabric_util:recv(Workers, #shard.ref, Handler, Acc) of
- {ok, #acc{finished = SecObjs}} when length(SecObjs) > length(Workers) / 2 ->
- {ok, SecObjs};
- {ok, _} ->
- {error, no_majority};
- {timeout, #acc{workers = DefunctWorkers}} ->
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_all_security"
- ),
- {error, timeout};
- Error ->
- Error
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_get_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers = Wrkrs} = Acc) ->
- RemWorkers = lists:filter(fun(S) -> S#shard.node =/= Node end, Wrkrs),
- maybe_finish_get(Acc#acc{workers = RemWorkers});
-handle_get_message({Props} = SecObj, W, Acc) when is_list(Props) ->
- NewAcc = Acc#acc{
- workers = (Acc#acc.workers -- [W]),
- finished = [{W, SecObj} | Acc#acc.finished]
- },
- maybe_finish_get(NewAcc);
-handle_get_message({rexi_EXIT, {maintenance_mode, _}}, W, Acc) ->
- NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
- maybe_finish_get(NewAcc);
-handle_get_message(Error, W, Acc) ->
- Dst = {W#shard.node, W#shard.name},
- couch_log:error("Failed to get security object on ~p :: ~p", [Dst, Error]),
- NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
- maybe_finish_get(NewAcc).
-
-maybe_finish_get(#acc{workers = []} = Acc) ->
- {stop, Acc};
-maybe_finish_get(Acc) ->
- {ok, Acc}.
diff --git a/src/fabric/src/fabric_db_partition_info.erl b/src/fabric/src/fabric_db_partition_info.erl
deleted file mode 100644
index efc895534..000000000
--- a/src/fabric/src/fabric_db_partition_info.erl
+++ /dev/null
@@ -1,148 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_partition_info).
-
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--record(acc, {
- counters,
- replies,
- ring_opts
-}).
-
-go(DbName, Partition) ->
- Shards = mem3:shards(DbName, couch_partition:shard_key(Partition)),
- Workers = fabric_util:submit_jobs(Shards, get_partition_info, [Partition]),
- RexiMon = fabric_util:create_monitors(Shards),
- Fun = fun handle_message/3,
- Acc0 = #acc{
- counters = fabric_dict:init(Workers, nil),
- replies = [],
- ring_opts = [{any, Shards}]
- },
- try
- case fabric_util:recv(Workers, #shard.ref, Fun, Acc0) of
- {ok, Res} ->
- {ok, Res};
- {timeout, {WorkersDict, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- WorkersDict,
- nil
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_partition_info"
- ),
- {error, timeout};
- {error, Error} ->
- throw(Error)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, #acc{} = Acc) ->
- #acc{counters = Counters, ring_opts = RingOpts} = Acc,
- case fabric_util:remove_down_workers(Counters, NodeRef, RingOpts) of
- {ok, NewCounters} ->
- {ok, Acc#acc{counters = NewCounters}};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_message({rexi_EXIT, Reason}, Shard, #acc{} = Acc) ->
- #acc{counters = Counters, ring_opts = RingOpts} = Acc,
- NewCounters = fabric_dict:erase(Shard, Counters),
- case fabric_ring:is_progress_possible(NewCounters, RingOpts) of
- true ->
- {ok, Acc#acc{counters = NewCounters}};
- false ->
- {error, Reason}
- end;
-handle_message({ok, Info}, #shard{dbname = Name} = Shard, #acc{} = Acc) ->
- #acc{counters = Counters, replies = Replies} = Acc,
- Replies1 = [Info | Replies],
- Counters1 = fabric_dict:erase(Shard, Counters),
- case fabric_dict:size(Counters1) =:= 0 of
- true ->
- [FirstInfo | RestInfos] = Replies1,
- PartitionInfo = get_max_partition_size(FirstInfo, RestInfos),
- {stop, [{db_name, Name} | format_partition(PartitionInfo)]};
- false ->
- {ok, Acc#acc{counters = Counters1, replies = Replies1}}
- end;
-handle_message(_, _, #acc{} = Acc) ->
- {ok, Acc}.
-
-get_max_partition_size(Max, []) ->
- Max;
-get_max_partition_size(MaxInfo, [NextInfo | Rest]) ->
- {sizes, MaxSize} = lists:keyfind(sizes, 1, MaxInfo),
- {sizes, NextSize} = lists:keyfind(sizes, 1, NextInfo),
-
- {external, MaxExtSize} = lists:keyfind(external, 1, MaxSize),
- {external, NextExtSize} = lists:keyfind(external, 1, NextSize),
- case NextExtSize > MaxExtSize of
- true ->
- get_max_partition_size(NextInfo, Rest);
- false ->
- get_max_partition_size(MaxInfo, Rest)
- end.
-
-% for JS to work nicely we need to convert the size list
-% to a jiffy object
-format_partition(PartitionInfo) ->
- {value, {sizes, Size}, PartitionInfo1} = lists:keytake(sizes, 1, PartitionInfo),
- [{sizes, {Size}} | PartitionInfo1].
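-
-% A sketch of the conversion (hypothetical values): given
-%   [{sizes, [{active, 100}, {external, 200}]}, {doc_count, 5}]
-% format_partition/1 returns
-%   [{sizes, {[{active, 100}, {external, 200}]}}, {doc_count, 5}]
-% wrapping the proplist in a 1-tuple so jiffy treats it as an EJSON object
-% ({"active":100,"external":200}) rather than a bare proplist, which is not
-% valid EJSON.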
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-node_down_test() ->
- [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])],
- Acc1 = #acc{
- counters = fabric_dict:init([S1, S2], nil),
- ring_opts = [{any, [S1, S2]}]
- },
-
- N1 = S1#shard.node,
- {ok, Acc2} = handle_message({rexi_DOWN, nil, {nil, N1}, nil}, nil, Acc1),
- ?assertEqual([{S2, nil}], Acc2#acc.counters),
-
- N2 = S2#shard.node,
- ?assertEqual(
- {error, {nodedown, <<"progress not possible">>}},
- handle_message({rexi_DOWN, nil, {nil, N2}, nil}, nil, Acc2)
- ).
-
-worker_exit_test() ->
- [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])],
- Acc1 = #acc{
- counters = fabric_dict:init([S1, S2], nil),
- ring_opts = [{any, [S1, S2]}]
- },
-
- {ok, Acc2} = handle_message({rexi_EXIT, boom}, S1, Acc1),
- ?assertEqual([{S2, nil}], Acc2#acc.counters),
-
- ?assertEqual({error, bam}, handle_message({rexi_EXIT, bam}, S2, Acc2)).
-
-mk_shard(Name, Range) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = Range}.
-
--endif.
diff --git a/src/fabric/src/fabric_db_update_listener.erl b/src/fabric/src/fabric_db_update_listener.erl
deleted file mode 100644
index 78ccf5a4d..000000000
--- a/src/fabric/src/fabric_db_update_listener.erl
+++ /dev/null
@@ -1,183 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_update_listener).
-
--export([go/4, start_update_notifier/1, stop/1, wait_db_updated/1]).
--export([handle_db_event/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--record(worker, {
- ref,
- node,
- pid
-}).
-
--record(cb_state, {
- client_pid,
- client_ref,
- notify
-}).
-
--record(acc, {
- parent,
- state,
- shards
-}).
-
-go(Parent, ParentRef, DbName, Timeout) ->
- Shards = mem3:shards(DbName),
- Notifiers = start_update_notifiers(Shards),
- MonRefs = lists:usort([rexi_utils:server_pid(N) || #worker{node = N} <- Notifiers]),
- RexiMon = rexi_monitor:start(MonRefs),
- MonPid = start_cleanup_monitor(self(), Notifiers),
- %% This is not a common pattern for rexi but to enable the calling
- %% process to communicate via handle_message/3 we "fake" it as a
- %% spawned worker.
- Workers = [#worker{ref = ParentRef, pid = Parent} | Notifiers],
- Acc = #acc{
- parent = Parent,
- state = unset,
- shards = Shards
- },
- Resp =
- try
- receive_results(Workers, Acc, Timeout)
- after
- rexi_monitor:stop(RexiMon),
- stop_cleanup_monitor(MonPid)
- end,
- case Resp of
- {ok, _} -> ok;
- {error, Error} -> erlang:error(Error);
- Error -> erlang:error(Error)
- end.
-
-start_update_notifiers(Shards) ->
- EndPointDict = lists:foldl(
- fun(#shard{node = Node, name = Name}, Acc) ->
- dict:append(Node, Name, Acc)
- end,
- dict:new(),
- Shards
- ),
- lists:map(
- fun({Node, DbNames}) ->
- Ref = rexi:cast(Node, {?MODULE, start_update_notifier, [DbNames]}),
- #worker{ref = Ref, node = Node}
- end,
- dict:to_list(EndPointDict)
- ).
-
-% rexi endpoint
-start_update_notifier(DbNames) ->
- {Caller, Ref} = get(rexi_from),
- Notify = config:get("couchdb", "maintenance_mode", "false") /= "true",
- State = #cb_state{client_pid = Caller, client_ref = Ref, notify = Notify},
- Options = [{parent, Caller}, {dbnames, DbNames}],
- couch_event:listen(?MODULE, handle_db_event, State, Options).
-
-handle_db_event(_DbName, updated, #cb_state{notify = true} = St) ->
- erlang:send(St#cb_state.client_pid, {St#cb_state.client_ref, db_updated}),
- {ok, St};
-handle_db_event(_DbName, deleted, St) ->
- erlang:send(St#cb_state.client_pid, {St#cb_state.client_ref, db_deleted}),
- stop;
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
-
-start_cleanup_monitor(Parent, Notifiers) ->
- spawn(fun() ->
- Ref = erlang:monitor(process, Parent),
- cleanup_monitor(Parent, Ref, Notifiers)
- end).
-
-stop_cleanup_monitor(MonPid) ->
- MonPid ! {self(), stop}.
-
-cleanup_monitor(Parent, Ref, Notifiers) ->
- receive
- {'DOWN', Ref, _, _, _} ->
- stop_update_notifiers(Notifiers);
- {Parent, stop} ->
- stop_update_notifiers(Notifiers);
- Else ->
- couch_log:error("Unkown message in ~w :: ~w", [?MODULE, Else]),
- stop_update_notifiers(Notifiers),
- exit(Parent, {unknown_message, Else})
- end.
-
-stop_update_notifiers(Notifiers) ->
- rexi:kill_all([{N, Ref} || #worker{node = N, ref = Ref} <- Notifiers]).
-
-stop({Pid, Ref}) ->
- erlang:send(Pid, {Ref, done}).
-
-wait_db_updated({Pid, Ref}) ->
- MonRef = erlang:monitor(process, Pid),
- erlang:send(Pid, {Ref, get_state}),
- receive
- {state, Pid, State} ->
- erlang:demonitor(MonRef, [flush]),
- State;
- {'DOWN', MonRef, process, Pid, _Reason} ->
- changes_feed_died
- after 300000 ->
- ?MODULE:wait_db_updated({Pid, Ref})
- end.
-
-receive_results(Workers, Acc0, Timeout) ->
- Fun = fun handle_message/3,
- case rexi_utils:recv(Workers, #worker.ref, Fun, Acc0, infinity, Timeout) of
- {timeout, #acc{state = updated} = Acc} ->
- receive_results(Workers, Acc, Timeout);
- {timeout, #acc{state = waiting} = Acc} ->
- erlang:send(Acc#acc.parent, {state, self(), timeout}),
- receive_results(Workers, Acc#acc{state = unset}, Timeout);
- {timeout, Acc} ->
- receive_results(Workers, Acc#acc{state = timeout}, Timeout);
- {_, Acc} ->
- {ok, Acc}
- end.
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
- handle_error(Node, {nodedown, Node}, Acc);
-handle_message({rexi_EXIT, _Reason}, Worker, Acc) ->
- handle_error(Worker#worker.node, {worker_exit, Worker}, Acc);
-handle_message({gen_event_EXIT, Node, Reason}, _Worker, Acc) ->
- handle_error(Node, {gen_event_EXIT, Node, Reason}, Acc);
-handle_message(db_updated, _Worker, #acc{state = waiting} = Acc) ->
- % propagate message to calling controller
- erlang:send(Acc#acc.parent, {state, self(), updated}),
- {ok, Acc#acc{state = unset}};
-handle_message(db_updated, _Worker, Acc) ->
- {ok, Acc#acc{state = updated}};
-handle_message(db_deleted, _Worker, _Acc) ->
- {stop, ok};
-handle_message(get_state, _Worker, #acc{state = unset} = Acc) ->
- {ok, Acc#acc{state = waiting}};
-handle_message(get_state, _Worker, Acc) ->
- erlang:send(Acc#acc.parent, {state, self(), Acc#acc.state}),
- {ok, Acc#acc{state = unset}};
-handle_message(done, _, _) ->
- {stop, ok}.
-
-handle_error(Node, Reason, #acc{shards = Shards} = Acc) ->
- Rest = lists:filter(fun(#shard{node = N}) -> N /= Node end, Shards),
- case fabric_ring:is_progress_possible([{R, nil} || R <- Rest]) of
- true ->
- {ok, Acc#acc{shards = Rest}};
- false ->
- {error, Reason}
- end.
diff --git a/src/fabric/src/fabric_db_uuids.erl b/src/fabric/src/fabric_db_uuids.erl
deleted file mode 100644
index 12931a3d1..000000000
--- a/src/fabric/src/fabric_db_uuids.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_uuids).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName) when is_binary(DbName) ->
- Shards = mem3:live_shards(DbName, [node() | nodes()]),
- Workers = fabric_util:submit_jobs(Shards, get_uuid, []),
- RexiMon = fabric_util:create_monitors(Shards),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {WorkersDict, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "db_uuids"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, {Cntrs, Res}) ->
- case fabric_ring:node_down(NodeRef, Cntrs, Res, [all]) of
- {ok, Cntrs1} -> {ok, {Cntrs1, Res}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_message({rexi_EXIT, Reason}, Shard, {Cntrs, Res}) ->
- case fabric_ring:handle_error(Shard, Cntrs, Res, [all]) of
- {ok, Cntrs1} -> {ok, {Cntrs1, Res}};
- error -> {error, Reason}
- end;
-handle_message(Uuid, Shard, {Cntrs, Res}) when is_binary(Uuid) ->
- case fabric_ring:handle_response(Shard, Uuid, Cntrs, Res, [all]) of
- {ok, {Cntrs1, Res1}} ->
- {ok, {Cntrs1, Res1}};
- {stop, Res1} ->
- Uuids = fabric_dict:fold(
- fun(#shard{} = S, Id, #{} = Acc) ->
- Acc#{Id => S#shard{ref = undefined}}
- end,
- #{},
- Res1
- ),
- {stop, Uuids}
- end;
-handle_message(Reason, Shard, {Cntrs, Res}) ->
- case fabric_ring:handle_error(Shard, Cntrs, Res, [all]) of
- {ok, Cntrs1} -> {ok, {Cntrs1, Res}};
- error -> {error, Reason}
- end.
diff --git a/src/fabric/src/fabric_design_doc_count.erl b/src/fabric/src/fabric_design_doc_count.erl
deleted file mode 100644
index f6f866a24..000000000
--- a/src/fabric/src/fabric_design_doc_count.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_design_doc_count).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_design_doc_count, []),
- RexiMon = fabric_util:create_monitors(Shards),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {WorkersDict, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "get_design_doc_count"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, {Counters, Resps}) ->
- case fabric_ring:node_down(NodeRef, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, Reason}
- end;
-handle_message({ok, Count}, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_response(Shard, Count, Counters, Resps) of
- {ok, {Counters1, Resps1}} ->
- {ok, {Counters1, Resps1}};
- {stop, Resps1} ->
- Total = fabric_dict:fold(fun(_, C, A) -> A + C end, 0, Resps1),
- {stop, Total}
- end;
-handle_message(Reason, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, Reason}
- end.
diff --git a/src/fabric/src/fabric_dict.erl b/src/fabric/src/fabric_dict.erl
deleted file mode 100644
index 8395221b4..000000000
--- a/src/fabric/src/fabric_dict.erl
+++ /dev/null
@@ -1,60 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_dict).
--compile(export_all).
--compile(nowarn_export_all).
-
-% Instead of ets, let's use an ordered keylist. We'll need to revisit if we
-% have >> 100 shards, so a private interface is a good idea. - APK June 2010
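-% A short usage sketch (hypothetical shards S1 and S2):
-%   D0 = fabric_dict:init([S1, S2], nil),
-%   D1 = fabric_dict:store(S1, ok, D0),
-%   fabric_dict:any(nil, D1),          % true: S2 has not replied yet
-%   fabric_dict:size(D1).              % 2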
-
-init(Keys, InitialValue) ->
- orddict:from_list([{Key, InitialValue} || Key <- Keys]).
-
-is_key(Key, Dict) ->
- orddict:is_key(Key, Dict).
-
-fetch_keys(Dict) ->
- orddict:fetch_keys(Dict).
-
-decrement_all(Dict) ->
- [{K, V - 1} || {K, V} <- Dict].
-
-store(Key, Value, Dict) ->
- orddict:store(Key, Value, Dict).
-
-erase(Key, Dict) ->
- orddict:erase(Key, Dict).
-
-update_counter(Key, Incr, Dict0) ->
- orddict:update_counter(Key, Incr, Dict0).
-
-lookup_element(Key, Dict) ->
- couch_util:get_value(Key, Dict).
-
-size(Dict) ->
- orddict:size(Dict).
-
-any(Value, Dict) ->
- lists:keymember(Value, 2, Dict).
-
-filter(Fun, Dict) ->
- orddict:filter(Fun, Dict).
-
-fold(Fun, Acc0, Dict) ->
- orddict:fold(Fun, Acc0, Dict).
-
-to_list(Dict) ->
- orddict:to_list(Dict).
-
-from_list(KVList) when is_list(KVList) ->
- orddict:from_list(KVList).
diff --git a/src/fabric/src/fabric_doc_atts.erl b/src/fabric/src/fabric_doc_atts.erl
deleted file mode 100644
index 80a36ee51..000000000
--- a/src/fabric/src/fabric_doc_atts.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_atts).
-
--compile(tuple_calls).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([
- receiver/3,
- receiver_callback/2
-]).
-
-receiver(_Req, _DbName, undefined) ->
- <<"">>;
-receiver(_Req, _DbName, {unknown_transfer_encoding, Unknown}) ->
- exit({unknown_transfer_encoding, Unknown});
-receiver(Req, DbName, chunked) ->
- MiddleMan = spawn(fun() -> middleman(Req, DbName, chunked) end),
- {fabric_attachment_receiver, MiddleMan, chunked};
-receiver(_Req, _DbName, 0) ->
- <<"">>;
-receiver(Req, DbName, Length) when is_integer(Length) ->
- maybe_send_continue(Req),
- Middleman = spawn(fun() -> middleman(Req, DbName, Length) end),
- {fabric_attachment_receiver, Middleman, Length};
-receiver(_Req, _DbName, Length) ->
- exit({length_not_integer, Length}).
-
-receiver_callback(Middleman, chunked) ->
- fun(4096, ChunkFun, State) ->
- write_chunks(Middleman, ChunkFun, State)
- end;
-receiver_callback(Middleman, Length) when is_integer(Length) ->
- fun() ->
- Middleman ! {self(), gimme_data},
- Timeout = fabric_util:attachments_timeout(),
- receive
- {Middleman, Data} ->
- rexi:reply(attachment_chunk_received),
- Data
- after Timeout ->
- exit(timeout)
- end
- end.
-
-%%
-%% internal
-%%
-
-maybe_send_continue(#httpd{mochi_req = MochiReq} = Req) ->
- case couch_httpd:header_value(Req, "expect") of
- undefined ->
- ok;
- Expect ->
- case string:to_lower(Expect) of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _ ->
- ok
- end
- end.
-
-write_chunks(MiddleMan, ChunkFun, State) ->
- MiddleMan ! {self(), gimme_data},
- Timeout = fabric_util:attachments_timeout(),
- receive
- {MiddleMan, ChunkRecordList} ->
- rexi:reply(attachment_chunk_received),
- case flush_chunks(ChunkRecordList, ChunkFun, State) of
- {continue, NewState} ->
- write_chunks(MiddleMan, ChunkFun, NewState);
- {done, NewState} ->
- NewState
- end
- after Timeout ->
- exit(timeout)
- end.
-
-flush_chunks([], _ChunkFun, State) ->
- {continue, State};
-flush_chunks([{0, _}], _ChunkFun, State) ->
- {done, State};
-flush_chunks([Chunk | Rest], ChunkFun, State) ->
- NewState = ChunkFun(Chunk, State),
- flush_chunks(Rest, ChunkFun, NewState).
-
-receive_unchunked_attachment(_Req, 0) ->
- ok;
-receive_unchunked_attachment(Req, Length) ->
- receive
- {MiddleMan, go} ->
- Data = couch_httpd:recv(Req, 0),
- MiddleMan ! {self(), Data}
- end,
- receive_unchunked_attachment(Req, Length - size(Data)).
-
-middleman(Req, DbName, chunked) ->
- % spawn a process to actually receive the uploaded data
- RcvFun = fun(ChunkRecord, ok) ->
- receive
- {From, go} -> From ! {self(), ChunkRecord}
- end,
- ok
- end,
- Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req, 4096, RcvFun, ok) end),
-
- % take requests from the DB writers and get data from the receiver
- N = mem3:n(DbName),
- Timeout = fabric_util:attachments_timeout(),
- middleman_loop(Receiver, N, [], [], Timeout);
-middleman(Req, DbName, Length) ->
- Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
- N = mem3:n(DbName),
- Timeout = fabric_util:attachments_timeout(),
- middleman_loop(Receiver, N, [], [], Timeout).
-
-middleman_loop(Receiver, N, Counters0, ChunkList0, Timeout) ->
- receive
- {From, gimme_data} ->
- % Figure out how far along this writer (From) is in the list
- ListIndex =
- case fabric_dict:lookup_element(From, Counters0) of
- undefined -> 0;
- I -> I
- end,
-
- % Talk to the receiver to get another chunk if necessary
- ChunkList1 =
- if
- ListIndex == length(ChunkList0) ->
- Receiver ! {self(), go},
- receive
- {Receiver, ChunkRecord} ->
- ChunkList0 ++ [ChunkRecord]
- end;
- true ->
- ChunkList0
- end,
-
- % reply to the writer
- Reply = lists:nthtail(ListIndex, ChunkList1),
- From ! {self(), Reply},
-
- % Update the counter for this writer
- Counters1 = fabric_dict:update_counter(From, length(Reply), Counters0),
-
- % Drop any chunks that have been sent to all writers
- Size = fabric_dict:size(Counters1),
- NumToDrop = lists:min([I || {_, I} <- Counters1]),
-
- {ChunkList3, Counters3} =
- if
- Size == N andalso NumToDrop > 0 ->
- ChunkList2 = lists:nthtail(NumToDrop, ChunkList1),
- Counters2 = [{F, I - NumToDrop} || {F, I} <- Counters1],
- {ChunkList2, Counters2};
- true ->
- {ChunkList1, Counters1}
- end,
-
- middleman_loop(Receiver, N, Counters3, ChunkList3, Timeout)
- after Timeout ->
- exit(Receiver, kill),
- ok
- end.
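-
-% A worked sketch of the bookkeeping above (hypothetical writers A and B,
-% N = 2): if A has already been handed chunks C1 and C2, the state is
-%   Counters = [{A, 2}], ChunkList = [C1, C2]
-% When B first asks for data its ListIndex is 0, so it receives [C1, C2] in
-% a single reply and Counters becomes [{A, 2}, {B, 2}]. Now Size == N and
-% NumToDrop == 2, so both chunks are dropped and the counters are rebased to
-% [{A, 0}, {B, 0}]; a chunk is only released after every writer has seen it.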
diff --git a/src/fabric/src/fabric_doc_missing_revs.erl b/src/fabric/src/fabric_doc_missing_revs.erl
deleted file mode 100644
index ffd408f4e..000000000
--- a/src/fabric/src/fabric_doc_missing_revs.erl
+++ /dev/null
@@ -1,116 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_missing_revs).
-
--export([go/2, go/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName, AllIdsRevs) ->
- go(DbName, AllIdsRevs, []).
-
-go(_, [], _) ->
- {ok, []};
-go(DbName, AllIdsRevs, Options) ->
- Workers = lists:map(
- fun({#shard{name = Name, node = Node} = Shard, IdsRevs}) ->
- Ref = rexi:cast(
- Node,
- {fabric_rpc, get_missing_revs, [
- Name,
- IdsRevs,
- Options
- ]}
- ),
- Shard#shard{ref = Ref}
- end,
- group_idrevs_by_shard(DbName, AllIdsRevs)
- ),
- ResultDict = dict:from_list([{Id, {{nil, Revs}, []}} || {Id, Revs} <- AllIdsRevs]),
- RexiMon = fabric_util:create_monitors(Workers),
- Acc0 = {length(Workers), ResultDict, Workers},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {_, _, DefunctWorkers}} ->
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_missing_revs"
- ),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, {_WorkerLen, ResultDict, Workers}) ->
- NewWorkers = [W || #shard{node = Node} = W <- Workers, Node =/= NodeRef],
- skip_message({fabric_dict:size(NewWorkers), ResultDict, NewWorkers});
-handle_message({rexi_EXIT, _}, Worker, {W, D, Workers}) ->
- skip_message({W - 1, D, lists:delete(Worker, Workers)});
-handle_message({ok, Results}, _Worker, {1, D0, _}) ->
- D = update_dict(D0, Results),
- {stop, dict:fold(fun force_reply/3, [], D)};
-handle_message({ok, Results}, Worker, {WaitingCount, D0, Workers}) ->
- D = update_dict(D0, Results),
- case dict:fold(fun maybe_reply/3, {stop, []}, D) of
- continue ->
- % still haven't heard about some Ids
- {ok, {WaitingCount - 1, D, lists:delete(Worker, Workers)}};
- {stop, FinalReply} ->
- % finished, stop the rest of the jobs
- fabric_util:cleanup(lists:delete(Worker, Workers)),
- {stop, FinalReply}
- end.
-
-force_reply(Id, {{nil, Revs}, Anc}, Acc) ->
- % never heard about this ID, assume it's missing
- [{Id, Revs, Anc} | Acc];
-force_reply(_, {[], _}, Acc) ->
- Acc;
-force_reply(Id, {Revs, Anc}, Acc) ->
- [{Id, Revs, Anc} | Acc].
-
-maybe_reply(_, _, continue) ->
- continue;
-maybe_reply(_, {{nil, _}, _}, _) ->
- continue;
-maybe_reply(_, {[], _}, {stop, Acc}) ->
- {stop, Acc};
-maybe_reply(Id, {Revs, Anc}, {stop, Acc}) ->
- {stop, [{Id, Revs, Anc} | Acc]}.
-
-group_idrevs_by_shard(DbName, IdsRevs) ->
- dict:to_list(
- lists:foldl(
- fun({Id, Revs}, D0) ->
- lists:foldl(
- fun(Shard, D1) ->
- dict:append(Shard, {Id, Revs}, D1)
- end,
- D0,
- mem3:shards(DbName, Id)
- )
- end,
- dict:new(),
- IdsRevs
- )
- ).
-
-update_dict(D0, KVs) ->
- lists:foldl(fun({K, V, A}, D1) -> dict:store(K, {V, A}, D1) end, D0, KVs).
-
-skip_message({0, Dict, _Workers}) ->
- {stop, dict:fold(fun force_reply/3, [], Dict)};
-skip_message(Acc) ->
- {ok, Acc}.
diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
deleted file mode 100644
index ba348112c..000000000
--- a/src/fabric/src/fabric_doc_open.erl
+++ /dev/null
@@ -1,611 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_open).
-
--export([go/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(acc, {
- dbname,
- workers,
- r,
- state,
- replies,
- node_revs = [],
- q_reply
-}).
-
-go(DbName, Id, Options) ->
- Handler =
- case proplists:get_value(doc_info, Options) of
- true -> get_doc_info;
- full -> get_full_doc_info;
- undefined -> open_doc
- end,
- Workers = fabric_util:submit_jobs(
- mem3:shards(DbName, Id),
- Handler,
- [Id, [deleted | Options]]
- ),
- SuppressDeletedDoc = not lists:member(deleted, Options),
- N = mem3:n(DbName),
- R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
- Acc0 = #acc{
- dbname = DbName,
- workers = Workers,
- r = erlang:min(N, list_to_integer(R)),
- state = r_not_met,
- replies = []
- },
- RexiMon = fabric_util:create_monitors(Workers),
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {ok, #acc{} = Acc} when Handler =:= open_doc ->
- Reply = handle_response(Acc),
- format_reply(Reply, SuppressDeletedDoc);
- {ok, #acc{state = r_not_met}} ->
- {error, quorum_not_met};
- {ok, #acc{q_reply = QuorumReply}} ->
- format_reply(QuorumReply, SuppressDeletedDoc);
- {timeout, #acc{workers = DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, atom_to_list(Handler)),
- {error, timeout};
- Error ->
- Error
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
- NewWorkers = [W || #shard{node = N} = W <- Acc#acc.workers, N /= Node],
- case NewWorkers of
- [] ->
- {stop, Acc#acc{workers = []}};
- _ ->
- {ok, Acc#acc{workers = NewWorkers}}
- end;
-handle_message({rexi_EXIT, _Reason}, Worker, Acc) ->
- NewWorkers = lists:delete(Worker, Acc#acc.workers),
- case NewWorkers of
- [] ->
- {stop, Acc#acc{workers = []}};
- _ ->
- {ok, Acc#acc{workers = NewWorkers}}
- end;
-handle_message(Reply, Worker, Acc) ->
- NewReplies = fabric_util:update_counter(Reply, 1, Acc#acc.replies),
- NewNodeRevs =
- case Reply of
- {ok, #doc{revs = {Pos, [Rev | _]}}} ->
- [{Worker#shard.node, [{Pos, Rev}]} | Acc#acc.node_revs];
- _ ->
- Acc#acc.node_revs
- end,
- NewAcc = Acc#acc{replies = NewReplies, node_revs = NewNodeRevs},
- case is_r_met(Acc#acc.workers, NewReplies, Acc#acc.r) of
- {true, QuorumReply} ->
- fabric_util:cleanup(lists:delete(Worker, Acc#acc.workers)),
- {stop, NewAcc#acc{workers = [], state = r_met, q_reply = QuorumReply}};
- wait_for_more ->
- NewWorkers = lists:delete(Worker, Acc#acc.workers),
- {ok, NewAcc#acc{workers = NewWorkers}};
- no_more_workers ->
- {stop, NewAcc#acc{workers = []}}
- end.
-
-handle_response(#acc{state = r_met, replies = Replies, q_reply = QuorumReply} = Acc) ->
- case {Replies, fabric_util:remove_ancestors(Replies, [])} of
- {[_], [_]} ->
- % Complete agreement amongst all copies
- QuorumReply;
- {[_ | _], [{_, {QuorumReply, _}}]} ->
- % Any divergent replies are ancestors of the QuorumReply,
- % repair the document asynchronously
- spawn(fun() -> read_repair(Acc) end),
- QuorumReply;
- _Else ->
- % real disagreement amongst the workers, block for the repair
- read_repair(Acc)
- end;
-handle_response(Acc) ->
- read_repair(Acc).
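-
-% A sketch of the three cases above (hypothetical replies): with
-%   Replies = [{Foo1, {Foo1, 1}}, {Foo2, {Foo2, 2}}] and q_reply = Foo2,
-% where Foo1 is an ancestor revision of Foo2, remove_ancestors/2 leaves only
-% the Foo2 entry, so Foo2 is returned right away and the repair runs in a
-% spawned process. If the extra replies were conflicting siblings instead,
-% the second clause would not match and the caller would block on
-% read_repair/1.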
-
-is_r_met(Workers, Replies, R) ->
- case lists:dropwhile(fun({_, {_, Count}}) -> Count < R end, Replies) of
- [{_, {QuorumReply, _}} | _] ->
- {true, QuorumReply};
- [] when length(Workers) > 1 ->
- wait_for_more;
- [] ->
- no_more_workers
- end.
-
-read_repair(#acc{dbname = DbName, replies = Replies, node_revs = NodeRevs}) ->
- Docs = [Doc || {_, {{ok, #doc{} = Doc}, _}} <- Replies],
- case Docs of
- % omit local docs from read repair
- [#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} | _] ->
- choose_reply(Docs);
- [#doc{id = Id} | _] ->
- Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
- Res = fabric:update_docs(DbName, Docs, Opts),
- case Res of
- {ok, []} ->
- couch_stats:increment_counter([fabric, read_repairs, success]);
- _ ->
- couch_stats:increment_counter([fabric, read_repairs, failure]),
- couch_log:notice("read_repair ~s ~s ~p", [DbName, Id, Res])
- end,
- choose_reply(Docs);
- [] ->
- % Try hard to return some sort of information
- % to the client.
- Values = [V || {_, {V, _}} <- Replies],
- case lists:member({not_found, missing}, Values) of
- true ->
- {not_found, missing};
- false when length(Values) > 0 ->
- % Sort for stability in responses in
- % case we have some weird condition
- hd(lists:sort(Values));
- false ->
- {error, read_failure}
- end
- end.
-
-choose_reply(Docs) ->
- % Sort descending by {not deleted, rev}. This should match
- % the logic of couch_doc:to_doc_info/1.
- [Winner | _] = lists:sort(
- fun(DocA, DocB) ->
- InfoA = {not DocA#doc.deleted, DocA#doc.revs},
- InfoB = {not DocB#doc.deleted, DocB#doc.revs},
- InfoA > InfoB
- end,
- Docs
- ),
- {ok, Winner}.
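-
-% A sketch of the ordering (hypothetical docs): the effective sort key is
-% {not Deleted, Revs}, compared descending, so
-%   #doc{deleted = false, revs = {2, [<<"b">>]}}   beats
-%   #doc{deleted = false, revs = {1, [<<"a">>]}}   which beats
-%   #doc{deleted = true,  revs = {3, [<<"c">>]}}
-% i.e. any live revision wins over a deleted one, and among live revisions
-% the greater {Pos, Rev} pair wins.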
-
-format_reply({ok, #full_doc_info{deleted = true}}, true) ->
- {not_found, deleted};
-format_reply({ok, #doc{deleted = true}}, true) ->
- {not_found, deleted};
-format_reply(not_found, _) ->
- {not_found, missing};
-format_reply(Else, _) ->
- Else.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
--define(MECK_MODS, [
- couch_log,
- couch_stats,
- fabric,
- fabric_util,
- mem3,
- rexi,
- rexi_monitor
-]).
-
-setup_all() ->
- meck:new(?MECK_MODS, [passthrough]).
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset(?MECK_MODS).
-
-teardown(_) ->
- ok.
-
-open_doc_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_is_r_met(),
- t_handle_message_down(),
- t_handle_message_exit(),
- t_handle_message_reply(),
- t_store_node_revs(),
- t_read_repair(),
- t_handle_response_quorum_met(),
- t_get_doc_info()
- ]
- }
- }.
-
-t_is_r_met() ->
- ?_test(begin
- Workers0 = [],
- Workers1 = [nil],
- Workers2 = [nil, nil],
-
- SuccessCases = [
- {{true, foo}, [fabric_util:kv(foo, 2)], 2},
- {{true, foo}, [fabric_util:kv(foo, 3)], 2},
- {{true, foo}, [fabric_util:kv(foo, 1)], 1},
- {{true, foo}, [fabric_util:kv(foo, 2), fabric_util:kv(bar, 1)], 2},
- {{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
- {{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
- ],
- lists:foreach(
- fun({Expect, Replies, Q}) ->
- ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
- end,
- SuccessCases
- ),
-
- WaitForMoreCases = [
- {[fabric_util:kv(foo, 1)], 2},
- {[fabric_util:kv(foo, 2)], 3},
- {[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
- ],
- lists:foreach(
- fun({Replies, Q}) ->
- ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
- end,
- WaitForMoreCases
- ),
-
- FailureCases = [
- {Workers0, [fabric_util:kv(foo, 1)], 2},
- {Workers1, [fabric_util:kv(foo, 1)], 2},
- {Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
- {Workers1, [fabric_util:kv(foo, 2)], 3}
- ],
- lists:foreach(
- fun({Workers, Replies, Q}) ->
- ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
- end,
- FailureCases
- )
- end).
-
-t_handle_message_down() ->
- Node0 = 'foo@localhost',
- Node1 = 'bar@localhost',
- Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
- Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
- Workers0 = [#shard{node = Node0} || _ <- [a, b]],
- Worker1 = #shard{node = Node1},
- Workers1 = Workers0 ++ [Worker1],
-
- ?_test(begin
- % Stop when no more workers are left
- ?assertEqual(
- {stop, #acc{workers = []}},
- handle_message(Down0, nil, #acc{workers = Workers0})
- ),
-
- % Continue when we have more workers
- ?assertEqual(
- {ok, #acc{workers = [Worker1]}},
- handle_message(Down0, nil, #acc{workers = Workers1})
- ),
-
- % A second DOWN removes the remaining workers
- ?assertEqual(
- {stop, #acc{workers = []}},
- handle_message(Down1, nil, #acc{workers = [Worker1]})
- )
- end).
-
-t_handle_message_exit() ->
- Exit = {rexi_EXIT, nil},
- Worker0 = #shard{ref = erlang:make_ref()},
- Worker1 = #shard{ref = erlang:make_ref()},
-
- ?_test(begin
- % Only removes the specified worker
- ?assertEqual(
- {ok, #acc{workers = [Worker1]}},
- handle_message(Exit, Worker0, #acc{workers = [Worker0, Worker1]})
- ),
-
- ?assertEqual(
- {ok, #acc{workers = [Worker0]}},
- handle_message(Exit, Worker1, #acc{workers = [Worker0, Worker1]})
- ),
-
- % We bail if it was the last worker
- ?assertEqual(
- {stop, #acc{workers = []}},
- handle_message(Exit, Worker0, #acc{workers = [Worker0]})
- )
- end).
-
-t_handle_message_reply() ->
- Worker0 = #shard{ref = erlang:make_ref()},
- Worker1 = #shard{ref = erlang:make_ref()},
- Worker2 = #shard{ref = erlang:make_ref()},
- Workers = [Worker0, Worker1, Worker2],
- Acc0 = #acc{workers = Workers, r = 2, replies = []},
-
- ?_test(begin
- meck:expect(rexi, kill_all, fun(_) -> ok end),
-
- % Test that we continue when we haven't met R yet
- ?assertMatch(
- {ok, #acc{
- workers = [Worker0, Worker1],
- replies = [{foo, {foo, 1}}]
- }},
- handle_message(foo, Worker2, Acc0)
- ),
-
- ?assertMatch(
- {ok, #acc{
- workers = [Worker0, Worker1],
- replies = [{bar, {bar, 1}}, {foo, {foo, 1}}]
- }},
- handle_message(bar, Worker2, Acc0#acc{
- replies = [{foo, {foo, 1}}]
- })
- ),
-
- % Test that we don't get a quorum when R isn't met: q_reply
- % isn't set, state remains unchanged, and {stop, NewAcc}
- % is returned. The assertions here are a bit subtle.
-
- ?assertMatch(
- {stop, #acc{workers = [], replies = [{foo, {foo, 1}}]}},
- handle_message(foo, Worker0, Acc0#acc{workers = [Worker0]})
- ),
-
- ?assertMatch(
- {stop, #acc{
- workers = [],
- replies = [{bar, {bar, 1}}, {foo, {foo, 1}}]
- }},
- handle_message(bar, Worker0, Acc0#acc{
- workers = [Worker0],
- replies = [{foo, {foo, 1}}]
- })
- ),
-
- % Check that when R is met we stop with a new state and
- % a q_reply.
-
- ?assertMatch(
- {stop, #acc{
- workers = [],
- replies = [{foo, {foo, 2}}],
- state = r_met,
- q_reply = foo
- }},
- handle_message(foo, Worker1, Acc0#acc{
- workers = [Worker0, Worker1],
- replies = [{foo, {foo, 1}}]
- })
- ),
-
- ?assertEqual(
- {stop, #acc{
- workers = [],
- r = 1,
- replies = [{foo, {foo, 1}}],
- state = r_met,
- q_reply = foo
- }},
- handle_message(foo, Worker0, Acc0#acc{r = 1})
- ),
-
- ?assertMatch(
- {stop, #acc{
- workers = [],
- replies = [{bar, {bar, 1}}, {foo, {foo, 2}}],
- state = r_met,
- q_reply = foo
- }},
- handle_message(foo, Worker0, Acc0#acc{
- workers = [Worker0],
- replies = [{bar, {bar, 1}}, {foo, {foo, 1}}]
- })
- )
- end).
-
-t_store_node_revs() ->
- W1 = #shard{node = w1, ref = erlang:make_ref()},
- W2 = #shard{node = w2, ref = erlang:make_ref()},
- W3 = #shard{node = w3, ref = erlang:make_ref()},
- Foo1 = {ok, #doc{id = <<"bar">>, revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{id = <<"bar">>, revs = {2, [<<"foo2">>, <<"foo">>]}}},
- NFM = {not_found, missing},
-
- InitAcc = #acc{workers = [W1, W2, W3], replies = [], r = 2},
-
- ?_test(begin
- meck:expect(rexi, kill_all, fun(_) -> ok end),
-
- % Simple case
- {ok, #acc{node_revs = NodeRevs1}} = handle_message(Foo1, W1, InitAcc),
- ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs1),
-
- % Make sure we only hold the head rev
- {ok, #acc{node_revs = NodeRevs2}} = handle_message(Foo2, W1, InitAcc),
- ?assertEqual([{w1, [{2, <<"foo2">>}]}], NodeRevs2),
-
- % Make sure we don't capture anything on error
- {ok, #acc{node_revs = NodeRevs3}} = handle_message(NFM, W1, InitAcc),
- ?assertEqual([], NodeRevs3),
-
- % Make sure we accumulate node revs
- Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
- {ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
- ?assertEqual(
- [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
- NodeRevs4
- ),
-
- % Make sure rexi_DOWN doesn't modify node_revs
- Down = {rexi_DOWN, nil, {nil, w1}, nil},
- {ok, #acc{node_revs = NodeRevs5}} = handle_message(Down, W2, Acc1),
- ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs5),
-
- % Make sure rexi_EXIT doesn't modify node_revs
- Exit = {rexi_EXIT, reason},
- {ok, #acc{node_revs = NodeRevs6}} = handle_message(Exit, W2, Acc1),
- ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs6),
-
- % Make sure an error doesn't remove any node revs
- {ok, #acc{node_revs = NodeRevs7}} = handle_message(NFM, W2, Acc1),
- ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs7),
-
- % Make sure we have all of our node_revs when meeting
- % quorum
- {ok, Acc2} = handle_message(Foo1, W1, InitAcc),
- {ok, Acc3} = handle_message(Foo2, W2, Acc2),
- {stop, Acc4} = handle_message(NFM, W3, Acc3),
- ?assertEqual(
- [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
- Acc4#acc.node_revs
- )
- end).
-
-t_read_repair() ->
- Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
- NFM = {not_found, missing},
-
- ?_test(begin
- meck:expect(couch_log, notice, fun(_, _) -> ok end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-
- % Test when we have actual doc data to repair
- meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
- Acc0 = #acc{
- dbname = <<"name">>,
- replies = [fabric_util:kv(Foo1, 1)]
- },
- ?assertEqual(Foo1, read_repair(Acc0)),
-
- meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
- Acc1 = #acc{
- dbname = <<"name">>,
- replies = [fabric_util:kv(Foo1, 1), fabric_util:kv(Foo2, 1)]
- },
- ?assertEqual(Foo2, read_repair(Acc1)),
-
- % Test when we have nothing but errors
- Acc2 = #acc{replies = [fabric_util:kv(NFM, 1)]},
- ?assertEqual(NFM, read_repair(Acc2)),
-
- Acc3 = #acc{replies = [fabric_util:kv(NFM, 1), fabric_util:kv(foo, 2)]},
- ?assertEqual(NFM, read_repair(Acc3)),
-
- Acc4 = #acc{replies = [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)]},
- ?assertEqual(bar, read_repair(Acc4))
- end).
-
-t_handle_response_quorum_met() ->
- Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
-
- ?_test(begin
- meck:expect(couch_log, notice, fun(_, _) -> ok end),
- meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-
- BasicOkAcc = #acc{
- state = r_met,
- replies = [fabric_util:kv(Foo1, 2)],
- q_reply = Foo1
- },
- ?assertEqual(Foo1, handle_response(BasicOkAcc)),
-
- WithAncestorsAcc = #acc{
- state = r_met,
- replies = [fabric_util:kv(Foo1, 1), fabric_util:kv(Foo2, 2)],
- q_reply = Foo2
- },
- ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
-
- % This also checks the case where the quorum reply isn't the
- % most recent revision.
- DeeperWinsAcc = #acc{
- state = r_met,
- replies = [fabric_util:kv(Foo1, 2), fabric_util:kv(Foo2, 1)],
- q_reply = Foo1
- },
- ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
-
- % Check that we return the proper doc based on rev
- % (ie, pos is equal)
- BiggerRevWinsAcc = #acc{
- state = r_met,
- replies = [fabric_util:kv(Foo1, 1), fabric_util:kv(Bar1, 2)],
- q_reply = Bar1
- },
- ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
-
- % r_not_met is a proxy to read_repair so we rely on
- % read_repair_test for those conditions.
- end).
-
-t_get_doc_info() ->
- ?_test(begin
- meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
- meck:expect(fabric_util, submit_jobs, fun(_, _, _) -> ok end),
- meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
- meck:expect(rexi_monitor, stop, fun(_) -> ok end),
- meck:expect(mem3, shards, fun(_, _) -> ok end),
- meck:expect(mem3, n, fun(_) -> 3 end),
- meck:expect(mem3, quorum, fun(_) -> 2 end),
-
- meck:expect(fabric_util, recv, fun(_, _, _, _) ->
- {ok, #acc{state = r_not_met}}
- end),
- Rsp1 = fabric_doc_open:go("test", "one", [doc_info]),
- ?assertEqual({error, quorum_not_met}, Rsp1),
-
- Rsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
- ?assertEqual({error, quorum_not_met}, Rsp2),
-
- meck:expect(fabric_util, recv, fun(_, _, _, _) ->
- {ok, #acc{state = r_met, q_reply = not_found}}
- end),
- MissingRsp1 = fabric_doc_open:go("test", "one", [doc_info]),
- ?assertEqual({not_found, missing}, MissingRsp1),
- MissingRsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
- ?assertEqual({not_found, missing}, MissingRsp2),
-
- meck:expect(fabric_util, recv, fun(_, _, _, _) ->
- A = #doc_info{},
- {ok, #acc{state = r_met, q_reply = {ok, A}}}
- end),
- {ok, Rec1} = fabric_doc_open:go("test", "one", [doc_info]),
- ?assert(is_record(Rec1, doc_info)),
-
- meck:expect(fabric_util, recv, fun(_, _, _, _) ->
- A = #full_doc_info{deleted = true},
- {ok, #acc{state = r_met, q_reply = {ok, A}}}
- end),
- Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
- ?assertEqual({not_found, deleted}, Rsp3),
- {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full}, deleted]),
- ?assert(is_record(Rec2, full_doc_info))
- end).
-
--endif.
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
deleted file mode 100644
index 284187bff..000000000
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ /dev/null
@@ -1,766 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_open_revs).
-
--export([go/4]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--record(state, {
- dbname,
- worker_count,
- workers,
- reply_count = 0,
- reply_error_count = 0,
- r,
- revs,
- latest,
- replies = [],
- node_revs = [],
- repair = false
-}).
-
-go(DbName, Id, Revs, Options) ->
- Workers = fabric_util:submit_jobs(
- mem3:shards(DbName, Id),
- open_revs,
- [Id, Revs, Options]
- ),
- R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
- State = #state{
- dbname = DbName,
- worker_count = length(Workers),
- workers = Workers,
- r = list_to_integer(R),
- revs = Revs,
- latest = lists:member(latest, Options),
- replies = []
- },
- RexiMon = fabric_util:create_monitors(Workers),
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, State) of
- {ok, all_workers_died} ->
- {error, all_workers_died};
- {ok, Replies} ->
- {ok, Replies};
- {timeout, #state{workers = DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, "open_revs"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker, #state{workers = Workers} = State) ->
- NewState = State#state{
- workers = lists:keydelete(NodeRef, #shard.node, Workers),
- reply_error_count = State#state.reply_error_count + 1
- },
- handle_message({ok, []}, nil, NewState);
-handle_message({rexi_EXIT, _}, Worker, #state{workers = Workers} = State) ->
- NewState = State#state{
- workers = lists:delete(Worker, Workers),
- reply_error_count = State#state.reply_error_count + 1
- },
- handle_message({ok, []}, nil, NewState);
-handle_message({ok, RawReplies}, Worker, State) ->
- #state{
- dbname = DbName,
- reply_count = ReplyCount,
- worker_count = WorkerCount,
- workers = Workers,
- replies = PrevReplies,
- node_revs = PrevNodeRevs,
- r = R,
- revs = Revs,
- latest = Latest,
- repair = InRepair,
- reply_error_count = ReplyErrorCount
- } = State,
-
- IsTree = Revs == all orelse Latest,
-
- % Do not count error replies when checking quorum
- RealReplyCount = ReplyCount + 1 - ReplyErrorCount,
- QuorumReplies = RealReplyCount >= R,
- {NewReplies, QuorumMet, Repair} =
- case IsTree of
- true ->
- {NewReplies0, AllInternal, Repair0} =
- tree_replies(PrevReplies, tree_sort(RawReplies)),
- NumLeafs = couch_key_tree:count_leafs(PrevReplies),
- SameNumRevs = length(RawReplies) == NumLeafs,
- QMet = AllInternal andalso SameNumRevs andalso QuorumReplies,
- % Don't set repair=true on the first reply
- {NewReplies0, QMet, (ReplyCount > 0) and Repair0};
- false ->
- {NewReplies0, MinCount} = dict_replies(PrevReplies, RawReplies),
- {NewReplies0, MinCount >= R, false}
- end,
- NewNodeRevs =
- if
- Worker == nil ->
- PrevNodeRevs;
- true ->
- IdRevs = lists:foldl(
- fun
- ({ok, #doc{revs = {Pos, [Rev | _]}}}, Acc) ->
- [{Pos, Rev} | Acc];
- (_, Acc) ->
- Acc
- end,
- [],
- RawReplies
- ),
- if
- IdRevs == [] -> PrevNodeRevs;
- true -> [{Worker#shard.node, IdRevs} | PrevNodeRevs]
- end
- end,
-
- Complete = (ReplyCount =:= (WorkerCount - 1)),
-
- case QuorumMet orelse Complete of
- true ->
- fabric_util:cleanup(lists:delete(Worker, Workers)),
- maybe_read_repair(
- DbName,
- IsTree,
- NewReplies,
- NewNodeRevs,
- ReplyCount + 1,
- InRepair orelse Repair
- ),
- {stop, format_reply(IsTree, NewReplies, RealReplyCount)};
- false ->
- {ok, State#state{
- replies = NewReplies,
- node_revs = NewNodeRevs,
- reply_count = ReplyCount + 1,
- workers = lists:delete(Worker, Workers),
- repair = InRepair orelse Repair
- }}
- end.
-
-tree_replies(RevTree, []) ->
- {RevTree, true, false};
-tree_replies(RevTree0, [{ok, Doc} | Rest]) ->
- {RevTree1, Done, Repair} = tree_replies(RevTree0, Rest),
- Path = couch_doc:to_path(Doc),
- case couch_key_tree:merge(RevTree1, Path) of
- {RevTree2, internal_node} ->
- {RevTree2, Done, Repair};
- {RevTree2, new_leaf} ->
- {RevTree2, Done, true};
- {RevTree2, _} ->
- {RevTree2, false, true}
- end;
-tree_replies(RevTree0, [{{not_found, missing}, {Pos, Rev}} | Rest]) ->
- {RevTree1, Done, Repair} = tree_replies(RevTree0, Rest),
- Node = {Rev, ?REV_MISSING, []},
- Path = {Pos, Node},
- case couch_key_tree:merge(RevTree1, Path) of
- {RevTree2, internal_node} ->
- {RevTree2, Done, true};
- {RevTree2, _} ->
- {RevTree2, false, Repair}
- end.
-
-tree_sort(Replies) ->
- SortFun = fun(A, B) -> sort_key(A) =< sort_key(B) end,
- lists:sort(SortFun, Replies).
-
-sort_key({ok, #doc{revs = {Pos, [Rev | _]}}}) ->
- {Pos, Rev};
-sort_key({{not_found, _}, {Pos, Rev}}) ->
- {Pos, Rev}.
-
-dict_replies(Dict, []) ->
- case [Count || {_Key, {_Reply, Count}} <- Dict] of
- [] -> {Dict, 0};
- Counts -> {Dict, lists:min(Counts)}
- end;
-dict_replies(Dict, [Reply | Rest]) ->
- NewDict = fabric_util:update_counter(Reply, 1, Dict),
- dict_replies(NewDict, Rest).
-
-maybe_read_repair(Db, IsTree, Replies, NodeRevs, ReplyCount, DoRepair) ->
- Docs =
- case IsTree of
- true -> tree_repair_docs(Replies, DoRepair);
- false -> dict_repair_docs(Replies, ReplyCount)
- end,
- case Docs of
- [] ->
- ok;
- _ ->
- erlang:spawn(fun() -> read_repair(Db, Docs, NodeRevs) end)
- end.
-
-tree_repair_docs(_Replies, false) ->
- [];
-tree_repair_docs(Replies, true) ->
- Leafs = couch_key_tree:get_all_leafs(Replies),
- [Doc || {Doc, {_Pos, _}} <- Leafs, is_record(Doc, doc)].
-
-dict_repair_docs(Replies, ReplyCount) ->
- NeedsRepair = lists:any(fun({_, {_, C}}) -> C < ReplyCount end, Replies),
- if
- not NeedsRepair -> [];
- true -> [Doc || {_, {{ok, Doc}, _}} <- Replies]
- end.
-
-read_repair(Db, Docs, NodeRevs) ->
- Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
- Res = fabric:update_docs(Db, Docs, Opts),
- case Res of
- {ok, []} ->
- couch_stats:increment_counter([fabric, read_repairs, success]);
- _ ->
- couch_stats:increment_counter([fabric, read_repairs, failure]),
- [#doc{id = Id} | _] = Docs,
- couch_log:notice("read_repair ~s ~s ~p", [Db, Id, Res])
- end.
-
-format_reply(_, _, RealReplyCount) when RealReplyCount =< 0 ->
- all_workers_died;
-format_reply(true, Replies, _) ->
- tree_format_replies(Replies);
-format_reply(false, Replies, _) ->
- dict_format_replies(Replies).
-
-tree_format_replies(RevTree) ->
- Leafs = couch_key_tree:get_all_leafs(RevTree),
- lists:sort(
- lists:map(
- fun(Reply) ->
- case Reply of
- {?REV_MISSING, {Pos, [Rev]}} ->
- {{not_found, missing}, {Pos, Rev}};
- {Doc, _} when is_record(Doc, doc) ->
- {ok, Doc}
- end
- end,
- Leafs
- )
- ).
-
-dict_format_replies(Dict) ->
- Replies0 = [Reply || {_, {Reply, _}} <- Dict],
-
- AllFoundRevs = lists:foldl(
- fun(Reply, Acc) ->
- case Reply of
- {ok, #doc{revs = {Pos, [RevId | _]}}} ->
- [{Pos, RevId} | Acc];
- _ ->
- Acc
- end
- end,
- [],
- Replies0
- ),
-
- %% Drop any not_found replies for which we
- %% found the revision on a different node.
- Replies1 = lists:filter(
- fun(Reply) ->
- case Reply of
- {{not_found, missing}, Rev} ->
- not lists:member(Rev, AllFoundRevs);
- _ ->
- true
- end
- end,
- Replies0
- ),
-
- % Remove replies with shorter revision
- % paths for a given revision.
- collapse_duplicate_revs(Replies1).
-
-collapse_duplicate_revs(Replies) ->
- % The collapse logic requires that replies are
- % sorted so that shorter rev paths are in
- % the list just before longer ones.
- %
- % This somewhat implicitly relies on Erlang's
- % sorting of [A, B] < [A, B, C] for all values
- % of C.
- collapse_duplicate_revs_int(lists:sort(Replies)).
-
-collapse_duplicate_revs_int([]) ->
- [];
-collapse_duplicate_revs_int([{ok, Doc1}, {ok, Doc2} | Rest]) ->
- {D1, R1} = Doc1#doc.revs,
- {D2, R2} = Doc2#doc.revs,
- Head =
- case D1 == D2 andalso lists:prefix(R1, R2) of
- true -> [];
- false -> [{ok, Doc1}]
- end,
- Head ++ collapse_duplicate_revs([{ok, Doc2} | Rest]);
-collapse_duplicate_revs_int([Reply | Rest]) ->
- [Reply | collapse_duplicate_revs(Rest)].
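-
-% A sketch of the collapse (hypothetical revs): two {ok, Doc} replies for the
-% same revision may carry different amounts of history, e.g.
-%   Doc1#doc.revs = {2, [<<"foo2">>]}               (stemmed)
-%   Doc2#doc.revs = {2, [<<"foo2">>, <<"foo">>]}    (full path)
-% lists:sort/1 places Doc1 immediately before Doc2, lists:prefix/2 matches,
-% and only {ok, Doc2} is kept, so the reply with the longer rev path wins.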
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- config:start_link([]),
- meck:new([fabric, couch_stats, couch_log]),
- meck:new(fabric_util, [passthrough]),
- meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
- meck:expect(couch_log, notice, fun(_, _) -> ok end),
- meck:expect(fabric_util, cleanup, fun(_) -> ok end).
-
-teardown_all(_) ->
- meck:unload(),
- config:stop().
-
-setup() ->
- meck:reset([
- couch_log,
- couch_stats,
- fabric,
- fabric_util
- ]).
-
-teardown(_) ->
- ok.
-
-state0(Revs, Latest) ->
- #state{
- worker_count = 3,
- workers =
- [#shard{node = 'node1'}, #shard{node = 'node2'}, #shard{node = 'node3'}],
- r = 2,
- revs = Revs,
- latest = Latest
- }.
-
-revs() -> [{1, <<"foo">>}, {1, <<"bar">>}, {1, <<"baz">>}].
-
-foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
-foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
-foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
-fooNF() -> {{not_found, missing}, {1, <<"foo">>}}.
-foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
-bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
-barNF() -> {{not_found, missing}, {1, <<"bar">>}}.
-bazNF() -> {{not_found, missing}, {1, <<"baz">>}}.
-baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
-
-open_doc_revs_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- check_empty_response_not_quorum(),
- check_basic_response(),
- check_finish_quorum(),
- check_finish_quorum_newer(),
- check_no_quorum_on_second(),
- check_done_on_third(),
- check_specific_revs_first_msg(),
- check_revs_done_on_agreement(),
- check_latest_true(),
- check_ancestor_counted_in_quorum(),
- check_not_found_counts_for_descendant(),
- check_worker_error_skipped(),
- check_quorum_only_counts_valid_responses(),
- check_empty_list_when_no_workers_reply(),
- check_node_rev_stored(),
- check_node_rev_store_head_only(),
- check_node_rev_store_multiple(),
- check_node_rev_dont_store_errors(),
- check_node_rev_store_non_errors(),
- check_node_rev_store_concatenate(),
-            check_node_rev_store_concatenate_multiple(),
- check_node_rev_unmodified_on_down_or_exit(),
- check_not_found_replies_are_removed_when_doc_found(),
- check_not_found_returned_when_one_of_docs_not_found(),
- check_not_found_returned_when_doc_not_found(),
- check_longer_rev_list_returned(),
- check_longer_rev_list_not_combined(),
- check_not_found_removed_and_longer_rev_list()
- ]
- }
- }.
-
-% Tests for revs=all
-
-check_empty_response_not_quorum() ->
- % Simple smoke test that we don't think we're
- % done with a first empty response
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- W3 = #shard{node = 'node3'},
- ?_assertMatch(
- {ok, #state{workers = [W2, W3]}},
- handle_message({ok, []}, W1, state0(all, false))
- ).
-
-check_basic_response() ->
-    % Check that we've handled a response
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- W3 = #shard{node = 'node3'},
- ?_assertMatch(
- {ok, #state{reply_count = 1, workers = [W2, W3]}},
- handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
- ).
-
-check_finish_quorum() ->
- % Two messages with the same revisions means we're done
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- S0 = state0(all, false),
- {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
- Expect = {stop, [bar1(), foo1()]},
- ?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
- end).
-
-check_finish_quorum_newer() ->
-    % We count a descendant of a revision toward quorum, so
-    % foo1 should count for foo2, which means we're finished.
- % We also validate that read_repair was triggered.
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- S0 = state0(all, false),
- {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
- Expect = {stop, [bar1(), foo2()]},
- ok = meck:reset(fabric),
- ?assertEqual(Expect, handle_message({ok, [foo2(), bar1()]}, W2, S1)),
- ok = meck:wait(fabric, update_docs, '_', 5000),
- ?assertMatch(
- [{_, {fabric, update_docs, [_, _, _]}, _}],
- meck:history(fabric)
- )
- end).
-
-check_no_quorum_on_second() ->
- % Quorum not yet met for the foo revision so we
- % would wait for w3
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- W3 = #shard{node = 'node3'},
- S0 = state0(all, false),
- {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
- ?assertMatch(
- {ok, #state{workers = [W3]}},
- handle_message({ok, [bar1()]}, W2, S1)
- )
- end).
-
-check_done_on_third() ->
- % The third message of three means we're done no matter
- % what. Every revision seen in this pattern should be
- % included.
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- W3 = #shard{node = 'node3'},
- S0 = state0(all, false),
- {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
- {ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
- Expect = {stop, [bar1(), foo1()]},
- ?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
- end).
-
-% Tests for a specific list of revs
-
-check_specific_revs_first_msg() ->
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- W3 = #shard{node = 'node3'},
- S0 = state0(revs(), false),
- ?assertMatch(
- {ok, #state{reply_count = 1, workers = [W2, W3]}},
- handle_message({ok, [foo1(), bar1(), bazNF()]}, W1, S0)
- )
- end).
-
-check_revs_done_on_agreement() ->
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- S0 = state0(revs(), false),
- Msg = {ok, [foo1(), bar1(), bazNF()]},
- {ok, S1} = handle_message(Msg, W1, S0),
- Expect = {stop, [bar1(), foo1(), bazNF()]},
- ?assertEqual(Expect, handle_message(Msg, W2, S1))
- end).
-
-check_latest_true() ->
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- S0 = state0(revs(), true),
- Msg1 = {ok, [foo2(), bar1(), bazNF()]},
- Msg2 = {ok, [foo2(), bar1(), bazNF()]},
- {ok, S1} = handle_message(Msg1, W1, S0),
- Expect = {stop, [bar1(), foo2(), bazNF()]},
- ?assertEqual(Expect, handle_message(Msg2, W2, S1))
- end).
-
-check_ancestor_counted_in_quorum() ->
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- S0 = state0(revs(), true),
- Msg1 = {ok, [foo1(), bar1(), bazNF()]},
- Msg2 = {ok, [foo2(), bar1(), bazNF()]},
- Expect = {stop, [bar1(), foo2(), bazNF()]},
-
- % Older first
- {ok, S1} = handle_message(Msg1, W1, S0),
- ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-
- % Newer first
- {ok, S2} = handle_message(Msg2, W2, S0),
- ?assertEqual(Expect, handle_message(Msg1, W1, S2))
- end).
-
-check_not_found_counts_for_descendant() ->
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- S0 = state0(revs(), true),
- Msg1 = {ok, [foo1(), bar1(), bazNF()]},
- Msg2 = {ok, [foo1(), bar1(), baz1()]},
- Expect = {stop, [bar1(), baz1(), foo1()]},
-
- % not_found first
- {ok, S1} = handle_message(Msg1, W1, S0),
- ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-
- % not_found second
- {ok, S2} = handle_message(Msg2, W2, S0),
- ?assertEqual(Expect, handle_message(Msg1, W1, S2))
- end).
-
-check_worker_error_skipped() ->
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- W3 = #shard{node = 'node3'},
- S0 = state0(revs(), true),
- Msg1 = {ok, [foo1(), bar1(), baz1()]},
- Msg2 = {rexi_EXIT, reason},
- Msg3 = {ok, [foo1(), bar1(), baz1()]},
- Expect = {stop, [bar1(), baz1(), foo1()]},
-
- {ok, S1} = handle_message(Msg1, W1, S0),
- {ok, S2} = handle_message(Msg2, W2, S1),
- ?assertEqual(Expect, handle_message(Msg3, W3, S2))
- end).
-
-check_quorum_only_counts_valid_responses() ->
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- W3 = #shard{node = 'node3'},
- S0 = state0(revs(), true),
- Msg1 = {rexi_EXIT, reason},
- Msg2 = {rexi_EXIT, reason},
- Msg3 = {ok, [foo1(), bar1(), baz1()]},
- Expect = {stop, [bar1(), baz1(), foo1()]},
-
- {ok, S1} = handle_message(Msg1, W1, S0),
- {ok, S2} = handle_message(Msg2, W2, S1),
- ?assertEqual(Expect, handle_message(Msg3, W3, S2))
- end).
-
-check_empty_list_when_no_workers_reply() ->
- ?_test(begin
- W1 = #shard{node = 'node1'},
- W2 = #shard{node = 'node2'},
- W3 = #shard{node = 'node3'},
- S0 = state0(revs(), true),
- Msg1 = {rexi_EXIT, reason},
- Msg2 = {rexi_EXIT, reason},
- Msg3 = {rexi_DOWN, nodedown, {nil, node()}, nil},
- Expect = {stop, all_workers_died},
-
- {ok, S1} = handle_message(Msg1, W1, S0),
- {ok, S2} = handle_message(Msg2, W2, S1),
- ?assertEqual(Expect, handle_message(Msg3, W3, S2))
- end).
-
-check_node_rev_stored() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [foo1()]}, W1, S0),
- ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
- end).
-
-check_node_rev_store_head_only() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [foo2()]}, W1, S0),
- ?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
- end).
-
-check_node_rev_store_multiple() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
- ?assertEqual(
- [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
- S1#state.node_revs
- )
- end).
-
-check_node_rev_dont_store_errors() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [barNF()]}, W1, S0),
- ?assertEqual([], S1#state.node_revs)
- end).
-
-check_node_rev_store_non_errors() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [foo1(), barNF()]}, W1, S0),
- ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
- end).
-
-check_node_rev_store_concatenate() ->
- ?_test(begin
- W2 = #shard{node = node2},
- S0 = state0([], true),
- S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
- {ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
- ?assertEqual(
- [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
- S2#state.node_revs
- )
- end).
-
-check_node_rev_store_concatenate_multiple() ->
- ?_test(begin
- W2 = #shard{node = node2},
- S0 = state0([], true),
- S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
- {ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
- ?assertEqual(
- [
- {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
- {node1, [{1, <<"foo">>}]}
- ],
- S2#state.node_revs
- )
- end).
-
-check_node_rev_unmodified_on_down_or_exit() ->
- ?_test(begin
- W2 = #shard{node = node2},
- S0 = state0([], true),
- S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
- Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
- {ok, S2} = handle_message(Down, W2, S1),
- ?assertEqual(
- [{node1, [{1, <<"foo">>}]}],
- S2#state.node_revs
- ),
-
- Exit = {rexi_EXIT, reason},
- {ok, S3} = handle_message(Exit, W2, S1),
- ?assertEqual(
- [{node1, [{1, <<"foo">>}]}],
- S3#state.node_revs
- )
- end).
-
-check_not_found_replies_are_removed_when_doc_found() ->
- ?_test(begin
- Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
- Expect = [bar1(), foo1()],
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_not_found_returned_when_one_of_docs_not_found() ->
- ?_test(begin
- Replies = replies_to_dict([foo1(), foo2(), barNF()]),
- Expect = [foo1(), foo2(), barNF()],
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_not_found_returned_when_doc_not_found() ->
- ?_test(begin
- Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
- Expect = [barNF(), bazNF(), fooNF()],
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_longer_rev_list_returned() ->
- ?_test(begin
- Replies = replies_to_dict([foo2(), foo2stemmed()]),
- Expect = [foo2()],
- ?assertEqual(2, length(Replies)),
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_longer_rev_list_not_combined() ->
- ?_test(begin
- Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
- Expect = [bar1(), foo2()],
- ?assertEqual(3, length(Replies)),
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_not_found_removed_and_longer_rev_list() ->
- ?_test(begin
- Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
- Expect = [foo2()],
- ?assertEqual(3, length(Replies)),
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
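-
-% A minimal sketch (not one of the fixture cases above) of the
-% sort-then-collapse behaviour in collapse_duplicate_revs/1, using the reply
-% helpers defined earlier: after sorting, a stemmed rev path sits immediately
-% before its longer extension and is dropped, while unrelated revisions are
-% kept in sorted order.
-collapse_duplicate_revs_sketch_test() ->
-    ?assertEqual([foo2()], collapse_duplicate_revs([foo2(), foo2stemmed()])),
-    ?assertEqual([bar1(), foo1()], collapse_duplicate_revs([foo1(), bar1()])).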
-
-replies_to_dict(Replies) ->
- [reply_to_element(R) || R <- Replies].
-
-reply_to_element({ok, #doc{revs = Revs}} = Reply) ->
- {_, [Rev | _]} = Revs,
- {{Rev, Revs}, {Reply, 1}};
-reply_to_element(Reply) ->
- {Reply, {Reply, 1}}.
-
--endif.
diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl
deleted file mode 100644
index 64221ab0e..000000000
--- a/src/fabric/src/fabric_doc_purge.erl
+++ /dev/null
@@ -1,591 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_purge).
-
--export([
- go/3
-]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--record(acc, {
- worker_uuids,
- resps,
- uuid_counts,
- w
-}).
-
-go(_, [], _) ->
- {ok, []};
-go(DbName, IdsRevs, Options) ->
- % Generate our purge requests of {UUID, DocId, Revs}
- {UUIDs, Reqs} = create_reqs(IdsRevs, [], []),
-
- % Fire off rexi workers for each shard.
- {Workers, WorkerUUIDs} = dict:fold(
- fun(Shard, ShardReqs, {Ws, WUUIDs}) ->
- #shard{name = ShardDbName, node = Node} = Shard,
- Args = [ShardDbName, ShardReqs, Options],
- Ref = rexi:cast(Node, {fabric_rpc, purge_docs, Args}),
- Worker = Shard#shard{ref = Ref},
- ShardUUIDs = [UUID || {UUID, _Id, _Revs} <- ShardReqs],
- {[Worker | Ws], [{Worker, ShardUUIDs} | WUUIDs]}
- end,
- {[], []},
- group_reqs_by_shard(DbName, Reqs)
- ),
-
- UUIDCounts = lists:foldl(
- fun({_Worker, WUUIDs}, CountAcc) ->
- lists:foldl(
- fun(UUID, InnerCountAcc) ->
- dict:update_counter(UUID, 1, InnerCountAcc)
- end,
- CountAcc,
- WUUIDs
- )
- end,
- dict:new(),
- WorkerUUIDs
- ),
-
- RexiMon = fabric_util:create_monitors(Workers),
- Timeout = fabric_util:request_timeout(),
- Acc0 = #acc{
- worker_uuids = WorkerUUIDs,
- resps = dict:from_list([{UUID, []} || UUID <- UUIDs]),
- uuid_counts = UUIDCounts,
- w = w(DbName, Options)
- },
- Acc2 =
- try
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- Acc0,
- infinity,
- Timeout
- )
- of
- {ok, Acc1} ->
- Acc1;
- {timeout, Acc1} ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc1,
- DefunctWorkers = [Worker || {Worker, _} <- WorkerUUIDs],
- fabric_util:log_timeout(DefunctWorkers, "purge_docs"),
- NewResps = append_errors(timeout, WorkerUUIDs, Resps),
- Acc1#acc{worker_uuids = [], resps = NewResps};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end,
-
- FinalResps = format_resps(UUIDs, Acc2),
- {resp_health(FinalResps), FinalResps}.
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc,
- Pred = fun({#shard{node = N}, _}) -> N == Node end,
- {Failed, Rest} = lists:partition(Pred, WorkerUUIDs),
- NewResps = append_errors(internal_server_error, Failed, Resps),
- maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-handle_message({rexi_EXIT, _}, Worker, Acc) ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc,
- {value, WorkerPair, Rest} = lists:keytake(Worker, 1, WorkerUUIDs),
- NewResps = append_errors(internal_server_error, [WorkerPair], Resps),
- maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-handle_message({ok, Replies}, Worker, Acc) ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc,
- {value, {_W, UUIDs}, Rest} = lists:keytake(Worker, 1, WorkerUUIDs),
- NewResps = append_resps(UUIDs, Replies, Resps),
- maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-handle_message({bad_request, Msg}, _, _) ->
- throw({bad_request, Msg}).
-
-create_reqs([], UUIDs, Reqs) ->
- {lists:reverse(UUIDs), lists:reverse(Reqs)};
-create_reqs([{Id, Revs} | RestIdsRevs], UUIDs, Reqs) ->
- UUID = couch_uuids:new(),
- NewUUIDs = [UUID | UUIDs],
- NewReqs = [{UUID, Id, Revs} | Reqs],
- create_reqs(RestIdsRevs, NewUUIDs, NewReqs).
-
-group_reqs_by_shard(DbName, Reqs) ->
- lists:foldl(
- fun({_UUID, Id, _Revs} = Req, D0) ->
- lists:foldl(
- fun(Shard, D1) ->
- dict:append(Shard, Req, D1)
- end,
- D0,
- mem3:shards(DbName, Id)
- )
- end,
- dict:new(),
- Reqs
- ).
-
-w(DbName, Options) ->
- try
- list_to_integer(couch_util:get_value(w, Options))
- catch
- _:_ ->
- mem3:quorum(DbName)
- end.
-
-append_errors(Type, WorkerUUIDs, Resps) ->
- lists:foldl(
- fun({_Worker, UUIDs}, RespAcc) ->
- Errors = [{error, Type} || _UUID <- UUIDs],
- append_resps(UUIDs, Errors, RespAcc)
- end,
- Resps,
- WorkerUUIDs
- ).
-
-append_resps([], [], Resps) ->
- Resps;
-append_resps([UUID | RestUUIDs], [Reply | RestReplies], Resps) ->
- NewResps = dict:append(UUID, Reply, Resps),
- append_resps(RestUUIDs, RestReplies, NewResps).
-
-maybe_stop(#acc{worker_uuids = []} = Acc) ->
- {stop, Acc};
-maybe_stop(#acc{resps = Resps, uuid_counts = Counts, w = W} = Acc) ->
- try
- dict:fold(
- fun(UUID, UUIDResps, _) ->
- UUIDCount = dict:fetch(UUID, Counts),
- case has_quorum(UUIDResps, UUIDCount, W) of
- true -> ok;
- false -> throw(keep_going)
- end
- end,
- nil,
- Resps
- ),
- {stop, Acc}
- catch
- throw:keep_going ->
- {ok, Acc}
- end.
-
-format_resps(UUIDs, #acc{} = Acc) ->
- #acc{
- resps = Resps,
- w = W
- } = Acc,
- FoldFun = fun(UUID, Replies, ReplyAcc) ->
- OkReplies = [Reply || {ok, Reply} <- Replies],
- case OkReplies of
- [] ->
- [Error | _] = lists:usort(Replies),
- [{UUID, Error} | ReplyAcc];
- _ ->
- AllRevs = lists:usort(lists:flatten(OkReplies)),
- IsOk =
- length(OkReplies) >= W andalso
- length(lists:usort(OkReplies)) == 1,
- Health =
- if
- IsOk -> ok;
- true -> accepted
- end,
- [{UUID, {Health, AllRevs}} | ReplyAcc]
- end
- end,
- FinalReplies = dict:fold(FoldFun, [], Resps),
- couch_util:reorder_results(UUIDs, FinalReplies);
-format_resps(_UUIDs, Else) ->
- Else.
-
-resp_health(Resps) ->
- Healths = lists:usort([H || {H, _} <- Resps]),
- HasError = lists:member(error, Healths),
- HasAccepted = lists:member(accepted, Healths),
- AllOk = Healths == [ok],
- if
- HasError -> error;
- HasAccepted -> accepted;
- AllOk -> ok;
- true -> error
- end.
-
-has_quorum(Resps, Count, W) ->
- OkResps = [R || {ok, _} = R <- Resps],
- OkCounts = lists:foldl(
- fun(R, Acc) ->
- orddict:update_counter(R, 1, Acc)
- end,
- orddict:new(),
- OkResps
- ),
- MaxOk = lists:max([0 | element(2, lists:unzip(OkCounts))]),
- if
- MaxOk >= W -> true;
- length(Resps) >= Count -> true;
- true -> false
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-purge_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- t_w2_ok(),
- t_w3_ok(),
-
- t_w2_mixed_accepted(),
- t_w3_mixed_accepted(),
-
- t_w2_exit1_ok(),
- t_w2_exit2_accepted(),
- t_w2_exit3_error(),
-
- t_w4_accepted(),
-
- t_mixed_ok_accepted(),
- t_mixed_errors()
- ]
- }.
-
-setup() ->
- meck:new(couch_log),
- meck:expect(couch_log, warning, fun(_, _) -> ok end),
- meck:expect(couch_log, notice, fun(_, _) -> ok end).
-
-teardown(_) ->
- meck:unload().
-
-t_w2_ok() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {stop, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, true),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
- ?assertEqual(Expect, Resps),
- ?assertEqual(ok, resp_health(Resps))
- end).
-
-t_w3_ok() ->
- ?_test(begin
- Acc0 = create_init_acc(3),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(ok, resp_health(Resps))
- end).
-
-t_w2_mixed_accepted() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
- Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-
- {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg1, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [
- {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
- {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
- ],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-t_w3_mixed_accepted() ->
- ?_test(begin
- Acc0 = create_init_acc(3),
- Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
- Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-
- {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg2, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [
- {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
- {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
- ],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-t_w2_exit1_ok() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(ok, resp_health(Resps))
- end).
-
-t_w2_exit2_accepted() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-t_w2_exit3_error() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(ExitMsg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [
- {error, internal_server_error},
- {error, internal_server_error}
- ],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(error, resp_health(Resps))
- end).
-
-t_w4_accepted() ->
- % Make sure we return when all workers have responded
- % rather than wait around for a timeout if a user asks
-    % for a quorum with more than the available number of
- % shards.
- ?_test(begin
- Acc0 = create_init_acc(4),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-t_mixed_ok_accepted() ->
- ?_test(begin
- WorkerUUIDs = [
- {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
- {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
- {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-
- {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
- {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
- {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
- ],
-
- Acc0 = #acc{
- worker_uuids = WorkerUUIDs,
- resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
- uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
- w = 2
- },
-
- Msg1 = {ok, [{ok, [{1, <<"foo">>}]}]},
- Msg2 = {ok, [{ok, [{2, <<"bar">>}]}]},
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
- {ok, Acc2} = handle_message(Msg1, worker(2, Acc0), Acc1),
- {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
- {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
- {stop, Acc5} = handle_message(Msg2, worker(6, Acc0), Acc4),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-t_mixed_errors() ->
- ?_test(begin
- WorkerUUIDs = [
- {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
- {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
- {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-
- {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
- {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
- {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
- ],
-
- Acc0 = #acc{
- worker_uuids = WorkerUUIDs,
- resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
- uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
- w = 2
- },
-
- Msg = {ok, [{ok, [{1, <<"foo">>}]}]},
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
- {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
- {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
- {stop, Acc5} = handle_message(ExitMsg, worker(6, Acc0), Acc4),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {error, internal_server_error}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
- ?assertEqual(Expect, Resps),
- ?assertEqual(error, resp_health(Resps))
- end).
-
-create_init_acc(W) ->
- UUID1 = <<"uuid1">>,
- UUID2 = <<"uuid2">>,
-
- Nodes = [node1, node2, node3],
- Shards = mem3_util:create_partition_map(<<"foo">>, 3, 1, Nodes),
-
- % Create our worker_uuids. We're relying on the fact that
- % we're using a fake Q=1 db so we don't have to worry
- % about any hashing here.
- WorkerUUIDs = lists:map(
- fun(Shard) ->
- {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
- end,
- Shards
- ),
-
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = dict:from_list([{UUID1, []}, {UUID2, []}]),
- uuid_counts = dict:from_list([{UUID1, 3}, {UUID2, 3}]),
- w = W
- }.
-
-worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
- {Worker, _} = lists:nth(N, WorkerUUIDs),
- Worker.
-
-check_quorum(Acc, Expect) ->
- dict:fold(
- fun(_Shard, Resps, _) ->
- ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
- end,
- nil,
- Acc#acc.resps
- ).
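-
-% A minimal sketch of the two stop conditions in has_quorum/3 (either W
-% identical ok replies have been seen, or every expected worker has answered)
-% and of how resp_health/1 summarizes per-UUID results. The reply terms below
-% are hypothetical and not tied to the fixtures above.
-has_quorum_sketch_test() ->
-    ?assert(has_quorum([{ok, rev1}, {ok, rev1}], 3, 2)),
-    ?assertNot(has_quorum([{ok, rev1}], 3, 2)),
-    ?assert(has_quorum([{error, a}, {error, b}, {error, c}], 3, 2)),
-    ?assertEqual(ok, resp_health([{ok, [rev1]}, {ok, [rev1]}])),
-    ?assertEqual(accepted, resp_health([{ok, [rev1]}, {accepted, [rev1]}])),
-    ?assertEqual(error, resp_health([{error, timeout}, {ok, [rev1]}])).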
-
--endif.
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
deleted file mode 100644
index 9de9c6580..000000000
--- a/src/fabric/src/fabric_doc_update.erl
+++ /dev/null
@@ -1,762 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_update).
-
--export([go/3]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(_, [], _) ->
- {ok, []};
-go(DbName, AllDocs0, Opts) ->
- AllDocs1 = before_doc_update(DbName, AllDocs0, Opts),
- AllDocs = tag_docs(AllDocs1),
- validate_atomic_update(DbName, AllDocs, lists:member(all_or_nothing, Opts)),
- Options = lists:delete(all_or_nothing, Opts),
- GroupedDocs = lists:map(
- fun({#shard{name = Name, node = Node} = Shard, Docs}) ->
- Docs1 = untag_docs(Docs),
- Ref = rexi:cast(Node, {fabric_rpc, update_docs, [Name, Docs1, Options]}),
- {Shard#shard{ref = Ref}, Docs}
- end,
- group_docs_by_shard(DbName, AllDocs)
- ),
- {Workers, _} = lists:unzip(GroupedDocs),
- RexiMon = fabric_util:create_monitors(Workers),
- W = couch_util:get_value(w, Options, integer_to_list(mem3:quorum(DbName))),
- Acc0 = {length(Workers), length(AllDocs), list_to_integer(W), GroupedDocs, dict:new()},
- Timeout = fabric_util:request_timeout(),
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, Acc0, infinity, Timeout) of
- {ok, {Health, Results}} when
- Health =:= ok; Health =:= accepted; Health =:= error
- ->
- ensure_all_responses(Health, AllDocs, Results);
- {timeout, Acc} ->
- {_, _, W1, GroupedDocs1, DocReplDict} = Acc,
- {DefunctWorkers, _} = lists:unzip(GroupedDocs1),
- fabric_util:log_timeout(DefunctWorkers, "update_docs"),
- {Health, _, Resp} = dict:fold(
- fun force_reply/3,
- {ok, W1, []},
- DocReplDict
- ),
- ensure_all_responses(Health, AllDocs, Resp);
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker, Acc0) ->
- {_, LenDocs, W, GroupedDocs, DocReplyDict} = Acc0,
- NewGrpDocs = [X || {#shard{node = N}, _} = X <- GroupedDocs, N =/= NodeRef],
- skip_message({length(NewGrpDocs), LenDocs, W, NewGrpDocs, DocReplyDict});
-handle_message({rexi_EXIT, _}, Worker, Acc0) ->
- {WC, LenDocs, W, GrpDocs, DocReplyDict} = Acc0,
- NewGrpDocs = lists:keydelete(Worker, 1, GrpDocs),
- skip_message({WC - 1, LenDocs, W, NewGrpDocs, DocReplyDict});
-handle_message({error, all_dbs_active}, Worker, Acc0) ->
-    % treat it like rexi_EXIT; the hope is that at least one copy will return successfully
- {WC, LenDocs, W, GrpDocs, DocReplyDict} = Acc0,
- NewGrpDocs = lists:keydelete(Worker, 1, GrpDocs),
- skip_message({WC - 1, LenDocs, W, NewGrpDocs, DocReplyDict});
-handle_message(internal_server_error, Worker, Acc0) ->
- % happens when we fail to load validation functions in an RPC worker
- {WC, LenDocs, W, GrpDocs, DocReplyDict} = Acc0,
- NewGrpDocs = lists:keydelete(Worker, 1, GrpDocs),
- skip_message({WC - 1, LenDocs, W, NewGrpDocs, DocReplyDict});
-handle_message(attachment_chunk_received, _Worker, Acc0) ->
- {ok, Acc0};
-handle_message({ok, Replies}, Worker, Acc0) ->
- {WaitingCount, DocCount, W, GroupedDocs, DocReplyDict0} = Acc0,
- {value, {_, Docs}, NewGrpDocs} = lists:keytake(Worker, 1, GroupedDocs),
- DocReplyDict = append_update_replies(Docs, Replies, DocReplyDict0),
- case {WaitingCount, dict:size(DocReplyDict)} of
- {1, _} ->
- % last message has arrived, we need to conclude things
- {Health, W, Reply} = dict:fold(
- fun force_reply/3,
- {ok, W, []},
- DocReplyDict
- ),
- {stop, {Health, Reply}};
- {_, DocCount} ->
- % we've got at least one reply for each document, let's take a look
- case dict:fold(fun maybe_reply/3, {stop, W, []}, DocReplyDict) of
- continue ->
- {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}};
- {stop, W, FinalReplies} ->
- {stop, {ok, FinalReplies}}
- end;
- _ ->
- {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}}
- end;
-handle_message({missing_stub, Stub}, _, _) ->
- throw({missing_stub, Stub});
-handle_message({not_found, no_db_file} = X, Worker, Acc0) ->
- {_, _, _, GroupedDocs, _} = Acc0,
- Docs = couch_util:get_value(Worker, GroupedDocs),
- handle_message({ok, [X || _D <- Docs]}, Worker, Acc0);
-handle_message({bad_request, Msg}, _, _) ->
- throw({bad_request, Msg});
-handle_message({request_entity_too_large, Entity}, _, _) ->
- throw({request_entity_too_large, Entity}).
-
-before_doc_update(DbName, Docs, Opts) ->
- case {fabric_util:is_replicator_db(DbName), fabric_util:is_users_db(DbName)} of
- {true, _} ->
- %% cluster db is expensive to create so we only do it if we have to
- Db = fabric_util:open_cluster_db(DbName, Opts),
- [
- couch_replicator_docs:before_doc_update(Doc, Db, replicated_changes)
- || Doc <- Docs
- ];
- {_, true} ->
- %% cluster db is expensive to create so we only do it if we have to
- Db = fabric_util:open_cluster_db(DbName, Opts),
- [
- couch_users_db:before_doc_update(Doc, Db, interactive_edit)
- || Doc <- Docs
- ];
- _ ->
- Docs
- end.
-
-tag_docs([]) ->
- [];
-tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
- [Doc#doc{meta = [{ref, make_ref()} | Meta]} | tag_docs(Rest)].
-
-untag_docs([]) ->
- [];
-untag_docs([#doc{meta = Meta} = Doc | Rest]) ->
- [Doc#doc{meta = lists:keydelete(ref, 1, Meta)} | untag_docs(Rest)].
-
-force_reply(Doc, [], {_, W, Acc}) ->
- {error, W, [{Doc, {error, internal_server_error}} | Acc]};
-force_reply(Doc, [FirstReply | _] = Replies, {Health, W, Acc}) ->
- case update_quorum_met(W, Replies) of
- {true, Reply} ->
-            % corner case: new_edits:false and a VDU, e.g. [noreply, forbidden, noreply]
- case check_forbidden_msg(Replies) of
- {forbidden, ForbiddenReply} ->
- {Health, W, [{Doc, ForbiddenReply} | Acc]};
- false ->
- {Health, W, [{Doc, Reply} | Acc]}
- end;
- false ->
- case [Reply || {ok, Reply} <- Replies] of
- [] ->
- % check if all errors are identical, if so inherit health
- case lists:all(fun(E) -> E =:= FirstReply end, Replies) of
- true ->
- CounterKey = [fabric, doc_update, errors],
- couch_stats:increment_counter(CounterKey),
- {Health, W, [{Doc, FirstReply} | Acc]};
- false ->
- CounterKey = [fabric, doc_update, mismatched_errors],
- couch_stats:increment_counter(CounterKey),
- case check_forbidden_msg(Replies) of
- {forbidden, ForbiddenReply} ->
- {Health, W, [{Doc, ForbiddenReply} | Acc]};
- false ->
- {error, W, [{Doc, FirstReply} | Acc]}
- end
- end;
- [AcceptedRev | _] ->
- CounterKey = [fabric, doc_update, write_quorum_errors],
- couch_stats:increment_counter(CounterKey),
- NewHealth =
- case Health of
- ok -> accepted;
- _ -> Health
- end,
- {NewHealth, W, [{Doc, {accepted, AcceptedRev}} | Acc]}
- end
- end.
-
-maybe_reply(_, _, continue) ->
- % we didn't meet quorum for all docs, so we're fast-forwarding the fold
- continue;
-maybe_reply(Doc, Replies, {stop, W, Acc}) ->
- case update_quorum_met(W, Replies) of
- {true, Reply} ->
- {stop, W, [{Doc, Reply} | Acc]};
- false ->
- continue
- end.
-
-% this ensures that we got some response for all documents being updated
-ensure_all_responses(Health, AllDocs, Resp) ->
- Results = [
- R
- || R <- couch_util:reorder_results(
- AllDocs,
- Resp,
- {error, internal_server_error}
- ),
- R =/= noreply
- ],
- case lists:member({error, internal_server_error}, Results) of
- true ->
- {error, Results};
- false ->
- {Health, Results}
- end.
-
-% This is a corner case where
-% 1) the revision trees for the document are out of sync across nodes
-% 2) an update on one node extends the revision tree
-% 3) the VDU forbids the document
-% 4) the remaining nodes do not extend the revision tree, so noreply is returned
-% If at least one node forbids the update, and all other replies
-% are noreply, then we reject the update (sketched in the eunit section below)
-check_forbidden_msg(Replies) ->
- Pred = fun
- ({_, {forbidden, _}}) ->
- true;
- (_) ->
- false
- end,
- case lists:partition(Pred, Replies) of
- {[], _} ->
- false;
- {[ForbiddenReply = {_, {forbidden, _}} | _], RemReplies} ->
- case lists:all(fun(E) -> E =:= noreply end, RemReplies) of
- true ->
- {forbidden, ForbiddenReply};
- false ->
- false
- end
- end.
-
-update_quorum_met(W, Replies) ->
- Counters = lists:foldl(
- fun(R, D) -> orddict:update_counter(R, 1, D) end,
- orddict:new(),
- Replies
- ),
- GoodReplies = lists:filter(fun good_reply/1, Counters),
- case lists:dropwhile(fun({_, Count}) -> Count < W end, GoodReplies) of
- [] ->
- false;
- [{FinalReply, _} | _] ->
- {true, FinalReply}
- end.
-
-good_reply({{ok, _}, _}) ->
- true;
-good_reply({noreply, _}) ->
- true;
-good_reply(_) ->
- false.
-
--spec group_docs_by_shard(binary(), [#doc{}]) -> [{#shard{}, [#doc{}]}].
-group_docs_by_shard(DbName, Docs) ->
- dict:to_list(
- lists:foldl(
- fun(#doc{id = Id} = Doc, D0) ->
- lists:foldl(
- fun(Shard, D1) ->
- dict:append(Shard, Doc, D1)
- end,
- D0,
- mem3:shards(DbName, Id)
- )
- end,
- dict:new(),
- Docs
- )
- ).
-
-append_update_replies([], [], DocReplyDict) ->
- DocReplyDict;
-append_update_replies([Doc | Rest], [], Dict0) ->
-    % icky: with replicated_changes, only errors show up in the result
- append_update_replies(Rest, [], dict:append(Doc, noreply, Dict0));
-append_update_replies([Doc | Rest1], [Reply | Rest2], Dict0) ->
- append_update_replies(Rest1, Rest2, dict:append(Doc, Reply, Dict0)).
-
-skip_message({0, _, W, _, DocReplyDict}) ->
- {Health, W, Reply} = dict:fold(fun force_reply/3, {ok, W, []}, DocReplyDict),
- {stop, {Health, Reply}};
-skip_message(Acc0) ->
- {ok, Acc0}.
-
-validate_atomic_update(_, _, false) ->
- ok;
-validate_atomic_update(_DbName, AllDocs, true) ->
-    % TODO: actually perform the validation. This requires some hackery: we need
-    % to basically extract the prep_and_validate_updates function from couch_db
-    % and run only that, without actually writing anything in the success case.
- Error = {not_implemented, <<"all_or_nothing is not supported">>},
- PreCommitFailures = lists:map(
- fun(#doc{id = Id, revs = {Pos, Revs}}) ->
- case Revs of
- [] -> RevId = <<>>;
- [RevId | _] -> ok
- end,
- {{Id, {Pos, RevId}}, Error}
- end,
- AllDocs
- ),
- throw({aborted, PreCommitFailures}).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- meck:new([couch_log, couch_stats]),
- meck:expect(couch_log, warning, fun(_, _) -> ok end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end).
-
-teardown_all(_) ->
- meck:unload().
-
-doc_update_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- fun doc_update1/0,
- fun doc_update2/0,
- fun doc_update3/0,
- fun handle_all_dbs_active/0,
- fun handle_two_all_dbs_actives/0,
- fun one_forbid/0,
- fun two_forbid/0,
- fun extend_tree_forbid/0,
- fun other_errors_one_forbid/0,
- fun one_error_two_forbid/0,
- fun one_success_two_forbid/0
- ]
- }.
-
-% eunit test cases
-doc_update1() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1],
- Docs2 = [Doc1, Doc2],
- Dict = dict:from_list([{Doc, []} || Doc <- Docs]),
- Dict2 = dict:from_list([{Doc, []} || Doc <- Docs2]),
-
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
-
- % test for W = 2
- AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs, Dict},
-
- {ok, {WaitingCountW2_1, _, _, _, _} = AccW2_1} =
- handle_message({ok, [{ok, Doc1}]}, hd(Shards), AccW2),
- ?assertEqual(WaitingCountW2_1, 2),
- {stop, FinalReplyW2} =
- handle_message({ok, [{ok, Doc1}]}, lists:nth(2, Shards), AccW2_1),
- ?assertEqual({ok, [{Doc1, {ok, Doc1}}]}, FinalReplyW2),
-
- % test for W = 3
- AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs, Dict},
-
- {ok, {WaitingCountW3_1, _, _, _, _} = AccW3_1} =
- handle_message({ok, [{ok, Doc1}]}, hd(Shards), AccW3),
- ?assertEqual(WaitingCountW3_1, 2),
-
- {ok, {WaitingCountW3_2, _, _, _, _} = AccW3_2} =
- handle_message({ok, [{ok, Doc1}]}, lists:nth(2, Shards), AccW3_1),
- ?assertEqual(WaitingCountW3_2, 1),
-
- {stop, FinalReplyW3} =
- handle_message({ok, [{ok, Doc1}]}, lists:nth(3, Shards), AccW3_2),
- ?assertEqual({ok, [{Doc1, {ok, Doc1}}]}, FinalReplyW3),
-
-    % test with a W quorum > number of shards, which should stop immediately
-
- Shards2 = mem3_util:create_partition_map("foo", 1, 1, ["node1"]),
- GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>, Shards2, Docs),
-
- AccW4 =
- {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
- Bool =
- case handle_message({ok, [{ok, Doc1}]}, hd(Shards2), AccW4) of
- {stop, _Reply} ->
- true;
- _ ->
- false
- end,
- ?assertEqual(Bool, true),
-
- % Docs with no replies should end up as {error, internal_server_error}
- SA1 = #shard{node = a, range = 1},
- SB1 = #shard{node = b, range = 1},
- SA2 = #shard{node = a, range = 2},
- SB2 = #shard{node = b, range = 2},
- GroupedDocs3 = [{SA1, [Doc1]}, {SB1, [Doc1]}, {SA2, [Doc2]}, {SB2, [Doc2]}],
- StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
- {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
- {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
- {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
- {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
- ?assertEqual(
- {error, [{Doc1, {accepted, "A"}}, {Doc2, {error, internal_server_error}}]},
- ReplyW5
- ).
-
-doc_update2() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, hd(Shards), Acc0),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message({rexi_EXIT, 1}, lists:nth(2, Shards), Acc1),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message({rexi_EXIT, 1}, lists:nth(3, Shards), Acc2),
-
- ?assertEqual(
- {accepted, [{Doc1, {accepted, Doc1}}, {Doc2, {accepted, Doc2}}]},
- Reply
- ).
-
-doc_update3() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, hd(Shards), Acc0),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message({rexi_EXIT, 1}, lists:nth(2, Shards), Acc1),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, lists:nth(3, Shards), Acc2),
-
- ?assertEqual({ok, [{Doc1, {ok, Doc1}}, {Doc2, {ok, Doc2}}]}, Reply).
-
-handle_all_dbs_active() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, hd(Shards), Acc0),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message({error, all_dbs_active}, lists:nth(2, Shards), Acc1),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, lists:nth(3, Shards), Acc2),
-
- ?assertEqual({ok, [{Doc1, {ok, Doc1}}, {Doc2, {ok, Doc2}}]}, Reply).
-
-handle_two_all_dbs_actives() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, hd(Shards), Acc0),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message({error, all_dbs_active}, lists:nth(2, Shards), Acc1),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message({error, all_dbs_active}, lists:nth(3, Shards), Acc2),
-
- ?assertEqual(
- {accepted, [{Doc1, {accepted, Doc1}}, {Doc2, {accepted, Doc2}}]},
- Reply
- ).
-
-one_forbid() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
-
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message({ok, [{ok, Doc1}, noreply]}, hd(Shards), Acc0),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(2, Shards), Acc1
- ),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message({ok, [{ok, Doc1}, noreply]}, lists:nth(3, Shards), Acc2),
-
- ?assertEqual(
- {ok, [
- {Doc1, {ok, Doc1}},
- {Doc2, {Doc2, {forbidden, <<"not allowed">>}}}
- ]},
- Reply
- ).
-
-two_forbid() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
-
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message({ok, [{ok, Doc1}, noreply]}, hd(Shards), Acc0),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(2, Shards), Acc1
- ),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(3, Shards), Acc2
- ),
-
- ?assertEqual(
- {ok, [
- {Doc1, {ok, Doc1}},
- {Doc2, {Doc2, {forbidden, <<"not allowed">>}}}
- ]},
- Reply
- ).
-
-% This should actually never happen, because an `{ok, Doc}` message means that the revision
-% tree is extended and so the VDU should forbid the document.
-% Leaving this test here to make sure quorum rules still apply.
-extend_tree_forbid() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
-
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, hd(Shards), Acc0),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(2, Shards), Acc1
- ),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, lists:nth(3, Shards), Acc2),
-
- ?assertEqual({ok, [{Doc1, {ok, Doc1}}, {Doc2, {ok, Doc2}}]}, Reply).
-
-other_errors_one_forbid() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
-
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message({ok, [{ok, Doc1}, {Doc2, {error, <<"foo">>}}]}, hd(Shards), Acc0),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message({ok, [{ok, Doc1}, {Doc2, {error, <<"bar">>}}]}, lists:nth(2, Shards), Acc1),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(3, Shards), Acc2
- ),
- ?assertEqual({error, [{Doc1, {ok, Doc1}}, {Doc2, {Doc2, {error, <<"foo">>}}}]}, Reply).
-
-one_error_two_forbid() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
-
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, hd(Shards), Acc0
- ),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message({ok, [{ok, Doc1}, {Doc2, {error, <<"foo">>}}]}, lists:nth(2, Shards), Acc1),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(3, Shards), Acc2
- ),
- ?assertEqual(
- {error, [{Doc1, {ok, Doc1}}, {Doc2, {Doc2, {forbidden, <<"not allowed">>}}}]}, Reply
- ).
-
-one_success_two_forbid() ->
- Doc1 = #doc{revs = {1, [<<"foo">>]}},
- Doc2 = #doc{revs = {1, [<<"bar">>]}},
- Docs = [Doc1, Doc2],
- Shards =
- mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
-
- Acc0 = {
- length(Shards),
- length(Docs),
- list_to_integer("2"),
- GroupedDocs,
- dict:from_list([{Doc, []} || Doc <- Docs])
- },
-
- {ok, {WaitingCount1, _, _, _, _} = Acc1} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, hd(Shards), Acc0
- ),
- ?assertEqual(WaitingCount1, 2),
-
- {ok, {WaitingCount2, _, _, _, _} = Acc2} =
- handle_message({ok, [{ok, Doc1}, {Doc2, {ok, Doc2}}]}, lists:nth(2, Shards), Acc1),
- ?assertEqual(WaitingCount2, 1),
-
- {stop, Reply} =
- handle_message(
- {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(3, Shards), Acc2
- ),
- ?assertEqual(
- {error, [{Doc1, {ok, Doc1}}, {Doc2, {Doc2, {forbidden, <<"not allowed">>}}}]}, Reply
- ).
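-
-% A minimal sketch of the write-quorum decision helpers, using hypothetical
-% reply terms rather than the shard fixtures above: update_quorum_met/2 needs
-% W identical good replies, and check_forbidden_msg/1 reports forbidden only
-% when every other reply is a noreply.
-quorum_and_forbidden_sketch_test() ->
-    Forbidden = {doc1, {forbidden, <<"not allowed">>}},
-    ?assertEqual(
-        {true, {ok, rev1}},
-        update_quorum_met(2, [{ok, rev1}, {ok, rev1}, noreply])
-    ),
-    ?assertEqual(false, update_quorum_met(2, [{ok, rev1}, {ok, rev2}])),
-    ?assertEqual(
-        {forbidden, Forbidden},
-        check_forbidden_msg([noreply, Forbidden, noreply])
-    ),
-    ?assertEqual(false, check_forbidden_msg([{ok, rev1}, Forbidden])).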
-
-% needed for testing to avoid having to start the mem3 application
-group_docs_by_shard_hack(_DbName, Shards, Docs) ->
- dict:to_list(
- lists:foldl(
- fun(#doc{id = _Id} = Doc, D0) ->
- lists:foldl(
- fun(Shard, D1) ->
- dict:append(Shard, Doc, D1)
- end,
- D0,
- Shards
- )
- end,
- dict:new(),
- Docs
- )
- ).
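-
-% A minimal sketch, with a hypothetical doc, of how all_or_nothing updates
-% are rejected up front: every doc in the batch is reported as
-% not_implemented and the whole update is aborted.
-validate_atomic_update_sketch_test() ->
-    Doc = #doc{id = <<"docid">>, revs = {1, [<<"rev1">>]}},
-    Error = {not_implemented, <<"all_or_nothing is not supported">>},
-    ?assertThrow(
-        {aborted, [{{<<"docid">>, {1, <<"rev1">>}}, Error}]},
-        validate_atomic_update(<<"db">>, [Doc], true)
-    ),
-    ?assertEqual(ok, validate_atomic_update(<<"db">>, [Doc], false)).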
-
--endif.
diff --git a/src/fabric/src/fabric_group_info.erl b/src/fabric/src/fabric_group_info.erl
deleted file mode 100644
index ff875aa96..000000000
--- a/src/fabric/src/fabric_group_info.erl
+++ /dev/null
@@ -1,164 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_group_info).
-
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, GroupId) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, GroupId, [?ADMIN_CTX]),
- go(DbName, DDoc);
-go(DbName, #doc{id = DDocId}) ->
- Shards = mem3:shards(DbName),
- Ushards = mem3:ushards(DbName),
- Workers = fabric_util:submit_jobs(Shards, group_info, [DDocId]),
- RexiMon = fabric_util:create_monitors(Shards),
- USet = sets:from_list([{Id, N} || #shard{name = Id, node = N} <- Ushards]),
- Acc = {fabric_dict:init(Workers, nil), [], USet},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc) of
- {timeout, {WorkersDict, _, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "group_info"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, {Counters, Resps, USet}) ->
- case fabric_ring:node_down(NodeRef, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, USet}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps, USet}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, USet}};
- error -> {error, Reason}
- end;
-handle_message({ok, Info}, Shard, {Counters, Resps, USet}) ->
- case fabric_ring:handle_response(Shard, Info, Counters, Resps) of
- {ok, {Counters1, Resps1}} ->
- {ok, {Counters1, Resps1, USet}};
- {stop, Resps1} ->
- {stop, build_final_response(USet, Resps1)}
- end;
-handle_message(Reason, Shard, {Counters, Resps, USet}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, USet}};
- error -> {error, Reason}
- end.
-
-build_final_response(USet, Responses) ->
- AccF = fabric_dict:fold(
- fun(#shard{name = Id, node = Node}, Info, Acc) ->
- IsPreferred = sets:is_element({Id, Node}, USet),
- dict:append(Id, {Node, IsPreferred, Info}, Acc)
- end,
- dict:new(),
- Responses
- ),
- Pending = aggregate_pending(AccF),
- Infos = get_infos(AccF),
- [{updates_pending, {Pending}} | merge_results(Infos)].
-
-get_infos(Acc) ->
- Values = [V || {_, V} <- dict:to_list(Acc)],
- lists:flatten([Info || {_Node, _Pref, Info} <- lists:flatten(Values)]).
-
-aggregate_pending(Dict) ->
- {Preferred, Total, Minimum} =
- dict:fold(
- fun(_Name, Results, {P, T, M}) ->
- {Preferred, Total, Minimum} = calculate_pending(Results),
- {P + Preferred, T + Total, M + Minimum}
- end,
- {0, 0, 0},
- Dict
- ),
- [
- {minimum, Minimum},
- {preferred, Preferred},
- {total, Total}
- ].
-
-calculate_pending(Results) ->
- lists:foldl(
- fun
- ({_Node, true, Info}, {P, T, V}) ->
- Pending = couch_util:get_value(pending_updates, Info),
- {P + Pending, T + Pending, min(Pending, V)};
- ({_Node, false, Info}, {P, T, V}) ->
- Pending = couch_util:get_value(pending_updates, Info),
- {P, T + Pending, min(Pending, V)}
- end,
- {0, 0, infinity},
- Results
- ).
-
-merge_results(Info) ->
- Dict = lists:foldl(
- fun({K, V}, D0) -> orddict:append(K, V, D0) end,
- orddict:new(),
- Info
- ),
- orddict:fold(
- fun
- (signature, [X | _], Acc) ->
- [{signature, X} | Acc];
- (language, [X | _], Acc) ->
- [{language, X} | Acc];
- (sizes, X, Acc) ->
- [{sizes, {merge_object(X)}} | Acc];
- (compact_running, X, Acc) ->
- [{compact_running, lists:member(true, X)} | Acc];
- (updater_running, X, Acc) ->
- [{updater_running, lists:member(true, X)} | Acc];
- (waiting_commit, X, Acc) ->
- [{waiting_commit, lists:member(true, X)} | Acc];
- (waiting_clients, X, Acc) ->
- [{waiting_clients, lists:sum(X)} | Acc];
- (update_seq, X, Acc) ->
- [{update_seq, lists:sum(X)} | Acc];
- (purge_seq, X, Acc) ->
- [{purge_seq, lists:sum(X)} | Acc];
- (collator_versions, X, Acc) ->
- % Concatenate (undo orddict:append/3), then
- % sort and remove duplicates.
- Vs = lists:usort(lists:flatmap(fun(V) -> V end, X)),
- [{collator_versions, Vs} | Acc];
- (_, _, Acc) ->
- Acc
- end,
- [],
- Dict
- ).
-
-merge_object(Objects) ->
- Dict = lists:foldl(
- fun({Props}, D) ->
- lists:foldl(fun({K, V}, D0) -> orddict:append(K, V, D0) end, D, Props)
- end,
- orddict:new(),
- Objects
- ),
- orddict:fold(
- fun(Key, X, Acc) ->
- [{Key, lists:sum(X)} | Acc]
- end,
- [],
- Dict
- ).
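-
-% A minimal, hypothetical sketch of how calculate_pending/1 aggregates the
-% per-copy pending_updates values: preferred sums only the ushard (preferred)
-% copies, total sums every copy, and minimum keeps the smallest value seen.
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-calculate_pending_sketch_test() ->
-    Results = [
-        {node1, true, [{pending_updates, 5}]},
-        {node2, false, [{pending_updates, 3}]}
-    ],
-    ?assertEqual({5, 8, 3}, calculate_pending(Results)).
-
--endif.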
diff --git a/src/fabric/src/fabric_ring.erl b/src/fabric/src/fabric_ring.erl
deleted file mode 100644
index 2bb7d717f..000000000
--- a/src/fabric/src/fabric_ring.erl
+++ /dev/null
@@ -1,560 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_ring).
-
--export([
- is_progress_possible/1,
- is_progress_possible/2,
- get_shard_replacements/2,
- node_down/3,
- node_down/4,
- handle_error/3,
- handle_error/4,
- handle_response/4,
- handle_response/5
-]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--type fabric_dict() :: [{#shard{}, any()}].
--type ring_opts() :: [atom() | tuple()].
-
-%% @doc looks for a fully covered keyrange in the list of counters
--spec is_progress_possible(fabric_dict()) -> boolean().
-is_progress_possible(Counters) ->
- is_progress_possible(Counters, []).
-
-%% @doc looks for a fully covered keyrange in the list of counters
-%% This version take ring option to configure how progress will
-%% be checked. By default, [], checks that the full ring is covered.
--spec is_progress_possible(fabric_dict(), ring_opts()) -> boolean().
-is_progress_possible(Counters, RingOpts) ->
- is_progress_possible(Counters, [], 0, ?RING_END, RingOpts).
-
--spec get_shard_replacements(binary(), [#shard{}]) -> [#shard{}].
-get_shard_replacements(DbName, UsedShards0) ->
- % We only want to generate a replacements list from shards
- % that aren't already used.
- AllLiveShards = mem3:live_shards(DbName, [node() | nodes()]),
- UsedShards = [S#shard{ref = undefined} || S <- UsedShards0],
- get_shard_replacements_int(AllLiveShards -- UsedShards, UsedShards).
-
--spec node_down(node(), fabric_dict(), fabric_dict()) ->
- {ok, fabric_dict()} | error.
-node_down(Node, Workers, Responses) ->
- node_down(Node, Workers, Responses, []).
-
--spec node_down(node(), fabric_dict(), fabric_dict(), ring_opts()) ->
- {ok, fabric_dict()} | error.
-node_down(Node, Workers, Responses, RingOpts) ->
- {B, E} = range_bounds(Workers, Responses),
- Workers1 = fabric_dict:filter(
- fun(#shard{node = N}, _) ->
- N =/= Node
- end,
- Workers
- ),
- case is_progress_possible(Workers1, Responses, B, E, RingOpts) of
- true -> {ok, Workers1};
- false -> error
- end.
-
--spec handle_error(#shard{}, fabric_dict(), fabric_dict()) ->
- {ok, fabric_dict()} | error.
-handle_error(Shard, Workers, Responses) ->
- handle_error(Shard, Workers, Responses, []).
-
--spec handle_error(#shard{}, fabric_dict(), fabric_dict(), ring_opts()) ->
- {ok, fabric_dict()} | error.
-handle_error(Shard, Workers, Responses, RingOpts) ->
- {B, E} = range_bounds(Workers, Responses),
- Workers1 = fabric_dict:erase(Shard, Workers),
- case is_progress_possible(Workers1, Responses, B, E, RingOpts) of
- true -> {ok, Workers1};
- false -> error
- end.
-
--spec handle_response(#shard{}, any(), fabric_dict(), fabric_dict()) ->
- {ok, {fabric_dict(), fabric_dict()}} | {stop, fabric_dict()}.
-handle_response(Shard, Response, Workers, Responses) ->
- handle_response(Shard, Response, Workers, Responses, []).
-
--spec handle_response(
- #shard{},
- any(),
- fabric_dict(),
- fabric_dict(),
- ring_opts()
-) ->
- {ok, {fabric_dict(), fabric_dict()}} | {stop, fabric_dict()}.
-handle_response(Shard, Response, Workers, Responses, RingOpts) ->
- handle_response(
- Shard,
- Response,
- Workers,
- Responses,
- RingOpts,
- fun stop_workers/1
- ).
-
-% Worker response handler. Gets responses from shards and accumulates them in
-% the list until they complete a full ring. Then it kills the remaining
-% workers and discards the unused responses.
-%
-% How a ring "completes" is driven by RingOpts:
-%
-% * When RingOpts is [] (the default case) responses must form a "clean"
-% ring, where all copies at the start of the range and end of the range must
-% have the same boundary values.
-%
-% * When RingOpts is [{any, [#shard{}]}] responses are accepted from any
-% shard in the provided list. This type of ring might be used when querying
-% a partitioned database. As soon as a result from any of the shards
-% arrives, result collection stops.
-%
-% * When RingOpts is [all], responses are accepted until all the shards
-% return results.
-%
-handle_response(Shard, Response, Workers, Responses, RingOpts, CleanupCb) ->
- Workers1 = fabric_dict:erase(Shard, Workers),
- case RingOpts of
- [] ->
- #shard{range = [B, E]} = Shard,
- Responses1 = [{{B, E}, Shard, Response} | Responses],
- handle_response_ring(Workers1, Responses1, CleanupCb);
- [{any, Any}] ->
- handle_response_any(Shard, Response, Workers1, Any, CleanupCb);
- [all] ->
- Responses1 = [{Shard, Response} | Responses],
- handle_response_all(Workers1, Responses1)
- end.
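-
-% Illustrative call shapes for the three RingOpts forms (a sketch only;
-% Shard, Workers and Responses stand for values like the ones built in the
-% tests below):
-%
-%   handle_response(Shard, Resp, Workers, Responses, [], Cb)
-%     stops once the collected responses form a complete ring
-%   handle_response(Shard, Resp, Workers, Responses, [{any, Shards}], Cb)
-%     stops as soon as any shard from Shards responds
-%   handle_response(Shard, Resp, Workers, Responses, [all], Cb)
-%     stops only after every remaining worker has responded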
-
-handle_response_ring(Workers, Responses, CleanupCb) ->
- {MinB, MaxE} = range_bounds(Workers, Responses),
- Ranges = lists:map(fun({R, _, _}) -> R end, Responses),
- case mem3_util:get_ring(Ranges, MinB, MaxE) of
- [] ->
- {ok, {Workers, Responses}};
- Ring ->
- % Return one response per range in the ring. The
- % response list is reversed before sorting so that the
- % first shard copy to reply is first. We use keysort
- % because it is documented as being stable so that
- % we keep the relative order of duplicate shards
- SortedResponses = lists:keysort(1, lists:reverse(Responses)),
- UsedResponses = get_responses(Ring, SortedResponses),
- % Kill all the remaining workers as well as the redundant responses
- stop_unused_workers(Workers, Responses, UsedResponses, CleanupCb),
- {stop, fabric_dict:from_list(UsedResponses)}
- end.
-
-handle_response_any(Shard, Response, Workers, Any, CleanupCb) ->
- case lists:member(Shard#shard{ref = undefined}, Any) of
- true ->
- stop_unused_workers(Workers, [], [], CleanupCb),
- {stop, fabric_dict:from_list([{Shard, Response}])};
- false ->
- {ok, {Workers, []}}
- end.
-
-handle_response_all(Workers, Responses) ->
- case fabric_dict:size(Workers) =:= 0 of
- true ->
- {stop, fabric_dict:from_list(Responses)};
- false ->
- {ok, {Workers, Responses}}
- end.
-
-% Check whether the workers still waiting and the responses already received
-% could still form a continuous range. The range won't always be the full
-% ring, and the bounds are computed from the minimum and maximum beginnings
-% and ends of the intervals.
-%
-% There is also a special case: even if the ring cannot be formed, when there
-% is an overlap between all the shards it's considered that progress can
-% still be made. This essentially allows for split partitioned shards, where
-% one shard copy on a node was split and the set of ranges might look like:
-% 00-ff, 00-ff, 07-ff. Even if both 00-ff workers exit, progress can still be
-% made with the remaining 07-ff copy.
-%
--spec is_progress_possible(
- fabric_dict(),
- [{any(), #shard{}, any()}],
- non_neg_integer(),
- non_neg_integer(),
- ring_opts()
-) -> boolean().
-is_progress_possible([], [], _, _, _) ->
- false;
-is_progress_possible(Counters, Responses, MinB, MaxE, []) ->
- ResponseRanges = lists:map(fun({{B, E}, _, _}) -> {B, E} end, Responses),
- Ranges = fabric_util:worker_ranges(Counters) ++ ResponseRanges,
- mem3_util:get_ring(Ranges, MinB, MaxE) =/= [];
-is_progress_possible(Counters, _Responses, _, _, [all]) ->
- fabric_dict:size(Counters) > 0;
-is_progress_possible(Counters, Responses, _, _, [{any, AnyShards}]) ->
- InAny = fun(S) -> lists:member(S#shard{ref = undefined}, AnyShards) end,
- case fabric_dict:filter(fun(S, _) -> InAny(S) end, Counters) of
- [] ->
- case lists:filter(fun({_, S, _}) -> InAny(S) end, Responses) of
- [] -> false;
- [_ | _] -> true
- end;
- [_ | _] ->
- true
- end.
-
-get_shard_replacements_int(UnusedShards, UsedShards) ->
- % If we have more than one copy of a range then we don't
- % want to try and add a replacement to any copy.
- RangeCounts = lists:foldl(
- fun(#shard{range = R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end,
- dict:new(),
- UsedShards
- ),
-
- % For each shard range with a count of 1, find any
- % possible replacements from the unused shards. The
- % replacement list is keyed by range.
- lists:foldl(
- fun(#shard{range = [B, E] = Range}, Acc) ->
- case dict:find(Range, RangeCounts) of
- {ok, 1} ->
- Repls = mem3_util:non_overlapping_shards(UnusedShards, B, E),
- % Only keep non-empty lists of replacements
- if
- Repls == [] -> Acc;
- true -> [{Range, Repls} | Acc]
- end;
- _ ->
- Acc
- end
- end,
- [],
- UsedShards
- ).
-
-range_bounds(Workers, Responses) ->
- RespRanges = lists:map(fun({R, _, _}) -> R end, Responses),
- Ranges = fabric_util:worker_ranges(Workers) ++ RespRanges,
- {Bs, Es} = lists:unzip(Ranges),
- {lists:min(Bs), lists:max(Es)}.
-
-get_responses([], _) ->
- [];
-get_responses([Range | Ranges], [{Range, Shard, Value} | Resps]) ->
- [{Shard, Value} | get_responses(Ranges, Resps)];
-get_responses(Ranges, [_DupeRangeResp | Resps]) ->
- get_responses(Ranges, Resps).
-
-stop_unused_workers(_, _, _, undefined) ->
- ok;
-stop_unused_workers(Workers, AllResponses, UsedResponses, CleanupCb) ->
- WorkerShards = [S || {S, _} <- Workers],
- Used = [S || {S, _} <- UsedResponses],
- Unused = [S || {_, S, _} <- AllResponses, not lists:member(S, Used)],
- CleanupCb(WorkerShards ++ Unused).
-
-stop_workers(Shards) when is_list(Shards) ->
- rexi:kill_all([{Node, Ref} || #shard{node = Node, ref = Ref} <- Shards]).
-
-% Unit tests
-
--include_lib("eunit/include/eunit.hrl").
-
-is_progress_possible_full_range_test() ->
- % a base case
- ?assertEqual(false, is_progress_possible([], [], 0, 0, [])),
- T1 = [[0, ?RING_END]],
- ?assertEqual(true, is_progress_possible(mk_cnts(T1))),
- T2 = [[0, 10], [11, 20], [21, ?RING_END]],
- ?assertEqual(true, is_progress_possible(mk_cnts(T2))),
- % gap
- T3 = [[0, 10], [12, ?RING_END]],
- ?assertEqual(false, is_progress_possible(mk_cnts(T3))),
- % outside range
- T4 = [[1, 10], [11, 20], [21, ?RING_END]],
- ?assertEqual(false, is_progress_possible(mk_cnts(T4))),
- % outside range
- T5 = [[0, 10], [11, 20], [21, ?RING_END + 1]],
- ?assertEqual(false, is_progress_possible(mk_cnts(T5))),
- % possible progress but with backtracking
- T6 = [[0, 10], [11, 20], [0, 5], [6, 21], [21, ?RING_END]],
- ?assertEqual(true, is_progress_possible(mk_cnts(T6))),
- % not possible, overlap is not exact
- T7 = [[0, 10], [13, 20], [21, ?RING_END], [9, 12]],
- ?assertEqual(false, is_progress_possible(mk_cnts(T7))).
-
-is_progress_possible_with_responses_test() ->
- C1 = mk_cnts([[0, ?RING_END]]),
- ?assertEqual(true, is_progress_possible(C1, [], 0, ?RING_END, [])),
- % check for gaps
- C2 = mk_cnts([[5, 6], [7, 8]]),
- ?assertEqual(true, is_progress_possible(C2, [], 5, 8, [])),
- ?assertEqual(false, is_progress_possible(C2, [], 4, 8, [])),
- ?assertEqual(false, is_progress_possible(C2, [], 5, 7, [])),
- ?assertEqual(false, is_progress_possible(C2, [], 4, 9, [])),
- % check for uneven shard range copies
- C3 = mk_cnts([[2, 5], [2, 10]]),
- ?assertEqual(true, is_progress_possible(C3, [], 2, 10, [])),
- ?assertEqual(false, is_progress_possible(C3, [], 2, 11, [])),
- ?assertEqual(false, is_progress_possible(C3, [], 3, 10, [])),
- % they overlap but still not a proper ring
- C4 = mk_cnts([[2, 4], [3, 7], [6, 10]]),
- ?assertEqual(false, is_progress_possible(C4, [], 2, 10, [])),
- % some of the ranges are in responses
- RS1 = mk_resps([{"n1", 7, 8, 42}]),
- C5 = mk_cnts([[5, 6]]),
- ?assertEqual(true, is_progress_possible(C5, RS1, 5, 8, [])),
- ?assertEqual(false, is_progress_possible([], RS1, 5, 8, [])),
- ?assertEqual(true, is_progress_possible([], RS1, 7, 8, [])).
-
-is_progress_possible_with_ring_opts_any_test() ->
- Opts = [{any, [mk_shard("n1", [0, 5]), mk_shard("n2", [3, 10])]}],
- C1 = [{mk_shard("n1", [0, ?RING_END]), nil}],
- RS1 = mk_resps([{"n1", 3, 10, 42}]),
- ?assertEqual(false, is_progress_possible(C1, [], 0, ?RING_END, Opts)),
- ?assertEqual(false, is_progress_possible([], [], 0, ?RING_END, Opts)),
- ?assertEqual(false, is_progress_possible([], RS1, 0, ?RING_END, Opts)),
- % explicitly accept only the shard specified in the ring options
- ?assertEqual(false, is_progress_possible([], RS1, 3, 10, [{any, []}])),
- % need to match the node exactly
- ?assertEqual(false, is_progress_possible([], RS1, 3, 10, Opts)),
- RS2 = mk_resps([{"n2", 3, 10, 42}]),
- ?assertEqual(true, is_progress_possible([], RS2, 3, 10, Opts)),
- % assert that counters can fill the ring not just the response
- C2 = [{mk_shard("n1", [0, 5]), nil}],
- ?assertEqual(true, is_progress_possible(C2, [], 0, ?RING_END, Opts)).
-
-is_progress_possible_with_ring_opts_all_test() ->
- C1 = [{mk_shard("n1", [0, ?RING_END]), nil}],
- ?assertEqual(true, is_progress_possible(C1, [], 0, ?RING_END, [all])),
- ?assertEqual(false, is_progress_possible([], [], 0, ?RING_END, [all])).
-
-get_shard_replacements_test() ->
- Unused = [
- mk_shard(N, [B, E])
- || {N, B, E} <- [
- {"n1", 11, 20},
- {"n1", 21, ?RING_END},
- {"n2", 0, 4},
- {"n2", 5, 10},
- {"n2", 11, 20},
- {"n3", 0, 21, ?RING_END}
- ]
- ],
- Used = [
- mk_shard(N, [B, E])
- || {N, B, E} <- [
- {"n2", 21, ?RING_END},
- {"n3", 0, 10},
- {"n3", 11, 20}
- ]
- ],
- Res = lists:sort(get_shard_replacements_int(Unused, Used)),
- % Notice that the [0, 10] range can be replaced by spawning the
- % [0, 4] and [5, 10] workers on n2
- Expect = [
- {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
- {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
- {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
- ],
- ?assertEqual(Expect, Res).
-
-handle_response_basic_test() ->
- Shard1 = mk_shard("n1", [0, 1]),
- Shard2 = mk_shard("n1", [2, ?RING_END]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
- ?assertEqual(fabric_dict:erase(Shard1, Workers1), Workers2),
- ?assertEqual([{{0, 1}, Shard1, 42}], Responses1),
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertEqual({stop, [{Shard1, 42}, {Shard2, 43}]}, Result2).
-
-handle_response_incomplete_ring_test() ->
- Shard1 = mk_shard("n1", [0, 1]),
- Shard2 = mk_shard("n1", [2, 10]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
- ?assertEqual(fabric_dict:erase(Shard1, Workers1), Workers2),
- ?assertEqual([{{0, 1}, Shard1, 42}], Responses1),
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertEqual({stop, [{Shard1, 42}, {Shard2, 43}]}, Result2).
-
-handle_response_multiple_copies_test() ->
- Shard1 = mk_shard("n1", [0, 1]),
- Shard2 = mk_shard("n2", [0, 1]),
- Shard3 = mk_shard("n1", [2, ?RING_END]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2, Shard3], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, Responses2}} = Result2,
-
- Result3 = handle_response(Shard3, 44, Workers3, Responses2, [], undefined),
- % Use the value (42) to distinguish between the [0, 1] copies. In reality
- % they should have the same value, but here we need to assert that the
- % copy that responded first is included in the ring.
- ?assertEqual({stop, [{Shard1, 42}, {Shard3, 44}]}, Result3).
-
-handle_response_backtracking_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n1", [10, ?RING_END]),
- Shard3 = mk_shard("n2", [2, ?RING_END]),
- Shard4 = mk_shard("n3", [0, 1]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2, Shard3, Shard4], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, Responses2}} = Result2,
-
- Result3 = handle_response(Shard3, 44, Workers3, Responses2, [], undefined),
- ?assertMatch({ok, {_, _}}, Result3),
- {ok, {Workers4, Responses3}} = Result3,
-
- Result4 = handle_response(Shard4, 45, Workers4, Responses3, [], undefined),
- ?assertEqual({stop, [{Shard3, 44}, {Shard4, 45}]}, Result4).
-
-handle_response_ring_opts_any_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n2", [0, 1]),
- Shard3 = mk_shard("n3", [0, 1]),
-
- Opts = [{any, [mk_shard("n3", [0, 1])]}],
-
- ShardList = [Shard1, Shard2, Shard3],
- WithRefs = [S#shard{ref = make_ref()} || S <- ShardList],
- Workers1 = fabric_dict:init(WithRefs, nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], Opts, undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, []}} = Result1,
-
- % Still waiting because the node doesn't match
- Result2 = handle_response(Shard2, 43, Workers2, [], Opts, undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, []}} = Result2,
-
- Result3 = handle_response(Shard3, 44, Workers3, [], Opts, undefined),
- ?assertEqual({stop, [{Shard3, 44}]}, Result3).
-
-handle_response_ring_opts_all_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n2", [0, 1]),
- Shard3 = mk_shard("n3", [0, 1]),
-
- ShardList = [Shard1, Shard2, Shard3],
- [W1, W2, W3] = WithRefs = [S#shard{ref = make_ref()} || S <- ShardList],
- Workers1 = fabric_dict:init(WithRefs, nil),
-
- Result1 = handle_response(W1, 42, Workers1, [], [all], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, _}} = Result1,
-
- % Even though n2 and n3 cover the same range, with 'all' option we wait for
- % all workers to return.
- Result2 = handle_response(W2, 43, Workers2, [], [all], undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, _}} = Result2,
-
- % Stop only after all the shards respond
- Result3 = handle_response(W3, 44, Workers3, [], [all], undefined),
- ?assertMatch({stop, [_ | _]}, Result3).
-
-handle_error_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n1", [10, ?RING_END]),
- Shard3 = mk_shard("n2", [2, ?RING_END]),
- Shard4 = mk_shard("n3", [0, 1]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2, Shard3, Shard4], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
-
- Result2 = handle_error(Shard2, Workers2, Responses1),
- ?assertMatch({ok, _}, Result2),
- {ok, Workers3} = Result2,
- ?assertEqual(fabric_dict:erase(Shard2, Workers2), Workers3),
-
- Result3 = handle_response(Shard3, 44, Workers3, Responses1, [], undefined),
- ?assertMatch({ok, {_, _}}, Result3),
- {ok, {Workers4, Responses3}} = Result3,
- ?assertEqual(error, handle_error(Shard4, Workers4, Responses3)).
-
-node_down_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n1", [10, ?RING_END]),
- Shard3 = mk_shard("n2", [2, ?RING_END]),
- Shard4 = mk_shard("n3", [0, 1]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2, Shard3, Shard4], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, Responses2}} = Result2,
-
- Result3 = node_down(n1, Workers3, Responses2),
- ?assertMatch({ok, _}, Result3),
- {ok, Workers4} = Result3,
- ?assertEqual([{Shard3, nil}, {Shard4, nil}], Workers4),
-
- Result4 = handle_response(Shard3, 44, Workers4, Responses2, [], undefined),
- ?assertMatch({ok, {_, _}}, Result4),
- {ok, {Workers5, Responses3}} = Result4,
-
- % Note: Shard3 was already processed, so it's ok if n2 went down afterwards
- ?assertEqual({ok, [{Shard4, nil}]}, node_down(n2, Workers5, Responses3)),
-
- ?assertEqual(error, node_down(n3, Workers5, Responses3)).
-
-mk_cnts(Ranges) ->
- Shards = lists:map(fun mk_shard/1, Ranges),
- fabric_dict:init([S#shard{ref = make_ref()} || S <- Shards], nil).
-
-mk_resps(RangeNameVals) ->
- [{{B, E}, mk_shard(Name, [B, E]), V} || {Name, B, E, V} <- RangeNameVals].
-
-mk_shard([B, E]) when is_integer(B), is_integer(E) ->
- #shard{range = [B, E]}.
-
-mk_shard(Name, Range) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = Range}.
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
deleted file mode 100644
index 8780737ef..000000000
--- a/src/fabric/src/fabric_rpc.erl
+++ /dev/null
@@ -1,706 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc).
-
--export([
- get_db_info/1,
- get_doc_count/1,
- get_design_doc_count/1,
- get_update_seq/1
-]).
--export([
- open_doc/3,
- open_revs/4,
- get_doc_info/3,
- get_full_doc_info/3,
- get_missing_revs/2, get_missing_revs/3,
- update_docs/3
-]).
--export([all_docs/3, changes/3, map_view/4, reduce_view/4, group_info/2]).
--export([
- create_db/1, create_db/2,
- delete_db/1,
- reset_validation_funs/1,
- set_security/3,
- set_revs_limit/3,
- create_shard_db_doc/2,
- delete_shard_db_doc/2,
- get_partition_info/2
-]).
--export([get_all_security/2, open_shard/2]).
--export([compact/1, compact/2]).
--export([get_purge_seq/2, purge_docs/3, set_purge_infos_limit/3]).
-
--export([
- get_db_info/2,
- get_doc_count/2,
- get_design_doc_count/2,
- get_update_seq/2,
- changes/4,
- map_view/5,
- reduce_view/5,
- group_info/3,
- update_mrview/4,
- get_uuid/1
-]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-%% rpc endpoints
-%% call to with_db will supply your M:F with a Db instance
-%% and then remaining args
-
-%% @equiv changes(DbName, Args, StartSeq, [])
-changes(DbName, Args, StartSeq) ->
- changes(DbName, Args, StartSeq, []).
-
-changes(DbName, #changes_args{} = Args, StartSeq, DbOptions) ->
- changes(DbName, [Args], StartSeq, DbOptions);
-changes(DbName, Options, StartVector, DbOptions) ->
- set_io_priority(DbName, DbOptions),
- Args0 = lists:keyfind(changes_args, 1, Options),
- #changes_args{dir = Dir, filter_fun = Filter} = Args0,
- Args =
- case Filter of
- {fetch, custom, Style, Req, {DDocId, Rev}, FName} ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- Args0#changes_args{
- filter_fun = {custom, Style, Req, DDoc, FName}
- };
- {fetch, view, Style, {DDocId, Rev}, VName} ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- Args0#changes_args{filter_fun = {view, Style, DDoc, VName}};
- _ ->
- Args0
- end,
-
- DbOpenOptions = Args#changes_args.db_open_options ++ DbOptions,
- case get_or_create_db(DbName, DbOpenOptions) of
- {ok, Db} ->
- StartSeq = calculate_start_seq(Db, node(), StartVector),
- Enum = fun changes_enumerator/2,
- Opts = [{dir, Dir}],
- Acc0 = #fabric_changes_acc{
- db = Db,
- seq = StartSeq,
- args = Args,
- options = Options,
- pending = couch_db:count_changes_since(Db, StartSeq),
- epochs = couch_db:get_epochs(Db)
- },
- try
- {ok, #fabric_changes_acc{seq = LastSeq, pending = Pending, epochs = Epochs}} =
- do_changes(Db, StartSeq, Enum, Acc0, Opts),
- rexi:stream_last(
- {complete, [
- {seq, {LastSeq, uuid(Db), couch_db:owner_of(Epochs, LastSeq)}},
- {pending, Pending}
- ]}
- )
- after
- couch_db:close(Db)
- end;
- Error ->
- rexi:stream_last(Error)
- end.
-
-do_changes(Db, StartSeq, Enum, Acc0, Opts) ->
- #fabric_changes_acc{
- args = Args
- } = Acc0,
- #changes_args{
- filter = Filter
- } = Args,
- case Filter of
- "_doc_ids" ->
- % optimised code path, we’re looking up all doc_ids in the by-id index instead
- % of filtering the entire by-seq tree to find the doc_ids one by one
- #changes_args{
- filter_fun = {doc_ids, Style, DocIds},
- dir = Dir
- } = Args,
- couch_changes:send_changes_doc_ids(
- Db, StartSeq, Dir, Enum, Acc0, {doc_ids, Style, DocIds}
- );
- "_design_docs" ->
- % optimised code path, we’re looking up all design_docs in the by-id index
- % instead of filtering the entire by-seq tree to find the design_docs one by one
- #changes_args{
- filter_fun = {design_docs, Style},
- dir = Dir
- } = Args,
- couch_changes:send_changes_design_docs(
- Db, StartSeq, Dir, Enum, Acc0, {design_docs, Style}
- );
- _ ->
- couch_db:fold_changes(Db, StartSeq, Enum, Acc0, Opts)
- end.
-
-all_docs(DbName, Options, Args0) ->
- case fabric_util:upgrade_mrargs(Args0) of
- #mrargs{keys = undefined} = Args ->
- set_io_priority(DbName, Options),
- {ok, Db} = get_or_create_db(DbName, Options),
- CB = get_view_cb(Args),
- couch_mrview:query_all_docs(Db, Args, CB, Args)
- end.
-
-update_mrview(DbName, {DDocId, Rev}, ViewName, Args0) ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- couch_util:with_db(DbName, fun(Db) ->
- UpdateSeq = couch_db:get_update_seq(Db),
- {ok, Pid, _} = couch_mrview:get_view_index_pid(
- Db, DDoc, ViewName, fabric_util:upgrade_mrargs(Args0)
- ),
- couch_index:get_state(Pid, UpdateSeq)
- end).
-
-%% @equiv map_view(DbName, DDoc, ViewName, Args0, [])
-map_view(DbName, DDocInfo, ViewName, Args0) ->
- map_view(DbName, DDocInfo, ViewName, Args0, []).
-
-map_view(DbName, {DDocId, Rev}, ViewName, Args0, DbOptions) ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- map_view(DbName, DDoc, ViewName, Args0, DbOptions);
-map_view(DbName, DDoc, ViewName, Args0, DbOptions) ->
- set_io_priority(DbName, DbOptions),
- Args = fabric_util:upgrade_mrargs(Args0),
- {ok, Db} = get_or_create_db(DbName, DbOptions),
- CB = get_view_cb(Args),
- couch_mrview:query_view(Db, DDoc, ViewName, Args, CB, Args).
-
-%% @equiv reduce_view(DbName, DDoc, ViewName, Args0, [])
-reduce_view(DbName, DDocInfo, ViewName, Args0) ->
- reduce_view(DbName, DDocInfo, ViewName, Args0, []).
-
-reduce_view(DbName, {DDocId, Rev}, ViewName, Args0, DbOptions) ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- reduce_view(DbName, DDoc, ViewName, Args0, DbOptions);
-reduce_view(DbName, DDoc, ViewName, Args0, DbOptions) ->
- set_io_priority(DbName, DbOptions),
- Args = fabric_util:upgrade_mrargs(Args0),
- {ok, Db} = get_or_create_db(DbName, DbOptions),
- VAcc0 = #vacc{db = Db},
- couch_mrview:query_view(Db, DDoc, ViewName, Args, fun reduce_cb/2, VAcc0).
-
-create_db(DbName) ->
- create_db(DbName, []).
-
-create_db(DbName, Options) ->
- rexi:reply(
- case couch_server:create(DbName, Options) of
- {ok, _} ->
- ok;
- Error ->
- Error
- end
- ).
-
-create_shard_db_doc(_, Doc) ->
- rexi:reply(mem3_util:write_db_doc(Doc)).
-
-delete_db(DbName) ->
- couch_server:delete(DbName, []).
-
-delete_shard_db_doc(_, DocId) ->
- rexi:reply(mem3_util:delete_db_doc(DocId)).
-
-%% @equiv get_db_info(DbName, [])
-get_db_info(DbName) ->
- get_db_info(DbName, []).
-
-get_db_info(DbName, DbOptions) ->
- with_db(DbName, DbOptions, {couch_db, get_db_info, []}).
-
-get_partition_info(DbName, Partition) ->
- with_db(DbName, [], {couch_db, get_partition_info, [Partition]}).
-
-%% @equiv get_doc_count(DbName, [])
-get_doc_count(DbName) ->
- get_doc_count(DbName, []).
-
-get_doc_count(DbName, DbOptions) ->
- with_db(DbName, DbOptions, {couch_db, get_doc_count, []}).
-
-%% @equiv get_design_doc_count(DbName, [])
-get_design_doc_count(DbName) ->
- get_design_doc_count(DbName, []).
-
-get_design_doc_count(DbName, DbOptions) ->
- with_db(DbName, DbOptions, {couch_db, get_design_doc_count, []}).
-
-%% @equiv get_update_seq(DbName, [])
-get_update_seq(DbName) ->
- get_update_seq(DbName, []).
-
-get_update_seq(DbName, DbOptions) ->
- with_db(DbName, DbOptions, {couch_db, get_update_seq, []}).
-
-set_security(DbName, SecObj, Options0) ->
- Options =
- case lists:keyfind(io_priority, 1, Options0) of
- false ->
- [{io_priority, {db_meta, security}} | Options0];
- _ ->
- Options0
- end,
- with_db(DbName, Options, {couch_db, set_security, [SecObj]}).
-
-get_all_security(DbName, Options) ->
- with_db(DbName, Options, {couch_db, get_security, []}).
-
-set_revs_limit(DbName, Limit, Options) ->
- with_db(DbName, Options, {couch_db, set_revs_limit, [Limit]}).
-
-set_purge_infos_limit(DbName, Limit, Options) ->
- with_db(DbName, Options, {couch_db, set_purge_infos_limit, [Limit]}).
-
-open_doc(DbName, DocId, Options) ->
- with_db(DbName, Options, {couch_db, open_doc, [DocId, Options]}).
-
-open_revs(DbName, Id, Revs, Options) ->
- with_db(DbName, Options, {couch_db, open_doc_revs, [Id, Revs, Options]}).
-
-get_full_doc_info(DbName, DocId, Options) ->
- with_db(DbName, Options, {couch_db, get_full_doc_info, [DocId]}).
-
-get_doc_info(DbName, DocId, Options) ->
- with_db(DbName, Options, {couch_db, get_doc_info, [DocId]}).
-
-get_missing_revs(DbName, IdRevsList) ->
- get_missing_revs(DbName, IdRevsList, []).
-
-get_missing_revs(DbName, IdRevsList, Options) ->
- with_db(DbName, Options, {couch_db, get_missing_revs, [IdRevsList]}).
-
-update_docs(DbName, Docs0, Options) ->
- {Docs1, Type} =
- case couch_util:get_value(read_repair, Options) of
- NodeRevs when is_list(NodeRevs) ->
- Filtered = read_repair_filter(DbName, Docs0, NodeRevs, Options),
- {Filtered, replicated_changes};
- undefined ->
- X =
- case proplists:get_value(replicated_changes, Options) of
- true -> replicated_changes;
- _ -> interactive_edit
- end,
- {Docs0, X}
- end,
- Docs2 = make_att_readers(Docs1),
- with_db(DbName, Options, {couch_db, update_docs, [Docs2, Options, Type]}).
-
-get_purge_seq(DbName, Options) ->
- with_db(DbName, Options, {couch_db, get_purge_seq, []}).
-
-purge_docs(DbName, UUIdsIdsRevs, Options) ->
- with_db(DbName, Options, {couch_db, purge_docs, [UUIdsIdsRevs, Options]}).
-
-%% @equiv group_info(DbName, DDocId, [])
-group_info(DbName, DDocId) ->
- group_info(DbName, DDocId, []).
-
-group_info(DbName, DDocId, DbOptions) ->
- with_db(DbName, DbOptions, {couch_mrview, get_info, [DDocId]}).
-
-reset_validation_funs(DbName) ->
- case get_or_create_db(DbName, []) of
- {ok, Db} ->
- couch_db:reload_validation_funs(Db);
- _ ->
- ok
- end.
-
-open_shard(Name, Opts) ->
- set_io_priority(Name, Opts),
- try
- rexi:reply(mem3_util:get_or_create_db(Name, Opts))
- catch
- exit:{timeout, _} ->
- couch_stats:increment_counter([fabric, open_shard, timeouts])
- end.
-
-compact(DbName) ->
- with_db(DbName, [], {couch_db, start_compact, []}).
-
-compact(ShardName, DesignName) ->
- {ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, ShardName, <<"_design/", DesignName/binary>>
- ),
- Ref = erlang:make_ref(),
- Pid ! {'$gen_call', {self(), Ref}, compact}.
-
-get_uuid(DbName) ->
- with_db(DbName, [], {couch_db, get_uuid, []}).
-
-%%
-%% internal
-%%
-
-with_db(DbName, Options, {M,F,A}) ->
- set_io_priority(DbName, Options),
- case get_or_create_db(DbName, Options) of
- {ok, Db} ->
- rexi:reply(try
- apply(M, F, [Db | A])
- catch Exception ->
- Exception;
- ?STACKTRACE(error, Reason, Stack)
- couch_log:error("rpc ~p:~p/~p ~p ~p", [M, F, length(A)+1, Reason,
- clean_stack(Stack)]),
- {error, Reason}
- end);
- Error ->
- rexi:reply(Error)
- end.
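-
-% For example, with_db(DbName, Opts, {couch_db, set_revs_limit, [Limit]})
-% opens (or creates) the shard and replies to the coordinator with the result
-% of couch_db:set_revs_limit(Db, Limit), which is how the wrappers above are
-% dispatched.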
-
-read_repair_filter(DbName, Docs, NodeRevs, Options) ->
- set_io_priority(DbName, Options),
- case get_or_create_db(DbName, Options) of
- {ok, Db} ->
- try
- read_repair_filter(Db, Docs, NodeRevs)
- after
- couch_db:close(Db)
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-% A read repair operation may have been triggered by a node
-% that was out of sync with the local node. Thus, any time
-% we receive a read repair request we need to check if we
-% may have recently purged any of the given revisions and
-% ignore them if so.
-%
-% This is accomplished by looking at the purge infos that we
-% have locally that have not been replicated to the remote
-% node. The logic here is that we may have received the purge
-% request before the remote shard copy. So to check that we
-% need to look at the purge infos that we have locally but
-% have not yet sent to the remote copy.
-%
-% NodeRevs is a list of the {node(), [rev()]} tuples passed
-% as the read_repair option to update_docs.
-read_repair_filter(Db, Docs, NodeRevs) ->
- [#doc{id = DocId} | _] = Docs,
- NonLocalNodeRevs = [NR || {N, _} = NR <- NodeRevs, N /= node()],
- Nodes = lists:usort([Node || {Node, _} <- NonLocalNodeRevs]),
- NodeSeqs = get_node_seqs(Db, Nodes),
-
- DbPSeq = couch_db:get_purge_seq(Db),
- Lag = config:get_integer("couchdb", "read_repair_lag", 100),
-
- % Filter out read-repair updates from any node that is
- % so out of date that it would force us to scan a large
- % number of purge infos
- NodeFiltFun = fun({Node, _Revs}) ->
- {Node, NodeSeq} = lists:keyfind(Node, 1, NodeSeqs),
- NodeSeq >= DbPSeq - Lag
- end,
- RecentNodeRevs = lists:filter(NodeFiltFun, NonLocalNodeRevs),
-
- % For each node we scan the purge infos to filter out any
- % revisions that have been locally purged since we last
- % replicated to the remote node's shard copy.
- AllowableRevs = lists:foldl(
- fun({Node, Revs}, RevAcc) ->
- {Node, StartSeq} = lists:keyfind(Node, 1, NodeSeqs),
- FoldFun = fun({_PSeq, _UUID, PDocId, PRevs}, InnerAcc) ->
- if
- PDocId /= DocId -> {ok, InnerAcc};
- true -> {ok, InnerAcc -- PRevs}
- end
- end,
- {ok, FiltRevs} = couch_db:fold_purge_infos(Db, StartSeq, FoldFun, Revs),
- lists:usort(FiltRevs ++ RevAcc)
- end,
- [],
- RecentNodeRevs
- ),
-
- % Finally, filter the doc updates to only include revisions
- % that have not been purged locally.
- DocFiltFun = fun(#doc{revs = {Pos, [Rev | _]}}) ->
- lists:member({Pos, Rev}, AllowableRevs)
- end,
- lists:filter(DocFiltFun, Docs).
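-
-% For instance (hypothetical values): with NodeRevs = [{'node2@db1', [{2, Rev}]}]
-% the update for revision {2, Rev} is kept only if our purge infos, scanned
-% from node2's last replicated purge checkpoint onward, show that it has not
-% been purged locally in the meantime.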
-
-get_node_seqs(Db, Nodes) ->
- % Gather the list of {Node, PurgeSeq} pairs for all nodes
- % that are present in our read repair group
- FoldFun = fun(#doc{id = Id, body = {Props}}, Acc) ->
- case Id of
- <<?LOCAL_DOC_PREFIX, "purge-mem3-", _/binary>> ->
- TgtNode = couch_util:get_value(<<"target_node">>, Props),
- PurgeSeq = couch_util:get_value(<<"purge_seq">>, Props),
- case lists:keyfind(TgtNode, 1, Acc) of
- {_, OldSeq} ->
- NewSeq = erlang:max(OldSeq, PurgeSeq),
- NewEntry = {TgtNode, NewSeq},
- NewAcc = lists:keyreplace(TgtNode, 1, Acc, NewEntry),
- {ok, NewAcc};
- false ->
- {ok, Acc}
- end;
- _ ->
- % We've processed all _local mem3 purge docs
- {stop, Acc}
- end
- end,
- InitAcc = [{list_to_binary(atom_to_list(Node)), 0} || Node <- Nodes],
- Opts = [{start_key, <<?LOCAL_DOC_PREFIX, "purge-mem3-">>}],
- {ok, NodeBinSeqs} = couch_db:fold_local_docs(Db, FoldFun, InitAcc, Opts),
- [{list_to_existing_atom(binary_to_list(N)), S} || {N, S} <- NodeBinSeqs].
-
-get_or_create_db(DbName, Options) ->
- mem3_util:get_or_create_db_int(DbName, Options).
-
-get_view_cb(#mrargs{extra = Options}) ->
- case couch_util:get_value(callback, Options) of
- {Mod, Fun} when is_atom(Mod), is_atom(Fun) ->
- fun Mod:Fun/2;
- _ ->
- fun view_cb/2
- end;
-get_view_cb(_) ->
- fun view_cb/2.
-
-view_cb({meta, Meta}, Acc) ->
- % Map function starting
- ok = rexi:stream2({meta, Meta}),
- {ok, Acc};
-view_cb({row, Row}, Acc) ->
- % Adding another row
- ViewRow = #view_row{
- id = couch_util:get_value(id, Row),
- key = couch_util:get_value(key, Row),
- value = couch_util:get_value(value, Row),
- doc = couch_util:get_value(doc, Row)
- },
- ok = rexi:stream2(ViewRow),
- {ok, Acc};
-view_cb(complete, Acc) ->
- % Finish view output
- ok = rexi:stream_last(complete),
- {ok, Acc};
-view_cb(ok, ddoc_updated) ->
- rexi:reply({ok, ddoc_updated}).
-
-reduce_cb({meta, Meta}, Acc) ->
- % Map function starting
- ok = rexi:stream2({meta, Meta}),
- {ok, Acc};
-reduce_cb({row, Row}, Acc) ->
- % Adding another row
- ok = rexi:stream2(#view_row{
- key = couch_util:get_value(key, Row),
- value = couch_util:get_value(value, Row)
- }),
- {ok, Acc};
-reduce_cb(complete, Acc) ->
- % Finish view output
- ok = rexi:stream_last(complete),
- {ok, Acc};
-reduce_cb(ok, ddoc_updated) ->
- rexi:reply({ok, ddoc_updated}).
-
-changes_enumerator(#full_doc_info{} = FDI, Acc) ->
- changes_enumerator(couch_doc:to_doc_info(FDI), Acc);
-changes_enumerator(#doc_info{id = <<"_local/", _/binary>>, high_seq = Seq}, Acc) ->
- {ok, Acc#fabric_changes_acc{seq = Seq, pending = Acc#fabric_changes_acc.pending - 1}};
-changes_enumerator(DocInfo, Acc) ->
- #fabric_changes_acc{
- db = Db,
- args = #changes_args{
- include_docs = IncludeDocs,
- conflicts = Conflicts,
- filter_fun = Filter,
- doc_options = DocOptions
- },
- pending = Pending,
- epochs = Epochs
- } = Acc,
- #doc_info{id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]} = DocInfo,
- case [X || X <- couch_changes:filter(Db, DocInfo, Filter), X /= null] of
- [] ->
- ChangesRow =
- {no_pass, [
- {pending, Pending - 1},
- {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}}
- ]};
- Results ->
- Opts =
- if
- Conflicts -> [conflicts | DocOptions];
- true -> DocOptions
- end,
- ChangesRow =
- {change, [
- {pending, Pending - 1},
- {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}},
- {id, Id},
- {changes, Results},
- {deleted, Del}
- | if
- IncludeDocs -> [doc_member(Db, DocInfo, Opts, Filter)];
- true -> []
- end
- ]}
- end,
- ok = rexi:stream2(ChangesRow),
- {ok, Acc#fabric_changes_acc{seq = Seq, pending = Pending - 1}}.
-
-doc_member(Shard, DocInfo, Opts, Filter) ->
- case couch_db:open_doc(Shard, DocInfo, [deleted | Opts]) of
- {ok, Doc} ->
- {doc, maybe_filtered_json_doc(Doc, Opts, Filter)};
- Error ->
- Error
- end.
-
-maybe_filtered_json_doc(Doc, Opts, {selector, _Style, {_Selector, Fields}}) when
- Fields =/= nil
-->
- mango_fields:extract(couch_doc:to_json_obj(Doc, Opts), Fields);
-maybe_filtered_json_doc(Doc, Opts, _Filter) ->
- couch_doc:to_json_obj(Doc, Opts).
-
-make_att_readers([]) ->
- [];
-make_att_readers([#doc{atts = Atts0} = Doc | Rest]) ->
- % Go through the attachments looking for 'follows' in the data and
- % replace it with a function that reads the data from the MIME stream.
- Atts = [couch_att:transform(data, fun make_att_reader/1, Att) || Att <- Atts0],
- [Doc#doc{atts = Atts} | make_att_readers(Rest)].
-
-make_att_reader({follows, Parser, Ref}) when is_pid(Parser) ->
- % This code will fail if the returned closure is called by a
- % process other than the one that called make_att_reader/1 in the
- % first place. The reason we don't put everything inside the
- % closure is that the `hello_from_writer` message must *always* be
- % sent to the parser, even if the closure never gets called. Also, make
- % sure `hello_from_writer` is sent only once for all of the remaining
- % attachments.
- WriterPid = self(),
- ParserRef =
- case get({mp_parser_ref, Parser}) of
- undefined ->
- % First time encountering a particular parser pid. Monitor it,
- % in case it dies, and notify it about us, so it could monitor
- % us in case we die.
- PRef = erlang:monitor(process, Parser),
- put({mp_parser_ref, Parser}, PRef),
- Parser ! {hello_from_writer, Ref, WriterPid},
- PRef;
- Else ->
- Else
- end,
- fun() ->
- % Make sure the closure is always called from the same process which
- % sent the hello_from_writer message.
- case self() =:= WriterPid of
- true -> ok;
- false -> error({make_att_pid_assertion, self(), WriterPid})
- end,
- % Check if parser already died. This is for belt and suspenders mostly,
- % in case somehow we call the data function again after mp_parser_died
- % was thrown, so we are not stuck forever waiting for bytes.
- case get({mp_parser_died, Parser}) of
- undefined -> ok;
- AlreadyDiedReason -> throw({mp_parser_died, AlreadyDiedReason})
- end,
- Parser ! {get_bytes, Ref, self()},
- receive
- {bytes, Ref, Bytes} ->
- rexi:reply(attachment_chunk_received),
- Bytes;
- {'DOWN', ParserRef, _, _, Reason} ->
- put({mp_parser_died, Parser}, Reason),
- throw({mp_parser_died, Reason})
- end
- end;
-make_att_reader({fabric_attachment_receiver, Middleman, Length}) ->
- fabric_doc_atts:receiver_callback(Middleman, Length);
-make_att_reader(Else) ->
- Else.
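-
-% Message-flow sketch for the parser protocol above (writer = this process,
-% parser = the multipart parser pid):
-%   writer -> parser: {hello_from_writer, Ref, WriterPid}   (once per parser)
-%   writer -> parser: {get_bytes, Ref, self()}              (per closure call)
-%   parser -> writer: {bytes, Ref, Bytes}
-% A 'DOWN' from the parser monitor instead raises {mp_parser_died, Reason}.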
-
-clean_stack(S) ->
- lists:map(
- fun
- ({M, F, A}) when is_list(A) -> {M, F, length(A)};
- (X) -> X
- end,
- S
- ).
-
-set_io_priority(DbName, Options) ->
- case lists:keyfind(io_priority, 1, Options) of
- {io_priority, Pri} ->
- erlang:put(io_priority, Pri);
- false ->
- erlang:put(io_priority, {interactive, DbName})
- end,
- case erlang:get(io_priority) of
- {interactive, _} ->
- case config:get("couchdb", "maintenance_mode", "false") of
- "true" ->
- % Done to silence error logging by rexi_server
- rexi:reply({rexi_EXIT, {maintenance_mode, node()}}),
- exit(normal);
- _ ->
- ok
- end;
- _ ->
- ok
- end.
-
-calculate_start_seq(Db, Node, Seq) ->
- case couch_db:calculate_start_seq(Db, Node, Seq) of
- N when is_integer(N) ->
- N;
- {replace, OriginalNode, Uuid, OriginalSeq} ->
- %% Scan history looking for an entry with
- %% * target_node == TargetNode
- %% * target_uuid == TargetUUID
- %% * target_seq =< TargetSeq
- %% If such an entry is found, stream from associated source_seq
- mem3_rep:find_source_seq(Db, OriginalNode, Uuid, OriginalSeq)
- end.
-
-uuid(Db) ->
- Uuid = couch_db:get_uuid(Db),
- Prefix = fabric_util:get_uuid_prefix_len(),
- binary:part(Uuid, {0, Prefix}).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-maybe_filtered_json_doc_no_filter_test() ->
- Body = {[{<<"a">>, 1}]},
- Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
- {JDocProps} = maybe_filtered_json_doc(Doc, [], x),
- ExpectedProps = [{<<"_id">>, <<"1">>}, {<<"_rev">>, <<"1-r1">>}, {<<"a">>, 1}],
- ?assertEqual(lists:keysort(1, JDocProps), ExpectedProps).
-
-maybe_filtered_json_doc_with_filter_test() ->
- Body = {[{<<"a">>, 1}]},
- Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
- Fields = [<<"a">>, <<"nonexistent">>],
- Filter = {selector, main_only, {some_selector, Fields}},
- {JDocProps} = maybe_filtered_json_doc(Doc, [], Filter),
- ?assertEqual(JDocProps, [{<<"a">>, 1}]).
-
--endif.
diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl
deleted file mode 100644
index 2a3a2b004..000000000
--- a/src/fabric/src/fabric_streams.erl
+++ /dev/null
@@ -1,283 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_streams).
-
--export([
- start/2,
- start/3,
- start/4,
- start/5,
- cleanup/1
-]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(WORKER_CLEANER, fabric_worker_cleaner).
-
-start(Workers, Keypos) ->
- start(Workers, Keypos, undefined, undefined).
-
-start(Workers, Keypos, RingOpts) ->
- start(Workers, Keypos, undefined, undefined, RingOpts).
-
-start(Workers, Keypos, StartFun, Replacements) ->
- start(Workers, Keypos, StartFun, Replacements, []).
-
-start(Workers0, Keypos, StartFun, Replacements, RingOpts) ->
- Fun = fun handle_stream_start/3,
- Acc = #stream_acc{
- workers = fabric_dict:init(Workers0, waiting),
- ready = [],
- start_fun = StartFun,
- replacements = Replacements,
- ring_opts = RingOpts
- },
- spawn_worker_cleaner(self(), Workers0),
- Timeout = fabric_util:request_timeout(),
- case rexi_utils:recv(Workers0, Keypos, Fun, Acc, Timeout, infinity) of
- {ok, #stream_acc{ready = Workers}} ->
- AckedWorkers = fabric_dict:fold(
- fun(Worker, From, WorkerAcc) ->
- rexi:stream_start(From),
- [Worker | WorkerAcc]
- end,
- [],
- Workers
- ),
- {ok, AckedWorkers};
- Else ->
- Else
- end.
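-
-% A typical call shape (sketch only; Workers would normally come from
-% fabric_util:submit_jobs/3 and Keypos is usually #shard.ref):
-%   {ok, AckedWorkers} = fabric_streams:start(Workers, #shard.ref, RingOpts)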
-
-cleanup(Workers) ->
- % Stop the auxiliary cleaner process, since we got to the point where
- % cleanup happens in the regular fashion and we don't want to send twice
- % the number of kill messages
- case get(?WORKER_CLEANER) of
- CleanerPid when is_pid(CleanerPid) ->
- erase(?WORKER_CLEANER),
- exit(CleanerPid, kill);
- _ ->
- ok
- end,
- fabric_util:cleanup(Workers).
-
-handle_stream_start({rexi_DOWN, _, {_, NodeRef}, _}, _, St) ->
- #stream_acc{workers = Workers, ready = Ready, ring_opts = RingOpts} = St,
- case fabric_ring:node_down(NodeRef, Workers, Ready, RingOpts) of
- {ok, Workers1} ->
- {ok, St#stream_acc{workers = Workers1}};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
- #stream_acc{
- workers = Workers,
- ready = Ready,
- replacements = Replacements,
- ring_opts = RingOpts
- } = St,
- case {fabric_ring:handle_error(Worker, Workers, Ready, RingOpts), Reason} of
- {{ok, Workers1}, _Reason} ->
- {ok, St#stream_acc{workers = Workers1}};
- {error, {maintenance_mode, _Node}} when Replacements /= undefined ->
- % Check if we have replacements for this range
- % and start the new workers if so.
- case lists:keytake(Worker#shard.range, 1, Replacements) of
- {value, {_Range, WorkerReplacements}, NewReplacements} ->
- FinalWorkers = lists:foldl(
- fun(Repl, NewWorkers) ->
- NewWorker = (St#stream_acc.start_fun)(Repl),
- add_worker_to_cleaner(self(), NewWorker),
- fabric_dict:store(NewWorker, waiting, NewWorkers)
- end,
- Workers,
- WorkerReplacements
- ),
- % Assert that our replaced worker provides us
- % the opportunity to make progress. Need to make sure
- % to include already processed responses, since we are
- % checking the full range and some workers have already
- % responded and were removed from the workers list
- ReadyWorkers = [{W, R} || {_, W, R} <- Ready],
- AllWorkers = FinalWorkers ++ ReadyWorkers,
- true = fabric_ring:is_progress_possible(AllWorkers),
- NewRefs = fabric_dict:fetch_keys(FinalWorkers),
- {new_refs, NewRefs, St#stream_acc{
- workers = FinalWorkers,
- replacements = NewReplacements
- }};
- false ->
- % If progress isn't possible and we don't have any
- % replacements, then we're dead in the water.
- {error, {nodedown, <<"progress not possible">>}}
- end;
- {error, _} ->
- {error, fabric_util:error_info(Reason)}
- end;
-handle_stream_start(rexi_STREAM_INIT, {Worker, From}, St) ->
- #stream_acc{workers = Workers, ready = Ready, ring_opts = RingOpts} = St,
- case fabric_dict:lookup_element(Worker, Workers) of
- undefined ->
- % This worker lost the race with other partition copies, terminate
- rexi:stream_cancel(From),
- {ok, St};
- waiting ->
- case fabric_ring:handle_response(Worker, From, Workers, Ready, RingOpts) of
- {ok, {Workers1, Ready1}} ->
- % Don't have a full ring yet. Keep getting responses
- {ok, St#stream_acc{workers = Workers1, ready = Ready1}};
- {stop, Ready1} ->
- % Have a full ring of workers. But don't ack the worker
- % yet so they don't start sending us rows until we're ready
- {stop, St#stream_acc{workers = [], ready = Ready1}}
- end
- end;
-handle_stream_start({ok, ddoc_updated}, _, St) ->
- WaitingWorkers = [W || {W, _} <- St#stream_acc.workers],
- ReadyWorkers = [W || {W, _} <- St#stream_acc.ready],
- cleanup(WaitingWorkers ++ ReadyWorkers),
- {stop, ddoc_updated};
-handle_stream_start(Else, _, _) ->
- exit({invalid_stream_start, Else}).
-
-% Spawn an auxiliary rexi worker cleaner. This will be used in cases
-% when the coordinator (request) process is forcibly killed and doesn't
-% get a chance to process its `after` fabric:clean/1 clause.
-spawn_worker_cleaner(Coordinator, Workers) ->
- case get(?WORKER_CLEANER) of
- undefined ->
- Pid = spawn(fun() ->
- erlang:monitor(process, Coordinator),
- cleaner_loop(Coordinator, Workers)
- end),
- put(?WORKER_CLEANER, Pid),
- Pid;
- ExistingCleaner ->
- ExistingCleaner
- end.
-
-cleaner_loop(Pid, Workers) ->
- receive
- {add_worker, Pid, Worker} ->
- cleaner_loop(Pid, [Worker | Workers]);
- {'DOWN', _, _, Pid, _} ->
- fabric_util:cleanup(Workers)
- end.
-
-add_worker_to_cleaner(CoordinatorPid, Worker) ->
- case get(?WORKER_CLEANER) of
- CleanerPid when is_pid(CleanerPid) ->
- CleanerPid ! {add_worker, CoordinatorPid, Worker};
- _ ->
- ok
- end.
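-
-% In short: the cleaner monitors the coordinator, collects extra workers via
-% the {add_worker, Coordinator, Worker} messages sent above, and on the
-% coordinator's 'DOWN' calls fabric_util:cleanup/1 on everything it has seen
-% (exercised by the should_clean_* tests below).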
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-worker_cleaner_test_() ->
- {
- "Fabric spawn_worker_cleaner test",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- fun(_) ->
- [
- should_clean_workers(),
- does_not_fire_if_cleanup_called(),
- should_clean_additional_worker_too()
- ]
- end
- }
- }.
-
-should_clean_workers() ->
- ?_test(begin
- meck:reset(rexi),
- erase(?WORKER_CLEANER),
- Workers = [
- #shard{node = 'n1', ref = make_ref()},
- #shard{node = 'n2', ref = make_ref()}
- ],
- {Coord, _} = spawn_monitor(fun() ->
- receive
- die -> ok
- end
- end),
- Cleaner = spawn_worker_cleaner(Coord, Workers),
- Ref = erlang:monitor(process, Cleaner),
- Coord ! die,
- receive
- {'DOWN', Ref, _, Cleaner, _} -> ok
- end,
- ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
- end).
-
-does_not_fire_if_cleanup_called() ->
- ?_test(begin
- meck:reset(rexi),
- erase(?WORKER_CLEANER),
- Workers = [
- #shard{node = 'n1', ref = make_ref()},
- #shard{node = 'n2', ref = make_ref()}
- ],
- {Coord, _} = spawn_monitor(fun() ->
- receive
- die -> ok
- end
- end),
- Cleaner = spawn_worker_cleaner(Coord, Workers),
- Ref = erlang:monitor(process, Cleaner),
- cleanup(Workers),
- Coord ! die,
- receive
- {'DOWN', Ref, _, _, _} -> ok
- end,
- % 1 call comes from the cleanup/1 function above. If the cleanup process
- % had fired too it would have been 2 calls total.
- ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
- end).
-
-should_clean_additional_worker_too() ->
- ?_test(begin
- meck:reset(rexi),
- erase(?WORKER_CLEANER),
- Workers = [
- #shard{node = 'n1', ref = make_ref()}
- ],
- {Coord, _} = spawn_monitor(fun() ->
- receive
- die -> ok
- end
- end),
- Cleaner = spawn_worker_cleaner(Coord, Workers),
- add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
- Ref = erlang:monitor(process, Cleaner),
- Coord ! die,
- receive
- {'DOWN', Ref, _, Cleaner, _} -> ok
- end,
- ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
- end).
-
-setup() ->
- ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
-
-teardown(_) ->
- meck:unload().
-
--endif.
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
deleted file mode 100644
index 30e82c29a..000000000
--- a/src/fabric/src/fabric_util.erl
+++ /dev/null
@@ -1,477 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_util).
-
--export([
- submit_jobs/3, submit_jobs/4,
- cleanup/1,
- recv/4,
- get_db/1, get_db/2,
- error_info/1,
- update_counter/3,
- remove_ancestors/2,
- create_monitors/1,
- kv/2,
- remove_down_workers/2, remove_down_workers/3,
- doc_id_and_rev/1
-]).
--export([request_timeout/0, attachments_timeout/0, all_docs_timeout/0, view_timeout/1]).
--export([log_timeout/2, remove_done_workers/2]).
--export([is_users_db/1, is_replicator_db/1]).
--export([open_cluster_db/1, open_cluster_db/2]).
--export([is_partitioned/1]).
--export([validate_all_docs_args/2, validate_args/3]).
--export([upgrade_mrargs/1]).
--export([worker_ranges/1]).
--export([get_uuid_prefix_len/0]).
--export([isolate/1, isolate/2]).
-
--compile({inline, [{doc_id_and_rev, 1}]}).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-remove_down_workers(Workers, BadNode) ->
- remove_down_workers(Workers, BadNode, []).
-
-remove_down_workers(Workers, BadNode, RingOpts) ->
- Filter = fun(#shard{node = Node}, _) -> Node =/= BadNode end,
- NewWorkers = fabric_dict:filter(Filter, Workers),
- case fabric_ring:is_progress_possible(NewWorkers, RingOpts) of
- true ->
- {ok, NewWorkers};
- false ->
- error
- end.
-
-submit_jobs(Shards, EndPoint, ExtraArgs) ->
- submit_jobs(Shards, fabric_rpc, EndPoint, ExtraArgs).
-
-submit_jobs(Shards, Module, EndPoint, ExtraArgs) ->
- lists:map(
- fun(#shard{node = Node, name = ShardName} = Shard) ->
- Ref = rexi:cast(Node, {Module, EndPoint, [ShardName | ExtraArgs]}),
- Shard#shard{ref = Ref}
- end,
- Shards
- ).
-
-cleanup(Workers) ->
- rexi:kill_all([{Node, Ref} || #shard{node = Node, ref = Ref} <- Workers]).
-
-recv(Workers, Keypos, Fun, Acc0) ->
- rexi_utils:recv(Workers, Keypos, Fun, Acc0, request_timeout(), infinity).
-
-request_timeout() ->
- timeout("request", "60000").
-
-all_docs_timeout() ->
- timeout("all_docs", "10000").
-
-attachments_timeout() ->
- timeout("attachments", "600000").
-
-view_timeout(Args) ->
- PartitionQuery = couch_mrview_util:get_extra(Args, partition, false),
- case PartitionQuery of
- false -> timeout("view", "infinity");
- _ -> timeout("partition_view", "infinity")
- end.
-
-timeout(Type, Default) ->
- case config:get("fabric", Type ++ "_timeout", Default) of
- "infinity" -> infinity;
- N -> list_to_integer(N)
- end.
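-
-% For example, timeout("request", "60000") reads the [fabric] request_timeout
-% setting, returning the atom infinity for the string "infinity" and the
-% integer number of milliseconds otherwise.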
-
-log_timeout(Workers, EndPoint) ->
- CounterKey = [fabric, worker, timeouts],
- couch_stats:increment_counter(CounterKey),
- lists:map(
- fun(#shard{node = Dest, name = Name}) ->
- Fmt = "fabric_worker_timeout ~s,~p,~p",
- couch_log:error(Fmt, [EndPoint, Dest, Name])
- end,
- Workers
- ).
-
-remove_done_workers(Workers, WaitingIndicator) ->
- [W || {W, WI} <- fabric_dict:to_list(Workers), WI == WaitingIndicator].
-
-get_db(DbName) ->
- get_db(DbName, []).
-
-get_db(DbName, Options) ->
- {Local, SameZone, DifferentZone} = mem3:group_by_proximity(mem3:shards(DbName)),
- % Prefer shards on the same node over other nodes, prefer shards in the same zone over
- % other zones and sort each remote list by name so that we don't repeatedly try the same node.
- Shards =
- Local ++ lists:keysort(#shard.name, SameZone) ++ lists:keysort(#shard.name, DifferentZone),
- % suppress shards from down nodes
- Nodes = [node() | erlang:nodes()],
- Live = [S || #shard{node = N} = S <- Shards, lists:member(N, Nodes)],
- % Only accept factors > 1, otherwise our math breaks further down
- Factor = max(2, config:get_integer("fabric", "shard_timeout_factor", 2)),
- MinTimeout = config:get_integer("fabric", "shard_timeout_min_msec", 100),
- MaxTimeout = request_timeout(),
- Timeout = get_db_timeout(length(Live), Factor, MinTimeout, MaxTimeout),
- get_shard(Live, Options, Timeout, Factor).
-
-get_shard([], _Opts, _Timeout, _Factor) ->
- erlang:error({internal_server_error, "No DB shards could be opened."});
-get_shard([#shard{node = Node, name = Name} | Rest], Opts, Timeout, Factor) ->
- Mon = rexi_monitor:start([rexi_utils:server_pid(Node)]),
- MFA = {fabric_rpc, open_shard, [Name, [{timeout, Timeout} | Opts]]},
- Ref = rexi:cast(Node, self(), MFA, [sync]),
- try
- receive
- {Ref, {ok, Db}} ->
- {ok, Db};
- {Ref, {'rexi_EXIT', {{unauthorized, _} = Error, _}}} ->
- throw(Error);
- {Ref, {'rexi_EXIT', {{forbidden, _} = Error, _}}} ->
- throw(Error);
- {Ref, Reason} ->
- couch_log:debug("Failed to open shard ~p because: ~p", [Name, Reason]),
- get_shard(Rest, Opts, Timeout, Factor)
- after Timeout ->
- couch_log:debug("Failed to open shard ~p after: ~p", [Name, Timeout]),
- get_shard(Rest, Opts, Factor * Timeout, Factor)
- end
- after
- rexi_monitor:stop(Mon)
- end.
-
-get_db_timeout(N, Factor, MinTimeout, infinity) ->
- % MaxTimeout may be infinity so we just use the largest Erlang small int to
- % avoid blowing up the arithmetic
- get_db_timeout(N, Factor, MinTimeout, 1 bsl 59);
-get_db_timeout(N, Factor, MinTimeout, MaxTimeout) ->
- %
- % The progression of timeouts forms a geometric series:
- %
- % MaxTimeout = T + T*F + T*F^2 + T*F^3 ...
- %
- % Where T is the initial timeout and F is the factor. The formula for
- % the sum is:
- %
- % Sum[T * F^I, I <- 0..N] = T * (1 - F^(N + 1)) / (1 - F)
- %
- % Then, for a given sum and factor we can calculate the initial timeout T:
- %
- % T = Sum / ((1 - F^(N+1)) / (1 - F))
- %
- Timeout = MaxTimeout / ((1 - math:pow(Factor, N + 1)) / (1 - Factor)),
- % Apply a minimum timeout value
- max(MinTimeout, trunc(Timeout)).
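-
-% Worked example (illustrative numbers): with N = 3, Factor = 2,
-% MinTimeout = 100 and MaxTimeout = 60000 the formula gives
-% T = 60000 / ((1 - 2^4) / (1 - 2)) = 60000 / 15 = 4000 msec, and each retry
-% in get_shard/4 then multiplies the timeout by Factor (4000, 8000, 16000,
-% ...), keeping the total within the overall request timeout.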
-
-error_info({{timeout, _} = Error, _Stack}) ->
- Error;
-error_info({{Error, Reason}, Stack}) ->
- {Error, Reason, Stack};
-error_info({Error, Stack}) ->
- {Error, nil, Stack}.
-
-update_counter(Item, Incr, D) ->
- UpdateFun = fun({Old, Count}) -> {Old, Count + Incr} end,
- orddict:update(make_key(Item), UpdateFun, {Item, Incr}, D).
-
-make_key({ok, L}) when is_list(L) ->
- make_key(L);
-make_key([]) ->
- [];
-make_key([{ok, #doc{revs = {Pos, [RevId | _]}}} | Rest]) ->
- [{ok, {Pos, RevId}} | make_key(Rest)];
-make_key([{{not_found, missing}, Rev} | Rest]) ->
- [{not_found, Rev} | make_key(Rest)];
-make_key({ok, #doc{id = Id, revs = Revs}}) ->
- {Id, Revs};
-make_key(Else) ->
- Else.
-
-% this presumes the incoming list is sorted, i.e. shorter revlists come first
-remove_ancestors([], Acc) ->
- lists:reverse(Acc);
-remove_ancestors([{_, {{not_found, _}, Count}} = Head | Tail], Acc) ->
- % any document is a descendant
- case
- lists:filter(
- fun
- ({_, {{ok, #doc{}}, _}}) -> true;
- (_) -> false
- end,
- Tail
- )
- of
- [{_, {{ok, #doc{}} = Descendant, _}} | _] ->
- remove_ancestors(update_counter(Descendant, Count, Tail), Acc);
- [] ->
- remove_ancestors(Tail, [Head | Acc])
- end;
-remove_ancestors([{_, {{ok, #doc{revs = {Pos, Revs}}}, Count}} = Head | Tail], Acc) ->
- Descendants = lists:dropwhile(
- fun({_, {{ok, #doc{revs = {Pos2, Revs2}}}, _}}) ->
- case lists:nthtail(erlang:min(Pos2 - Pos, length(Revs2)), Revs2) of
- [] ->
- % impossible to tell if Revs2 is a descendant - assume no
- true;
- History ->
- % if Revs2 is a descendant, History is a prefix of Revs
- not lists:prefix(History, Revs)
- end
- end,
- Tail
- ),
- case Descendants of
- [] ->
- remove_ancestors(Tail, [Head | Acc]);
- [{Descendant, _} | _] ->
- remove_ancestors(update_counter(Descendant, Count, Tail), Acc)
- end;
-remove_ancestors([Error | Tail], Acc) ->
- remove_ancestors(Tail, [Error | Acc]).
-
-create_monitors(Shards) ->
- MonRefs = lists:usort([rexi_utils:server_pid(N) || #shard{node = N} <- Shards]),
- rexi_monitor:start(MonRefs).
-
-%% verify only id and rev are used in key.
-update_counter_test() ->
- Reply =
- {ok, #doc{
- id = <<"id">>,
- revs = <<"rev">>,
- body = <<"body">>,
- atts = <<"atts">>
- }},
- ?assertEqual(
- [{{<<"id">>, <<"rev">>}, {Reply, 1}}],
- update_counter(Reply, 1, [])
- ).
-
-remove_ancestors_test() ->
- Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
- Bar2 = {not_found, {1, <<"bar">>}},
- ?assertEqual(
- [kv(Bar1, 1), kv(Foo1, 1)],
- remove_ancestors([kv(Bar1, 1), kv(Foo1, 1)], [])
- ),
- ?assertEqual(
- [kv(Bar1, 1), kv(Foo2, 2)],
- remove_ancestors([kv(Bar1, 1), kv(Foo1, 1), kv(Foo2, 1)], [])
- ),
- ?assertEqual(
- [kv(Bar1, 2)],
- remove_ancestors([kv(Bar2, 1), kv(Bar1, 1)], [])
- ).
-
-is_replicator_db(DbName) ->
- path_ends_with(DbName, <<"_replicator">>).
-
-is_users_db(DbName) ->
- ConfigName = list_to_binary(
- config:get(
- "chttpd_auth", "authentication_db", "_users"
- )
- ),
- DbName == ConfigName orelse path_ends_with(DbName, <<"_users">>).
-
-path_ends_with(Path, Suffix) ->
- Suffix =:= couch_db:dbname_suffix(Path).
-
-open_cluster_db(#shard{dbname = DbName, opts = Options}) ->
- case couch_util:get_value(props, Options) of
- Props when is_list(Props) ->
- {ok, Db} = couch_db:clustered_db(DbName, [{props, Props}]),
- Db;
- _ ->
- {ok, Db} = couch_db:clustered_db(DbName, []),
- Db
- end.
-
-open_cluster_db(DbName, Opts) ->
- % as admin
- {SecProps} = fabric:get_security(DbName),
- UserCtx = couch_util:get_value(user_ctx, Opts, #user_ctx{}),
- {ok, Db} = couch_db:clustered_db(DbName, UserCtx, SecProps),
- Db.
-
-%% test function
-kv(Item, Count) ->
- {make_key(Item), {Item, Count}}.
-
-doc_id_and_rev(#doc{id = DocId, revs = {RevNum, [RevHash | _]}}) ->
- {DocId, {RevNum, RevHash}}.
-
-is_partitioned(DbName0) when is_binary(DbName0) ->
- Shards = mem3:shards(fabric:dbname(DbName0)),
- is_partitioned(open_cluster_db(hd(Shards)));
-is_partitioned(Db) ->
- couch_db:is_partitioned(Db).
-
-validate_all_docs_args(DbName, Args) when is_binary(DbName) ->
- Shards = mem3:shards(fabric:dbname(DbName)),
- Db = open_cluster_db(hd(Shards)),
- validate_all_docs_args(Db, Args);
-validate_all_docs_args(Db, Args) ->
- true = couch_db:is_clustered(Db),
- couch_mrview_util:validate_all_docs_args(Db, Args).
-
-validate_args(DbName, DDoc, Args) when is_binary(DbName) ->
- Shards = mem3:shards(fabric:dbname(DbName)),
- Db = open_cluster_db(hd(Shards)),
- validate_args(Db, DDoc, Args);
-validate_args(Db, DDoc, Args) ->
- true = couch_db:is_clustered(Db),
- couch_mrview_util:validate_args(Db, DDoc, Args).
-
-upgrade_mrargs(#mrargs{} = Args) ->
- Args;
-upgrade_mrargs(
- {mrargs, ViewType, Reduce, PreflightFun, StartKey, StartKeyDocId, EndKey, EndKeyDocId, Keys,
- Direction, Limit, Skip, GroupLevel, Group, Stale, MultiGet, InclusiveEnd, IncludeDocs,
- DocOptions, UpdateSeq, Conflicts, Callback, Sorted, Extra}
-) ->
- {Stable, Update} =
- case Stale of
- ok -> {true, false};
- update_after -> {true, lazy};
- _ -> {false, true}
- end,
- #mrargs{
- view_type = ViewType,
- reduce = Reduce,
- preflight_fun = PreflightFun,
- start_key = StartKey,
- start_key_docid = StartKeyDocId,
- end_key = EndKey,
- end_key_docid = EndKeyDocId,
- keys = Keys,
- direction = Direction,
- limit = Limit,
- skip = Skip,
- group_level = GroupLevel,
- group = Group,
- stable = Stable,
- update = Update,
- multi_get = MultiGet,
- inclusive_end = InclusiveEnd,
- include_docs = IncludeDocs,
- doc_options = DocOptions,
- update_seq = UpdateSeq,
- conflicts = Conflicts,
- callback = Callback,
- sorted = Sorted,
- extra = Extra
- }.
-
-worker_ranges(Workers) ->
- Ranges = fabric_dict:fold(
- fun(#shard{range = [X, Y]}, _, Acc) ->
- [{X, Y} | Acc]
- end,
- [],
- Workers
- ),
- lists:usort(Ranges).
-
-get_uuid_prefix_len() ->
- config:get_integer("fabric", "uuid_prefix_len", 7).
-
-% If we issue multiple fabric calls from the same process, we have to isolate
-% them so that, in case of an error, they don't pollute the process dictionary
-% or the mailbox
-
-isolate(Fun) ->
- isolate(Fun, infinity).
-
-isolate(Fun, Timeout) ->
- {Pid, Ref} = erlang:spawn_monitor(fun() -> exit(do_isolate(Fun)) end),
- receive
- {'DOWN', Ref, _, _, {'$isolres', Res}} ->
- Res;
- {'DOWN', Ref, _, _, {'$isolerr', Tag, Reason, Stack}} ->
- erlang:raise(Tag, Reason, Stack)
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- erlang:error(timeout)
- end.
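-
-% A minimal usage sketch (DbName below is a placeholder): callers wrap the
-% nested fabric call in a fun and bound it by the request timeout, e.g.
-%
-%     fabric_util:isolate(fun() -> fabric:db_uuids(DbName) end,
-%                         fabric_util:request_timeout())
-%
-% which matches how fabric_view_changes:get_db_uuid_shards/1 calls it.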
-
-% OTP_RELEASE is defined in OTP 21+ only
--ifdef(OTP_RELEASE).
-
-do_isolate(Fun) ->
- try
- {'$isolres', Fun()}
- catch
- Tag:Reason:Stack ->
- {'$isolerr', Tag, Reason, Stack}
- end.
-
--else.
-
-do_isolate(Fun) ->
- try
- {'$isolres', Fun()}
- catch ?STACKTRACE(Tag, Reason, Stack)
- {'$isolerr', Tag, Reason, Stack}
- end.
-
--endif.
-
-get_db_timeout_test() ->
- % Q=1, N=1
- ?assertEqual(20000, get_db_timeout(1, 2, 100, 60000)),
-
- % Q=2, N=1
- ?assertEqual(8571, get_db_timeout(2, 2, 100, 60000)),
-
- % Q=2, N=3 (default)
- ?assertEqual(472, get_db_timeout(2 * 3, 2, 100, 60000)),
-
- % Q=3, N=3
- ?assertEqual(100, get_db_timeout(3 * 3, 2, 100, 60000)),
-
- % Q=4, N=1
- ?assertEqual(1935, get_db_timeout(4, 2, 100, 60000)),
-
- % Q=8, N=1
- ?assertEqual(117, get_db_timeout(8, 2, 100, 60000)),
-
- % Q=8, N=3 (default in 2.x)
- ?assertEqual(100, get_db_timeout(8 * 3, 2, 100, 60000)),
-
- % Q=256, N=3
- ?assertEqual(100, get_db_timeout(256 * 3, 2, 100, 60000)),
-
- % Large factor = 100
- ?assertEqual(100, get_db_timeout(2 * 3, 100, 100, 60000)),
-
- % Small total request timeout = 1 sec
- ?assertEqual(100, get_db_timeout(2 * 3, 2, 100, 1000)),
-
- % Large total request timeout
- ?assertEqual(28346, get_db_timeout(2 * 3, 2, 100, 3600000)),
-
- % No shards at all
- ?assertEqual(60000, get_db_timeout(0, 2, 100, 60000)),
-
- % request_timeout was set to infinity, with enough shards it still gets to
- % 100 min timeout at the start from the exponential logic
- ?assertEqual(100, get_db_timeout(64, 2, 100, infinity)).
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
deleted file mode 100644
index c2ef13392..000000000
--- a/src/fabric/src/fabric_view.erl
+++ /dev/null
@@ -1,561 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view).
-
--export([
- remove_overlapping_shards/2,
- maybe_send_row/1,
- transform_row/1,
- keydict/1,
- extract_view/4,
- get_shards/2,
- check_down_shards/2,
- handle_worker_exit/3,
- get_shard_replacements/2,
- maybe_update_others/5
-]).
--export([fix_skip_and_limit/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-%% @doc Check if a downed node affects any of our workers
--spec check_down_shards(#collector{}, node()) ->
- {ok, #collector{}} | {error, any()}.
-check_down_shards(Collector, BadNode) ->
- #collector{callback = Callback, counters = Counters, user_acc = Acc} = Collector,
- Filter = fun(#shard{node = Node}, _) -> Node == BadNode end,
- BadCounters = fabric_dict:filter(Filter, Counters),
- case fabric_dict:size(BadCounters) > 0 of
- true ->
- Reason = {nodedown, <<"progress not possible">>},
- Callback({error, Reason}, Acc),
- {error, Reason};
- false ->
- {ok, Collector}
- end.
-
-%% @doc Handle a worker that dies during a stream
--spec handle_worker_exit(#collector{}, #shard{}, any()) -> {error, any()}.
-handle_worker_exit(Collector, _Worker, Reason) ->
- #collector{callback = Callback, user_acc = Acc} = Collector,
- {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
- {error, Resp}.
-
--spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}]) ->
- [{#shard{}, any()}].
-remove_overlapping_shards(#shard{} = Shard, Counters) ->
- remove_overlapping_shards(Shard, Counters, fun stop_worker/1).
-
--spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}], fun()) ->
- [{#shard{}, any()}].
-remove_overlapping_shards(#shard{} = Shard, Counters, RemoveCb) ->
- Counters1 = filter_exact_copies(Shard, Counters, RemoveCb),
- filter_possible_overlaps(Shard, Counters1, RemoveCb).
-
-filter_possible_overlaps(Shard, Counters, RemoveCb) ->
- Ranges0 = fabric_util:worker_ranges(Counters),
- #shard{range = [BShard, EShard]} = Shard,
- Ranges = Ranges0 ++ [{BShard, EShard}],
- {Bs, Es} = lists:unzip(Ranges),
- {MinB, MaxE} = {lists:min(Bs), lists:max(Es)},
- % Use a custom sort function which prioritizes the given shard
- % range when the start endpoints match.
- SortFun = fun
- ({B, E}, {B, _}) when {B, E} =:= {BShard, EShard} ->
- % If start matches with the shard's start, shard always wins
- true;
- ({B, _}, {B, E}) when {B, E} =:= {BShard, EShard} ->
- % If start matches with the shard's start, shard always wins
- false;
- ({B, E1}, {B, E2}) ->
- % If start matches, pick the longest range first
- E2 >= E1;
- ({B1, _}, {B2, _}) ->
- % Then, by default, sort by start point
- B1 =< B2
- end,
- Ring = mem3_util:get_ring(Ranges, SortFun, MinB, MaxE),
- fabric_dict:filter(
- fun
- (S, _) when S =:= Shard ->
- % Keep the original shard
- true;
- (#shard{range = [B, E]} = S, _) ->
- case lists:member({B, E}, Ring) of
- true ->
- % Keep it
- true;
- false ->
- % Duplicate range, delete after calling callback function
- case is_function(RemoveCb) of
- true -> RemoveCb(S);
- false -> ok
- end,
- false
- end
- end,
- Counters
- ).
-
-filter_exact_copies(#shard{range = Range0} = Shard0, Shards, Cb) ->
- fabric_dict:filter(
- fun
- (Shard, _) when Shard =:= Shard0 ->
- % Don't remove ourselves
- true;
- (#shard{range = Range} = Shard, _) when Range =:= Range0 ->
- case is_function(Cb) of
- true -> Cb(Shard);
- false -> ok
- end,
- false;
- (_, _) ->
- true
- end,
- Shards
- ).
-
-stop_worker(#shard{ref = Ref, node = Node}) ->
- rexi:kill(Node, Ref).
-
-maybe_send_row(#collector{limit = 0} = State) ->
- #collector{counters = Counters, user_acc = AccIn, callback = Callback} = State,
- case fabric_dict:any(0, Counters) of
- true ->
- % we still need to send the total/offset header
- {ok, State};
- false ->
- erase(meta_sent),
- {_, Acc} = Callback(complete, AccIn),
- {stop, State#collector{user_acc = Acc}}
- end;
-maybe_send_row(State) ->
- #collector{
- callback = Callback,
- counters = Counters,
- skip = Skip,
- limit = Limit,
- user_acc = AccIn
- } = State,
- case fabric_dict:any(0, Counters) of
- true ->
- {ok, State};
- false ->
- try get_next_row(State) of
- {_, NewState} when Skip > 0 ->
- maybe_send_row(NewState#collector{skip = Skip - 1});
- {Row0, NewState} ->
- Row1 = possibly_embed_doc(NewState, Row0),
- Row2 = detach_partition(Row1),
- Row3 = transform_row(Row2),
- case Callback(Row3, AccIn) of
- {stop, Acc} ->
- {stop, NewState#collector{user_acc = Acc, limit = Limit - 1}};
- {ok, Acc} ->
- maybe_send_row(NewState#collector{user_acc = Acc, limit = Limit - 1})
- end
- catch
- complete ->
- erase(meta_sent),
- {_, Acc} = Callback(complete, AccIn),
- {stop, State#collector{user_acc = Acc}}
- end
- end.
-
-%% If include_docs=true is used when querying with keys and the row values
-%% contain an "_id" field, use those "_id"s to retrieve the documents and
-%% embed them in the result
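-%%
-%% For instance (document ids here are only illustrative), a map function that
-%% emits {"_id": "other_doc"} as its value produces a row whose Value is
-%% {[{<<"_id">>, <<"other_doc">>}]}; with include_docs=true the clauses below
-%% fetch "other_doc" (at the emitted "_rev" when one is present) and attach it
-%% to the row as its doc.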
-possibly_embed_doc(
- _State,
- #view_row{id = reduced} = Row
-) ->
- Row;
-possibly_embed_doc(
- _State,
- #view_row{value = undefined} = Row
-) ->
- Row;
-possibly_embed_doc(
- #collector{db_name = DbName, query_args = Args},
- #view_row{key = _Key, id = _Id, value = Value, doc = _Doc} = Row
-) ->
- #mrargs{include_docs = IncludeDocs} = Args,
- case IncludeDocs andalso is_tuple(Value) of
- true ->
- {Props} = Value,
- Rev0 = couch_util:get_value(<<"_rev">>, Props),
- case couch_util:get_value(<<"_id">>, Props) of
- null ->
- Row#view_row{doc = null};
- undefined ->
- Row;
- IncId ->
- % use a separate process to call fabric:open_doc
- % so that it does not interfere with the current call
- {Pid, Ref} = spawn_monitor(fun() ->
- exit(
- case Rev0 of
- undefined ->
- case fabric:open_doc(DbName, IncId, []) of
- {ok, NewDoc} ->
- Row#view_row{doc = couch_doc:to_json_obj(NewDoc, [])};
- {not_found, _} ->
- Row#view_row{doc = null};
- Else ->
- Row#view_row{doc = {error, Else}}
- end;
- Rev0 ->
- Rev = couch_doc:parse_rev(Rev0),
- case fabric:open_revs(DbName, IncId, [Rev], []) of
- {ok, [{ok, NewDoc}]} ->
- Row#view_row{doc = couch_doc:to_json_obj(NewDoc, [])};
- {ok, [{{not_found, _}, Rev}]} ->
- Row#view_row{doc = null};
- Else ->
- Row#view_row{doc = {error, Else}}
- end
- end
- )
- end),
- receive
- {'DOWN', Ref, process, Pid, Resp} ->
- Resp
- end
- end;
- _ ->
- Row
- end.
-
-detach_partition(#view_row{key = {p, _Partition, Key}} = Row) ->
- Row#view_row{key = Key};
-detach_partition(#view_row{} = Row) ->
- Row.
-
-keydict(undefined) ->
- undefined;
-keydict(Keys) ->
- {Dict, _} = lists:foldl(
- fun(K, {D, I}) -> {dict:store(K, I, D), I + 1} end,
- {dict:new(), 0},
- Keys
- ),
- Dict.
-
-%% internal %%
-
-get_next_row(#collector{rows = []}) ->
- throw(complete);
-get_next_row(#collector{reducer = RedSrc} = St) when RedSrc =/= undefined ->
- #collector{
- query_args = #mrargs{direction = Dir},
- keys = Keys,
- rows = RowDict,
- lang = Lang,
- counters = Counters0,
- collation = Collation
- } = St,
- {Key, RestKeys} = find_next_key(Keys, Dir, Collation, RowDict),
- case reduce_row_dict_take(Key, RowDict, Collation) of
- {Records, NewRowDict} ->
- Counters = lists:foldl(
- fun(#view_row{worker = {Worker, From}}, CntrsAcc) ->
- case From of
- {Pid, _} when is_pid(Pid) ->
- gen_server:reply(From, ok);
- Pid when is_pid(Pid) ->
- rexi:stream_ack(From)
- end,
- fabric_dict:update_counter(Worker, -1, CntrsAcc)
- end,
- Counters0,
- Records
- ),
- Wrapped = [[V] || #view_row{value = V} <- Records],
- {ok, [Reduced]} = couch_query_servers:rereduce(Lang, [RedSrc], Wrapped),
- {ok, Finalized} = couch_query_servers:finalize(RedSrc, Reduced),
- NewSt = St#collector{keys = RestKeys, rows = NewRowDict, counters = Counters},
- {#view_row{key = Key, id = reduced, value = Finalized}, NewSt};
- error ->
- get_next_row(St#collector{keys = RestKeys})
- end;
-get_next_row(State) ->
- #collector{rows = [Row | Rest], counters = Counters0} = State,
- {Worker, From} = Row#view_row.worker,
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, -1, Counters0),
- {Row, State#collector{rows = Rest, counters = Counters1}}.
-
-reduce_row_dict_take(Key, Dict, <<"raw">>) ->
- dict:take(Key, Dict);
-reduce_row_dict_take(Key, Dict, _Collation) ->
- IsEq = fun(K, _) -> couch_ejson_compare:less(K, Key) =:= 0 end,
- KVs = dict:to_list(dict:filter(IsEq, Dict)),
- case KVs of
- [] ->
- error;
- [_ | _] ->
- {Keys, Vals} = lists:unzip(KVs),
- NewDict = lists:foldl(
- fun(K, Acc) ->
- dict:erase(K, Acc)
- end,
- Dict,
- Keys
- ),
- {lists:flatten(Vals), NewDict}
- end.
-
-%% TODO: rectify nil <-> undefined discrepancies
-find_next_key(nil, Dir, Collation, RowDict) ->
- find_next_key(undefined, Dir, Collation, RowDict);
-find_next_key(undefined, Dir, Collation, RowDict) ->
- CmpFun = fun(A, B) -> compare(Dir, Collation, A, B) end,
- case lists:sort(CmpFun, dict:fetch_keys(RowDict)) of
- [] ->
- throw(complete);
- [Key | _] ->
- {Key, nil}
- end;
-find_next_key([], _, _, _) ->
- throw(complete);
-find_next_key([Key | Rest], _, _, _) ->
- {Key, Rest}.
-
-transform_row(#view_row{value = {[{reduce_overflow_error, Msg}]}}) ->
- {row, [{key, null}, {id, error}, {value, reduce_overflow_error}, {reason, Msg}]};
-transform_row(#view_row{key = Key, id = reduced, value = Value}) ->
- {row, [{key, Key}, {value, Value}]};
-transform_row(#view_row{key = Key, id = undefined}) ->
- {row, [{key, Key}, {id, error}, {value, not_found}]};
-transform_row(#view_row{key = Key, id = Id, value = Value, doc = undefined}) ->
- {row, [{id, Id}, {key, Key}, {value, Value}]};
-transform_row(#view_row{key = Key, id = _Id, value = _Value, doc = {error, Reason}}) ->
- {row, [{id, error}, {key, Key}, {value, Reason}]};
-transform_row(#view_row{key = Key, id = Id, value = Value, doc = Doc}) ->
- {row, [{id, Id}, {key, Key}, {value, Value}, {doc, Doc}]}.
-
-compare(fwd, <<"raw">>, A, B) -> A < B;
-compare(rev, <<"raw">>, A, B) -> B < A;
-compare(fwd, _, A, B) -> couch_ejson_compare:less_json(A, B);
-compare(rev, _, A, B) -> couch_ejson_compare:less_json(B, A).
-
-extract_view(Pid, ViewName, [], _ViewType) ->
- couch_log:error("missing_named_view ~p", [ViewName]),
- exit(Pid, kill),
- exit(missing_named_view);
-extract_view(Pid, ViewName, [View | Rest], ViewType) ->
- case lists:member(ViewName, view_names(View, ViewType)) of
- true ->
- if
- ViewType == reduce ->
- {index_of(ViewName, view_names(View, reduce)), View};
- true ->
- View
- end;
- false ->
- extract_view(Pid, ViewName, Rest, ViewType)
- end.
-
-view_names(View, Type) when Type == red_map; Type == reduce ->
- [Name || {Name, _} <- View#mrview.reduce_funs];
-view_names(View, map) ->
- View#mrview.map_names.
-
-index_of(X, List) ->
- index_of(X, List, 1).
-
-index_of(_X, [], _I) ->
- not_found;
-index_of(X, [X | _Rest], I) ->
- I;
-index_of(X, [_ | Rest], I) ->
- index_of(X, Rest, I + 1).
-
-get_shards(Db, #mrargs{} = Args) ->
- DbPartitioned = fabric_util:is_partitioned(Db),
- Partition = couch_mrview_util:get_extra(Args, partition),
- if
- DbPartitioned orelse Partition == undefined -> ok;
- true -> throw({bad_request, <<"partition specified on non-partitioned db">>})
- end,
- DbName = fabric:dbname(Db),
- % Decide which version of mem3:shards/1,2 or
- % mem3:ushards/1,2 to use for the current
- % request.
- case {Args#mrargs.stable, Partition} of
- {true, undefined} ->
- {mem3:ushards(DbName), []};
- {true, Partition} ->
- Shards = mem3:ushards(DbName, couch_partition:shard_key(Partition)),
- {Shards, [{any, Shards}]};
- {false, undefined} ->
- {mem3:shards(DbName), []};
- {false, Partition} ->
- Shards = mem3:shards(DbName, couch_partition:shard_key(Partition)),
- {Shards, [{any, Shards}]}
- end.
-
-maybe_update_others(
- DbName,
- DDoc,
- ShardsInvolved,
- ViewName,
- #mrargs{update = lazy} = Args
-) ->
- ShardsNeedUpdated = mem3:shards(DbName) -- ShardsInvolved,
- lists:foreach(
- fun(#shard{node = Node, name = ShardName}) ->
- rpc:cast(Node, fabric_rpc, update_mrview, [ShardName, DDoc, ViewName, Args])
- end,
- ShardsNeedUpdated
- );
-maybe_update_others(_DbName, _DDoc, _ShardsInvolved, _ViewName, _Args) ->
- ok.
-
-get_shard_replacements(DbName, UsedShards0) ->
- % We only want to generate a replacements list from shards
- % that aren't already used.
- AllLiveShards = mem3:live_shards(DbName, [node() | nodes()]),
- UsedShards = [S#shard{ref = undefined} || S <- UsedShards0],
- get_shard_replacements_int(AllLiveShards -- UsedShards, UsedShards).
-
-get_shard_replacements_int(UnusedShards, UsedShards) ->
- % If we have more than one copy of a range then we don't
- % want to try and add a replacement to any copy.
- RangeCounts = lists:foldl(
- fun(#shard{range = R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end,
- dict:new(),
- UsedShards
- ),
-
- % For each shard range with a count of 1, find any
- % possible replacements from the unused shards. The
- % replacement list is keyed by range.
- lists:foldl(
- fun(#shard{range = [B, E] = Range}, Acc) ->
- case dict:find(Range, RangeCounts) of
- {ok, 1} ->
- Repls = mem3_util:non_overlapping_shards(UnusedShards, B, E),
- % Only keep non-empty lists of replacements
- if
- Repls == [] -> Acc;
- true -> [{Range, Repls} | Acc]
- end;
- _ ->
- Acc
- end
- end,
- [],
- UsedShards
- ).
-
--spec fix_skip_and_limit(#mrargs{}) -> {CoordArgs :: #mrargs{}, WorkerArgs :: #mrargs{}}.
-fix_skip_and_limit(#mrargs{} = Args) ->
- {CoordArgs, WorkerArgs} =
- case couch_mrview_util:get_extra(Args, partition) of
- undefined ->
- #mrargs{skip = Skip, limit = Limit} = Args,
- {Args, Args#mrargs{skip = 0, limit = Skip + Limit}};
- _Partition ->
- {Args#mrargs{skip = 0}, Args}
- end,
- %% the coordinator needs to finalize each row, so make sure the shards don't finalize them
- {CoordArgs, remove_finalizer(WorkerArgs)}.
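-
-%% Worked example (numbers are illustrative): for skip=5, limit=10 and no
-%% partition, the coordinator keeps skip=5/limit=10 while every worker runs
-%% with skip=0/limit=15, so each shard returns enough rows for the coordinator
-%% to drop its first 5 and still emit 10.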
-
-remove_finalizer(Args) ->
- couch_mrview_util:set_extra(Args, finalizer, null).
-
-remove_overlapping_shards_test() ->
- Cb = undefined,
-
- Shards = mk_cnts([[0, 10], [11, 20], [21, ?RING_END]], 3),
-
- % Simple (exact) overlap
- Shard1 = mk_shard("node-3", [11, 20]),
- Shards1 = fabric_dict:store(Shard1, nil, Shards),
- R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
- ?assertEqual(
- [{0, 10}, {11, 20}, {21, ?RING_END}],
- fabric_util:worker_ranges(R1)
- ),
- ?assert(fabric_dict:is_key(Shard1, R1)),
-
- % Split overlap (shard overlap multiple workers)
- Shard2 = mk_shard("node-3", [0, 20]),
- Shards2 = fabric_dict:store(Shard2, nil, Shards),
- R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
- ?assertEqual(
- [{0, 20}, {21, ?RING_END}],
- fabric_util:worker_ranges(R2)
- ),
- ?assert(fabric_dict:is_key(Shard2, R2)).
-
-get_shard_replacements_test() ->
- Unused = [
- mk_shard(N, [B, E])
- || {N, B, E} <- [
- {"n1", 11, 20},
- {"n1", 21, ?RING_END},
- {"n2", 0, 4},
- {"n2", 5, 10},
- {"n2", 11, 20},
- {"n3", 0, 21, ?RING_END}
- ]
- ],
- Used = [
- mk_shard(N, [B, E])
- || {N, B, E} <- [
- {"n2", 21, ?RING_END},
- {"n3", 0, 10},
- {"n3", 11, 20}
- ]
- ],
- Res = lists:sort(get_shard_replacements_int(Unused, Used)),
- % Notice that the [0, 10] range can be replaced by spawning the [0, 4] and
- % [5, 10] workers on n2
- Expect = [
- {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
- {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
- {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
- ],
- ?assertEqual(Expect, Res).
-
-mk_cnts(Ranges, NoNodes) ->
- orddict:from_list([
- {Shard, nil}
- || Shard <-
- lists:flatten(
- lists:map(
- fun(Range) ->
- mk_shards(NoNodes, Range, [])
- end,
- Ranges
- )
- )
- ]).
-
-mk_shards(0, _Range, Shards) ->
- Shards;
-mk_shards(NoNodes, Range, Shards) ->
- Name = "node-" ++ integer_to_list(NoNodes),
- mk_shards(NoNodes - 1, Range, [mk_shard(Name, Range) | Shards]).
-
-mk_shard(Name, Range) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = Range}.
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
deleted file mode 100644
index 0a637a738..000000000
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ /dev/null
@@ -1,361 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_all_docs).
-
--export([go/5]).
-% exported for spawn
--export([open_doc/4]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(Db, Options, #mrargs{keys = undefined} = QueryArgs, Callback, Acc) ->
- {CoordArgs, WorkerArgs} = fabric_view:fix_skip_and_limit(QueryArgs),
- DbName = fabric:dbname(Db),
- {Shards, RingOpts} = shards(Db, QueryArgs),
- Workers0 = fabric_util:submit_jobs(
- Shards, fabric_rpc, all_docs, [Options, WorkerArgs]
- ),
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case fabric_streams:start(Workers0, #shard.ref, RingOpts) of
- {ok, Workers} ->
- try
- go(DbName, Options, Workers, CoordArgs, Callback, Acc)
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers, waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "all_docs"
- ),
- Callback({error, timeout}, Acc);
- {error, Error} ->
- Callback({error, Error}, Acc)
- end
- after
- rexi_monitor:stop(RexiMon)
- end;
-go(DbName, Options, QueryArgs, Callback, Acc0) ->
- #mrargs{
- direction = Dir,
- include_docs = IncludeDocs,
- doc_options = DocOptions0,
- limit = Limit,
- conflicts = Conflicts,
- skip = Skip,
- keys = Keys0,
- extra = Extra,
- update_seq = UpdateSeq
- } = QueryArgs,
- DocOptions1 =
- case Conflicts of
- true -> [conflicts | DocOptions0];
- _ -> DocOptions0
- end,
- SpawnFun = fun(Key) ->
- spawn_monitor(?MODULE, open_doc, [DbName, Options ++ DocOptions1, Key, IncludeDocs])
- end,
- MaxJobs = all_docs_concurrency(),
- Keys1 =
- case Dir of
- fwd -> Keys0;
- _ -> lists:reverse(Keys0)
- end,
- Keys2 =
- case Skip < length(Keys1) of
- true -> lists:nthtail(Skip, Keys1);
- false -> []
- end,
- Keys3 =
- case Limit < length(Keys2) of
- true -> lists:sublist(Keys2, Limit);
- false -> Keys2
- end,
- %% namespace can be _set_ to `undefined`, so we want to simulate an enum here
- Namespace =
- case couch_util:get_value(namespace, Extra) of
- <<"_all_docs">> -> <<"_all_docs">>;
- <<"_design">> -> <<"_design">>;
- <<"_non_design">> -> <<"_non_design">>;
- <<"_local">> -> <<"_local">>;
- _ -> <<"_all_docs">>
- end,
- Timeout = fabric_util:all_docs_timeout(),
- {_, Ref} = spawn_monitor(fun() ->
- exit(fabric:get_doc_count(DbName, Namespace))
- end),
- receive
- {'DOWN', Ref, _, _, {ok, TotalRows}} ->
- Meta =
- case UpdateSeq of
- false ->
- [{total, TotalRows}, {offset, null}];
- true ->
- [{total, TotalRows}, {offset, null}, {update_seq, null}]
- end,
- {ok, Acc1} = Callback({meta, Meta}, Acc0),
- Resp = doc_receive_loop(
- Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1
- ),
- case Resp of
- {ok, Acc2} ->
- Callback(complete, Acc2);
- timeout ->
- Callback({error, timeout}, Acc0)
- end;
- {'DOWN', Ref, _, _, Error} ->
- Callback({error, Error}, Acc0)
- after Timeout ->
- Callback({error, timeout}, Acc0)
- end.
-
-go(DbName, _Options, Workers, QueryArgs, Callback, Acc0) ->
- #mrargs{limit = Limit, skip = Skip, update_seq = UpdateSeq} = QueryArgs,
- State = #collector{
- db_name = DbName,
- query_args = QueryArgs,
- callback = Callback,
- counters = fabric_dict:init(Workers, 0),
- skip = Skip,
- limit = Limit,
- user_acc = Acc0,
- update_seq =
- case UpdateSeq of
- true -> [];
- false -> nil
- end
- },
- case
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- State,
- fabric_util:view_timeout(QueryArgs),
- 5000
- )
- of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
- end.
-
-shards(Db, Args) ->
- DbPartitioned = fabric_util:is_partitioned(Db),
- Partition = couch_mrview_util:get_extra(Args, partition),
- NewArgs =
- case {DbPartitioned, Partition} of
- {true, undefined} ->
- % If a user specifies the same partition on both
- % the start and end keys we can optimize the
- % query by limiting to the partition shard.
- Start = couch_partition:extract(Args#mrargs.start_key),
- End = couch_partition:extract(Args#mrargs.end_key),
- case {Start, End} of
- {{Partition, SK}, {Partition, EK}} ->
- A1 = Args#mrargs{
- start_key = SK,
- end_key = EK
- },
- couch_mrview_util:set_extra(A1, partition, Partition);
- _ ->
- Args
- end;
- _ ->
- Args
- end,
- fabric_view:get_shards(Db, NewArgs).
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-handle_message({meta, Meta0}, {Worker, From}, State) ->
- Tot = couch_util:get_value(total, Meta0, 0),
- Off = couch_util:get_value(offset, Meta0, 0),
- Seq = couch_util:get_value(update_seq, Meta0, 0),
- #collector{
- callback = Callback,
- counters = Counters0,
- total_rows = Total0,
- offset = Offset0,
- user_acc = AccIn,
- update_seq = UpdateSeq0
- } = State,
- % Assert that we don't have other messages from this
- % worker when the total_and_offset message arrives.
- 0 = fabric_dict:lookup_element(Worker, Counters0),
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- Total =
- if
- Tot == null -> null;
- true -> Total0 + Tot
- end,
- Offset =
- if
- Off == null -> null;
- true -> Offset0 + Off
- end,
- UpdateSeq =
- case {UpdateSeq0, Seq} of
- {nil, _} -> nil;
- {_, null} -> null;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- total_rows = Total,
- update_seq = UpdateSeq,
- offset = Offset
- }};
- false ->
- FinalOffset =
- case Offset of
- null -> null;
- _ -> erlang:min(Total, Offset + State#collector.skip)
- end,
- Meta =
- [{total, Total}, {offset, FinalOffset}] ++
- case UpdateSeq of
- nil ->
- [];
- null ->
- [{update_seq, null}];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
- end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc,
- update_seq = UpdateSeq0
- }}
- end;
-handle_message(#view_row{} = Row, {Worker, From}, State) ->
- #collector{query_args = Args, counters = Counters0, rows = Rows0} = State,
- Dir = Args#mrargs.direction,
- Rows = merge_row(Dir, Row#view_row{worker = {Worker, From}}, Rows0),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows = Rows, counters = Counters1},
- fabric_view:maybe_send_row(State1);
-handle_message(complete, Worker, State) ->
- Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
- fabric_view:maybe_send_row(State#collector{counters = Counters});
-handle_message({execution_stats, _} = Msg, {_, From}, St) ->
- #collector{callback = Callback, user_acc = AccIn} = St,
- {Go, Acc} = Callback(Msg, AccIn),
- rexi:stream_ack(From),
- {Go, St#collector{user_acc = Acc}}.
-
-merge_row(fwd, Row, Rows) ->
- lists:keymerge(#view_row.id, [Row], Rows);
-merge_row(rev, Row, Rows) ->
- lists:rkeymerge(#view_row.id, [Row], Rows).
-
-all_docs_concurrency() ->
- Value = config:get("fabric", "all_docs_concurrency", "10"),
- try
- list_to_integer(Value)
- catch
- _:_ ->
- 10
- end.
-
-doc_receive_loop(Keys, Pids, SpawnFun, MaxJobs, Callback, AccIn) ->
- case {Keys, queue:len(Pids)} of
- {[], 0} ->
- {ok, AccIn};
- {[K | RKeys], Len} when Len < MaxJobs ->
- Pids1 = queue:in(SpawnFun(K), Pids),
- doc_receive_loop(RKeys, Pids1, SpawnFun, MaxJobs, Callback, AccIn);
- _ ->
- {{value, {Pid, Ref}}, RestPids} = queue:out(Pids),
- Timeout = fabric_util:all_docs_timeout(),
- receive
- {'DOWN', Ref, process, Pid, Row} ->
- case Row of
- #view_row{} ->
- case Callback(fabric_view:transform_row(Row), AccIn) of
- {ok, Acc} ->
- doc_receive_loop(
- Keys, RestPids, SpawnFun, MaxJobs, Callback, Acc
- );
- {stop, Acc} ->
- cancel_read_pids(RestPids),
- {ok, Acc}
- end;
- Error ->
- cancel_read_pids(RestPids),
- Callback({error, Error}, AccIn)
- end
- after Timeout ->
- timeout
- end
- end.
-
-open_doc(DbName, Options, Id, IncludeDocs) ->
- try open_doc_int(DbName, Options, Id, IncludeDocs) of
- #view_row{} = Row ->
- exit(Row)
- catch ?STACKTRACE(Type, Reason, Stack)
- couch_log:error("_all_docs open error: ~s ~s :: ~w ~w", [
- DbName, Id, {Type, Reason}, Stack]),
- exit({Id, Reason})
- end.
-
-open_doc_int(DbName, Options, Id, IncludeDocs) ->
- Row =
- case fabric:open_doc(DbName, Id, [deleted | Options]) of
- {not_found, missing} ->
- Doc = undefined,
- #view_row{key = Id};
- {ok, #doc{deleted = true, revs = Revs}} ->
- Doc = null,
- {RevPos, [RevId | _]} = Revs,
- Value = {[{rev, couch_doc:rev_to_str({RevPos, RevId})}, {deleted, true}]},
- #view_row{key = Id, id = Id, value = Value};
- {ok, #doc{revs = Revs} = Doc0} ->
- Doc = couch_doc:to_json_obj(Doc0, Options),
- {RevPos, [RevId | _]} = Revs,
- Value = {[{rev, couch_doc:rev_to_str({RevPos, RevId})}]},
- #view_row{key = Id, id = Id, value = Value}
- end,
- if
- IncludeDocs -> Row#view_row{doc = Doc};
- true -> Row
- end.
-
-cancel_read_pids(Pids) ->
- case queue:out(Pids) of
- {{value, {Pid, Ref}}, RestPids} ->
- exit(Pid, kill),
- erlang:demonitor(Ref, [flush]),
- cancel_read_pids(RestPids);
- {empty, _} ->
- ok
- end.
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
deleted file mode 100644
index b561da151..000000000
--- a/src/fabric/src/fabric_view_changes.erl
+++ /dev/null
@@ -1,1042 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_changes).
-
--export([go/5, pack_seqs/1, unpack_seqs/2]).
--export([increment_changes_epoch/0]).
-
-%% exported for upgrade purposes.
--export([keep_sending_changes/8]).
-
-%% exported for testing and remsh debugging
--export([decode_seq/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--import(fabric_db_update_listener, [wait_db_updated/1]).
-
-go(DbName, Feed, Options, Callback, Acc0) when
- Feed == "continuous" orelse
- Feed == "longpoll" orelse Feed == "eventsource"
-->
- Args = make_changes_args(Options),
- Since = get_start_seq(DbName, Args),
- case validate_start_seq(DbName, Since) of
- ok ->
- {ok, Acc} = Callback(start, Acc0),
- {Timeout, _} = couch_changes:get_changes_timeout(Args, Callback),
- Ref = make_ref(),
- Parent = self(),
- UpdateListener = {
- spawn_link(
- fabric_db_update_listener,
- go,
- [Parent, Ref, DbName, Timeout]
- ),
- Ref
- },
- put(changes_epoch, get_changes_epoch()),
- try
- keep_sending_changes(
- DbName,
- Args,
- Callback,
- Since,
- Acc,
- Timeout,
- UpdateListener,
- os:timestamp()
- )
- after
- fabric_db_update_listener:stop(UpdateListener)
- end;
- Error ->
- Callback(Error, Acc0)
- end;
-go(DbName, "normal", Options, Callback, Acc0) ->
- Args = make_changes_args(Options),
- Since = get_start_seq(DbName, Args),
- case validate_start_seq(DbName, Since) of
- ok ->
- {ok, Acc} = Callback(start, Acc0),
- {ok, Collector} = send_changes(
- DbName,
- Args,
- Callback,
- Since,
- Acc,
- 5000
- ),
- #collector{counters = Seqs, user_acc = AccOut, offset = Offset} = Collector,
- Callback({stop, pack_seqs(Seqs), pending_count(Offset)}, AccOut);
- Error ->
- Callback(Error, Acc0)
- end.
-
-keep_sending_changes(DbName, Args, Callback, Seqs, AccIn, Timeout, UpListen, T0) ->
- #changes_args{limit = Limit, feed = Feed, heartbeat = Heartbeat} = Args,
- {ok, Collector} = send_changes(DbName, Args, Callback, Seqs, AccIn, Timeout),
- #collector{
- limit = Limit2,
- counters = NewSeqs,
- offset = Offset,
- user_acc = AccOut0
- } = Collector,
- LastSeq = pack_seqs(NewSeqs),
- MaintenanceMode = config:get("couchdb", "maintenance_mode"),
- NewEpoch = get_changes_epoch() > erlang:get(changes_epoch),
- if
- Limit > Limit2, Feed == "longpoll";
- MaintenanceMode == "true";
- MaintenanceMode == "nolb";
- NewEpoch ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut0);
- true ->
- {ok, AccOut} = Callback(waiting_for_updates, AccOut0),
- WaitForUpdate = wait_db_updated(UpListen),
- AccumulatedTime = timer:now_diff(os:timestamp(), T0) div 1000,
- Max =
- case config:get("fabric", "changes_duration") of
- undefined ->
- infinity;
- MaxStr ->
- list_to_integer(MaxStr)
- end,
- case {Heartbeat, AccumulatedTime > Max, WaitForUpdate} of
- {_, _, changes_feed_died} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- {undefined, _, timeout} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- {_, true, timeout} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- _ ->
- {ok, AccTimeout} = Callback(timeout, AccOut),
- ?MODULE:keep_sending_changes(
- DbName,
- Args#changes_args{limit = Limit2},
- Callback,
- LastSeq,
- AccTimeout,
- Timeout,
- UpListen,
- T0
- )
- end
- end.
-
-send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
- LiveNodes = [node() | nodes()],
- AllLiveShards = mem3:live_shards(DbName, LiveNodes),
- Seqs0 = unpack_seqs(PackedSeqs, DbName),
- {WSeqs0, Dead, Reps} = find_replacements(Seqs0, AllLiveShards),
- % Start workers which didn't need replacements
- WSeqs = lists:map(
- fun({#shard{name = Name, node = N} = S, Seq}) ->
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, Seq]}),
- {S#shard{ref = Ref}, Seq}
- end,
- WSeqs0
- ),
- % For some dead workers, see if they are a result of split shards. In that
- % case make a replacement argument so that local rexi workers can calculate
- % (hopefully) a > 0 update sequence.
- {WSplitSeqs0, Reps1} = find_split_shard_replacements(Dead, Reps),
- WSplitSeqs = lists:map(
- fun({#shard{name = Name, node = N} = S, Seq}) ->
- Arg = make_replacement_arg(N, Seq),
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, Arg]}),
- {S#shard{ref = Ref}, Seq}
- end,
- WSplitSeqs0
- ),
- % For ranges that were not split, look for a replacement on a different node
- WReps = lists:map(
- fun(#shard{name = Name, node = NewNode, range = R} = S) ->
- Arg = find_replacement_sequence(Dead, R),
- case Arg =/= 0 of
- true -> ok;
- false -> couch_log:warning("~p reset seq for ~p", [?MODULE, S])
- end,
- Ref = rexi:cast(NewNode, {fabric_rpc, changes, [Name, ChangesArgs, Arg]}),
- {S#shard{ref = Ref}, 0}
- end,
- Reps1
- ),
- Seqs = WSeqs ++ WSplitSeqs ++ WReps,
- {Workers0, _} = lists:unzip(Seqs),
- Repls = fabric_ring:get_shard_replacements(DbName, Workers0),
- StartFun = fun(#shard{name = Name, node = N, range = R0} = Shard) ->
- SeqArg = find_replacement_sequence(Seqs, R0),
- case SeqArg =/= 0 of
- true -> ok;
- false -> couch_log:warning("~p StartFun reset seq for ~p", [?MODULE, Shard])
- end,
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, SeqArg]}),
- Shard#shard{ref = Ref}
- end,
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of
- {ok, Workers} ->
- try
- LiveSeqs = lists:map(
- fun(W) ->
- case lists:keyfind(W, 1, Seqs) of
- {W, Seq} -> {W, Seq};
- _ -> {W, 0}
- end
- end,
- Workers
- ),
- send_changes(
- DbName,
- Workers,
- LiveSeqs,
- ChangesArgs,
- Callback,
- AccIn,
- Timeout
- )
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers,
- waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "changes"
- ),
- throw({error, timeout});
- {error, Reason} ->
- throw({error, Reason});
- Else ->
- throw({error, Else})
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-send_changes(DbName, Workers, Seqs, ChangesArgs, Callback, AccIn, Timeout) ->
- State = #collector{
- db_name = DbName,
- query_args = ChangesArgs,
- callback = Callback,
- counters = orddict:from_list(Seqs),
- user_acc = AccIn,
- limit = ChangesArgs#changes_args.limit,
- offset = fabric_dict:init(Workers, null),
- % store sequence positions instead
- rows = Seqs
- },
- %% TODO: errors need to be handled here
- receive_results(Workers, State, Timeout, Callback).
-
-receive_results(Workers, State, Timeout, Callback) ->
- case
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- State,
- Timeout,
- infinity
- )
- of
- {timeout, NewState0} ->
- {ok, AccOut} = Callback(timeout, NewState0#collector.user_acc),
- NewState = NewState0#collector{user_acc = AccOut},
- receive_results(Workers, NewState, Timeout, Callback);
- {_, NewState} ->
- {ok, NewState}
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-% Temporary upgrade clause - Case 24236
-handle_message({complete, Key}, Worker, State) when is_tuple(Key) ->
- handle_message({complete, [{seq, Key}, {pending, 0}]}, Worker, State);
-handle_message({change, Props}, {Worker, _}, #collector{limit = 0} = State) ->
- O0 = State#collector.offset,
- O1 =
- case fabric_dict:lookup_element(Worker, O0) of
- null ->
- % Use Pending+1 because we're ignoring this row in the response
- Pending = couch_util:get_value(pending, Props, 0),
- fabric_dict:store(Worker, Pending + 1, O0);
- _ ->
- O0
- end,
- maybe_stop(State#collector{offset = O1});
-handle_message({complete, Props}, Worker, #collector{limit = 0} = State) ->
- O0 = State#collector.offset,
- O1 =
- case fabric_dict:lookup_element(Worker, O0) of
- null ->
- fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0);
- _ ->
- O0
- end,
- maybe_stop(State#collector{offset = O1});
-handle_message({no_pass, Props}, {Worker, From}, #collector{limit = 0} = State) when
- is_list(Props)
-->
- #collector{counters = S0, offset = O0} = State,
- O1 =
- case fabric_dict:lookup_element(Worker, O0) of
- null ->
- fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0);
- _ ->
- O0
- end,
- S1 = fabric_dict:store(Worker, couch_util:get_value(seq, Props), S0),
- rexi:stream_ack(From),
- maybe_stop(State#collector{counters = S1, offset = O1});
-handle_message(#change{} = Row, {Worker, From}, St) ->
- Change =
- {change, [
- {seq, Row#change.key},
- {id, Row#change.id},
- {changes, Row#change.value},
- {deleted, Row#change.deleted},
- {doc, Row#change.doc}
- ]},
- handle_message(Change, {Worker, From}, St);
-handle_message({change, Props}, {Worker, From}, St) ->
- #collector{
- callback = Callback,
- counters = S0,
- offset = O0,
- limit = Limit,
- user_acc = AccIn
- } = St,
- true = fabric_dict:is_key(Worker, S0),
- S1 = fabric_dict:store(Worker, couch_util:get_value(seq, Props), S0),
- O1 = fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0),
- % Temporary hack for FB 23637
- Interval = erlang:get(changes_seq_interval),
- if
- (Interval == undefined) orelse (Limit rem Interval == 0) ->
- Props2 = lists:keyreplace(seq, 1, Props, {seq, pack_seqs(S1)});
- true ->
- Props2 = lists:keyreplace(seq, 1, Props, {seq, null})
- end,
- {Go, Acc} = Callback(changes_row(Props2), AccIn),
- rexi:stream_ack(From),
- {Go, St#collector{counters = S1, offset = O1, limit = Limit - 1, user_acc = Acc}};
-%% upgrade clause
-handle_message({no_pass, Seq}, From, St) when is_integer(Seq) ->
- handle_message({no_pass, [{seq, Seq}]}, From, St);
-handle_message({no_pass, Props}, {Worker, From}, St) ->
- Seq = couch_util:get_value(seq, Props),
- #collector{counters = S0} = St,
- true = fabric_dict:is_key(Worker, S0),
- S1 = fabric_dict:store(Worker, Seq, S0),
- rexi:stream_ack(From),
- {ok, St#collector{counters = S1}};
-handle_message({complete, Props}, Worker, State) ->
- Key = couch_util:get_value(seq, Props),
- #collector{
- counters = S0,
- offset = O0,
- % override
- total_rows = Completed
- } = State,
- true = fabric_dict:is_key(Worker, S0),
- S1 = fabric_dict:store(Worker, Key, S0),
- O1 = fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0),
- NewState = State#collector{counters = S1, offset = O1, total_rows = Completed + 1},
- % We're relying on S1 having exactly the number of workers that
- % are participating in this response. With the new stream_start
- % that's a bit more obvious but historically it wasn't quite
- % so clear. The Completed variable is just a hacky override
- % of the total_rows field in the #collector{} record.
- NumWorkers = fabric_dict:size(S1),
- Go =
- case NumWorkers =:= (Completed + 1) of
- true -> stop;
- false -> ok
- end,
- {Go, NewState}.
-
-make_replacement_arg(Node, {Seq, Uuid}) ->
- {replace, Node, Uuid, Seq};
-make_replacement_arg(_Node, {Seq, Uuid, EpochNode}) ->
- % The replacement should properly be computed against the node that owned
- % the sequence when it was written to disk (the EpochNode) rather than the
- % node we're trying to replace.
- {replace, EpochNode, Uuid, Seq};
-make_replacement_arg(_, _) ->
- 0.
-
-maybe_stop(#collector{offset = Offset} = State) ->
- case fabric_dict:any(null, Offset) of
- false ->
- {stop, State};
- true ->
- % Wait till we've heard from everyone to compute pending count
- {ok, State}
- end.
-
-make_changes_args(#changes_args{style = Style, filter_fun = undefined} = Args) ->
- Args#changes_args{filter_fun = {default, Style}};
-make_changes_args(Args) ->
- Args.
-
-get_start_seq(DbName, #changes_args{dir = Dir, since = Since}) when
- Dir == rev; Since == "now"
-->
- {ok, Info} = fabric:get_db_info(DbName),
- couch_util:get_value(update_seq, Info);
-get_start_seq(_DbName, #changes_args{dir = fwd, since = Since}) ->
- Since.
-
-pending_count(Dict) ->
- fabric_dict:fold(
- fun
- (_Worker, Count, Acc) when is_integer(Count), is_integer(Acc) ->
- Count + Acc;
- (_Worker, _Count, _Acc) ->
- null
- end,
- 0,
- Dict
- ).
-
-pack_seqs(Workers) ->
- SeqList = [{N, R, S} || {#shard{node = N, range = R}, S} <- Workers],
- SeqSum = lists:sum([seq(S) || {_, _, S} <- SeqList]),
- Opaque = couch_util:encodeBase64Url(term_to_binary(SeqList, [compressed])),
- ?l2b([integer_to_list(SeqSum), $-, Opaque]).
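-
-% Illustrative shape of the result (opaque tail abbreviated): for two shard
-% workers at sequences 5 and 7 the packed value is of the form
-% <<"12-", Opaque/binary>>, where 12 is the sum of the per-shard sequence
-% numbers and Opaque is the base64url-encoded, compressed term_to_binary/2 of
-% the [{Node, Range, Seq}] list; see the BigCouch-era strings in the tests
-% below for full examples.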
-
-seq({Seq, _Uuid, _Node}) -> Seq;
-seq({Seq, _Uuid}) -> Seq;
-seq(Seq) -> Seq.
-
-unpack_seq_regex_match(Packed) ->
- NewPattern = "^\\[[0-9]+\s*,\s*\"(?<opaque>.*)\"\\]$",
- OldPattern = "^\"?([0-9]+-)?(?<opaque>.*?)\"?$",
- Options = [{capture, [opaque], binary}],
- case re:run(Packed, NewPattern, Options) of
- {match, Match} ->
- Match;
- nomatch ->
- {match, Match} = re:run(Packed, OldPattern, Options),
- Match
- end.
-
-unpack_seq_decode_term(Opaque) ->
- binary_to_term(couch_util:decodeBase64Url(Opaque)).
-
-% This is used for testing and for remsh debugging
-%
-% Return the unpacked list of sequences from a raw update seq string. The input
-% string is expected to include the N- prefix. The result looks like:
-% [{Node, Range, {SeqNum, Uuid, EpochNode}}, ...]
-%
--spec decode_seq(binary()) -> [tuple()].
-decode_seq(Packed) ->
- Opaque = unpack_seq_regex_match(Packed),
- unpack_seq_decode_term(Opaque).
-
-% Returns fabric_dict with {Shard, Seq} entries
-%
--spec unpack_seqs(pos_integer() | list() | binary(), binary()) ->
- orddict:orddict().
-unpack_seqs(0, DbName) ->
- fabric_dict:init(mem3:shards(DbName), 0);
-unpack_seqs("0", DbName) ->
- fabric_dict:init(mem3:shards(DbName), 0);
-% deprecated
-unpack_seqs([_SeqNum, Opaque], DbName) ->
- do_unpack_seqs(Opaque, DbName);
-unpack_seqs(Packed, DbName) ->
- Opaque = unpack_seq_regex_match(Packed),
- do_unpack_seqs(Opaque, DbName).
-
-do_unpack_seqs(Opaque, DbName) ->
- % A preventative fix for FB 13533 to remove duplicate shards.
- % This just picks each unique shard and keeps the largest seq
- % value recorded.
- Decoded = unpack_seq_decode_term(Opaque),
- DedupDict = lists:foldl(
- fun({Node, [A, B], Seq}, Acc) ->
- dict:append({Node, [A, B]}, Seq, Acc)
- end,
- dict:new(),
- Decoded
- ),
- Deduped = lists:map(
- fun({{Node, [A, B]}, SeqList}) ->
- {Node, [A, B], lists:max(SeqList)}
- end,
- dict:to_list(DedupDict)
- ),
-
- % Create a fabric_dict of {Shard, Seq} entries
- % TODO relies on internal structure of fabric_dict as keylist
- Unpacked = lists:flatmap(
- fun({Node, [A, B], Seq}) ->
- case mem3:get_shard(DbName, Node, [A, B]) of
- {ok, Shard} ->
- [{Shard, Seq}];
- {error, not_found} ->
- []
- end
- end,
- Deduped
- ),
-
- % This handles the case where the ring in the received unpacked sequence
- % is not complete; in that case it tries to fill in the
- % missing ranges with shards from the shard map
- case fabric_ring:is_progress_possible(Unpacked) of
- true ->
- Unpacked;
- false ->
- Uuids = get_db_uuid_shards(DbName),
- PotentialWorkers = lists:map(
- fun({Node, [A, B], Seq}) ->
- case mem3:get_shard(DbName, Node, [A, B]) of
- {ok, Shard} ->
- {Shard, Seq};
- {error, not_found} ->
- Shard = replace_moved_shard(Node, [A, B], Seq, Uuids),
- {Shard, Seq}
- end
- end,
- Deduped
- ),
- Shards = mem3:shards(DbName),
- {Unpacked1, Dead, Reps} = find_replacements(PotentialWorkers, Shards),
- {Splits, Reps1} = find_split_shard_replacements(Dead, Reps),
- RepSeqs = lists:map(
- fun(#shard{} = S) ->
- {S, get_old_seq(S, Deduped)}
- end,
- Reps1
- ),
- Unpacked1 ++ Splits ++ RepSeqs
- end.
-
-get_old_seq(#shard{range = R} = Shard, SinceSeqs) ->
- case lists:keyfind(R, 2, SinceSeqs) of
- {Node, R, Seq} when is_number(Seq) ->
- % Unfortunately we don't have access to the db
- % uuid so we can't set a replacement here.
- couch_log:warning(
- "~p get_old_seq missing uuid "
- "node: ~p, range: ~p, seq: ~p",
- [?MODULE, Node, R, Seq]
- ),
- 0;
- {Node, R, {Seq, Uuid}} ->
- % This update seq is using the old format that
- % didn't include the node. This information is
- % important for replacement.
- {Seq, Uuid, Node};
- {_Node, R, {Seq, Uuid, EpochNode}} ->
- % This is the newest sequence format that we
- % can use for replacement.
- {Seq, Uuid, EpochNode};
- Error ->
- couch_log:warning(
- "~p get_old_seq error: ~p, shard: ~p, seqs: ~p",
- [?MODULE, Error, Shard, SinceSeqs]
- ),
- 0
- end.
-
-get_db_uuid_shards(DbName) ->
- % Need to use an isolated process as we are performing a fabric call from
- % another fabric call and there is a good chance we'd pollute the mailbox
- % with returned messages
- Timeout = fabric_util:request_timeout(),
- IsolatedFun = fun() -> fabric:db_uuids(DbName) end,
- try fabric_util:isolate(IsolatedFun, Timeout) of
- {ok, Uuids} ->
- % Trim uuids so we match exactly based on the currently configured
- % uuid_prefix_len. The assumption is that we are getting an older
- % sequence from the same cluster and we didn't tweak that
- % relatively obscure config option in the meantime.
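- % For illustration (the uuid value is hypothetical): with a prefix length
- % of 7, a full shard uuid such as <<"3b5f0a81c2...">> becomes the map key
- % <<"3b5f0a8">>.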
- PrefixLen = fabric_util:get_uuid_prefix_len(),
- maps:fold(
- fun(Uuid, Shard, Acc) ->
- TrimmedUuid = binary:part(Uuid, {0, PrefixLen}),
- Acc#{TrimmedUuid => Shard}
- end,
- #{},
- Uuids
- );
- {error, Error} ->
- % Since we are doing a best-effort approach to match moved shards,
- % tolerate and log errors. This should also handle cases when the
- % cluster is partially upgraded, as some nodes will not have the
- % newer get_uuid fabric_rpc handler.
- ErrMsg = "~p : could not get db_uuids for Db:~p Error:~p",
- couch_log:error(ErrMsg, [?MODULE, DbName, Error]),
- #{}
- catch
- _Tag:Error ->
- ErrMsg = "~p : could not get db_uuids for Db:~p Error:~p",
- couch_log:error(ErrMsg, [?MODULE, DbName, Error]),
- #{}
- end.
-
-%% Determine if the missing shard moved to a new node. Do that by matching the
-%% uuids from the current shard map. If we cannot find a moved shard, we return
-%% the original node and range as a shard and hope for the best.
-replace_moved_shard(Node, Range, Seq, #{} = _UuidShards) when is_number(Seq) ->
- % Cannot figure out shard moves without uuid matching
- #shard{node = Node, range = Range};
-replace_moved_shard(Node, Range, {Seq, Uuid}, #{} = UuidShards) ->
- % Compatibility case for an old seq format which didn't have epoch nodes
- replace_moved_shard(Node, Range, {Seq, Uuid, Node}, UuidShards);
-replace_moved_shard(Node, Range, {_Seq, Uuid, _EpochNode}, #{} = UuidShards) ->
- case UuidShards of
- #{Uuid := #shard{range = Range} = Shard} ->
- % Found a moved shard by matching both the uuid and the range
- Shard;
- #{} ->
- % Did not find a moved shard, use the original node
- #shard{node = Node, range = Range}
- end.
-
-changes_row(Props0) ->
- Props1 =
- case couch_util:get_value(deleted, Props0) of
- true ->
- Props0;
- _ ->
- lists:keydelete(deleted, 1, Props0)
- end,
- Allowed = [seq, id, changes, deleted, doc, error],
- Props2 = lists:filter(fun({K, _V}) -> lists:member(K, Allowed) end, Props1),
- {change, {Props2}}.
-
-find_replacements(Workers, AllShards) ->
- % Build map [B, E] => [Worker1, Worker2, ...] for all workers
- WrkMap = lists:foldl(
- fun({#shard{range = [B, E]}, _} = W, Acc) ->
- maps:update_with({B, E}, fun(Ws) -> [W | Ws] end, [W], Acc)
- end,
- #{},
- fabric_dict:to_list(Workers)
- ),
-
- % Build map [B, E] => [Shard1, Shard2, ...] for all shards
- AllMap = lists:foldl(
- fun(#shard{range = [B, E]} = S, Acc) ->
- maps:update_with({B, E}, fun(Ss) -> [S | Ss] end, [S], Acc)
- end,
- #{},
- AllShards
- ),
-
- % Custom sort function will prioritize workers over other shards.
- % The idea is to avoid killing workers unnecessarily
- SortFun = fun
- (R1 = {B, E1}, R2 = {B, E2}) ->
- case {maps:is_key(R1, WrkMap), maps:is_key(R2, WrkMap)} of
- {true, true} ->
- % Both are workers, larger interval wins
- E1 >= E2;
- {true, false} ->
- % First element is a worker range, it wins
- true;
- {false, true} ->
- % Second element is a worker range, it wins
- false;
- {false, false} ->
- % Neither one is a worker interval, pick larger one
- E1 >= E2
- end;
- ({B1, _}, {B2, _}) ->
- B1 =< B2
- end,
- Ring = mem3_util:get_ring(maps:keys(AllMap), SortFun),
-
- % Keep only workers in the ring and from one of the available nodes
- Keep = fun(#shard{range = [B, E], node = N}) ->
- lists:member({B, E}, Ring) andalso
- lists:keyfind(
- N,
- #shard.node,
- maps:get({B, E}, AllMap)
- ) =/= false
- end,
- Workers1 = fabric_dict:filter(fun(S, _) -> Keep(S) end, Workers),
- Removed = fabric_dict:filter(fun(S, _) -> not Keep(S) end, Workers),
-
- {Rep, _} = lists:foldl(
- fun(R, {RepAcc, AllMapAcc}) ->
- case maps:is_key(R, WrkMap) of
- true ->
- % It's a worker and in the map of available shards. Make sure
- % to keep it only if there is a range available on that node
- % (reuse the Keep/1 predicate from above)
- WorkersInRange = maps:get(R, WrkMap),
- case lists:any(fun({S, _}) -> Keep(S) end, WorkersInRange) of
- true ->
- {RepAcc, AllMapAcc};
- false ->
- [Shard | Rest] = maps:get(R, AllMapAcc),
- {[Shard | RepAcc], AllMapAcc#{R := Rest}}
- end;
- false ->
- % No worker for this range. Replace from available shards
- [Shard | Rest] = maps:get(R, AllMapAcc),
- {[Shard | RepAcc], AllMapAcc#{R := Rest}}
- end
- end,
- {[], AllMap},
- Ring
- ),
-
- % Return the list of workers that are part of ring, list of removed workers
- % and a list of replacement shards that could be used to make sure the ring
- % completes.
- {Workers1, Removed, Rep}.
-
-% From the list of dead workers, determine if any are the result of a split
-% shard. In that case there may be a way to avoid rewinding the changes feed
-% back to 0. Returns {NewWorkers, Available} where NewWorkers is the list of
-% viable workers and Available is the list of still-unused input Shards.
-find_split_shard_replacements(DeadWorkers, Shards) ->
- Acc0 = {[], Shards},
- AccF = fabric_dict:fold(
- fun(#shard{node = WN, range = R}, Seq, Acc) ->
- [B, E] = R,
- {SplitWorkers, Available} = Acc,
- ShardsOnSameNode = [S || #shard{node = N} = S <- Available, N =:= WN],
- SplitShards = mem3_util:non_overlapping_shards(ShardsOnSameNode, B, E),
- RepCount = length(SplitShards),
- NewWorkers = [{S, make_split_seq(Seq, RepCount)} || S <- SplitShards],
- NewAvailable = [S || S <- Available, not lists:member(S, SplitShards)],
- {NewWorkers ++ SplitWorkers, NewAvailable}
- end,
- Acc0,
- DeadWorkers
- ),
- {Workers, Available} = AccF,
- {fabric_dict:from_list(Workers), Available}.
-
-find_replacement_sequence(OriginalSeqs, R0) ->
- %% Find the original shard copy in the Seqs array
- case lists:dropwhile(fun({S, _}) -> S#shard.range =/= R0 end, OriginalSeqs) of
- [{#shard{}, {replace, _, _, _}} | _] ->
- % Don't attempt to replace a replacement
- 0;
- [{#shard{node = OldNode}, OldSeq} | _] ->
- make_replacement_arg(OldNode, OldSeq);
- _ ->
- % TODO we don't currently attempt to replace a shard with split
- % replicas of that range on other nodes, so it's possible to end
- % up with an empty list here.
- 0
- end.
-
-make_split_seq({Num, Uuid, Node}, RepCount) when RepCount > 1 ->
- {Num, {split, Uuid}, Node};
-make_split_seq(Seq, _) ->
- Seq.
-
-validate_start_seq(_DbName, "now") ->
- ok;
-validate_start_seq(_DbName, 0) ->
- ok;
-validate_start_seq(_DbName, "0") ->
- ok;
-validate_start_seq(_DbName, Seq) ->
- try
- case Seq of
- [_SeqNum, Opaque] ->
- unpack_seq_decode_term(Opaque);
- Seq ->
- Opaque = unpack_seq_regex_match(Seq),
- unpack_seq_decode_term(Opaque)
- end,
- ok
- catch
- _:_ ->
- Reason = <<"Malformed sequence supplied in 'since' parameter.">>,
- {error, {bad_request, Reason}}
- end.
-
-get_changes_epoch() ->
- case application:get_env(fabric, changes_epoch) of
- undefined ->
- increment_changes_epoch(),
- get_changes_epoch();
- {ok, Epoch} ->
- Epoch
- end.
-
-increment_changes_epoch() ->
- application:set_env(fabric, changes_epoch, os:timestamp()).
-
-unpack_seq_setup() ->
- meck:new(mem3),
- meck:new(fabric_view),
- meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
- meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
- ok.
-
-unpack_seqs_test_() ->
- {
- setup,
- fun unpack_seq_setup/0,
- fun(_) -> meck:unload() end,
- [
- t_unpack_seqs()
- ]
- }.
-
-t_unpack_seqs() ->
- ?_test(begin
- % BigCouch 0.3 style.
- assert_shards(
- "23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"
- ),
-
- % BigCouch 0.4 style.
- assert_shards([
- 23423,
- <<
- "g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"
- >>
- ]),
-
- % BigCouch 0.4 style (as string).
- assert_shards(
- "[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"
- ),
- assert_shards(
- "[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"
- ),
- assert_shards(
- "[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"
- ),
- assert_shards(
- "[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"
- ),
-
- % with internal hyphen
- assert_shards(
- "651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
- "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
- "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"
- ),
- assert_shards([
- 651,
- "g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
- "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
- "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"
- ]),
-
- % CouchDB 1.2 style
- assert_shards(
- "\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\""
- )
- end).
-
-assert_shards(Packed) ->
- ?assertMatch([{#shard{}, _} | _], unpack_seqs(Packed, <<"foo">>)).
-
-find_replacements_test() ->
- % None of the workers are in the live list of shards, but there is a
- % replacement on n3 for the full range. It should get picked instead of
- % the two smaller ones on n2.
- Workers1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
- AllShards1 = [
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10),
- mk_shard("n3", 0, ?RING_END)
- ],
- {WorkersRes1, Dead1, Reps1} = find_replacements(Workers1, AllShards1),
- ?assertEqual([], WorkersRes1),
- ?assertEqual(Workers1, Dead1),
- ?assertEqual([mk_shard("n3", 0, ?RING_END)], Reps1),
-
- % None of the workers are in the live list of shards and there is a
- % split replacement from n2 (range [0, 10] replaced with [0, 4], [5, 10])
- Workers2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
- AllShards2 = [
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10)
- ],
- {WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
- ?assertEqual([], WorkersRes2),
- ?assertEqual(Workers2, Dead2),
- ?assertEqual(
- [
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10)
- ],
- lists:sort(Reps2)
- ),
-
- % One worker is available and one needs to be replaced. Replacement will be
- % from two split shards
- Workers3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
- AllShards3 = [
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10),
- mk_shard("n2", 11, ?RING_END)
- ],
- {WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
- ?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
- ?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
- ?assertEqual(
- [
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10)
- ],
- lists:sort(Reps3)
- ),
-
- % All workers are available. Make sure they are not killed even if there is
- % a longer (single) shard to replace them.
- Workers4 = mk_workers([{"n1", 0, 10}, {"n1", 11, ?RING_END}]),
- AllShards4 = [
- mk_shard("n1", 0, 10),
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10),
- mk_shard("n3", 0, ?RING_END)
- ],
- {WorkersRes4, Dead4, Reps4} = find_replacements(Workers4, AllShards4),
- ?assertEqual(Workers4, WorkersRes4),
- ?assertEqual([], Dead4),
- ?assertEqual([], Reps4).
-
-mk_workers(NodesRanges) ->
- mk_workers(NodesRanges, nil).
-
-mk_workers(NodesRanges, Val) ->
- orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
-
-mk_shard(Name, B, E) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = [B, E]}.
-
-find_split_shard_replacements_test() ->
- % One worker can be replaced and one can't
- Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
- Shards1 = [
- mk_shard("n1", 0, 4),
- mk_shard("n1", 5, 10),
- mk_shard("n3", 11, ?RING_END)
- ],
- {Workers1, ShardsLeft1} = find_split_shard_replacements(Dead1, Shards1),
- ?assertEqual(mk_workers([{"n1", 0, 4}, {"n1", 5, 10}], 42), Workers1),
- ?assertEqual([mk_shard("n3", 11, ?RING_END)], ShardsLeft1),
-
- % All workers can be replaced - one by 1 shard, another by 3 smaller shards
- Dead2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
- Shards2 = [
- mk_shard("n1", 0, 10),
- mk_shard("n2", 11, 12),
- mk_shard("n2", 13, 14),
- mk_shard("n2", 15, ?RING_END)
- ],
- {Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
- ?assertEqual(
- mk_workers(
- [
- {"n1", 0, 10},
- {"n2", 11, 12},
- {"n2", 13, 14},
- {"n2", 15, ?RING_END}
- ],
- 42
- ),
- Workers2
- ),
- ?assertEqual([], ShardsLeft2),
-
- % No workers can be replaced. Ranges match but they are on different nodes
- Dead3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
- Shards3 = [
- mk_shard("n2", 0, 10),
- mk_shard("n3", 11, ?RING_END)
- ],
- {Workers3, ShardsLeft3} = find_split_shard_replacements(Dead3, Shards3),
- ?assertEqual([], Workers3),
- ?assertEqual(Shards3, ShardsLeft3).
-
-find_replacement_sequence_test() ->
- Shards = [{"n2", 0, 10}, {"n3", 0, 5}],
- Uuid = <<"abc1234">>,
- Epoch = 'n1',
-
- % Not safe to use a plain integer sequence number
- Dead1 = mk_workers(Shards, 42),
- ?assertEqual(0, find_replacement_sequence(Dead1, [0, 10])),
- ?assertEqual(0, find_replacement_sequence(Dead1, [0, 5])),
-
- % {Seq, Uuid} should work
- Dead2 = mk_workers(Shards, {43, Uuid}),
- ?assertEqual(
- {replace, 'n2', Uuid, 43},
- find_replacement_sequence(Dead2, [0, 10])
- ),
- ?assertEqual(
- {replace, 'n3', Uuid, 43},
- find_replacement_sequence(Dead2, [0, 5])
- ),
-
- % Can't find the range at all
- ?assertEqual(0, find_replacement_sequence(Dead2, [0, 4])),
-
- % {Seq, Uuid, EpochNode} should work
- Dead3 = mk_workers(Shards, {44, Uuid, Epoch}),
- ?assertEqual(
- {replace, 'n1', Uuid, 44},
- find_replacement_sequence(Dead3, [0, 10])
- ),
- ?assertEqual(
- {replace, 'n1', Uuid, 44},
- find_replacement_sequence(Dead3, [0, 5])
- ),
-
- % Cannot replace a replacement
- Dead4 = mk_workers(Shards, {replace, 'n1', Uuid, 45}),
- ?assertEqual(0, find_replacement_sequence(Dead4, [0, 10])),
- ?assertEqual(0, find_replacement_sequence(Dead4, [0, 5])).
diff --git a/src/fabric/src/fabric_view_map.erl b/src/fabric/src/fabric_view_map.erl
deleted file mode 100644
index 104086d67..000000000
--- a/src/fabric/src/fabric_view_map.erl
+++ /dev/null
@@ -1,292 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_map).
-
--export([go/8]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(DbName, Options, GroupId, View, Args, Callback, Acc, VInfo) when
- is_binary(GroupId)
-->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- go(DbName, Options, DDoc, View, Args, Callback, Acc, VInfo);
-go(Db, Options, DDoc, View, Args0, Callback, Acc, VInfo) ->
- DbName = fabric:dbname(Db),
- Args =
- case Args0 of
- #mrargs{keys = Keys, direction = rev} when is_list(Keys) ->
- Args0#mrargs{keys = lists:reverse(Keys)};
- #mrargs{} ->
- Args0
- end,
- {Shards, RingOpts} = fabric_view:get_shards(Db, Args),
- {CoordArgs, WorkerArgs} = fabric_view:fix_skip_and_limit(Args),
- DocIdAndRev = fabric_util:doc_id_and_rev(DDoc),
- fabric_view:maybe_update_others(DbName, DocIdAndRev, Shards, View, Args),
- Repls = fabric_ring:get_shard_replacements(DbName, Shards),
- RPCArgs = [DocIdAndRev, View, WorkerArgs, Options],
- StartFun = fun(Shard) ->
- hd(fabric_util:submit_jobs([Shard], fabric_rpc, map_view, RPCArgs))
- end,
- Workers0 = fabric_util:submit_jobs(Shards, fabric_rpc, map_view, RPCArgs),
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case
- fabric_streams:start(
- Workers0,
- #shard.ref,
- StartFun,
- Repls,
- RingOpts
- )
- of
- {ok, ddoc_updated} ->
- Callback({error, ddoc_updated}, Acc);
- {ok, Workers} ->
- try
- go(DbName, Workers, VInfo, CoordArgs, Callback, Acc)
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers,
- waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "map_view"
- ),
- Callback({error, timeout}, Acc);
- {error, Error} ->
- Callback({error, Error}, Acc)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-go(DbName, Workers, {map, View, _}, Args, Callback, Acc0) ->
- #mrargs{limit = Limit, skip = Skip, keys = Keys, update_seq = UpdateSeq} = Args,
- Collation = couch_util:get_value(<<"collation">>, View#mrview.options),
- State = #collector{
- db_name = DbName,
- query_args = Args,
- callback = Callback,
- counters = fabric_dict:init(Workers, 0),
- skip = Skip,
- limit = Limit,
- keys = fabric_view:keydict(Keys),
- sorted = Args#mrargs.sorted,
- collation = Collation,
- user_acc = Acc0,
- update_seq =
- case UpdateSeq of
- true -> [];
- false -> nil
- end
- },
- case
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- State,
- fabric_util:view_timeout(Args),
- 1000 * 60 * 60
- )
- of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-handle_message({meta, Meta0}, {Worker, From}, State) ->
- Tot = couch_util:get_value(total, Meta0, 0),
- Off = couch_util:get_value(offset, Meta0, 0),
- Seq = couch_util:get_value(update_seq, Meta0, 0),
- #collector{
- callback = Callback,
- counters = Counters0,
- total_rows = Total0,
- offset = Offset0,
- user_acc = AccIn,
- update_seq = UpdateSeq0
- } = State,
- % Assert that we don't have other messages from this
- % worker when the total_and_offset message arrives.
- 0 = fabric_dict:lookup_element(Worker, Counters0),
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- Total = Total0 + Tot,
- Offset = Offset0 + Off,
- UpdateSeq =
- case UpdateSeq0 of
- nil -> nil;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- total_rows = Total,
- update_seq = UpdateSeq,
- offset = Offset
- }};
- false ->
- FinalOffset = erlang:min(Total, Offset + State#collector.skip),
- Meta =
- [{total, Total}, {offset, FinalOffset}] ++
- case UpdateSeq of
- nil ->
- [];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
- end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc
- }}
- end;
-handle_message(#view_row{}, {_, _}, #collector{sorted = false, limit = 0} = State) ->
- #collector{callback = Callback} = State,
- {_, Acc} = Callback(complete, State#collector.user_acc),
- {stop, State#collector{user_acc = Acc}};
-handle_message(#view_row{} = Row, {_, From}, #collector{sorted = false} = St) ->
- #collector{callback = Callback, user_acc = AccIn, limit = Limit} = St,
- {Go, Acc} = Callback(fabric_view:transform_row(Row), AccIn),
- rexi:stream_ack(From),
- {Go, St#collector{user_acc = Acc, limit = Limit - 1}};
-handle_message(#view_row{} = Row, {Worker, From}, State) ->
- #collector{
- query_args = #mrargs{direction = Dir},
- counters = Counters0,
- rows = Rows0,
- keys = KeyDict0,
- collation = Collation
- } = State,
- {Rows, KeyDict} = merge_row(
- Dir,
- Collation,
- KeyDict0,
- Row#view_row{worker = {Worker, From}},
- Rows0
- ),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows = Rows, counters = Counters1, keys = KeyDict},
- fabric_view:maybe_send_row(State1);
-handle_message(complete, Worker, State) ->
- Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
- fabric_view:maybe_send_row(State#collector{counters = Counters});
-handle_message({execution_stats, _} = Msg, {_, From}, St) ->
- #collector{callback = Callback, user_acc = AccIn} = St,
- {Go, Acc} = Callback(Msg, AccIn),
- rexi:stream_ack(From),
- {Go, St#collector{user_acc = Acc}};
-handle_message(ddoc_updated, _Worker, State) ->
- {stop, State}.
-
-merge_row(Dir, Collation, undefined, Row, Rows0) ->
- Rows1 = lists:merge(
- fun(#view_row{key = KeyA, id = IdA}, #view_row{key = KeyB, id = IdB}) ->
- compare(Dir, Collation, {KeyA, IdA}, {KeyB, IdB})
- end,
- [Row],
- Rows0
- ),
- {Rows1, undefined};
-merge_row(Dir, Collation, KeyDict0, Row, Rows0) ->
- CmpFun =
- case Collation of
- <<"raw">> ->
- fun
- (A, A) ->
- 0;
- (A, B) ->
- case A < B of
- true -> -1;
- false -> 1
- end
- end;
- _ ->
- fun couch_ejson_compare:less/2
- end,
- case maybe_update_keydict(Row#view_row.key, KeyDict0, CmpFun) of
- undefined ->
- {Rows0, KeyDict0};
- KeyDict1 ->
- Rows1 = lists:merge(
- fun(#view_row{key = A, id = IdA}, #view_row{key = B, id = IdB}) ->
- case {Dir, CmpFun(A, B)} of
- {fwd, 0} ->
- IdA < IdB;
- {rev, 0} ->
- IdB < IdA;
- {_, _} ->
- % We already have a reversed key dict, and sent the
- % workers the same reversed keys list. So here we
- % just enforce sorting according to the order in
- % the key dict
- dict:fetch(A, KeyDict1) < dict:fetch(B, KeyDict1)
- end
- end,
- [Row],
- Rows0
- ),
- {Rows1, KeyDict1}
- end.
-
-compare(fwd, <<"raw">>, A, B) -> A < B;
-compare(rev, <<"raw">>, A, B) -> B < A;
-compare(fwd, _, A, B) -> couch_ejson_compare:less_json_ids(A, B);
-compare(rev, _, A, B) -> couch_ejson_compare:less_json_ids(B, A).
-
-% KeyDict captures the ordering of keys POSTed by the user by mapping them to
-% integers (see fabric_view:keydict/1). It's possible that these keys do not
-% compare equal (i.e., =:=, as used by dict) to those returned by the view but
-% are in fact equal under ICU. In this case (assuming the view uses ICU
-% collation) we must update KeyDict with a mapping from the ICU-equal key to
-% its appropriate value.
-maybe_update_keydict(Key, KeyDict, CmpFun) ->
- case dict:find(Key, KeyDict) of
- {ok, _} ->
- KeyDict;
- error ->
- case key_index(Key, dict:to_list(KeyDict), CmpFun) of
- undefined ->
- undefined;
- Value ->
- dict:store(Key, Value, KeyDict)
- end
- end.
-
-key_index(_, [], _) ->
- undefined;
-key_index(KeyA, [{KeyB, Value} | KVs], CmpFun) ->
- case CmpFun(KeyA, KeyB) of
- 0 -> Value;
- _ -> key_index(KeyA, KVs, CmpFun)
- end.
diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl
deleted file mode 100644
index 600c8d01a..000000000
--- a/src/fabric/src/fabric_view_reduce.erl
+++ /dev/null
@@ -1,181 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_reduce).
-
--export([go/7]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(DbName, GroupId, View, Args, Callback, Acc0, VInfo) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- go(DbName, DDoc, View, Args, Callback, Acc0, VInfo);
-go(Db, DDoc, VName, Args, Callback, Acc, VInfo) ->
- DbName = fabric:dbname(Db),
- {Shards, RingOpts} = fabric_view:get_shards(Db, Args),
- {CoordArgs, WorkerArgs} = fabric_view:fix_skip_and_limit(Args),
- DocIdAndRev = fabric_util:doc_id_and_rev(DDoc),
- RPCArgs = [DocIdAndRev, VName, WorkerArgs],
- fabric_view:maybe_update_others(DbName, DocIdAndRev, Shards, VName, Args),
- Repls = fabric_ring:get_shard_replacements(DbName, Shards),
- StartFun = fun(Shard) ->
- hd(fabric_util:submit_jobs([Shard], fabric_rpc, reduce_view, RPCArgs))
- end,
- Workers0 = fabric_util:submit_jobs(Shards, fabric_rpc, reduce_view, RPCArgs),
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case
- fabric_streams:start(
- Workers0,
- #shard.ref,
- StartFun,
- Repls,
- RingOpts
- )
- of
- {ok, ddoc_updated} ->
- Callback({error, ddoc_updated}, Acc);
- {ok, Workers} ->
- try
- go2(DbName, Workers, VInfo, CoordArgs, Callback, Acc)
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers,
- waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "reduce_view"
- ),
- Callback({error, timeout}, Acc);
- {error, Error} ->
- Callback({error, Error}, Acc)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-go2(DbName, Workers, {red, {_, Lang, View}, _} = VInfo, Args, Callback, Acc0) ->
- #mrargs{limit = Limit, skip = Skip, keys = Keys, update_seq = UpdateSeq} = Args,
- RedSrc = couch_mrview_util:extract_view_reduce(VInfo),
- OsProc =
- case os_proc_needed(RedSrc) of
- true -> couch_query_servers:get_os_process(Lang);
- _ -> nil
- end,
- State = #collector{
- db_name = DbName,
- query_args = Args,
- callback = Callback,
- counters = fabric_dict:init(Workers, 0),
- keys = Keys,
- skip = Skip,
- limit = Limit,
- lang = Lang,
- os_proc = OsProc,
- reducer = RedSrc,
- collation = couch_util:get_value(<<"collation">>, View#mrview.options),
- rows = dict:new(),
- user_acc = Acc0,
- update_seq =
- case UpdateSeq of
- true -> [];
- false -> nil
- end
- },
- try
- rexi_utils:recv(
- Workers,
- #shard.ref,
- fun handle_message/3,
- State,
- fabric_util:view_timeout(Args),
- 1000 * 60 * 60
- )
- of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
- after
- if
- OsProc == nil -> ok;
- true -> catch couch_query_servers:ret_os_process(OsProc)
- end
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-handle_message({meta, Meta0}, {Worker, From}, State) ->
- Seq = couch_util:get_value(update_seq, Meta0, 0),
- #collector{
- callback = Callback,
- counters = Counters0,
- user_acc = AccIn,
- update_seq = UpdateSeq0
- } = State,
- % Assert that we don't have other messages from this
- % worker when the total_and_offset message arrives.
- 0 = fabric_dict:lookup_element(Worker, Counters0),
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- UpdateSeq =
- case UpdateSeq0 of
- nil -> nil;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- update_seq = UpdateSeq
- }};
- false ->
- Meta =
- case UpdateSeq of
- nil ->
- [];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
- end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- user_acc = Acc
- }}
- end;
-handle_message(#view_row{key = Key} = Row, {Worker, From}, State) ->
- #collector{counters = Counters0, rows = Rows0} = State,
- true = fabric_dict:is_key(Worker, Counters0),
- Rows = dict:append(Key, Row#view_row{worker = {Worker, From}}, Rows0),
- C1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows = Rows, counters = C1},
- fabric_view:maybe_send_row(State1);
-handle_message(complete, Worker, #collector{counters = Counters0} = State) ->
- true = fabric_dict:is_key(Worker, Counters0),
- C1 = fabric_dict:update_counter(Worker, 1, Counters0),
- fabric_view:maybe_send_row(State#collector{counters = C1});
-handle_message(ddoc_updated, _Worker, State) ->
- {stop, State}.
-
-os_proc_needed(<<"_", _/binary>>) -> false;
-os_proc_needed(_) -> true.
diff --git a/src/fabric/test/eunit/fabric_db_create_tests.erl b/src/fabric/test/eunit/fabric_db_create_tests.erl
deleted file mode 100644
index 60acd3031..000000000
--- a/src/fabric/test/eunit/fabric_db_create_tests.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_create_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(TDEF(A), {atom_to_list(A), fun A/0}).
-
-main_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF(t_handle_shard_doc_conflict)
- ]
- }.
-
-setup() ->
- test_util:start_couch([fabric]).
-
-teardown(Ctx) ->
- test_util:stop_couch(Ctx).
-
-t_handle_shard_doc_conflict() ->
- DbName = ?tempdb(),
- meck:new(mem3, [passthrough]),
- meck:new(fabric_util, [passthrough]),
- ok = meck:sequence(mem3, shards, 1, [
- fun(_) -> meck:raise(error, database_does_not_exist) end,
- [#shard{dbname = DbName}]
- ]),
- meck:expect(fabric_util, recv, 4, {error, conflict}),
- ?assertEqual({error, file_exists}, fabric_db_create:go(DbName, [])),
-
- meck:unload(),
- ok = fabric:delete_db(DbName).
diff --git a/src/fabric/test/eunit/fabric_db_info_tests.erl b/src/fabric/test/eunit/fabric_db_info_tests.erl
deleted file mode 100644
index d4fb1e73f..000000000
--- a/src/fabric/test/eunit/fabric_db_info_tests.erl
+++ /dev/null
@@ -1,62 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_info_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(TDEF(A), {atom_to_list(A), fun A/0}).
-
-main_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF(t_update_seq_has_uuids)
- ]
- }.
-
-setup() ->
- test_util:start_couch([fabric]).
-
-teardown(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-t_update_seq_has_uuids() ->
- DbName = ?tempdb(),
- ok = fabric:create_db(DbName, [{q, 1}, {n, 1}]),
-
- {ok, Info} = fabric:get_db_info(DbName),
- UpdateSeq = couch_util:get_value(update_seq, Info),
- UnpackedSeq = fabric_view_changes:decode_seq(UpdateSeq),
-
- ?assertMatch([{_, _, _}], UnpackedSeq),
- [{Node, Range, Seq}] = UnpackedSeq,
- ?assert(is_atom(Node)),
- ?assertMatch([_, _], Range),
- ?assertMatch({_, _, _}, Seq),
- {SeqNum, SeqUuid, EpochNode} = Seq,
- ?assert(is_integer(SeqNum)),
- ?assert(is_binary(SeqUuid)),
- ?assert(is_atom(EpochNode)),
-
- {ok, UuidMap} = fabric:db_uuids(DbName),
- PrefixLen = fabric_util:get_uuid_prefix_len(),
- Uuids = [binary:part(Uuid, {0, PrefixLen}) || Uuid <- maps:keys(UuidMap)],
- [UuidFromShard] = Uuids,
- ?assertEqual(UuidFromShard, SeqUuid),
-
- ok = fabric:delete_db(DbName, []).
diff --git a/src/fabric/test/eunit/fabric_db_uuids_tests.erl b/src/fabric/test/eunit/fabric_db_uuids_tests.erl
deleted file mode 100644
index 1d99019a8..000000000
--- a/src/fabric/test/eunit/fabric_db_uuids_tests.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_uuids_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(TDEF(A), {atom_to_list(A), fun A/0}).
-
-main_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF(t_can_get_shard_uuids)
- ]
- }.
-
-setup() ->
- test_util:start_couch([fabric]).
-
-teardown(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-t_can_get_shard_uuids() ->
- DbName = ?tempdb(),
- ok = fabric:create_db(DbName, []),
- Shards = mem3:shards(DbName),
- {ok, Uuids} = fabric:db_uuids(DbName),
- ?assertEqual(length(Shards), map_size(Uuids)),
- UuidsFromShards = lists:foldl(
- fun(#shard{} = Shard, Acc) ->
- Uuid = couch_util:with_db(Shard#shard.name, fun(Db) ->
- couch_db:get_uuid(Db)
- end),
- Acc#{Uuid => Shard}
- end,
- #{},
- Shards
- ),
- ?assertEqual(UuidsFromShards, Uuids),
- ok = fabric:delete_db(DbName, []).
diff --git a/src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl b/src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl
deleted file mode 100644
index 5463a5170..000000000
--- a/src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl
+++ /dev/null
@@ -1,111 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_moved_shards_seq_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(TDEF(A), {atom_to_list(A), fun A/0}).
-
-main_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF(t_shard_moves_avoid_sequence_rewinds)
- ]
- }.
-
-setup() ->
- test_util:start_couch([fabric]).
-
-teardown(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-t_shard_moves_avoid_sequence_rewinds() ->
- DocCnt = 30,
- DbName = ?tempdb(),
-
- ok = fabric:create_db(DbName, [{q, 1}, {n, 1}]),
- lists:foreach(
- fun(I) ->
- update_doc(DbName, #doc{id = erlang:integer_to_binary(I)})
- end,
- lists:seq(1, DocCnt)
- ),
-
- {ok, _, Seq1, 0} = changes(DbName, #changes_args{limit = 1, since = "now"}),
- [{_, Range, {Seq, Uuid, _}}] = seq_decode(Seq1),
-
- % Transform Seq1 pretending it came from a fake source node, before the
- % shard was moved to the current node.
- SrcNode = 'srcnode@srchost',
- Seq2 = seq_encode([{SrcNode, Range, {Seq, Uuid, SrcNode}}]),
-
- % First, check the case where the shard file epoch is mismatched and the
- % sequence would rewind. This ensures the epoch and uuid check protection
- % in couch_db works as intended.
- Result1 = changes(DbName, #changes_args{limit = 1, since = Seq2}),
- ?assertMatch({ok, _, _, _}, Result1),
- {ok, _, _, PendingRewind} = Result1,
- ?assertEqual(DocCnt - 1, PendingRewind),
-
- % Mock epoch checking to pretend that shard actually used to live on
- % SrcNode. In this case, we should not have rewinds.
- mock_epochs([{node(), DocCnt}, {SrcNode, 1}]),
- Result2 = changes(DbName, #changes_args{limit = 1, since = Seq2}),
- ?assertMatch({ok, _, _, _}, Result2),
- {ok, _, _, PendingNoRewind} = Result2,
- ?assertEqual(0, PendingNoRewind),
-
- ok = fabric:delete_db(DbName, []).
-
-changes_callback(start, Acc) ->
- {ok, Acc};
-changes_callback({change, {Change}}, Acc) ->
- CM = maps:from_list(Change),
- {ok, [CM | Acc]};
-changes_callback({stop, EndSeq, Pending}, Acc) ->
- {ok, Acc, EndSeq, Pending}.
-
-changes(DbName, #changes_args{} = Args) ->
- fabric_util:isolate(fun() ->
- fabric:changes(DbName, fun changes_callback/2, [], Args)
- end).
-
-update_doc(DbName, #doc{} = Doc) ->
- fabric_util:isolate(fun() ->
- case fabric:update_doc(DbName, Doc, [?ADMIN_CTX]) of
- {ok, Res} -> Res
- end
- end).
-
-seq_decode(Seq) ->
- % This is copied from fabric_view_changes
- Pattern = "^\"?([0-9]+-)?(?<opaque>.*?)\"?$",
- Options = [{capture, [opaque], binary}],
- {match, Seq1} = re:run(Seq, Pattern, Options),
- binary_to_term(couch_util:decodeBase64Url(Seq1)).
-
-seq_encode(Unpacked) ->
- % Copied from fabric_view_changes
- Opaque = couch_util:encodeBase64Url(term_to_binary(Unpacked, [compressed])),
- ?l2b(["30", $-, Opaque]).
-
-mock_epochs(Epochs) ->
- % Since we made up a node name we'll have to mock epoch checking
- meck:new(couch_db_engine, [passthrough]),
- meck:expect(couch_db_engine, get_epochs, fun(_) -> Epochs end).
diff --git a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
deleted file mode 100644
index d3872410e..000000000
--- a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
+++ /dev/null
@@ -1,288 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc_purge_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TDEF(A), {A, fun A/1}).
-
-% TODO: Add tests:
-% - filter some updates
-% - allow for an update that was filtered by a node
-% - ignore lagging nodes
-
-main_test_() ->
- {
- setup,
- spawn,
- fun setup_all/0,
- fun teardown_all/1,
- [
- {
- foreach,
- fun setup_no_purge/0,
- fun teardown_no_purge/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_no_purge_no_filter)
- ])
- },
- {
- foreach,
- fun setup_single_purge/0,
- fun teardown_single_purge/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_filter),
- ?TDEF(t_filter_unknown_node),
- ?TDEF(t_filter_local_node),
- ?TDEF(t_no_filter_old_node),
- ?TDEF(t_no_filter_different_node),
- ?TDEF(t_no_filter_after_repl)
- ])
- },
- {
- foreach,
- fun setup_multi_purge/0,
- fun teardown_multi_purge/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_filter),
- ?TDEF(t_filter_unknown_node),
- ?TDEF(t_filter_local_node),
- ?TDEF(t_no_filter_old_node),
- ?TDEF(t_no_filter_different_node),
- ?TDEF(t_no_filter_after_repl)
- ])
- }
- ]
- }.
-
-setup_all() ->
- test_util:start_couch().
-
-teardown_all(Ctx) ->
- test_util:stop_couch(Ctx).
-
-setup_no_purge() ->
- {ok, Db} = create_db(),
- populate_db(Db),
- couch_db:name(Db).
-
-teardown_no_purge(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-setup_single_purge() ->
- DbName = setup_no_purge(),
- DocId = <<"0003">>,
- {ok, OldDoc} = open_doc(DbName, DocId),
- purge_doc(DbName, DocId),
- {DbName, DocId, OldDoc, 1}.
-
-teardown_single_purge({DbName, _, _, _}) ->
- teardown_no_purge(DbName).
-
-setup_multi_purge() ->
- DbName = setup_no_purge(),
- DocId = <<"0003">>,
- {ok, OldDoc} = open_doc(DbName, DocId),
- lists:foreach(
- fun(I) ->
- PDocId = iolist_to_binary(io_lib:format("~4..0b", [I])),
- purge_doc(DbName, PDocId)
- end,
- lists:seq(1, 5)
- ),
- {DbName, DocId, OldDoc, 3}.
-
-teardown_multi_purge(Ctx) ->
- teardown_single_purge(Ctx).
-
-t_no_purge_no_filter(DbName) ->
- DocId = <<"0003">>,
-
- {ok, OldDoc} = open_doc(DbName, DocId),
- NewDoc = create_update(OldDoc, 2),
-
- rpc_update_doc(DbName, NewDoc),
-
- {ok, CurrDoc} = open_doc(DbName, DocId),
- ?assert(CurrDoc /= OldDoc),
- ?assert(CurrDoc == NewDoc).
-
-t_filter({DbName, DocId, OldDoc, _PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, 0),
-
- rpc_update_doc(DbName, OldDoc),
-
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
-t_filter_unknown_node({DbName, DocId, OldDoc, _PSeq}) ->
- % Unknown nodes are assumed to start at PurgeSeq = 0
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, 0),
-
- {Pos, [Rev | _]} = OldDoc#doc.revs,
- RROpt = {read_repair, [{'blargh@127.0.0.1', [{Pos, Rev}]}]},
- rpc_update_doc(DbName, OldDoc, [RROpt]),
-
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
-t_no_filter_old_node({DbName, DocId, OldDoc, PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, PSeq),
-
- % The random UUID is to generate a badarg exception when
- % we try and convert it to an existing atom.
- create_purge_checkpoint(DbName, 0, couch_uuids:random()),
-
- rpc_update_doc(DbName, OldDoc),
-
- ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-t_no_filter_different_node({DbName, DocId, OldDoc, PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, PSeq),
-
- % Create a valid purge for a different node
- TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')),
- create_purge_checkpoint(DbName, 0, TgtNode),
-
- rpc_update_doc(DbName, OldDoc),
-
- ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-t_filter_local_node({DbName, DocId, OldDoc, PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, PSeq),
-
- % Create a valid purge for a different node
- TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')),
- create_purge_checkpoint(DbName, 0, TgtNode),
-
- % Add a local node rev to the list of node revs. It should
- % be filtered out
- {Pos, [Rev | _]} = OldDoc#doc.revs,
- RROpts = [
- {read_repair, [
- {tgt_node(), [{Pos, Rev}]},
- {node(), [{1, <<"123">>}]}
- ]}
- ],
- rpc_update_doc(DbName, OldDoc, RROpts),
-
- ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-t_no_filter_after_repl({DbName, DocId, OldDoc, PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, PSeq),
-
- rpc_update_doc(DbName, OldDoc),
-
- ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-wrap({Name, Fun}) ->
- fun(Arg) ->
- {timeout, 60,
- {atom_to_list(Name), fun() ->
- process_flag(trap_exit, true),
- Fun(Arg)
- end}}
- end.
-
-create_db() ->
- DbName = ?tempdb(),
- couch_db:create(DbName, [?ADMIN_CTX]).
-
-populate_db(Db) ->
- Docs = lists:map(
- fun(Idx) ->
- DocId = lists:flatten(io_lib:format("~4..0b", [Idx])),
- #doc{
- id = list_to_binary(DocId),
- body = {[{<<"int">>, Idx}, {<<"vsn">>, 2}]}
- }
- end,
- lists:seq(1, 100)
- ),
- {ok, _} = couch_db:update_docs(Db, Docs).
-
-open_doc(DbName, DocId) ->
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:open_doc(Db, DocId, [])
- end).
-
-create_update(Doc, NewVsn) ->
- #doc{
- id = DocId,
- revs = {Pos, [Rev | _] = Revs},
- body = {Props}
- } = Doc,
- NewProps = lists:keyreplace(<<"vsn">>, 1, Props, {<<"vsn">>, NewVsn}),
- NewRev = couch_hash:md5_hash(term_to_binary({DocId, Rev, {NewProps}})),
- Doc#doc{
- revs = {Pos + 1, [NewRev | Revs]},
- body = {NewProps}
- }.
-
-purge_doc(DbName, DocId) ->
- {ok, Doc} = open_doc(DbName, DocId),
- {Pos, [Rev | _]} = Doc#doc.revs,
- PInfo = {couch_uuids:random(), DocId, [{Pos, Rev}]},
- Resp = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, [PInfo], [])
- end),
- ?assertEqual({ok, [{ok, [{Pos, Rev}]}]}, Resp).
-
-create_purge_checkpoint(DbName, PurgeSeq) ->
- create_purge_checkpoint(DbName, PurgeSeq, tgt_node_bin()).
-
-create_purge_checkpoint(DbName, PurgeSeq, TgtNode) when is_binary(TgtNode) ->
- Resp = couch_util:with_db(DbName, fun(Db) ->
- SrcUUID = couch_db:get_uuid(Db),
- TgtUUID = couch_uuids:random(),
- CPDoc = #doc{
- id = mem3_rep:make_purge_id(SrcUUID, TgtUUID),
- body =
- {[
- {<<"target_node">>, TgtNode},
- {<<"purge_seq">>, PurgeSeq}
- ]}
- },
- couch_db:update_docs(Db, [CPDoc], [])
- end),
- ?assertMatch({ok, [_]}, Resp).
-
-rpc_update_doc(DbName, Doc) ->
- {Pos, [Rev | _]} = Doc#doc.revs,
- RROpt = {read_repair, [{tgt_node(), [{Pos, Rev}]}]},
- rpc_update_doc(DbName, Doc, [RROpt]).
-
-rpc_update_doc(DbName, Doc, Opts) ->
- Ref = erlang:make_ref(),
- put(rexi_from, {self(), Ref}),
- fabric_rpc:update_docs(DbName, [Doc], Opts),
- Reply = test_util:wait(fun() ->
- receive
- {Ref, Reply} ->
- Reply
- after 0 ->
- wait
- end
- end),
- ?assertEqual({ok, []}, Reply).
-
-tgt_node() ->
- 'foo@127.0.0.1'.
-
-tgt_node_bin() ->
- iolist_to_binary(atom_to_list(tgt_node())).
diff --git a/src/fabric/test/eunit/fabric_rpc_tests.erl b/src/fabric/test/eunit/fabric_rpc_tests.erl
deleted file mode 100644
index 0fc295cf4..000000000
--- a/src/fabric/test/eunit/fabric_rpc_tests.erl
+++ /dev/null
@@ -1,187 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TDEF(A), {A, fun A/1}).
-
-main_test_() ->
- {
- setup,
- spawn,
- fun setup_all/0,
- fun teardown_all/1,
- [
- {
- foreach,
- fun setup_no_db_or_config/0,
- fun teardown_db/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_no_config_non_shard_db_create_succeeds)
- ])
- },
- {
- foreach,
- fun setup_shard/0,
- fun teardown_noop/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_no_db),
- ?TDEF(t_no_config_db_create_fails_for_shard),
- ?TDEF(t_no_config_db_create_fails_for_shard_rpc)
- ])
- },
- {
- foreach,
- fun setup_shard/0,
- fun teardown_db/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_db_create_with_config)
- ])
- }
- ]
- }.
-
-setup_all() ->
- Ctx = test_util:start_couch([rexi, mem3, fabric]),
- DatabaseDir = config:get("couchdb", "database_dir"),
- Suffix = ?b2l(couch_uuids:random()),
- test_util:with_couch_server_restart(fun() ->
- config:set("couchdb", "database_dir", DatabaseDir ++ "/" ++ Suffix, _Persist = false)
- end),
- Ctx.
-
-teardown_all(Ctx) ->
- config:delete("couchdb", "database_dir", false),
- test_util:stop_couch(Ctx).
-
-setup_no_db_or_config() ->
- ?tempdb().
-
-setup_shard() ->
- ?tempshard().
-
-teardown_noop(_DbName) ->
- ok.
-
-teardown_db(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-wrap({Name, Fun}) ->
- fun(Arg) ->
- {timeout, 60,
- {atom_to_list(Name), fun() ->
- process_flag(trap_exit, true),
- Fun(Arg)
- end}}
- end.
-
-t_no_db(DbName) ->
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])).
-
-t_no_config_non_shard_db_create_succeeds(DbName) ->
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- ?assertEqual(DbName, mem3:dbname(DbName)),
- ?assertMatch({ok, _}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])).
-
-t_no_config_db_create_fails_for_shard(DbName) ->
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- ?assertException(
- throw, {error, missing_target}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])
- ).
-
-t_no_config_db_create_fails_for_shard_rpc(DbName) ->
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- ?assertException(
- throw, {error, missing_target}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])
- ),
- MFA = {fabric_rpc, get_db_info, [DbName]},
- Ref = rexi:cast(node(), self(), MFA),
- Resp =
- receive
- Resp0 -> Resp0
- end,
- ?assertMatch({Ref, {'rexi_EXIT', {{error, missing_target}, _}}}, Resp).
-
-t_db_create_with_config(DbName) ->
- MDbName = mem3:dbname(DbName),
- DbDoc = #doc{id = MDbName, body = test_db_doc()},
-
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
-
- %% Write the dbs db config
- couch_util:with_db(mem3_sync:shards_db(), fun(Db) ->
- ?assertEqual({not_found, missing}, couch_db:open_doc(Db, MDbName, [ejson_body])),
- ?assertMatch({ok, _}, couch_db:update_docs(Db, [DbDoc]))
- end),
-
- %% Test get_or_create_db loads the properties as expected
- couch_util:with_db(mem3_sync:shards_db(), fun(Db) ->
- ?assertMatch({ok, _}, couch_db:open_doc(Db, MDbName, [ejson_body])),
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- Resp = mem3_util:get_or_create_db(DbName, [?ADMIN_CTX]),
- ?assertMatch({ok, _}, Resp),
- {ok, LDb} = Resp,
-
- {Body} = test_db_doc(),
- DbProps = mem3_util:get_shard_opts(Body),
- {Props} =
- case couch_db_engine:get_props(LDb) of
- undefined -> {[]};
- Else -> {Else}
- end,
- %% We don't normally store the default engine name
- EngineProps =
- case couch_db_engine:get_engine(LDb) of
- couch_bt_engine ->
- [];
- EngineName ->
- [{engine, EngineName}]
- end,
- ?assertEqual([{props, Props} | EngineProps], DbProps)
- end),
- ok = fabric:delete_db(MDbName).
-
-test_db_doc() ->
- {[
- {<<"shard_suffix">>, ".1584997648"},
- {<<"changelog">>, [
- [<<"add">>, <<"00000000-7fffffff">>, <<"node1@127.0.0.1">>],
- [<<"add">>, <<"00000000-7fffffff">>, <<"node2@127.0.0.1">>],
- [<<"add">>, <<"00000000-7fffffff">>, <<"node3@127.0.0.1">>],
- [<<"add">>, <<"80000000-ffffffff">>, <<"node1@127.0.0.1">>],
- [<<"add">>, <<"80000000-ffffffff">>, <<"node2@127.0.0.1">>],
- [<<"add">>, <<"80000000-ffffffff">>, <<"node3@127.0.0.1">>]
- ]},
- {<<"by_node">>,
- {[
- {<<"node1@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]},
- {<<"node2@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]},
- {<<"node3@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]}
- ]}},
- {<<"by_range">>,
- {[
- {<<"00000000-7fffffff">>, [
- <<"node1@127.0.0.1">>, <<"node2@127.0.0.1">>, <<"node3@127.0.0.1">>
- ]},
- {<<"80000000-ffffffff">>, [
- <<"node1@127.0.0.1">>, <<"node2@127.0.0.1">>, <<"node3@127.0.0.1">>
- ]}
- ]}},
- {<<"props">>,
- {[
- {partitioned, true},
- {hash, [couch_partition, hash, []]}
- ]}}
- ]}.
diff --git a/src/fabric/test/eunit/fabric_tests.erl b/src/fabric/test/eunit/fabric_tests.erl
deleted file mode 100644
index c0e2b626b..000000000
--- a/src/fabric/test/eunit/fabric_tests.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-cleanup_index_files_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- fun(Ctx) ->
- [
- t_cleanup_index_files(),
- t_cleanup_index_files_with_existing_db(Ctx),
- t_cleanup_index_files_with_deleted_db(Ctx)
- ]
- end
- }.
-
-setup() ->
- Ctx = test_util:start_couch([fabric]),
- % TempDb is deleted in the test "t_cleanup_index_files_with_deleted_db".
- TempDb = ?tempdb(),
- fabric:create_db(TempDb),
- {Ctx, TempDb}.
-
-teardown({Ctx, _TempDb}) ->
- test_util:stop_couch(Ctx).
-
-t_cleanup_index_files() ->
- ?_assert(
- lists:all(fun(Res) -> Res =:= ok end, fabric:cleanup_index_files())
- ).
-
-t_cleanup_index_files_with_existing_db({_Ctx, TempDb}) ->
- ?_assertEqual(ok, fabric:cleanup_index_files(TempDb)).
-
-t_cleanup_index_files_with_deleted_db({_Ctx, TempDb}) ->
- ?_test(
- begin
- fabric:delete_db(TempDb, []),
- ?assertError(
- database_does_not_exist,
- fabric:inactive_index_files(TempDb)
- ),
- ?assertEqual(ok, fabric:cleanup_index_files(TempDb))
- end
- ).
diff --git a/src/global_changes/.gitignore b/src/global_changes/.gitignore
deleted file mode 100644
index e1b16d52c..000000000
--- a/src/global_changes/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.eunit/
-ebin/
diff --git a/src/global_changes/LICENSE b/src/global_changes/LICENSE
deleted file mode 100644
index 94ad231b8..000000000
--- a/src/global_changes/LICENSE
+++ /dev/null
@@ -1,203 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/src/global_changes/README.md b/src/global_changes/README.md
deleted file mode 100644
index f22ee2ce9..000000000
--- a/src/global_changes/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-### global\_changes
-
-This app supplies the functionality for the `/_db_updates` endpoint.
-
-When a database is created, deleted, or updated, a corresponding event is persisted to disk. (Note: the design does not guarantee that every DB event is persisted or appears in the `_db_updates` feed; it usually will be, but it is not guaranteed.) Users can subscribe to a `_changes`-like feed of these database events by querying the `_db_updates` endpoint.
-
-When an admin user queries the `/_db_updates` endpoint, they will see the name of the database associated with each update as well as the type of update (`created`, `updated`, or `deleted`).
-
-### Captured Metrics
-
-1: `global_changes`, `db_writes`: The number of doc updates caused by global\_changes.
-
-2: `global_changes`, `server_pending_updates`: The number of documents aggregated into the pending write batch.
-
-3: `global_changes`, `listener_pending_updates`: The number of documents aggregated into the pending event batch.
-
-4: `global_changes`, `event_doc_conflict`: The number of conflicted event docs (docs whose rev tree has more than one branch) encountered by global\_changes. Should never happen.
-
-5: `global_changes`, `rpcs`: The number of non-fabric RPCs caused by global\_changes.
-
-### Important Configs
-
-1: `global_changes`, `max_event_delay`: (integer, milliseconds) The maximum time an event is buffered before it is forwarded to the writer.
-
-2: `global_changes`, `max_write_delay`: (integer, milliseconds) The maximum time an event is buffered before it is written to disk.
-
-3: `global_changes`, `update_db`: (true/false) A flag setting whether to update the global\_changes database. If false, changes will be lost and there will be no performance impact of global\_changes on the cluster.
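
For reference, a minimal sketch of how a database event becomes a `_db_updates` row. This is illustrative only and not part of the deleted tree; the module and function names are made up. It mirrors the `<<"Event:DbName">>` key format written by global_changes_listener and the row shape built by global_changes_httpd:default_transform_change/2, both of which appear later in this diff.

    %% Illustrative only: mirrors the <<"Event:DbName">> key format used by
    %% global_changes_listener and the row shape built by
    %% global_changes_httpd:default_transform_change/2.
    -module(global_changes_example).
    -export([event_key/2, row_for_admin/2]).

    %% Build the doc id the listener would record for an event on a database.
    event_key(Event, DbName) when is_atom(Event), is_binary(DbName) ->
        EventBin = erlang:atom_to_binary(Event, latin1),
        <<EventBin/binary, ":", DbName/binary>>.

    %% Turn such a doc id back into the row an admin sees from /_db_updates.
    row_for_admin(Id, Seq) ->
        case binary:split(Id, <<":">>) of
            [Event, DbName] ->
                {[{db_name, DbName}, {type, Event}, {seq, Seq}]};
            _ ->
                skip
        end.

For example, event_key(updated, <<"mydb">>) yields <<"updated:mydb">>, and row_for_admin(<<"updated:mydb">>, Seq) yields {[{db_name, <<"mydb">>}, {type, <<"updated">>}, {seq, Seq}]}.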
diff --git a/src/global_changes/priv/stats_descriptions.cfg b/src/global_changes/priv/stats_descriptions.cfg
deleted file mode 100644
index beb524895..000000000
--- a/src/global_changes/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-{[global_changes, db_writes], [
- {type, counter},
- {desc, <<"number of db writes performed by global changes">>}
-]}.
-{[global_changes, event_doc_conflict], [
- {type, counter},
- {desc, <<"number of conflicted event docs encountered by global changes">>}
-]}.
-{[global_changes, listener_pending_updates], [
- {type, gauge},
- {desc, <<"number of global changes updates pending writes in global_changes_listener">>}
-]}.
-{[global_changes, rpcs], [
- {type, counter},
- {desc, <<"number of rpc operations performed by global_changes">>}
-]}.
-{[global_changes, server_pending_updates], [
- {type, gauge},
- {desc, <<"number of global changes updates pending writes in global_changes_server">>}
-]}.
diff --git a/src/global_changes/src/global_changes.app.src b/src/global_changes/src/global_changes.app.src
deleted file mode 100644
index a1dc2f38b..000000000
--- a/src/global_changes/src/global_changes.app.src
+++ /dev/null
@@ -1,32 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, global_changes, [
- {description, "_changes-like feeds for multiple DBs"},
- {vsn, git},
- {registered, [global_changes_config_listener, global_changes_server]},
- {applications, [
- kernel,
- stdlib,
- couch_epi,
- config,
- couch_log,
- couch_stats,
- couch,
- mem3,
- fabric
- ]},
- {mod, {global_changes_app, []}},
- {env, [
- {dbname, <<"_global_changes">>}
- ]}
-]}.
diff --git a/src/global_changes/src/global_changes_app.erl b/src/global_changes/src/global_changes_app.erl
deleted file mode 100644
index aa0e5d3fd..000000000
--- a/src/global_changes/src/global_changes_app.erl
+++ /dev/null
@@ -1,25 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_app).
--behavior(application).
-
--export([
- start/2,
- stop/1
-]).
-
-start(_StartType, _StartArgs) ->
- global_changes_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/global_changes/src/global_changes_epi.erl b/src/global_changes/src/global_changes_epi.erl
deleted file mode 100644
index 25e204001..000000000
--- a/src/global_changes/src/global_changes_epi.erl
+++ /dev/null
@@ -1,50 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- global_changes.
-
-providers() ->
- [
- {chttpd_handlers, global_changes_httpd_handlers}
- ].
-
-services() ->
- [
- {global_changes, global_changes_plugin}
- ].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/global_changes/src/global_changes_httpd.erl b/src/global_changes/src/global_changes_httpd.erl
deleted file mode 100644
index cb4016b63..000000000
--- a/src/global_changes/src/global_changes_httpd.erl
+++ /dev/null
@@ -1,298 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_httpd).
-
--export([handle_global_changes_req/1]).
--export([default_transform_change/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(acc, {
- heartbeat_interval,
- last_data_sent_time,
- feed,
- prepend,
- resp,
- etag,
- username,
- limit
-}).
-
-handle_global_changes_req(#httpd{method = 'GET'} = Req) ->
- Db = global_changes_util:get_dbname(),
- Feed = chttpd:qs_value(Req, "feed", "normal"),
- Options = parse_global_changes_query(Req),
- Heartbeat =
- case lists:keyfind(heartbeat, 1, Options) of
- {heartbeat, true} -> 60000;
- {heartbeat, Other} -> Other;
- false -> false
- end,
- % Limit is handled in the changes callback, since the limit count needs to
- % only account for changes which happen after the filter.
- Limit = couch_util:get_value(limit, Options),
- %Options1 = lists:keydelete(limit, 1, Options),
- Options1 = Options,
- Owner = allowed_owner(Req),
- Acc = #acc{
- username = Owner,
- feed = Feed,
- resp = Req,
- heartbeat_interval = Heartbeat,
- limit = Limit
- },
- case Feed of
- "normal" ->
- {ok, Info} = fabric:get_db_info(Db),
- Suffix = mem3:shard_suffix(Db),
- Etag = chttpd:make_etag({Info, Suffix}),
- chttpd:etag_respond(Req, Etag, fun() ->
- fabric:changes(Db, fun changes_callback/2, Acc#acc{etag = Etag}, Options1)
- end);
- Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
- fabric:changes(Db, fun changes_callback/2, Acc, Options1);
- _ ->
- Msg = <<"Supported `feed` types: normal, continuous, longpoll, eventsource">>,
- throw({bad_request, Msg})
- end;
-handle_global_changes_req(Req) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-transform_change(Username, Change) ->
- global_changes_plugin:transform_change(
- Username,
- Change,
- fun default_transform_change/2
- ).
-
-default_transform_change(Username, {Props}) ->
- {id, Id} = lists:keyfind(id, 1, Props),
- {seq, Seq} = lists:keyfind(seq, 1, Props),
- Info =
- case binary:split(Id, <<":">>) of
- [Event0, DbName0] ->
- {Event0, DbName0};
- _ ->
- skip
- end,
- case Info of
- % Client is an admin, show them everything.
- {Event, DbName} when Username == admin ->
- {[
- {db_name, DbName},
- {type, Event},
- {seq, Seq}
- ]};
- _ ->
- skip
- end.
-
-changes_callback(waiting_for_updates, Acc) ->
- {ok, Acc};
-% This clause is only hit when _db_updates is queried with limit=0. For
-% limit>0, the request is stopped by maybe_finish/1.
-changes_callback({change, _}, #acc{limit = 0} = Acc) ->
- {stop, Acc};
-% callbacks for continuous feed (newline-delimited JSON Objects)
-changes_callback(start, #acc{feed = "continuous"} = Acc) ->
- #acc{resp = Req} = Acc,
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200),
- {ok, Acc#acc{resp = Resp, last_data_sent_time = os:timestamp()}};
-changes_callback({change, Change0}, #acc{feed = "continuous"} = Acc) ->
- #acc{resp = Resp, username = Username} = Acc,
- case transform_change(Username, Change0) of
- skip ->
- {ok, maybe_send_heartbeat(Acc)};
- Change ->
- Line = [?JSON_ENCODE(Change) | "\n"],
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Line),
- Acc1 = Acc#acc{
- resp = Resp1,
- last_data_sent_time = os:timestamp()
- },
- maybe_finish(Acc1)
- end;
-changes_callback({stop, EndSeq}, #acc{feed = "continuous"} = Acc) ->
- % Temporary upgrade clause - Case 24236
- changes_callback({stop, EndSeq, null}, Acc);
-changes_callback({stop, EndSeq, _Pending}, #acc{feed = "continuous"} = Acc) ->
- #acc{resp = Resp} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(
- Resp,
- [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
- ),
- chttpd:end_delayed_json_response(Resp1);
-% callbacks for eventsource feed (newline-delimited eventsource Objects)
-changes_callback(start, #acc{feed = "eventsource"} = Acc) ->
- #acc{resp = Req} = Acc,
- Headers = [
- {"Content-Type", "text/event-stream"},
- {"Cache-Control", "no-cache"}
- ],
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, Headers),
- {ok, Acc#acc{resp = Resp, last_data_sent_time = os:timestamp()}};
-changes_callback({change, {ChangeProp} = Change}, #acc{resp = Resp, feed = "eventsource"} = Acc) ->
- Seq = proplists:get_value(seq, ChangeProp),
- Chunk = [
- "data: ",
- ?JSON_ENCODE(Change),
- "\n",
- "id: ",
- ?JSON_ENCODE(Seq),
- "\n\n"
- ],
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Chunk),
- maybe_finish(Acc#acc{resp = Resp1});
-changes_callback(timeout, #acc{feed = "eventsource"} = Acc) ->
- #acc{resp = Resp} = Acc,
- Chunk = "event: heartbeat\ndata: \n\n",
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Chunk),
- {ok, {"eventsource", Resp1}};
-changes_callback({stop, _EndSeq}, #acc{feed = "eventsource"} = Acc) ->
- #acc{resp = Resp} = Acc,
- % {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Buf),
- chttpd:end_delayed_json_response(Resp);
-% callbacks for longpoll and normal (single JSON Object)
-changes_callback(start, #acc{feed = "normal", etag = Etag} = Acc) when
- Etag =/= undefined
-->
- #acc{resp = Req} = Acc,
- FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(
- Req,
- 200,
- [{"Etag", Etag}],
- FirstChunk
- ),
- {ok, Acc#acc{resp = Resp, prepend = "", last_data_sent_time = os:timestamp()}};
-changes_callback(start, Acc) ->
- #acc{resp = Req} = Acc,
- FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [], FirstChunk),
- {ok, Acc#acc{
- resp = Resp,
- prepend = "",
- last_data_sent_time = os:timestamp()
- }};
-changes_callback({change, Change0}, Acc) ->
- #acc{resp = Resp, prepend = Prepend, username = Username} = Acc,
- case transform_change(Username, Change0) of
- skip ->
- {ok, maybe_send_heartbeat(Acc)};
- Change ->
- #acc{resp = Resp, prepend = Prepend} = Acc,
- Line = [Prepend, ?JSON_ENCODE(Change)],
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Line),
- Acc1 = Acc#acc{
- prepend = ",\r\n",
- resp = Resp1,
- last_data_sent_time = os:timestamp()
- },
- maybe_finish(Acc1)
- end;
-changes_callback({stop, EndSeq}, Acc) ->
- % Temporary upgrade clause - Case 24236
- changes_callback({stop, EndSeq, null}, Acc);
-changes_callback({stop, EndSeq, _Pending}, Acc) ->
- #acc{resp = Resp} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(
- Resp,
- ["\n],\n\"last_seq\":", ?JSON_ENCODE(EndSeq), "}\n"]
- ),
- chttpd:end_delayed_json_response(Resp1);
-changes_callback(timeout, Acc) ->
- {ok, maybe_send_heartbeat(Acc)};
-changes_callback({error, Reason}, #acc{resp = Req = #httpd{}}) ->
- chttpd:send_error(Req, Reason);
-changes_callback({error, Reason}, Acc) ->
- #acc{etag = Etag, feed = Feed, resp = Resp} = Acc,
- case {Feed, Etag} of
- {"normal", Etag} when Etag =/= undefined ->
- chttpd:send_error(Resp, Reason);
- _ ->
- chttpd:send_delayed_error(Resp, Reason)
- end.
-
-maybe_finish(Acc) ->
- case Acc#acc.limit of
- 1 ->
- {stop, Acc};
- undefined ->
- {ok, Acc};
- Limit ->
- {ok, Acc#acc{limit = Limit - 1}}
- end.
-
-maybe_send_heartbeat(#acc{heartbeat_interval = false} = Acc) ->
- Acc;
-maybe_send_heartbeat(Acc) ->
- #acc{last_data_sent_time = LastSentTime, heartbeat_interval = Interval, resp = Resp} = Acc,
- Now = os:timestamp(),
- case timer:now_diff(Now, LastSentTime) div 1000 > Interval of
- true ->
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\n"),
- Acc#acc{last_data_sent_time = Now, resp = Resp1};
- false ->
- Acc
- end.
-
-parse_global_changes_query(Req) ->
- lists:foldl(
- fun({Key, Value}, Args) ->
- case {Key, Value} of
- {"feed", _} ->
- [{feed, Value} | Args];
- {"descending", "true"} ->
- [{dir, rev} | Args];
- {"since", _} ->
- [{since, Value} | Args];
- {"limit", _} ->
- [{limit, to_non_neg_int(Value)} | Args];
- {"heartbeat", "true"} ->
- [{heartbeat, true} | Args];
- {"heartbeat", "false"} ->
- Args;
- {"heartbeat", _} ->
- [{heartbeat, to_non_neg_int(Value)} | Args];
- {"timeout", _} ->
- [{timeout, to_non_neg_int(Value)} | Args];
- % unknown key value pair, ignore.
- _Else ->
- Args
- end
- end,
- [],
- chttpd:qs(Req)
- ).
-
-to_non_neg_int(Value) ->
- try list_to_integer(Value) of
- V when V >= 0 ->
- V;
- _ ->
- throw({bad_request, invalid_integer})
- catch
- error:badarg ->
- throw({bad_request, invalid_integer})
- end.
-
-allowed_owner(Req) ->
- case config:get("global_changes", "allowed_owner", undefined) of
- undefined ->
- chttpd:verify_is_server_admin(Req),
- admin;
- SpecStr ->
- {ok, {M, F, A}} = couch_util:parse_term(SpecStr),
- couch_util:validate_callback_exists(M, F, 2),
- M:F(Req, A)
- end.
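
A note on the allowed_owner hook above (illustrative, not part of the deleted tree): when `[global_changes] allowed_owner` is set, its value is parsed by couch_util:parse_term/1 into an {Module, Function, Args} tuple, the callback is checked to exist with arity 2, and it is invoked as Module:Function(Req, Args). The returned term becomes the username in the request accumulator; the default transform only renders rows for the admin value, while plugins may handle other owners. A minimal callback could look like the sketch below (module name and owner value are hypothetical).

    %% Illustrative only: a callback satisfying the {M, F, A} contract expected
    %% by global_changes_httpd:allowed_owner/1 (called as M:F(Req, A)).
    -module(my_owner_hook).
    -export([allowed_owner/2]).

    %% Skip the server-admin check and attribute requests to a fixed owner.
    allowed_owner(_Req, OwnerName) ->
        OwnerName.

Configured as allowed_owner = {my_owner_hook, allowed_owner, "ops"} in the [global_changes] section, every /_db_updates request would run with "ops" as the owner instead of requiring server admin credentials.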
diff --git a/src/global_changes/src/global_changes_httpd_handlers.erl b/src/global_changes/src/global_changes_httpd_handlers.erl
deleted file mode 100644
index b21a64b8f..000000000
--- a/src/global_changes/src/global_changes_httpd_handlers.erl
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1]).
-
-url_handler(<<"_db_updates">>) -> fun global_changes_httpd:handle_global_changes_req/1;
-url_handler(_) -> no_match.
-
-db_handler(_) -> no_match.
-
-design_handler(_) -> no_match.
diff --git a/src/global_changes/src/global_changes_listener.erl b/src/global_changes/src/global_changes_listener.erl
deleted file mode 100644
index 71d14e274..000000000
--- a/src/global_changes/src/global_changes_listener.erl
+++ /dev/null
@@ -1,175 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_listener).
--behavior(couch_event_listener).
-
--export([
- start/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_event/3,
- handle_cast/2,
- handle_info/2
-]).
-
--record(state, {
- update_db,
- pending_update_count,
- pending_updates,
- last_update_time,
- max_event_delay,
- dbname
-}).
-
--include_lib("mem3/include/mem3.hrl").
-
-start() ->
- couch_event_listener:start(?MODULE, nil, [all_dbs]).
-
-init(_) ->
- % get configs as strings
- UpdateDb0 = config:get("global_changes", "update_db", "true"),
- MaxEventDelay0 = config:get("global_changes", "max_event_delay", "25"),
-
- % make config strings into other data types
- UpdateDb =
- case UpdateDb0 of
- "false" -> false;
- _ -> true
- end,
- MaxEventDelay = list_to_integer(MaxEventDelay0),
-
- State = #state{
- update_db = UpdateDb,
- pending_update_count = 0,
- pending_updates = sets:new(),
- max_event_delay = MaxEventDelay,
- dbname = global_changes_util:get_dbname()
- },
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_event(_ShardName, _Event, #state{update_db = false} = State) ->
- {ok, State};
-handle_event(ShardName, Event, State0) when
- Event =:= updated orelse Event =:= deleted orelse
- Event =:= created
-->
- #state{dbname = ChangesDbName} = State0,
- State =
- case mem3:dbname(ShardName) of
- ChangesDbName ->
- State0;
- DbName ->
- #state{pending_update_count = Count} = State0,
- EventBin = erlang:atom_to_binary(Event, latin1),
- Key = <<EventBin/binary, <<":">>/binary, DbName/binary>>,
- Pending = sets:add_element(Key, State0#state.pending_updates),
- couch_stats:update_gauge(
- [global_changes, listener_pending_updates],
- Count + 1
- ),
- State0#state{pending_updates = Pending, pending_update_count = Count + 1}
- end,
- maybe_send_updates(State);
-handle_event(_DbName, _Event, State) ->
- maybe_send_updates(State).
-
-handle_cast({set_max_event_delay, MaxEventDelay}, State) ->
- maybe_send_updates(State#state{max_event_delay = MaxEventDelay});
-handle_cast({set_update_db, Boolean}, State0) ->
- % If turning update_db off, clear out server state
- State =
- case {Boolean, State0#state.update_db} of
- {false, true} ->
- State0#state{
- update_db = Boolean,
- pending_updates = sets:new(),
- pending_update_count = 0,
- last_update_time = undefined
- };
- _ ->
- State0#state{update_db = Boolean}
- end,
- maybe_send_updates(State);
-handle_cast(_Msg, State) ->
- maybe_send_updates(State).
-
-maybe_send_updates(#state{pending_update_count = 0} = State) ->
- {ok, State};
-maybe_send_updates(#state{update_db = true} = State) ->
- #state{max_event_delay = MaxEventDelay, last_update_time = LastUpdateTime} = State,
- Now = os:timestamp(),
- case LastUpdateTime of
- undefined ->
- {ok, State#state{last_update_time = Now}, MaxEventDelay};
- _ ->
- Delta = timer:now_diff(Now, LastUpdateTime) div 1000,
- if
- Delta >= MaxEventDelay ->
- Updates = sets:to_list(State#state.pending_updates),
- try group_updates_by_node(State#state.dbname, Updates) of
- Grouped ->
- dict:map(
- fun(Node, Docs) ->
- couch_stats:increment_counter([global_changes, rpcs]),
- global_changes_server:update_docs(Node, Docs)
- end,
- Grouped
- )
- catch
- error:database_does_not_exist ->
- ok
- end,
- couch_stats:update_gauge(
- [global_changes, listener_pending_updates],
- 0
- ),
- State1 = State#state{
- pending_updates = sets:new(),
- pending_update_count = 0,
- last_update_time = undefined
- },
- {ok, State1};
- true ->
- {ok, State, MaxEventDelay - Delta}
- end
- end;
-maybe_send_updates(State) ->
- {ok, State}.
-
-handle_info(_Msg, State) ->
- maybe_send_updates(State).
-
-%% restore spec when R14 support is dropped
-%% -spec group_updates_by_node(binary(), [binary()]) -> dict:dict().
-group_updates_by_node(DbName, Updates) ->
- lists:foldl(
- fun(Key, OuterAcc) ->
- Shards = mem3:shards(DbName, Key),
- lists:foldl(
- fun(#shard{node = Node}, InnerAcc) ->
- dict:append(Node, Key, InnerAcc)
- end,
- OuterAcc,
- Shards
- )
- end,
- dict:new(),
- Updates
- ).
diff --git a/src/global_changes/src/global_changes_plugin.erl b/src/global_changes/src/global_changes_plugin.erl
deleted file mode 100644
index 8e42d56bd..000000000
--- a/src/global_changes/src/global_changes_plugin.erl
+++ /dev/null
@@ -1,39 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_plugin).
-
--export([transform_change/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SERVICE_ID, global_changes).
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-transform_change(Username, Change, Default) ->
- maybe_handle(transform_change, [Username, Change], Default).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-maybe_handle(Func, Args, Default) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- case couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, []) of
- [] ->
- apply(Default, Args);
- [Result] ->
- Result
- end.
diff --git a/src/global_changes/src/global_changes_server.erl b/src/global_changes/src/global_changes_server.erl
deleted file mode 100644
index e4902e207..000000000
--- a/src/global_changes/src/global_changes_server.erl
+++ /dev/null
@@ -1,227 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_server).
--behaviour(gen_server).
--vsn(1).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--export([
- update_docs/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--record(state, {
- update_db,
- pending_update_count,
- pending_updates,
- max_write_delay,
- dbname,
- handler_ref
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
- {ok, Handler} = global_changes_listener:start(),
- % get configs as strings
- UpdateDb0 = config:get("global_changes", "update_db", "true"),
- MaxWriteDelay0 = config:get("global_changes", "max_write_delay", "500"),
-
- % make config strings into other data types
- UpdateDb =
- case UpdateDb0 of
- "false" -> false;
- _ -> true
- end,
- MaxWriteDelay = list_to_integer(MaxWriteDelay0),
-
- % Start our write triggers
- erlang:send_after(MaxWriteDelay, self(), flush_updates),
-
- State = #state{
- update_db = UpdateDb,
- pending_update_count = 0,
- pending_updates = sets:new(),
- max_write_delay = MaxWriteDelay,
- dbname = global_changes_util:get_dbname(),
- handler_ref = erlang:monitor(process, Handler)
- },
- {ok, State}.
-
-terminate(_Reason, _Srv) ->
- ok.
-
-handle_call(_Msg, _From, State) ->
- {reply, ok, State}.
-
-handle_cast(_Msg, #state{update_db = false} = State) ->
- {noreply, State};
-handle_cast({update_docs, DocIds}, State) ->
- Pending = sets:union(sets:from_list(DocIds), State#state.pending_updates),
- PendingCount = sets:size(Pending),
- couch_stats:update_gauge(
- [global_changes, server_pending_updates],
- PendingCount
- ),
- NewState = State#state{
- pending_updates = Pending,
- pending_update_count = PendingCount
- },
- {noreply, NewState};
-handle_cast({set_max_write_delay, MaxWriteDelay}, State) ->
- NewState = State#state{max_write_delay = MaxWriteDelay},
- {noreply, NewState};
-handle_cast({set_update_db, Boolean}, State0) ->
- % If turning update_db off, clear out server state
- State =
- case {Boolean, State0#state.update_db} of
- {false, true} ->
- State0#state{
- update_db = Boolean,
- pending_updates = sets:new(),
- pending_update_count = 0
- };
- _ ->
- State0#state{update_db = Boolean}
- end,
- {noreply, State};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(flush_updates, #state{pending_update_count = 0} = State) ->
- erlang:send_after(State#state.max_write_delay, self(), flush_updates),
- {noreply, State};
-handle_info(flush_updates, #state{update_db = false} = State) ->
- erlang:send_after(State#state.max_write_delay, self(), flush_updates),
- {noreply, State};
-handle_info(flush_updates, State) ->
- erlang:send_after(State#state.max_write_delay, self(), flush_updates),
- flush_updates(State);
-handle_info(start_listener, State) ->
- {ok, Handler} = global_changes_listener:start(),
- NewState = State#state{
- handler_ref = erlang:monitor(process, Handler)
- },
- {noreply, NewState};
-handle_info({'DOWN', Ref, _, _, Reason}, #state{handler_ref = Ref} = State) ->
- couch_log:error("global_changes_listener terminated: ~w", [Reason]),
- erlang:send_after(5000, self(), start_listener),
- {noreply, State};
-handle_info(_, State) ->
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-flush_updates(State) ->
- DocIds = sets:to_list(State#state.pending_updates),
- try group_ids_by_shard(State#state.dbname, DocIds) of
- GroupedIds ->
- Docs = dict:fold(
- fun(ShardName, Ids, DocInfoAcc) ->
- {ok, Shard} = couch_db:open(ShardName, [?ADMIN_CTX]),
- try
- GroupedDocs = get_docs_locally(Shard, Ids),
- GroupedDocs ++ DocInfoAcc
- after
- couch_db:close(Shard)
- end
- end,
- [],
- GroupedIds
- ),
-
- spawn(fun() ->
- fabric:update_docs(State#state.dbname, Docs, [])
- end),
-
- Count = State#state.pending_update_count,
- couch_stats:increment_counter(
- [global_changes, db_writes],
- Count
- )
- catch
- error:database_does_not_exist ->
- {noreply, State}
- end,
- couch_stats:update_gauge(
- [global_changes, server_pending_updates],
- 0
- ),
- {noreply, State#state{
- pending_updates = sets:new(),
- pending_update_count = 0
- }}.
-
-update_docs(Node, Updates) ->
- gen_server:cast({?MODULE, Node}, {update_docs, Updates}).
-
-group_ids_by_shard(DbName, DocIds) ->
- LocalNode = node(),
- lists:foldl(
- fun(DocId, Acc) ->
- Shards = mem3:shards(DbName, DocId),
- lists:foldl(
- fun
- (#shard{node = Node, name = Name}, Acc1) when Node == LocalNode ->
- dict:append(Name, DocId, Acc1);
- (_, Acc1) ->
- Acc1
- end,
- Acc,
- Shards
- )
- end,
- dict:new(),
- DocIds
- ).
-
-get_docs_locally(Shard, Ids) ->
- lists:map(
- fun(Id) ->
- DocInfo = couch_db:get_doc_info(Shard, Id),
- #doc{id = Id, revs = get_rev(DocInfo)}
- end,
- Ids
- ).
-
-get_rev(not_found) ->
- {0, []};
-get_rev({ok, #doc_info{revs = [RevInfo]}}) ->
- {Pos, Rev} = RevInfo#rev_info.rev,
- {Pos, [Rev]};
-get_rev({ok, #doc_info{revs = [RevInfo | _]}}) ->
- % couch_doc:to_doc_info/1 sorts things so that the first
- % #rev_info in the list is the "winning" revision which is
- % the one we'd want to base our edit off of. In theory
- % global_changes should never encounter a conflict by design
- % but we should record if it happens in case our design isn't
- % quite right.
- couch_stats:increment_counter([global_changes, event_doc_conflict]),
- {Pos, Rev} = RevInfo#rev_info.rev,
- {Pos, [Rev]}.
diff --git a/src/global_changes/src/global_changes_sup.erl b/src/global_changes/src/global_changes_sup.erl
deleted file mode 100644
index 3229ac0d3..000000000
--- a/src/global_changes/src/global_changes_sup.erl
+++ /dev/null
@@ -1,82 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_sup).
--behavior(supervisor).
-
--export([start_link/0]).
-
--export([init/1]).
-
--export([handle_config_change/5]).
--export([handle_config_terminate/3]).
-
--define(LISTENER, global_changes_listener).
--define(SERVER, global_changes_server).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
- {ok, {
- {one_for_one, 5, 10},
- couch_epi:register_service(global_changes_epi, [
- {
- config_listener_mon,
- {config_listener_mon, start_link, [?MODULE, nil]},
- permanent,
- 5000,
- worker,
- [config_listener_mon]
- },
- {
- global_changes_server,
- {global_changes_server, start_link, []},
- permanent,
- 5000,
- worker,
- [global_changes_server]
- }
- ])
- }}.
-
-handle_config_change("global_changes", "max_event_delay", MaxDelayStr, _, _) ->
- try list_to_integer(MaxDelayStr) of
- MaxDelay ->
- gen_server:cast(?LISTENER, {set_max_event_delay, MaxDelay})
- catch
- error:badarg ->
- ok
- end,
- {ok, nil};
-handle_config_change("global_changes", "max_write_delay", MaxDelayStr, _, _) ->
- try list_to_integer(MaxDelayStr) of
- MaxDelay ->
- gen_server:cast(?SERVER, {set_max_write_delay, MaxDelay})
- catch
- error:badarg ->
- ok
- end,
- {ok, nil};
-handle_config_change("global_changes", "update_db", "false", _, _) ->
- gen_server:cast(?LISTENER, {set_update_db, false}),
- gen_server:cast(?SERVER, {set_update_db, false}),
- {ok, nil};
-handle_config_change("global_changes", "update_db", _, _, _) ->
- gen_server:cast(?LISTENER, {set_update_db, true}),
- gen_server:cast(?SERVER, {set_update_db, true}),
- {ok, nil};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, _Reason, _State) ->
- ok.
diff --git a/src/global_changes/src/global_changes_util.erl b/src/global_changes/src/global_changes_util.erl
deleted file mode 100644
index 910e0137f..000000000
--- a/src/global_changes/src/global_changes_util.erl
+++ /dev/null
@@ -1,25 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_util).
-
--export([get_dbname/0]).
-
-get_dbname() ->
- case application:get_env(global_changes, dbname) of
- {ok, DbName} when is_binary(DbName) ->
- DbName;
- {ok, DbName} when is_list(DbName) ->
- iolist_to_binary(DbName);
- _ ->
- <<"_global_changes">>
- end.
diff --git a/src/global_changes/test/eunit/global_changes_hooks_tests.erl b/src/global_changes/test/eunit/global_changes_hooks_tests.erl
deleted file mode 100644
index 5d6bbd13d..000000000
--- a/src/global_changes/test/eunit/global_changes_hooks_tests.erl
+++ /dev/null
@@ -1,164 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_hooks_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([allowed_owner/2]).
-
--define(t2l(V), lists:flatten(io_lib:format("~p", [V]))).
-
-start() ->
- Ctx = test_util:start_couch([chttpd, global_changes]),
- DbName = ?tempdb(),
- ok = fabric:create_db(DbName, [?ADMIN_CTX]),
- application:set_env(global_changes, dbname, DbName),
- {Ctx, DbName}.
-
-stop({Ctx, DbName}) ->
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx),
- ok.
-
-setup(default) ->
- add_admin("admin", <<"pass">>),
- config:delete("chttpd_auth", "authentication_redirect", false),
- config:set("chttpd_auth", "require_valid_user", "false", false),
- get_host();
-setup(A) ->
- Host = setup(default),
- ok = config:set(
- "global_changes",
- "allowed_owner",
- ?t2l({?MODULE, allowed_owner, A}),
- false
- ),
- Host.
-
-teardown(_) ->
- delete_admin("admin"),
- config:delete("global_changes", "allowed_owner", false),
- ok.
-
-allowed_owner(_Req, "throw") ->
- throw({unauthorized, <<"Exception thrown.">>});
-allowed_owner(_Req, "pass") ->
- "super".
-
-allowed_owner_hook_test_() ->
- {
- "Check allowed_owner hook",
- {
- setup,
- fun start/0,
- fun stop/1,
- [
- disabled_allowed_owner_integration_point(),
- enabled_allowed_owner_integration_point()
- ]
- }
- }.
-
-disabled_allowed_owner_integration_point() ->
- {
- "disabled allowed_owner integration point",
- {
- foreach,
- fun() -> setup(default) end,
- fun teardown/1,
- [
- fun should_not_fail_for_admin/1,
- fun should_fail_for_non_admin/1
- ]
- }
- }.
-
-enabled_allowed_owner_integration_point() ->
- {
- "enabled allowed_owner integration point",
- [
- {
- foreach,
- fun() -> setup("throw") end,
- fun teardown/1,
- [fun should_throw/1]
- },
- {
- foreach,
- fun() -> setup("pass") end,
- fun teardown/1,
- [fun should_pass/1]
- }
- ]
- }.
-
-should_not_fail_for_admin(Host) ->
- ?_test(begin
- Headers = [{basic_auth, {"admin", "pass"}}],
- {Status, [Error, Reason]} =
- request(Host, Headers, [<<"error">>, <<"reason">>]),
- ?assertEqual(200, Status),
- ?assertEqual(undefined, Error),
- ?assertEqual(undefined, Reason)
- end).
-
-should_fail_for_non_admin(Host) ->
- ?_test(begin
- Headers = [],
- {Status, [Error, Reason]} =
- request(Host, Headers, [<<"error">>, <<"reason">>]),
- ?assertEqual(401, Status),
- ?assertEqual(<<"unauthorized">>, Error),
- ?assertEqual(<<"You are not a server admin.">>, Reason)
- end).
-
-should_pass(Host) ->
- ?_test(begin
- Headers = [{basic_auth, {"admin", "pass"}}],
- {Status, [Error, Reason]} =
- request(Host, Headers, [<<"error">>, <<"reason">>]),
- ?assertEqual(200, Status),
- ?assertEqual(undefined, Error),
- ?assertEqual(undefined, Reason)
- end).
-
-should_throw(Host) ->
- ?_test(begin
- Headers = [{basic_auth, {"admin", "pass"}}],
- {Status, [Error, Reason]} =
- request(Host, Headers, [<<"error">>, <<"reason">>]),
- ?assertEqual(401, Status),
- ?assertEqual(<<"unauthorized">>, Error),
- ?assertEqual(<<"Exception thrown.">>, Reason)
- end).
-
-request(Host, Headers, ToDecode) ->
- Url = Host ++ "/_db_updates",
- {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers),
- {Body} = jiffy:decode(BinBody),
- Values = [couch_util:get_value(Key, Body) || Key <- ToDecode],
- {Status, Values}.
-
-add_admin(User, Pass) ->
- Hashed = couch_passwords:hash_admin_password(Pass),
- config:set("admins", User, ?b2l(Hashed), false).
-
-delete_admin(User) ->
- config:delete("admins", User, false).
-
-get_host() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- Host = "http://" ++ Addr ++ ":" ++ Port,
- Host.
diff --git a/src/ioq/.gitignore b/src/ioq/.gitignore
deleted file mode 100644
index 21cf3d388..000000000
--- a/src/ioq/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.rebar
-ebin/
diff --git a/src/ioq/src/ioq.app.src b/src/ioq/src/ioq.app.src
deleted file mode 100644
index 65ea50d6d..000000000
--- a/src/ioq/src/ioq.app.src
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application,ioq, [
- {description, "I/O prioritizing engine"},
- {vsn, git},
- {registered,[]},
- {applications,[kernel,stdlib,config]},
- {mod,{ioq_app,[]}},
- {env, []},
- {modules,[ioq,ioq_app,ioq_sup]}
-]}.
diff --git a/src/ioq/src/ioq.erl b/src/ioq/src/ioq.erl
deleted file mode 100644
index 51934d544..000000000
--- a/src/ioq/src/ioq.erl
+++ /dev/null
@@ -1,208 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ioq).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start_link/0, call/3]).
--export([get_queue_lengths/0]).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, code_change/3, terminate/2]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
--define(RELISTEN_DELAY, 5000).
-
--record(state, {
- concurrency,
- ratio,
- interactive = queue:new(),
- background = queue:new(),
- running = []
-}).
-
--record(request, {
- fd,
- msg,
- priority,
- from,
- ref
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-call(Fd, Msg, Metadata) ->
- Priority = io_class(Msg, Metadata),
- case bypass(Priority) of
- true ->
- gen_server:call(Fd, Msg, infinity);
- false ->
- queued_call(Fd, Msg, Priority)
- end.
-
-get_queue_lengths() ->
- gen_server:call(?MODULE, get_queue_lengths).
-
-bypass(Priority) ->
- case Priority of
- os_process -> config:get_boolean("ioq.bypass", "os_process", true);
- read -> config:get_boolean("ioq.bypass", "read", true);
- write -> config:get_boolean("ioq.bypass", "write", true);
- view_update -> config:get_boolean("ioq.bypass", "view_update", true);
- shard_sync -> config:get_boolean("ioq.bypass", "shard_sync", false);
- compaction -> config:get_boolean("ioq.bypass", "compaction", false);
- _ -> config:get("ioq.bypass", atom_to_list(Priority)) =:= "true"
- end.
-
-io_class({prompt, _}, _) ->
- os_process;
-io_class({data, _}, _) ->
- os_process;
-io_class(_, {interactive, _}) ->
- read;
-io_class(_, {db_update, _}) ->
- write;
-io_class(_, {view_update, _, _}) ->
- view_update;
-io_class(_, {internal_repl, _}) ->
- shard_sync;
-io_class(_, {db_compact, _}) ->
- compaction;
-io_class(_, {view_compact, _, _}) ->
- compaction;
-io_class(_, _) ->
- other.
-
-queued_call(Fd, Msg, Priority) ->
- Request = #request{fd = Fd, msg = Msg, priority = Priority, from = self()},
- try
- gen_server:call(?MODULE, Request, infinity)
- catch
- exit:{noproc, _} ->
- gen_server:call(Fd, Msg, infinity)
- end.
-
-init(_) ->
- ok = config:listen_for_changes(?MODULE, nil),
- State = #state{},
- {ok, read_config(State)}.
-
-read_config(State) ->
- Ratio = config:get_float("ioq", "ratio", 0.01),
- Concurrency = config:get_integer("ioq", "concurrency", 10),
- State#state{concurrency = Concurrency, ratio = Ratio}.
-
-handle_call(get_queue_lengths, _From, State) ->
- Response = #{
- interactive => queue:len(State#state.interactive),
- background => queue:len(State#state.background)
- },
- {reply, Response, State, 0};
-handle_call(#request{} = Request, From, State) ->
- {noreply, enqueue_request(Request#request{from = From}, State), 0}.
-
-handle_cast(change, State) ->
- {noreply, read_config(State)};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({Ref, Reply}, State) ->
- case lists:keytake(Ref, #request.ref, State#state.running) of
- {value, Request, Remaining} ->
- erlang:demonitor(Ref, [flush]),
- gen_server:reply(Request#request.from, Reply),
- {noreply, State#state{running = Remaining}, 0};
- false ->
- {noreply, State, 0}
- end;
-handle_info({'DOWN', Ref, _, _, Reason}, State) ->
- case lists:keytake(Ref, #request.ref, State#state.running) of
- {value, Request, Remaining} ->
- gen_server:reply(Request#request.from, {'EXIT', Reason}),
- {noreply, State#state{running = Remaining}, 0};
- false ->
- {noreply, State, 0}
- end;
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(timeout, State) ->
- {noreply, maybe_submit_request(State)}.
-
-handle_config_change("ioq", _, _, _, _) ->
- {ok, gen_server:cast(?MODULE, change)};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, stop, _State) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-code_change(_Vsn, State, _Extra) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-enqueue_request(#request{priority = compaction} = Request, #state{} = State) ->
- State#state{background = queue:in(Request, State#state.background)};
-enqueue_request(#request{priority = shard_sync} = Request, #state{} = State) ->
- State#state{background = queue:in(Request, State#state.background)};
-enqueue_request(#request{} = Request, #state{} = State) ->
- State#state{interactive = queue:in(Request, State#state.interactive)}.
-
-maybe_submit_request(#state{concurrency = Concurrency, running = Running} = State) when
- length(Running) < Concurrency
-->
- case make_next_request(State) of
- State ->
- State;
- NewState when length(Running) >= Concurrency - 1 ->
- NewState;
- NewState ->
- maybe_submit_request(NewState)
- end;
-maybe_submit_request(State) ->
- State.
-
-make_next_request(#state{} = State) ->
- case {queue:is_empty(State#state.background), queue:is_empty(State#state.interactive)} of
- {true, true} ->
- State;
- {true, false} ->
- choose_next_request(#state.interactive, State);
- {false, true} ->
- choose_next_request(#state.background, State);
- {false, false} ->
- case couch_rand:uniform() < State#state.ratio of
- true ->
- choose_next_request(#state.background, State);
- false ->
- choose_next_request(#state.interactive, State)
- end
- end.
-
-choose_next_request(Index, State) ->
- case queue:out(element(Index, State)) of
- {empty, _} ->
- State;
- {{value, Request}, Q} ->
- submit_request(Request, setelement(Index, State, Q))
- end.
-
-submit_request(#request{} = Request, #state{} = State) ->
- Ref = erlang:monitor(process, Request#request.fd),
- Request#request.fd ! {'$gen_call', {self(), Ref}, Request#request.msg},
- State#state{running = [Request#request{ref = Ref} | State#state.running]}.
diff --git a/src/ioq/src/ioq_app.erl b/src/ioq/src/ioq_app.erl
deleted file mode 100644
index 2e6d75acb..000000000
--- a/src/ioq/src/ioq_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ioq_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_StartType, _StartArgs) ->
- ioq_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/ioq/src/ioq_sup.erl b/src/ioq/src/ioq_sup.erl
deleted file mode 100644
index 937e5a952..000000000
--- a/src/ioq/src/ioq_sup.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ioq_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
- {ok, {{one_for_one, 5, 10}, [?CHILD(ioq, worker)]}}.
diff --git a/src/jwtf/.gitignore b/src/jwtf/.gitignore
deleted file mode 100644
index 5eadeac89..000000000
--- a/src/jwtf/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*~
-_build/
-doc/
-rebar.lock
diff --git a/src/jwtf/LICENSE b/src/jwtf/LICENSE
deleted file mode 100644
index d9a10c0d8..000000000
--- a/src/jwtf/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/src/jwtf/README.md b/src/jwtf/README.md
deleted file mode 100644
index e6038fbc0..000000000
--- a/src/jwtf/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# jwtf
-
-JSON Web Token Functions
-
-This library provides JWT parsing and validation functions.
-
-Supports:
-
-* Verify
-* RS256
-* RS384
-* RS512
-* HS256
-* HS384
-* HS512
-* ES256
-* ES384
-* ES512
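
For orientation, a minimal sketch of how this API is exercised, modelled on the `encode/3` and `decode/3` functions and the tests removed below; the module name, secret, and issuer are illustrative assumptions rather than anything from the original tree.

```erlang
%% Minimal round-trip sketch for an HS256 token; values are illustrative.
-module(jwtf_example).
-export([roundtrip/0]).

roundtrip() ->
    Header = {[{<<"typ">>, <<"JWT">>}, {<<"alg">>, <<"HS256">>}]},
    Claims = {[{<<"iss">>, <<"https://foo.com">>},
               {<<"exp">>, os:system_time(second) + 3600}]},
    {ok, Token} = jwtf:encode(Header, Claims, <<"secret">>),
    %% The key store callback receives the alg and kid from the token header.
    KS = fun(<<"HS256">>, _KID) -> <<"secret">> end,
    Checks = [alg, exp, {iss, <<"https://foo.com">>}],
    %% Returns {ok, Claims} on success or {error, Reason} otherwise.
    jwtf:decode(Token, Checks, KS).
```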
diff --git a/src/jwtf/rebar.config b/src/jwtf/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/jwtf/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/jwtf/src/jwtf.app.src b/src/jwtf/src/jwtf.app.src
deleted file mode 100644
index 24081bf6f..000000000
--- a/src/jwtf/src/jwtf.app.src
+++ /dev/null
@@ -1,32 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, jwtf, [
- {description, "JSON Web Token Functions"},
- {vsn, git},
- {registered, []},
- {applications, [
- kernel,
- stdlib,
- b64url,
- config,
- crypto,
- jiffy,
- public_key
- ]},
- {mod, {jwtf_app, []}},
- {env,[]},
- {modules, []},
- {maintainers, []},
- {licenses, []},
- {links, []}
-]}.
diff --git a/src/jwtf/src/jwtf.erl b/src/jwtf/src/jwtf.erl
deleted file mode 100644
index b8b30c4f6..000000000
--- a/src/jwtf/src/jwtf.erl
+++ /dev/null
@@ -1,414 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% @doc
-% This module decodes and validates JWT tokens. Almost all property
-% checks are optional; if a check is not requested, the presence and
-% validity of the corresponding field are not verified. The signature
-% check, however, is always performed.
-
--module(jwtf).
-
--export([
- encode/3,
- decode/3,
- decode/4,
- decode_b64url_json/1,
- decode_passthrough/1,
- valid_algorithms/0,
- verification_algorithm/1
-]).
-
--include_lib("public_key/include/public_key.hrl").
-
--define(ALGS, [
- % RSA PKCS#1 signature with SHA-256
- {<<"RS256">>, {public_key, sha256}},
- {<<"RS384">>, {public_key, sha384}},
- {<<"RS512">>, {public_key, sha512}},
- {<<"ES256">>, {public_key, sha256}},
- {<<"ES384">>, {public_key, sha384}},
- {<<"ES512">>, {public_key, sha512}},
- {<<"HS256">>, {hmac, sha256}},
- {<<"HS384">>, {hmac, sha384}},
- {<<"HS512">>, {hmac, sha512}}
-]).
-
--define(CHECKS, [
- alg,
- exp,
- iat,
- iss,
- kid,
- nbf,
- sig,
- typ
-]).
-
-% @doc encode
-% Encode the JSON Header and Claims using Key and Alg obtained from Header
--spec encode(term(), term(), term()) ->
- {ok, binary()} | no_return().
-encode(Header = {HeaderProps}, Claims, Key) ->
- try
- Alg =
- case prop(<<"alg">>, HeaderProps) of
- undefined ->
- throw({bad_request, <<"Missing alg header parameter">>});
- Val ->
- Val
- end,
- EncodedHeader = b64url:encode(jiffy:encode(Header)),
- EncodedClaims = b64url:encode(jiffy:encode(Claims)),
- Message = <<EncodedHeader/binary, $., EncodedClaims/binary>>,
- SignatureOrMac =
- case verification_algorithm(Alg) of
- {public_key, Algorithm} ->
- Signature = public_key:sign(Message, Algorithm, Key),
- case Alg of
- <<"ES", _/binary>> ->
- der_to_jose(Alg, Signature);
- _ ->
- Signature
- end;
- {hmac, Algorithm} ->
- hmac(Algorithm, Key, Message)
- end,
- EncodedSignatureOrMac = b64url:encode(SignatureOrMac),
- {ok, <<Message/binary, $., EncodedSignatureOrMac/binary>>}
- catch
- throw:Error ->
- {error, Error}
- end.
-
-% @doc decode
-% Decodes the supplied encoded token, checking
-% for the attributes defined in Checks, calling
-% the key store function to retrieve the key needed
-% to verify the signature, and decoding the Payload
-% with the Decoder, defaulting to decode_b64url_json/1.
-decode(EncodedToken, Checks, KS) ->
- decode(EncodedToken, Checks, KS, fun decode_b64url_json/1).
-
-decode(EncodedToken, Checks, KS, Decoder) ->
- try
- [Header, Payload, Signature] = split(EncodedToken),
- validate(Header, Payload, Signature, Checks, KS),
- {ok, Decoder(Payload)}
- catch
- throw:Error ->
- {error, Error}
- end.
-
-% @doc valid_algorithms
-% Return a list of supported algorithms
--spec valid_algorithms() -> [binary()].
-valid_algorithms() ->
- proplists:get_keys(?ALGS).
-
-% @doc verification_algorithm
-% Return {VerificationMethod, Algorithm} tuple for the specified Alg
--spec verification_algorithm(binary()) ->
- {atom(), atom()} | no_return().
-verification_algorithm(Alg) ->
- case lists:keyfind(Alg, 1, ?ALGS) of
- {Alg, Val} ->
- Val;
- false ->
- throw({bad_request, <<"Invalid alg header parameter">>})
- end.
-
-validate(Header0, Payload0, Signature, Checks, KS) ->
- validate_checks(Checks),
- Header1 = props(decode_b64url_json(Header0)),
- validate_header(Header1, Checks),
-
- Payload1 = props(decode_b64url_json(Payload0)),
- validate_payload(Payload1, Checks),
-
- Alg = prop(<<"alg">>, Header1),
- Key = key(Header1, Checks, KS),
- verify(Alg, Header0, Payload0, Signature, Key).
-
-validate_checks(Checks) when is_list(Checks) ->
- case {lists:usort(Checks), lists:sort(Checks)} of
- {L, L} ->
- ok;
- {L1, L2} ->
- error({duplicate_checks, L2 -- L1})
- end,
- {_, UnknownChecks} = lists:partition(fun valid_check/1, Checks),
- case UnknownChecks of
- [] ->
- ok;
- UnknownChecks ->
- error({unknown_checks, UnknownChecks})
- end.
-
-valid_check(Check) when is_atom(Check) ->
- lists:member(Check, ?CHECKS);
-valid_check({Check, _}) when is_atom(Check) ->
- lists:member(Check, ?CHECKS);
-valid_check(_) ->
- false.
-
-validate_header(Props, Checks) ->
- validate_typ(Props, Checks),
- validate_alg(Props, Checks).
-
-validate_typ(Props, Checks) ->
- Required = prop(typ, Checks),
- TYP = prop(<<"typ">>, Props),
- case {Required, TYP} of
- {undefined, undefined} ->
- ok;
- {true, undefined} ->
- throw({bad_request, <<"Missing typ header parameter">>});
- {_, <<"JWT">>} ->
- ok;
- {true, _} ->
- throw({bad_request, <<"Invalid typ header parameter">>})
- end.
-
-validate_alg(Props, Checks) ->
- Required = prop(alg, Checks),
- Alg = prop(<<"alg">>, Props),
- case {Required, Alg} of
- {undefined, undefined} ->
- ok;
- {true, undefined} ->
- throw({bad_request, <<"Missing alg header parameter">>});
- {_, Alg} ->
- case lists:member(Alg, valid_algorithms()) of
- true ->
- ok;
- false ->
- throw({bad_request, <<"Invalid alg header parameter">>})
- end
- end.
-
-%% Only validate required checks.
-validate_payload(Props, Checks) ->
- validate_iss(Props, Checks),
- validate_iat(Props, Checks),
- validate_nbf(Props, Checks),
- validate_exp(Props, Checks).
-
-validate_iss(Props, Checks) ->
- ExpectedISS = prop(iss, Checks),
- ActualISS = prop(<<"iss">>, Props),
-
- case {ExpectedISS, ActualISS} of
- % ignore unrequired check
- {undefined, _} ->
- ok;
- {ISS, undefined} when ISS /= undefined ->
- throw({bad_request, <<"Missing iss claim">>});
- {ISS, ISS} ->
- ok;
- {_, _} ->
- throw({bad_request, <<"Invalid iss claim">>})
- end.
-
-validate_iat(Props, Checks) ->
- Required = prop(iat, Checks),
- IAT = prop(<<"iat">>, Props),
-
- case {Required, IAT} of
- % ignore unrequired check
- {undefined, _} ->
- ok;
- {true, undefined} ->
- throw({bad_request, <<"Missing iat claim">>});
- {true, IAT} when is_integer(IAT) ->
- ok;
- {true, _} ->
- throw({bad_request, <<"Invalid iat claim">>})
- end.
-
-validate_nbf(Props, Checks) ->
- Required = prop(nbf, Checks),
- NBF = prop(<<"nbf">>, Props),
-
- case {Required, NBF} of
- % ignore unrequired check
- {undefined, _} ->
- ok;
- {true, undefined} ->
- throw({bad_request, <<"Missing nbf claim">>});
- {true, NBF} ->
- assert_past(<<"nbf">>, NBF)
- end.
-
-validate_exp(Props, Checks) ->
- Required = prop(exp, Checks),
- EXP = prop(<<"exp">>, Props),
-
- case {Required, EXP} of
- % ignore unrequired check
- {undefined, _} ->
- ok;
- {true, undefined} ->
- throw({bad_request, <<"Missing exp claim">>});
- {true, EXP} ->
- assert_future(<<"exp">>, EXP)
- end.
-
-key(Props, Checks, KS) ->
- Alg = prop(<<"alg">>, Props),
- Required = prop(kid, Checks),
- KID = prop(<<"kid">>, Props),
- case {Required, KID} of
- {true, undefined} ->
- throw({bad_request, <<"Missing kid claim">>});
- {_, KID} ->
- KS(Alg, KID)
- end.
-
-verify(Alg, Header, Payload, SignatureOrMac0, Key) ->
- Message = <<Header/binary, $., Payload/binary>>,
- SignatureOrMac1 = b64url:decode(SignatureOrMac0),
- SignatureOrMac2 =
- case Alg of
- <<"ES", _/binary>> ->
- jose_to_der(SignatureOrMac1);
- _ ->
- SignatureOrMac1
- end,
- {VerificationMethod, Algorithm} = verification_algorithm(Alg),
- case VerificationMethod of
- public_key ->
- public_key_verify(Algorithm, Message, SignatureOrMac2, Key);
- hmac ->
- hmac_verify(Algorithm, Message, SignatureOrMac2, Key)
- end.
-
-public_key_verify(Algorithm, Message, Signature, PublicKey) ->
- case public_key:verify(Message, Algorithm, Signature, PublicKey) of
- true ->
- ok;
- false ->
- throw({bad_request, <<"Bad signature">>})
- end.
-
-hmac_verify(Algorithm, Message, HMAC, SecretKey) ->
- case hmac(Algorithm, SecretKey, Message) of
- HMAC ->
- ok;
- _ ->
- throw({bad_request, <<"Bad HMAC">>})
- end.
-
-jose_to_der(Signature) ->
- NumLen = 8 * byte_size(Signature) div 2,
- <<R:NumLen, S:NumLen>> = Signature,
- SigValue = #'ECDSA-Sig-Value'{r = R, s = S},
- public_key:der_encode('ECDSA-Sig-Value', SigValue).
-
-der_to_jose(Alg, Signature) ->
- #'ECDSA-Sig-Value'{r = R, s = S} = public_key:der_decode('ECDSA-Sig-Value', Signature),
- Len = rs_len(Alg),
- <<R:Len, S:Len>>.
-
-rs_len(<<"ES", SizeBin/binary>>) ->
- binary_to_integer(SizeBin).
-
-split(EncodedToken) ->
- case binary:split(EncodedToken, <<$.>>, [global]) of
- [_, _, _] = Split -> Split;
- _ -> throw({bad_request, <<"Malformed token">>})
- end.
-
-decode_passthrough(B64UrlEncoded) ->
- B64UrlEncoded.
-
-decode_b64url_json(B64UrlEncoded) ->
- try
- case b64url:decode(B64UrlEncoded) of
- {error, Reason} ->
- throw({bad_request, Reason});
- JsonEncoded ->
- jiffy:decode(JsonEncoded)
- end
- catch
- _:_ ->
- throw({bad_request, <<"Malformed token">>})
- end.
-
-props({Props}) ->
- Props;
-props(_) ->
- throw({bad_request, <<"Not an object">>}).
-
-assert_past(Name, Time) ->
- case Time < now_seconds() of
- true ->
- ok;
- false ->
- throw({unauthorized, <<Name/binary, " not in past">>})
- end.
-
-assert_future(Name, Time) ->
- case Time > now_seconds() of
- true ->
- ok;
- false ->
- throw({unauthorized, <<Name/binary, " not in future">>})
- end.
-
-now_seconds() ->
- {MegaSecs, Secs, _MicroSecs} = os:timestamp(),
- MegaSecs * 1000000 + Secs.
-
-prop(Prop, Props) ->
- proplists:get_value(Prop, Props).
-
--ifdef(OTP_RELEASE).
-
--if(?OTP_RELEASE >= 22).
-
-% OTP >= 22
-hmac(Alg, Key, Message) ->
- crypto:mac(hmac, Alg, Key, Message).
-
--else.
-
-% OTP >= 21, < 22
-hmac(Alg, Key, Message) ->
- crypto:hmac(Alg, Key, Message).
-
-% -if(?OTP_RELEASE >= 22)
--endif.
-
--else.
-
-% OTP < 21
-hmac(Alg, Key, Message) ->
- crypto:hmac(Alg, Key, Message).
-
-% -ifdef(OTP_RELEASE)
--endif.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-validate_payload_ignore_unchecked_props_test() ->
- ?assertEqual(ok, validate_payload(_Props = [], _Checks = [])),
- BogusProps = [
- {iss, bogus},
- {iat, bogus},
- {nbf, bogus},
- {exp, bogus}
- ],
- ?assertEqual(ok, validate_payload(BogusProps, _Checks = [])),
- ok.
-
--endif.
diff --git a/src/jwtf/src/jwtf_app.erl b/src/jwtf/src/jwtf_app.erl
deleted file mode 100644
index bd708e2a3..000000000
--- a/src/jwtf/src/jwtf_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(jwtf_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
- jwtf_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/jwtf/src/jwtf_keystore.erl b/src/jwtf/src/jwtf_keystore.erl
deleted file mode 100644
index 4c2933264..000000000
--- a/src/jwtf/src/jwtf_keystore.erl
+++ /dev/null
@@ -1,152 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(jwtf_keystore).
--behaviour(gen_server).
--behaviour(config_listener).
-
--include_lib("public_key/include/public_key.hrl").
-
-% public api.
--export([
- get/2,
- start_link/0
-]).
-
-% gen_server api.
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- terminate/2
-]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
-% public functions
-
-get(Alg, undefined) when is_binary(Alg) ->
- get(Alg, <<"_default">>);
-get(Alg, KID0) when is_binary(Alg), is_binary(KID0) ->
- Kty = kty(Alg),
- KID = binary_to_list(KID0),
- case ets:lookup(?MODULE, {Kty, KID}) of
- [] ->
- Key = get_from_config(Kty, KID),
- ok = gen_server:call(?MODULE, {set, Kty, KID, Key}),
- Key;
- [{{Kty, KID}, Key}] ->
- Key
- end.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-% gen_server functions
-
-init(_) ->
- ok = config:listen_for_changes(?MODULE, nil),
- ets:new(?MODULE, [public, named_table]),
- {ok, nil}.
-
-handle_call({set, Kty, KID, Key}, _From, State) ->
- true = ets:insert(?MODULE, {{Kty, KID}, Key}),
- {reply, ok, State}.
-
-handle_cast({delete, Kty, KID}, State) ->
- true = ets:delete(?MODULE, {Kty, KID}),
- {noreply, State};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-% config listener callback
-
-handle_config_change("jwt_keys", ConfigKey, _ConfigValue, _, _) ->
- case string:split(ConfigKey, ":") of
- [Kty, KID] ->
- gen_server:cast(?MODULE, {delete, Kty, KID});
- _ ->
- ignored
- end,
- {ok, nil};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, stop, _State) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(100, whereis(?MODULE), restart_config_listener).
-
-% private functions
-
-get_from_config(Kty, KID) ->
- case config:get("jwt_keys", string:join([Kty, KID], ":")) of
- undefined ->
- throw({bad_request, <<"Unknown kid">>});
- Encoded ->
- case Kty of
- "hmac" ->
- try
- base64:decode(Encoded)
- catch
- error:_ ->
- throw({bad_request, <<"Not a valid key">>})
- end;
- "rsa" ->
- case pem_decode(Encoded) of
- #'RSAPublicKey'{} = Key ->
- Key;
- _ ->
- throw({bad_request, <<"not an RSA public key">>})
- end;
- "ec" ->
- case pem_decode(Encoded) of
- {#'ECPoint'{}, _} = Key ->
- Key;
- _ ->
- throw({bad_request, <<"not an EC public key">>})
- end
- end
- end.
-
-pem_decode(PEM) ->
- BinPEM = iolist_to_binary(string:replace(PEM, "\\n", "\n", all)),
- case public_key:pem_decode(BinPEM) of
- [PEMEntry] ->
- public_key:pem_entry_decode(PEMEntry);
- [] ->
- throw({bad_request, <<"Not a valid key">>})
- end.
-
-kty(<<"HS", _/binary>>) ->
- "hmac";
-kty(<<"RS", _/binary>>) ->
- "rsa";
-kty(<<"ES", _/binary>>) ->
- "ec";
-kty(_) ->
- throw({bad_request, <<"Unknown kty">>}).
diff --git a/src/jwtf/src/jwtf_sup.erl b/src/jwtf/src/jwtf_sup.erl
deleted file mode 100644
index 98d354c96..000000000
--- a/src/jwtf/src/jwtf_sup.erl
+++ /dev/null
@@ -1,38 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(jwtf_sup).
-
--behaviour(supervisor).
-
-%% API
--export([start_link/0]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-%% ===================================================================
-%% API functions
-%% ===================================================================
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% ===================================================================
-%% Supervisor callbacks
-%% ===================================================================
-
-init([]) ->
- {ok, {{one_for_one, 5, 10}, [?CHILD(jwtf_keystore, worker)]}}.
diff --git a/src/jwtf/test/jwtf_keystore_tests.erl b/src/jwtf/test/jwtf_keystore_tests.erl
deleted file mode 100644
index c05d7f1b4..000000000
--- a/src/jwtf/test/jwtf_keystore_tests.erl
+++ /dev/null
@@ -1,61 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(jwtf_keystore_tests).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("public_key/include/public_key.hrl").
-
--define(HMAC_SECRET, "aGVsbG8=").
--define(RSA_SECRET,
- "-----BEGIN PUBLIC KEY-----\\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAztanwQtIx0sms+x7m1SF\\nh7EHJHkM2biTJ41jR89FsDE2gd3MChpaqxemS5GpNvfFKRvuHa4PUZ3JtRCBG1KM\\n/7EWIVTy1JQDr2mb8couGlQNqz4uXN2vkNQ0XszgjU4Wn6ZpvYxmqPFbmkRe8QSn\\nAy2Wf8jQgjsbez8eaaX0G9S1hgFZUN3KFu7SVmUDQNvWpQdaJPP+ms5Z0CqF7JLa\\nvJmSdsU49nlYw9VH/XmwlUBMye6HgR4ZGCLQS85frqF0xLWvi7CsMdchcIjHudXH\\nQK1AumD/VVZVdi8Q5Qew7F6VXeXqnhbw9n6Px25cCuNuh6u5+E6GUzXRrMpqo9vO\\nqQIDAQAB\\n-----END PUBLIC KEY-----\\n"
-).
--define(EC_SECRET,
- "-----BEGIN PUBLIC KEY-----\\nMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEDsr0lz/Dg3luarb+Kua0Wcj9WrfR23os\\nwHzakglb8GhWRDn+oZT0Bt/26sX8uB4/ij9PEOLHPo+IHBtX4ELFFVr5GTzlqcJe\\nyctaTDd1OOAPXYuc67EWtGZ3pDAzztRs\\n-----END PUBLIC KEY-----\\n"
-).
-
-setup() ->
- test_util:start_applications([couch_log, config, jwtf]),
- config:set("jwt_keys", "hmac:hmac", ?HMAC_SECRET),
- config:set("jwt_keys", "rsa:hmac", ?HMAC_SECRET),
- config:set("jwt_keys", "ec:hmac", ?HMAC_SECRET),
-
- config:set("jwt_keys", "hmac:rsa", ?RSA_SECRET),
- config:set("jwt_keys", "rsa:rsa", ?RSA_SECRET),
- config:set("jwt_keys", "ec:rsa", ?RSA_SECRET),
-
- config:set("jwt_keys", "hmac:ec", ?EC_SECRET),
- config:set("jwt_keys", "rsa:ec", ?EC_SECRET),
- config:set("jwt_keys", "ec:ec", ?EC_SECRET).
-
-teardown(_) ->
- test_util:stop_applications([couch_log, config, jwtf]).
-
-jwtf_keystore_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- ?_assertEqual(<<"hello">>, jwtf_keystore:get(<<"HS256">>, <<"hmac">>)),
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"RS256">>, <<"hmac">>)),
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"ES256">>, <<"hmac">>)),
-
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"HS256">>, <<"rsa">>)),
- ?_assertMatch(#'RSAPublicKey'{}, jwtf_keystore:get(<<"RS256">>, <<"rsa">>)),
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"ES256">>, <<"rsa">>)),
-
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"HS256">>, <<"ec">>)),
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"RS256">>, <<"ec">>)),
- ?_assertMatch({#'ECPoint'{}, _}, jwtf_keystore:get(<<"ES256">>, <<"ec">>))
- ]
- }.
diff --git a/src/jwtf/test/jwtf_tests.erl b/src/jwtf/test/jwtf_tests.erl
deleted file mode 100644
index f7f410e67..000000000
--- a/src/jwtf/test/jwtf_tests.erl
+++ /dev/null
@@ -1,425 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(jwtf_tests).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("public_key/include/public_key.hrl").
-
-encode(Header0, Payload0) ->
- Header1 = b64url:encode(jiffy:encode(Header0)),
- Payload1 = b64url:encode(jiffy:encode(Payload0)),
- Sig = b64url:encode(<<"bad">>),
- <<Header1/binary, $., Payload1/binary, $., Sig/binary>>.
-
-valid_header() ->
- {[{<<"typ">>, <<"JWT">>}, {<<"alg">>, <<"RS256">>}]}.
-
-jwt_io_rsa_pubkey() ->
- PublicKeyPEM = <<
- "-----BEGIN PUBLIC KEY-----\n"
- "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGH"
- "FHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6"
- "dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkl"
- "e+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB\n"
- "-----END PUBLIC KEY-----\n"
- >>,
- [PEMEntry] = public_key:pem_decode(PublicKeyPEM),
- public_key:pem_entry_decode(PEMEntry).
-
-jwt_io_ec_pubkey() ->
- PublicKeyPEM = <<
- "-----BEGIN PUBLIC KEY-----\n"
- "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEVs/o5+uQbTjL3chynL4wXgUg2R9"
- "q9UU8I5mEovUf86QZ7kOBIjJwqnzD1omageEHWwHdBO6B+dFabmdT9POxg==\n"
- "-----END PUBLIC KEY-----\n"
- >>,
- [PEMEntry] = public_key:pem_decode(PublicKeyPEM),
- public_key:pem_entry_decode(PEMEntry).
-
-b64_badarg_test() ->
- Encoded = <<"0.0.0">>,
- ?assertEqual(
- {error, {bad_request, <<"Malformed token">>}},
- jwtf:decode(Encoded, [], nil)
- ).
-
-b64_bad_block_test() ->
- Encoded = <<" aGVsbG8. aGVsbG8. aGVsbG8">>,
- ?assertEqual(
- {error, {bad_request, <<"Malformed token">>}},
- jwtf:decode(Encoded, [], nil)
- ).
-
-invalid_json_test() ->
- Encoded = <<"fQ.fQ.fQ">>,
- ?assertEqual(
- {error, {bad_request, <<"Malformed token">>}},
- jwtf:decode(Encoded, [], nil)
- ).
-
-truncated_json_test() ->
- Encoded = <<"ew.ew.ew">>,
- ?assertEqual(
- {error, {bad_request, <<"Malformed token">>}},
- jwtf:decode(Encoded, [], nil)
- ).
-
-missing_typ_test() ->
- Encoded = encode({[]}, []),
- ?assertEqual(
- {error, {bad_request, <<"Missing typ header parameter">>}},
- jwtf:decode(Encoded, [typ], nil)
- ).
-
-invalid_typ_test() ->
- Encoded = encode({[{<<"typ">>, <<"NOPE">>}]}, []),
- ?assertEqual(
- {error, {bad_request, <<"Invalid typ header parameter">>}},
- jwtf:decode(Encoded, [typ], nil)
- ).
-
-missing_alg_test() ->
- Encoded = encode({[]}, []),
- ?assertEqual(
- {error, {bad_request, <<"Missing alg header parameter">>}},
- jwtf:decode(Encoded, [alg], nil)
- ).
-
-invalid_alg_test() ->
- Encoded = encode({[{<<"alg">>, <<"NOPE">>}]}, []),
- ?assertEqual(
- {error, {bad_request, <<"Invalid alg header parameter">>}},
- jwtf:decode(Encoded, [alg], nil)
- ).
-
-missing_iss_test() ->
- Encoded = encode(valid_header(), {[]}),
- ?assertEqual(
- {error, {bad_request, <<"Missing iss claim">>}},
- jwtf:decode(Encoded, [{iss, right}], nil)
- ).
-
-invalid_iss_test() ->
- Encoded = encode(valid_header(), {[{<<"iss">>, <<"wrong">>}]}),
- ?assertEqual(
- {error, {bad_request, <<"Invalid iss claim">>}},
- jwtf:decode(Encoded, [{iss, right}], nil)
- ).
-
-missing_iat_test() ->
- Encoded = encode(valid_header(), {[]}),
- ?assertEqual(
- {error, {bad_request, <<"Missing iat claim">>}},
- jwtf:decode(Encoded, [iat], nil)
- ).
-
-invalid_iat_test() ->
- Encoded = encode(valid_header(), {[{<<"iat">>, <<"hello">>}]}),
- ?assertEqual(
- {error, {bad_request, <<"Invalid iat claim">>}},
- jwtf:decode(Encoded, [iat], nil)
- ).
-
-missing_nbf_test() ->
- Encoded = encode(valid_header(), {[]}),
- ?assertEqual(
- {error, {bad_request, <<"Missing nbf claim">>}},
- jwtf:decode(Encoded, [nbf], nil)
- ).
-
-invalid_nbf_test() ->
- Encoded = encode(valid_header(), {[{<<"nbf">>, 2 * now_seconds()}]}),
- ?assertEqual(
- {error, {unauthorized, <<"nbf not in past">>}},
- jwtf:decode(Encoded, [nbf], nil)
- ).
-
-missing_exp_test() ->
- Encoded = encode(valid_header(), {[]}),
- ?assertEqual(
- {error, {bad_request, <<"Missing exp claim">>}},
- jwtf:decode(Encoded, [exp], nil)
- ).
-
-invalid_exp_test() ->
- Encoded = encode(valid_header(), {[{<<"exp">>, 0}]}),
- ?assertEqual(
- {error, {unauthorized, <<"exp not in future">>}},
- jwtf:decode(Encoded, [exp], nil)
- ).
-
-missing_kid_test() ->
- Encoded = encode({[]}, {[]}),
- ?assertEqual(
- {error, {bad_request, <<"Missing kid claim">>}},
- jwtf:decode(Encoded, [kid], nil)
- ).
-
-public_key_not_found_test() ->
- Encoded = encode(
- {[{<<"alg">>, <<"RS256">>}, {<<"kid">>, <<"1">>}]},
- {[]}
- ),
- KS = fun(_, _) -> throw(not_found) end,
- Expected = {error, not_found},
- ?assertEqual(Expected, jwtf:decode(Encoded, [], KS)).
-
-bad_rs256_sig_test() ->
- Encoded = encode(
- {[{<<"typ">>, <<"JWT">>}, {<<"alg">>, <<"RS256">>}]},
- {[]}
- ),
- KS = fun(<<"RS256">>, undefined) -> jwt_io_rsa_pubkey() end,
- ?assertEqual(
- {error, {bad_request, <<"Bad signature">>}},
- jwtf:decode(Encoded, [], KS)
- ).
-
-bad_hs256_sig_test() ->
- Encoded = encode(
- {[{<<"typ">>, <<"JWT">>}, {<<"alg">>, <<"HS256">>}]},
- {[]}
- ),
- KS = fun(<<"HS256">>, undefined) -> <<"bad">> end,
- ?assertEqual(
- {error, {bad_request, <<"Bad HMAC">>}},
- jwtf:decode(Encoded, [], KS)
- ).
-
-malformed_token_test() ->
- ?assertEqual(
- {error, {bad_request, <<"Malformed token">>}},
- jwtf:decode(<<"a.b.c.d">>, [], nil)
- ).
-
-unknown_atom_check_test() ->
- ?assertError(
- {unknown_checks, [foo, bar]},
- jwtf:decode(<<"a.b.c">>, [exp, foo, iss, bar], nil)
- ).
-
-unknown_binary_check_test() ->
- ?assertError(
- {unknown_checks, [<<"bar">>]},
- jwtf:decode(<<"a.b.c">>, [exp, iss, <<"bar">>], nil)
- ).
-
-duplicate_check_test() ->
- ?assertError(
- {duplicate_checks, [exp]},
- jwtf:decode(<<"a.b.c">>, [exp, exp], nil)
- ).
-
-%% jwt.io generated
-hs256_test() ->
- EncodedToken = <<
- "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQ1Ni"
- "J9.eyJpc3MiOiJodHRwczovL2Zvby5jb20iLCJpYXQiOjAsImV4cCI"
- "6MTAwMDAwMDAwMDAwMDAsImtpZCI6ImJhciJ9.iS8AH11QHHlczkBn"
- "Hl9X119BYLOZyZPllOVhSBZ4RZs"
- >>,
- KS = fun(<<"HS256">>, <<"123456">>) -> <<"secret">> end,
- Checks = [{iss, <<"https://foo.com">>}, iat, exp, typ, alg, kid],
- ?assertMatch({ok, _}, catch jwtf:decode(EncodedToken, Checks, KS)).
-
-%% pip install PyJWT
-%% > import jwt
-%% > jwt.encode({'foo':'bar'}, 'secret', algorithm='HS384')
-hs384_test() ->
- EncodedToken = <<
- "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIif"
- "Q.2quwghs6I56GM3j7ZQbn-ASZ53xdBqzPzTDHm_CtVec32LUy-Ezy"
- "L3JjIe7WjL93"
- >>,
- KS = fun(<<"HS384">>, _) -> <<"secret">> end,
- ?assertMatch(
- {ok, {[{<<"foo">>, <<"bar">>}]}},
- catch jwtf:decode(EncodedToken, [], KS)
- ).
-
-%% pip install PyJWT
-%% > import jwt
-%% > jwt.encode({'foo':'bar'}, 'secret', algorithm='HS512')
-hs512_test() ->
- EncodedToken = <<
- "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYX"
- "IifQ.WePl7achkd0oGNB8XRF_LJwxlyiPZqpdNgdKpDboAjSTsW"
- "q-aOGNynTp8TOv8KjonFym8vwFwppXOLoLXbkIaQ"
- >>,
- KS = fun(<<"HS512">>, _) -> <<"secret">> end,
- ?assertMatch(
- {ok, {[{<<"foo">>, <<"bar">>}]}},
- catch jwtf:decode(EncodedToken, [], KS)
- ).
-
-%% jwt.io generated
-rs256_test() ->
- EncodedToken = <<
- "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0N"
- "TY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.Ek"
- "N-DOsnsuRjRO6BxXemmJDm3HbxrbRzXglbN2S4sOkopdU4IsDxTI8j"
- "O19W_A4K8ZPJijNLis4EZsHeY559a4DFOd50_OqgHGuERTqYZyuhtF"
- "39yxJPAjUESwxk2J5k_4zM3O-vtd1Ghyo4IbqKKSy6J9mTniYJPenn"
- "5-HIirE"
- >>,
-
- Checks = [sig, alg],
- KS = fun(<<"RS256">>, undefined) -> jwt_io_rsa_pubkey() end,
-
- ExpectedPayload =
- {[
- {<<"sub">>, <<"1234567890">>},
- {<<"name">>, <<"John Doe">>},
- {<<"admin">>, true}
- ]},
-
- ?assertMatch({ok, ExpectedPayload}, jwtf:decode(EncodedToken, Checks, KS)).
-
-%% jwt.io generated
-es256_test() ->
- EncodedToken = <<
- "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0N"
- "TY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.1g"
- "LptYop2guxSZHmf0ga292suPxwBdkijA1ZopCSSYLBdEl8Bg2fsxoU"
- "cZuSGztMU9qAKV2p80NQn8czeGhHXA"
- >>,
-
- Checks = [sig, alg],
- KS = fun(<<"ES256">>, undefined) -> jwt_io_ec_pubkey() end,
-
- ExpectedPayload =
- {[
- {<<"sub">>, <<"1234567890">>},
- {<<"name">>, <<"John Doe">>},
- {<<"admin">>, true}
- ]},
-
- ?assertMatch({ok, ExpectedPayload}, jwtf:decode(EncodedToken, Checks, KS)).
-
-encode_missing_alg_test() ->
- ?assertEqual(
- {error, {bad_request, <<"Missing alg header parameter">>}},
- jwtf:encode({[]}, {[]}, <<"foo">>)
- ).
-
-encode_invalid_alg_test() ->
- ?assertEqual(
- {error, {bad_request, <<"Invalid alg header parameter">>}},
- jwtf:encode({[{<<"alg">>, <<"BOGUS">>}]}, {[]}, <<"foo">>)
- ).
-
-encode_decode_test_() ->
- [{Alg, encode_decode(Alg)} || Alg <- jwtf:valid_algorithms()].
-
-encode_decode(Alg) ->
- {EncodeKey, DecodeKey} =
- case Alg of
- <<"RS", _/binary>> ->
- create_rsa_keypair();
- <<"ES", _/binary>> ->
- create_ec_keypair();
- <<"HS", _/binary>> ->
- Key = <<"a-super-secret-key">>,
- {Key, Key}
- end,
- Claims = claims(),
- {ok, Encoded} = jwtf:encode(header(Alg), Claims, EncodeKey),
- KS = fun(_, _) -> DecodeKey end,
- {ok, Decoded} = jwtf:decode(Encoded, [], KS),
- ?_assertMatch(Claims, Decoded).
-
-header(Alg) ->
- {[
- {<<"typ">>, <<"JWT">>},
- {<<"alg">>, Alg},
- {<<"kid">>, <<"20170520-00:00:00">>}
- ]}.
-
-claims() ->
- EpochSeconds = os:system_time(second),
- {[
- {<<"iat">>, EpochSeconds},
- {<<"exp">>, EpochSeconds + 3600}
- ]}.
-
-create_rsa_keypair() ->
- %% https://tools.ietf.org/html/rfc7517#appendix-C
- N = decode(<<
- "t6Q8PWSi1dkJj9hTP8hNYFlvadM7DflW9mWepOJhJ66w7nyoK1gPNqFMSQRy"
- "O125Gp-TEkodhWr0iujjHVx7BcV0llS4w5ACGgPrcAd6ZcSR0-Iqom-QFcNP"
- "8Sjg086MwoqQU_LYywlAGZ21WSdS_PERyGFiNnj3QQlO8Yns5jCtLCRwLHL0"
- "Pb1fEv45AuRIuUfVcPySBWYnDyGxvjYGDSM-AqWS9zIQ2ZilgT-GqUmipg0X"
- "OC0Cc20rgLe2ymLHjpHciCKVAbY5-L32-lSeZO-Os6U15_aXrk9Gw8cPUaX1"
- "_I8sLGuSiVdt3C_Fn2PZ3Z8i744FPFGGcG1qs2Wz-Q"
- >>),
- E = decode(<<"AQAB">>),
- D = decode(<<
- "GRtbIQmhOZtyszfgKdg4u_N-R_mZGU_9k7JQ_jn1DnfTuMdSNprTeaSTyWfS"
- "NkuaAwnOEbIQVy1IQbWVV25NY3ybc_IhUJtfri7bAXYEReWaCl3hdlPKXy9U"
- "vqPYGR0kIXTQRqns-dVJ7jahlI7LyckrpTmrM8dWBo4_PMaenNnPiQgO0xnu"
- "ToxutRZJfJvG4Ox4ka3GORQd9CsCZ2vsUDmsXOfUENOyMqADC6p1M3h33tsu"
- "rY15k9qMSpG9OX_IJAXmxzAh_tWiZOwk2K4yxH9tS3Lq1yX8C1EWmeRDkK2a"
- "hecG85-oLKQt5VEpWHKmjOi_gJSdSgqcN96X52esAQ"
- >>),
- RSAPrivateKey = #'RSAPrivateKey'{
- modulus = N,
- publicExponent = E,
- privateExponent = D
- },
- RSAPublicKey = #'RSAPublicKey'{
- modulus = N,
- publicExponent = E
- },
- {RSAPrivateKey, RSAPublicKey}.
-
-create_ec_keypair() ->
- PublicPEM = <<
- "-----BEGIN PUBLIC KEY-----\n"
- "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEVs/o5+uQbTjL3chynL4wXgUg2R9"
- "q9UU8I5mEovUf86QZ7kOBIjJwqnzD1omageEHWwHdBO6B+dFabmdT9POxg==\n"
- "-----END PUBLIC KEY-----"
- >>,
- PrivatePEM = <<
- "-----BEGIN PRIVATE KEY-----\n"
- "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgevZzL1gdAFr88hb2"
- "OF/2NxApJCzGCEDdfSp6VQO30hyhRANCAAQRWz+jn65BtOMvdyHKcvjBeBSDZH2r"
- "1RTwjmYSi9R/zpBnuQ4EiMnCqfMPWiZqB4QdbAd0E7oH50VpuZ1P087G\n"
- "-----END PRIVATE KEY-----"
- >>,
-
- [PublicEntry] = public_key:pem_decode(PublicPEM),
- ECPublicKey = public_key:pem_entry_decode(PublicEntry),
-
- [PrivateEntry] = public_key:pem_decode(PrivatePEM),
- ECPrivateKey =
- case public_key:pem_entry_decode(PrivateEntry) of
- #'ECPrivateKey'{} = Key ->
- Key;
- {'PrivateKeyInfo', v1,
- {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-ecPublicKey',
- {asn1_OPENTYPE, Parameters}},
- PrivKey, _} ->
- EcPrivKey = public_key:der_decode('ECPrivateKey', PrivKey),
- EcPrivKey#'ECPrivateKey'{
- parameters = public_key:der_decode('EcpkParameters', Parameters)
- }
- end,
-
- {ECPrivateKey, ECPublicKey}.
-
-decode(Goop) ->
- crypto:bytes_to_integer(b64url:decode(Goop)).
-
-now_seconds() ->
- {MegaSecs, Secs, _MicroSecs} = os:timestamp(),
- MegaSecs * 1000000 + Secs.
diff --git a/src/ken/README.md b/src/ken/README.md
deleted file mode 100644
index a5a657611..000000000
--- a/src/ken/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-ken
-===
-
-Ken builds views and search indexes. Automatically.
-
-#### Overview
-
-When the couch\_db\_update event is triggered with an `updated` event, ken will spawn indexing jobs for view groups and search indexes (one job per view group shard or search index shard). If a `deleted` event is triggered, all jobs associated with the corresponding database shard will be removed.
-
-#### Testing
-
-Testing for ken is expected to be executed from the top-level `couchdb` repo as part of a `make check` run. The isolated ken tests can be run with `rebar eunit apps=ken verbose=1` from the `couchdb` root directory.
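
For orientation, a minimal sketch of driving ken by hand (for example from a remsh session), based on the public functions in `ken.erl` and `ken_server.erl` removed below; the shard and database names are illustrative assumptions.

```erlang
%% ken:add/1 queues a single database shard; ken:add_all_shards/1 expands a
%% clustered database name via mem3. Names here are illustrative.
ken:add(<<"shards/00000000-1fffffff/mydb.1525663363">>).
ken:add_all_shards(<<"mydb">>).

%% Runtime tuning through ken_server; each setter returns the previous value.
ken_server:set_batch_size(5).
ken_server:set_delay(1000).

%% A shard can be excluded from indexing via the "ken.ignore" config section.
config:set("ken.ignore", "shards/00000000-1fffffff/mydb.1525663363", "true").
```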
diff --git a/src/ken/rebar.config.script b/src/ken/rebar.config.script
deleted file mode 100644
index 3344206e5..000000000
--- a/src/ken/rebar.config.script
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-HaveDreyfus = element(1, file:list_dir("../dreyfus")) == ok.
-
-HastingsHome = os:getenv("HASTINGS_HOME", "../hastings").
-HaveHastings = element(1, file:list_dir(HastingsHome)) == ok.
-
-CurrOpts = case lists:keyfind(erl_opts, 1, CONFIG) of
- {erl_opts, Opts} -> Opts;
- false -> []
-end,
-
-NewOpts =
- if HaveDreyfus -> [{d, 'HAVE_DREYFUS'}]; true -> [] end ++
- if HaveHastings -> [{d, 'HAVE_HASTINGS'}]; true -> [] end ++
- [{i, "../"}] ++ CurrOpts.
-
-lists:keystore(erl_opts, 1, CONFIG, {erl_opts, NewOpts}).
diff --git a/src/ken/src/ken.app.src.script b/src/ken/src/ken.app.src.script
deleted file mode 100644
index dcf4a23d1..000000000
--- a/src/ken/src/ken.app.src.script
+++ /dev/null
@@ -1,38 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name}.
-HaveHastings = code:lib_dir(hastings) /= {error, bad_name}.
-
-BaseApplications = [
- kernel,
- stdlib,
- couch_log,
- couch_event,
- couch,
- config
-].
-
-Applications =
- if HaveDreyfus -> [dreyfus]; true -> [] end ++
- if HaveHastings -> [hastings]; true -> [] end ++
- BaseApplications.
-
-{application, ken,
- [
- {description, ""},
- {vsn, git},
- {registered, []},
- {applications, Applications},
- {mod, { ken_app, []}},
- {env, []}
- ]}.
diff --git a/src/ken/src/ken.erl b/src/ken/src/ken.erl
deleted file mode 100644
index 87a724ba1..000000000
--- a/src/ken/src/ken.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken).
-
--export([add/1]).
--export([remove/1]).
--export([add_all_shards/1]).
-
-% Add a database shard to be indexed.
-add(DbName) ->
- ken_server:add(DbName).
-
-% Remove all pending jobs for a database shard.
-remove(DbName) ->
- ken_server:remove(DbName).
-
-% Add all shards for a database to be indexed.
-add_all_shards(DbName) ->
- ken_server:add_all_shards(DbName).
diff --git a/src/ken/src/ken_app.erl b/src/ken/src/ken_app.erl
deleted file mode 100644
index 15f235d42..000000000
--- a/src/ken/src/ken_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
- ken_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/ken/src/ken_event_handler.erl b/src/ken/src/ken_event_handler.erl
deleted file mode 100644
index f45fec087..000000000
--- a/src/ken/src/ken_event_handler.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_event_handler).
--behaviour(couch_event_listener).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_event/3,
- handle_cast/2,
- handle_info/2
-]).
-
-start_link() ->
- couch_event_listener:start_link(?MODULE, nil, [all_dbs]).
-
-%% couch_event_listener callbacks
-
-init(_) ->
- {ok, nil}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_event(DbName, updated, State) ->
- ken:add(DbName),
- {ok, State};
-handle_event(DbName, deleted, State) ->
- ken:remove(DbName),
- {ok, State};
-handle_event(DbName, ddoc_updated, State) ->
- ken:add_all_shards(DbName),
- {ok, State};
-handle_event(_DbName, _Event, State) ->
- {ok, State}.
-
-handle_cast(_Msg, State) ->
- {ok, State}.
-
-handle_info(_Msg, State) ->
- {ok, State}.
diff --git a/src/ken/src/ken_server.erl b/src/ken/src/ken_server.erl
deleted file mode 100644
index 9f869b379..000000000
--- a/src/ken/src/ken_server.erl
+++ /dev/null
@@ -1,605 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_server).
-
-% gen_server boilerplate
--behaviour(gen_server).
--vsn(1).
--export([init/1, terminate/2]).
--export([handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-
-% Public interface
--export([start_link/0]).
--export([add/1]).
--export([remove/1]).
--export([add_all_shards/1]).
--export([set_batch_size/1]).
--export([set_delay/1]).
--export([set_limit/1]).
--export([set_prune_interval/1]).
-
-% exports for spawn
--export([update_db_indexes/2]).
-
--record(job, {
- % {DbName, GroupId} for view. {DbName, DDocId, IndexId} for search.
- name,
- % Pid of either view group or search index
- server,
- worker_pid = nil,
- seq = 0,
- lru = erlang:monotonic_time()
-}).
-
--record(state, {
- q = queue:new(),
- dbworker = nil,
- limit = 20,
- delay = 5000,
- batch_size = 1,
- prune_interval = 60000,
- pruned_last
-}).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--ifdef(HAVE_DREYFUS).
--include_lib("dreyfus/include/dreyfus.hrl").
--endif.
-
--ifdef(HAVE_HASTINGS).
--include_lib("hastings/src/hastings.hrl").
--endif.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%% @doc Adds a database shard to be indexed
--spec add(binary()) -> ok.
-add(DbName) ->
- gen_server:cast(?MODULE, {add, DbName}).
-
-%% @doc Removes all the pending jobs for a database shard.
--spec remove(binary()) -> ok.
-remove(DbName) ->
- gen_server:cast(?MODULE, {remove, DbName}).
-
-%% @doc Adds all the shards for a database to be indexed.
--spec add_all_shards(binary()) -> ok.
-add_all_shards(DbName) ->
- try
- Shards = mem3:shards(mem3:dbname(DbName)),
- lists:map(
- fun(Shard) ->
- rexi:cast(Shard#shard.node, {ken_server, add, [Shard#shard.name]})
- end,
- Shards
- )
- catch
- error:database_does_not_exist ->
- ok
- end.
-
-%% @doc Changes the configured value for a batch size.
-%% Returns previous value.
--spec set_batch_size(pos_integer()) -> pos_integer().
-set_batch_size(BS) when is_integer(BS), BS > 0 ->
- gen_server:call(?MODULE, {set_batch_size, BS}).
-
-%% @doc Changes the configured value for a delay between batches.
-%% Returns previous value.
--spec set_delay(non_neg_integer()) -> non_neg_integer().
-set_delay(Delay) when is_integer(Delay), Delay >= 0 ->
- gen_server:call(?MODULE, {set_delay, Delay}).
-
-%% @doc Changes the configured value for a limit.
-%% Returns previous value.
--spec set_limit(pos_integer()) -> pos_integer().
-set_limit(Limit) when is_integer(Limit), Limit > 0 ->
- gen_server:call(?MODULE, {set_limit, Limit}).
-
-%% @doc Changes the configured value for a prune interval.
-%% Returns previous value.
--spec set_prune_interval(pos_integer()) -> pos_integer().
-set_prune_interval(Interval) when is_integer(Interval), Interval > 1000 ->
- gen_server:call(?MODULE, {set_prune_interval, Interval}).
-
-%% gen_server callbacks
-
-init(_) ->
- erlang:send(self(), start_event_handler),
- ets:new(ken_pending, [named_table]),
- ets:new(ken_resubmit, [named_table]),
- ets:new(ken_workers, [named_table, public, {keypos, #job.name}]),
- Limit = list_to_integer(config("limit", "20")),
- {ok, #state{pruned_last = erlang:monotonic_time(), limit = Limit}}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call({set_batch_size, BS}, _From, #state{batch_size = Old} = State) ->
- {reply, Old, State#state{batch_size = BS}, 0};
-handle_call({set_delay, Delay}, _From, #state{delay = Old} = State) ->
- {reply, Old, State#state{delay = Delay}, 0};
-handle_call({set_limit, Limit}, _From, #state{limit = Old} = State) ->
- {reply, Old, State#state{limit = Limit}, 0};
-handle_call({set_prune_interval, Interval}, _From, State) ->
- Old = State#state.prune_interval,
- {reply, Old, State#state{prune_interval = Interval}, 0};
-handle_call(Msg, From, State) ->
- {stop, {unknown_call, Msg, From}, State}.
-
-% Queues a DB to (maybe) have indexing jobs spawned.
-handle_cast({add, DbName}, State) ->
- case ets:insert_new(ken_pending, {DbName}) of
- true ->
- {noreply, State#state{q = queue:in(DbName, State#state.q)}, 0};
- false ->
- {noreply, State, 0}
- end;
-handle_cast({remove, DbName}, State) ->
- Q2 = queue:filter(fun(X) -> X =/= DbName end, State#state.q),
- ets:delete(ken_pending, DbName),
- % Delete search index workers
- ets:match_delete(ken_workers, #job{name = {DbName, '_', '_'}, _ = '_'}),
- % Delete view index workers
- ets:match_delete(ken_workers, #job{name = {DbName, '_'}, _ = '_'}),
- % TODO kill off active jobs for this DB as well
- {noreply, State#state{q = Q2}, 0};
-handle_cast({resubmit, DbName}, State) ->
- ets:delete(ken_resubmit, DbName),
- handle_cast({add, DbName}, State);
-% st index job names have 3 elements, 3rd being 'hastings'. See job record definition.
-handle_cast({trigger_update, #job{name = {_, _, hastings}, server = GPid, seq = Seq} = Job}, State) ->
- % hastings_index:await will trigger a hastings index update
- {Pid, _} = erlang:spawn_monitor(
- hastings_index,
- await,
- [GPid, Seq]
- ),
- Now = erlang:monotonic_time(),
- ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
- {noreply, State, 0};
-% search index job names have 3 elements. See job record definition.
-handle_cast({trigger_update, #job{name = {_, _, _}, server = GPid, seq = Seq} = Job}, State) ->
- % dreyfus_index:await will trigger a search index update.
- {Pid, _} = erlang:spawn_monitor(
- dreyfus_index,
- await,
- [GPid, Seq]
- ),
- Now = erlang:monotonic_time(),
- ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
- {noreply, State, 0};
-handle_cast({trigger_update, #job{name = {_, _}, server = SrvPid, seq = Seq} = Job}, State) ->
- % couch_index:get_state/2 will trigger a view group index update.
- {Pid, _} = erlang:spawn_monitor(couch_index, get_state, [SrvPid, Seq]),
- Now = erlang:monotonic_time(),
- ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
- {noreply, State, 0};
-handle_cast(Msg, State) ->
- {stop, {unknown_cast, Msg}, State}.
-
-handle_info({gen_event_EXIT, ken_event_handler, Reason}, State) ->
- couch_log:error("ken_event_handler terminated: ~w", [Reason]),
- erlang:send_after(5000, self(), start_event_handler),
- {ok, State, 0};
-handle_info(start_event_handler, State) ->
- case ken_event_handler:start_link() of
- {ok, _Pid} ->
- ok;
- Error ->
- couch_log:error("ken_event_handler init: ~w", [Error]),
- erlang:send_after(5000, self(), start_event_handler)
- end,
- {noreply, State, 0};
-handle_info(timeout, #state{prune_interval = I, pruned_last = Last} = State) ->
- Now = erlang:monotonic_time(),
- Interval = erlang:convert_time_unit(
- State#state.delay, millisecond, native
- ),
- case Now - Last > Interval of
- true ->
- NewState = prune_worker_table(State);
- _ ->
- NewState = State
- end,
- {noreply, maybe_start_next_queued_job(NewState), I};
-handle_info({'DOWN', _, _, Pid, Reason}, #state{dbworker = {Name, Pid}} = St) ->
- maybe_resubmit(Name, Reason),
- {noreply, St#state{dbworker = nil}, 0};
-handle_info({'DOWN', _, _, Pid, Reason}, State) ->
- debrief_worker(Pid, Reason, State),
- {noreply, State, 0};
-handle_info(Msg, State) ->
- {stop, {unknown_info, Msg}, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%% private functions
-
-maybe_start_next_queued_job(#state{dbworker = {_, _}} = State) ->
- State;
-maybe_start_next_queued_job(#state{q = Q} = State) ->
- IncrementalChannels = list_to_integer(config("incremental_channels", "80")),
- BatchChannels = list_to_integer(config("batch_channels", "20")),
- TotalChannels = IncrementalChannels + BatchChannels,
- case queue:out(Q) of
- {{value, DbName}, Q2} ->
- case skip_job(DbName) of
- true ->
- % job is either being resubmitted or ignored, skip it
- ets:delete(ken_pending, DbName),
- maybe_start_next_queued_job(State#state{q = Q2});
- false ->
- case get_active_count() of
- A when A < TotalChannels ->
- Args = [DbName, State],
- {Pid, _} = spawn_monitor(?MODULE, update_db_indexes, Args),
- ets:delete(ken_pending, DbName),
- State#state{dbworker = {DbName, Pid}, q = Q2};
- _ ->
- State#state{q = queue:in_r(DbName, Q2)}
- end
- end;
- {empty, Q} ->
- State
- end.
-
-skip_job(DbName) ->
- ets:member(ken_resubmit, DbName) orelse ignore_db(DbName).
-
-ignore_db(DbName) ->
- case config:get("ken.ignore", ?b2l(DbName), false) of
- "true" ->
- true;
- _ ->
- false
- end.
-
-get_active_count() ->
- MatchSpec = [{#job{worker_pid = '$1', _ = '_'}, [{is_pid, '$1'}], [true]}],
- ets:select_count(ken_workers, MatchSpec).
-
-% If any indexing job fails, resubmit requests for all indexes.
-update_db_indexes(Name, State) ->
- {ok, DDocs} = design_docs(Name),
- RandomSorted = lists:sort([{rand:uniform(), D} || D <- DDocs]),
- Resubmit = lists:foldl(
- fun({_, DDoc}, Acc) ->
- JsonDDoc = couch_doc:from_json_obj(DDoc),
- case update_ddoc_indexes(Name, JsonDDoc, State) of
- ok -> Acc;
- _ -> true
- end
- end,
- false,
- RandomSorted
- ),
- if
- Resubmit -> exit(resubmit);
- true -> ok
- end.
-
-design_docs(Name) ->
- try
- case fabric:design_docs(mem3:dbname(Name)) of
- {error, {maintenance_mode, _, _Node}} ->
- {ok, []};
- Else ->
- Else
- end
- catch
- error:database_does_not_exist ->
- {ok, []}
- end.
-
-% Returns an error if any job creation fails.
-update_ddoc_indexes(Name, #doc{} = Doc, State) ->
- {ok, Db} =
- case couch_db:open_int(Name, []) of
- {ok, _} = Resp -> Resp;
- Else -> exit(Else)
- end,
- Seq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- ViewUpdated =
- case should_update(Doc, <<"views">>) of
- true ->
- try couch_mrview_util:ddoc_to_mrst(Name, Doc) of
- {ok, MRSt} -> update_ddoc_views(Name, MRSt, Seq, State)
- catch
- _:_ ->
- ok
- end;
- false ->
- ok
- end,
- SearchUpdated = search_updated(Name, Doc, Seq, State),
- STUpdated = st_updated(Name, Doc, Seq, State),
- case {ViewUpdated, SearchUpdated, STUpdated} of
- {ok, ok, ok} -> ok;
- _ -> resubmit
- end.
-
--ifdef(HAVE_DREYFUS).
-search_updated(Name, Doc, Seq, State) ->
- case should_update(Doc, <<"indexes">>) of
- true ->
- try dreyfus_index:design_doc_to_indexes(Doc) of
- SIndexes -> update_ddoc_search_indexes(Name, SIndexes, Seq, State)
- catch
- _:_ ->
- ok
- end;
- false ->
- ok
- end.
--else.
-search_updated(_Name, _Doc, _Seq, _State) ->
- ok.
--endif.
-
--ifdef(HAVE_HASTINGS).
-st_updated(Name, Doc, Seq, State) ->
- case should_update(Doc, <<"st_indexes">>) of
- true ->
- try hastings_index:design_doc_to_indexes(Doc) of
- STIndexes -> update_ddoc_st_indexes(Name, STIndexes, Seq, State)
- catch
- _:_ ->
- ok
- end;
- false ->
- ok
- end.
--else.
-st_updated(_Name, _Doc, _Seq, _State) ->
- ok.
--endif.
-
-should_update(#doc{body = {Props}}, IndexType) ->
- case couch_util:get_value(<<"autoupdate">>, Props) of
- false ->
- false;
- {AUProps} ->
- case couch_util:get_value(IndexType, AUProps) of
- false ->
- false;
- _ ->
- true
- end;
- _ ->
- true
- end.
-
-update_ddoc_views(Name, MRSt, Seq, State) ->
- Language = couch_mrview_index:get(language, MRSt),
- Allowed = lists:member(Language, allowed_languages()),
- Views = couch_mrview_index:get(views, MRSt),
- if
- Allowed andalso Views =/= [] ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, MRSt),
- GroupName = couch_mrview_index:get(idx_name, MRSt),
- maybe_start_job({Name, GroupName}, Pid, Seq, State);
- true ->
- ok
- end.
-
--ifdef(HAVE_DREYFUS).
-update_ddoc_search_indexes(DbName, Indexes, Seq, State) ->
- if
- Indexes =/= [] ->
- % Spawn a job for each search index in the ddoc
- lists:foldl(
- fun(#index{name = IName, ddoc_id = DDocName} = Index, Acc) ->
- case dreyfus_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case maybe_start_job({DbName, DDocName, IName}, Pid, Seq, State) of
- resubmit -> resubmit;
- _ -> Acc
- end;
- _ ->
- % If any job fails, retry the db.
- resubmit
- end
- end,
- ok,
- Indexes
- );
- true ->
- ok
- end.
--endif.
-
--ifdef(HAVE_HASTINGS).
-update_ddoc_st_indexes(DbName, Indexes, Seq, State) ->
- if
- Indexes =/= [] ->
- % The record name in hastings is #h_idx rather than #index as it is for dreyfus
- % Spawn a job for each spatial index in the ddoc
- lists:foldl(
- fun(#h_idx{ddoc_id = DDocName} = Index, Acc) ->
- case hastings_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case maybe_start_job({DbName, DDocName, hastings}, Pid, Seq, State) of
- resubmit -> resubmit;
- _ -> Acc
- end;
- _ ->
- % If any job fails, retry the db.
- resubmit
- end
- end,
- ok,
- Indexes
- );
- true ->
- ok
- end.
--endif.
-
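-% Decide whether an update job for Name should be triggered right now.
-% Batch channels accept any pending update; the remaining (incremental)
-% channels only accept updates whose backlog (Seq minus the index's current
-% update sequence) stays below the max_incremental_updates threshold. For
-% jobs that already have an idle worker entry, the configured batch size
-% and delay are also taken into account.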
-should_start_job(#job{name = Name, seq = Seq, server = Pid}, State) ->
- Threshold = list_to_integer(config("max_incremental_updates", "1000")),
- IncrementalChannels = list_to_integer(config("incremental_channels", "80")),
- BatchChannels = list_to_integer(config("batch_channels", "20")),
- TotalChannels = IncrementalChannels + BatchChannels,
- A = get_active_count(),
- #state{delay = Delay, batch_size = BS} = State,
- case ets:lookup(ken_workers, Name) of
- [] ->
- if
- A < BatchChannels ->
- true;
- A < TotalChannels ->
- case Name of
- % st_index name has three elements
- {_, _, hastings} ->
- {ok, CurrentSeq} = hastings_index:await(Pid, 0),
- (Seq - CurrentSeq) < Threshold;
- % View name has two elements.
- {_, _} ->
- % Since seq is 0, couch_index:get_state/2 won't
- % spawn an index update.
- {ok, MRSt} = couch_index:get_state(Pid, 0),
- CurrentSeq = couch_mrview_index:get(update_seq, MRSt),
- (Seq - CurrentSeq) < Threshold;
- % Search name has three elements.
- {_, _, _} ->
- {ok, _IndexPid, CurrentSeq} = dreyfus_index:await(Pid, 0),
- (Seq - CurrentSeq) < Threshold;
- % Should never happen, but if it does, ignore.
- _ ->
- false
- end;
- true ->
- false
- end;
- [#job{worker_pid = nil, lru = LRU, seq = OldSeq}] ->
- Now = erlang:monotonic_time(),
- DeltaT = erlang:convert_time_unit(Now - LRU, native, millisecond),
- if
- A < BatchChannels, (Seq - OldSeq) >= BS ->
- true;
- A < BatchChannels, DeltaT > Delay ->
- true;
- A < TotalChannels, (Seq - OldSeq) < Threshold, DeltaT > Delay ->
- true;
- true ->
- false
- end;
- _ ->
- false
- end.
-
-maybe_start_job(JobName, IndexPid, Seq, State) ->
- Job = #job{
- name = JobName,
- server = IndexPid,
- seq = Seq
- },
- case should_start_job(Job, State) of
- true ->
- gen_server:cast(?MODULE, {trigger_update, Job});
- false ->
- resubmit
- end.
-
-debrief_worker(Pid, Reason, _State) ->
- case ets:match_object(ken_workers, #job{worker_pid = Pid, _ = '_'}) of
- [#job{name = Name} = Job] ->
- case Name of
- {DbName, _} ->
- maybe_resubmit(DbName, Reason);
- {DbName, _, _} ->
- maybe_resubmit(DbName, Reason)
- end,
- ets:insert(ken_workers, Job#job{worker_pid = nil});
- % should never happen, but if it does, ignore
- [] ->
- ok
- end.
-
-maybe_resubmit(_DbName, normal) ->
- ok;
-maybe_resubmit(_DbName, {database_does_not_exist, _}) ->
- ok;
-maybe_resubmit(_DbName, {not_found, no_db_file}) ->
- ok;
-maybe_resubmit(DbName, resubmit) ->
- resubmit(60000, DbName);
-maybe_resubmit(DbName, _) ->
- resubmit(5000, DbName).
-
-resubmit(Delay, DbName) ->
- case ets:insert_new(ken_resubmit, {DbName}) of
- true ->
- erlang:send_after(Delay, ?MODULE, {'$gen_cast', {resubmit, DbName}});
- false ->
- ok
- end.
-
-prune_worker_table(State) ->
- % remove all entries older than specified `delay` in milliseconds
- Delay = erlang:convert_time_unit(State#state.delay, millisecond, native),
- C = erlang:monotonic_time() - Delay,
-    %% fun(#job{worker_pid=nil, lru=A}) when A < C -> true end
- MatchHead = #job{worker_pid = nil, lru = '$1', _ = '_'},
- Guard = {'<', '$1', C},
- ets:select_delete(ken_workers, [{MatchHead, [Guard], [true]}]),
- State#state{pruned_last = erlang:monotonic_time()}.
-
-allowed_languages() ->
- Config =
- couch_proc_manager:get_servers_from_env("COUCHDB_QUERY_SERVER_") ++
- couch_proc_manager:get_servers_from_env("COUCHDB_NATIVE_QUERY_SERVER_"),
- Allowed = [list_to_binary(string:to_lower(Lang)) || {Lang, _Cmd} <- Config],
- [<<"query">> | Allowed].
-
-config(Key, Default) ->
- config:get("ken", Key, Default).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-prune_old_entries_test() ->
- {
- setup,
- fun() ->
- ets:new(ken_workers, [named_table, public, {keypos, #job.name}])
- end,
- fun(_) ->
- catch ets:delete(ken_workers)
- end,
- ?_test(begin
- lists:foreach(
- fun(Idx) ->
- ets:insert(ken_workers, #job{name = Idx}),
- timer:sleep(100)
- end,
- lists:seq(1, 3)
- ),
- prune_worker_table(#state{delay = 250}),
- ?assertEqual(
- [2, 3],
- lists:usort(
- [N || #job{name = N} <- ets:tab2list(ken_workers)]
- )
- ),
- ok
- end)
- }.
-
--endif.
diff --git a/src/ken/src/ken_sup.erl b/src/ken/src/ken_sup.erl
deleted file mode 100644
index 8c06592e7..000000000
--- a/src/ken/src/ken_sup.erl
+++ /dev/null
@@ -1,32 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_sup).
-
--behaviour(supervisor).
-
-%% API
--export([start_link/0]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% supervisor callbacks
-
-init([]) ->
- {ok, {{one_for_one, 5, 10}, [?CHILD(ken_server, worker)]}}.
diff --git a/src/ken/test/config.ini b/src/ken/test/config.ini
deleted file mode 100644
index a28eae4c0..000000000
--- a/src/ken/test/config.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[ken]
-limit = 42
diff --git a/src/ken/test/ken_server_test.erl b/src/ken/test/ken_server_test.erl
deleted file mode 100644
index 090c5570a..000000000
--- a/src/ken/test/ken_server_test.erl
+++ /dev/null
@@ -1,90 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_server_test).
-
--include_lib("eunit/include/eunit.hrl").
-
-%% hardcoded defaults: limit: 20; batch: 1; delay: 5000; prune: 60000
-default_test_() ->
- {inorder,
- {setup, fun setup_default/0, fun teardown/1, [
- set_builder("returns default", set_limit, 12, 20),
- set_builder("keeps set", set_limit, 6, 12),
- set_builder("returns default", set_batch_size, 3, 1),
- set_builder("keeps set", set_batch_size, 6, 3),
- set_builder("returns default", set_delay, 7000, 5000),
- set_builder("keeps set", set_delay, 10000, 7000),
- set_builder("returns default", set_prune_interval, 70000, 60000),
- set_builder("keeps set", set_prune_interval, 80000, 70000)
- ]}}.
-
-exception_test_() ->
- {inorder,
- {foreach, fun setup_default/0, fun teardown/1, [
- exception_builder("exception on zero", set_limit, 0),
- exception_builder("exception on negative", set_limit, -12),
- exception_builder("exception on zero", set_batch_size, 0),
- exception_builder("exception on negative", set_batch_size, -12),
- set_builder("no exception on zero", set_delay, 0, 5000),
- exception_builder("exception on negative", set_delay, -12),
- exception_builder("exception on zero", set_prune_interval, 0),
- exception_builder("exception on negative", set_prune_interval, -12)
- ]}}.
-
-config_test_() ->
- {inorder,
- {setup, fun setup_config/0, fun teardown/1, [
- set_builder("reads config", set_limit, 24, 42),
- set_builder("keeps set", set_limit, 6, 24)
- ]}}.
-
-setup_default() ->
- {ok, EventPid} = start_server(couch_event_server),
- {ok, CfgPid} = start_server(config),
- {ok, KenPid} = start_server(ken_server),
- [{ken_pid, KenPid}, {cfg_pid, CfgPid}, {event_pid, EventPid}].
-
-setup_config() ->
- {ok, Pwd} = file:get_cwd(),
- Config = filename:join([Pwd, "..", "test", "config.ini"]),
- {ok, EventPid} = start_server(couch_event_server),
- {ok, CfgPid} = start_server(config, [[Config]]),
- {ok, KenPid} = start_server(ken_server),
- [{ken_pid, KenPid}, {cfg_pid, CfgPid}, {event_pid, EventPid}].
-
-teardown(Cfg) ->
- ok = stop_server(event_pid, Cfg),
- ok = stop_server(cfg_pid, Cfg),
- ok = stop_server(ken_pid, Cfg).
-
-exception_builder(Desc, F, Val) ->
- D = atom_to_list(F) ++ " " ++ Desc,
- {D, ?_assertException(error, function_clause, ken_server:F(Val))}.
-
-set_builder(Desc, F, In, Out) ->
- D = atom_to_list(F) ++ " " ++ Desc,
- {D, ?_assertEqual(Out, ken_server:F(In))}.
-
-start_server(Module) ->
- start_server(Module, []).
-
-start_server(Module, Config) ->
- gen_server:start({local, Module}, Module, Config, []).
-
-stop_server(Key, Cfg) ->
- {Key, Pid} = lists:keyfind(Key, 1, Cfg),
- MRef = erlang:monitor(process, Pid),
- true = exit(Pid, kill),
- receive
- {'DOWN', MRef, _, _, _} -> ok
- end.
diff --git a/src/mango/.gitignore b/src/mango/.gitignore
deleted file mode 100644
index 446945396..000000000
--- a/src/mango/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-.rebar/
-ebin/
-test/*.pyc
-venv/
-.eunit
diff --git a/src/mango/LICENSE.txt b/src/mango/LICENSE.txt
deleted file mode 100644
index b47557aaf..000000000
--- a/src/mango/LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2014 IBM Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/mango/README.md b/src/mango/README.md
deleted file mode 100644
index 4c4bb60a6..000000000
--- a/src/mango/README.md
+++ /dev/null
@@ -1,372 +0,0 @@
-Mango
-=====
-
-A MongoDB inspired query language interface for Apache CouchDB.
-
-
-Motivation
-----------
-
-Mango provides a single HTTP API endpoint that accepts JSON bodies via HTTP POST. These bodies provide a set of instructions that will be handled, with the results returned to the client in the same order as they were specified. The general principle of this API is to be simple to implement on the client side while giving users a more natural way to work with Apache CouchDB than the standard RESTful HTTP interface alone.
-
-
-Actions
--------
-
-The general API exposes a set of actions that are similar to what MongoDB exposes (although not all of MongoDB's API is supported). These are meant to be loosely and obviously inspired by MongoDB but without too much attention to maintaining the exact behavior.
-
-Each action is specified as a JSON object with a number of keys that affect the behavior. Each action object has at least one field named "action" which must
-have a string value indicating the action to be performed. For each action there are zero or more fields that will affect behavior. Some of these fields are required and some are optional.
-
-For convenience, the HTTP API will accept a JSON body that is either a single JSON object which specifies a single action or a JSON array that specifies a list of actions that will then be invoked serially. While multiple commands can be batched into a single HTTP request, there are no guarantees about atomicity or isolation for a batch of commands.
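-
-As a sketch only (the documents and field values here are invented for illustration), a batched request body pairing an insert with a find might look like:
-
-    [{"action": "insert", "docs": [{"name": "Paul"}]},
-     {"action": "find", "selector": {"name": "Paul"}}]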
-
-Activating Query on a cluster
---------------------------------------------
-
-Query can be enabled by setting the following config:
-
-```
-rpc:multicall(config, set, ["native_query_servers", "query", "{mango_native_proc, start_link, []}"]).
-```
-
-HTTP API
-========
-
-This API adds a single URI endpoint to the existing CouchDB HTTP API. Creating databases, authentication, Map/Reduce views, etc. are all still supported exactly as currently documented. No existing behavior is changed.
-
-The endpoint added is for the URL pattern `/dbname/_query` and has the following characteristics:
-
-* The only HTTP method supported is `POST`.
-* The request `Content-Type` must be `application/json`.
-* The response status code will either be `200`, `4XX`, or `5XX`.
-* The response `Content-Type` will be `application/json`.
-* The response `Transfer-Encoding` will be `chunked`.
-* The response is a single JSON object or array that matches the single command or list of commands in the request.
-
-This is intended to be a significantly simpler use of HTTP than the current APIs. This is motivated by the fact that this entire API is aimed at customers who are less familiar with HTTP and non-relational document stores. Once a customer is comfortable using this API we hope to expose any other "power features" through the existing HTTP API and its adherence to HTTP semantics.
-
-
-Supported Actions
-=================
-
-This is a list of supported actions that Mango understands. For the time being it is limited to the four normal CRUD actions plus one meta action to create indices on the database.
-
-insert
-------
-
-Insert a document or documents into the database.
-
-Keys:
-
-* action - "insert"
-* docs - The JSON document, or array of documents, to insert
-* w (optional) (default: 2) - An integer > 0 for the write quorum size
-
-If the provided document or documents do not contain an "\_id" field one will be added using an automatically generated UUID.
-
-It is more performant to specify multiple documents in the "docs" field than it is to specify multiple independent insert actions. Each insert action is submitted as a single bulk update (i.e., \_bulk\_docs in CouchDB terminology). This, however, does not make any guarantees on the isolation or atomicity of the bulk operation. It is merely a performance benefit.
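-
-As a sketch built only from the keys listed above (the document contents and quorum value are invented for illustration), an insert action might look like:
-
-    {"action": "insert",
-     "docs": [{"type": "person", "name": "Paul"},
-              {"type": "person", "name": "Amy"}],
-     "w": 2}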
-
-
-find
-----
-
-Retrieve documents from the database.
-
-Keys:
-
-* action - "find"
-* selector - JSON object following selector syntax, described below
-* limit (optional) (default: 25) - integer >= 0, Limit the number of rows returned
-* skip (optional) (default: 0) - integer >= 0, Skip the specified number of rows
-* sort (optional) (default: []) - JSON array following sort syntax, described below
-* fields (optional) (default: null) - JSON array following the field syntax, described below
-* r (optional) (default: 1) - By default a find will return the document that was found when traversing the index. Optionally there can be a quorum read for each document using `r` as the read quorum. This is obviously less performant than using the document local to the index.
-* conflicts (optional) (default: false) - boolean, whether or not to include information about any existing conflicts for the document.
-
-The important thing to note about the find command is that it must execute over a generated index. If a selector is provided that cannot be satisfied using an existing index, the list of basic indices that could be used will be returned.
-
-For the most part, indices are generated in response to the "create\_index" action (described below) although there are two special indices that can be used as well. The "\_id" is automatically indexed and is similar to every other index. There is also a special "\_seq" index to retrieve documents in the order of their update sequence.
-
-It's also quite possible to generate a query that can't be satisfied by any index. In this case an error will be returned stating that fact. Generally speaking, the easiest way to stumble onto this is to attempt to OR two separate fields, which would require a complete table scan. In the future I expect to support these more complicated queries using an extended indexing API (which deviates from the current MongoDB model a bit).
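-
-As an illustrative sketch using the keys above (the field names, index, and values are hypothetical), a find action might look like:
-
-    {"action": "find",
-     "selector": {"location.city": "Omaha"},
-     "limit": 10,
-     "sort": [{"location.city": "asc"}],
-     "fields": ["_id", "location.city"]}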
-
-
-update
-------
-
-Update an existing document in the database
-
-Keys:
-
-* action - "update"
-* selector - JSON object following selector syntax, described below
-* update - JSON object following update syntax, described below
-* upsert - (optional) (default: false) - boolean, Whether or not to create a new document if the selector does not match any documents in the database
-* limit (optional) (default: 1) - integer > 0, How many documents returned from the selector should be modified. Currently has a maximum value of 100
-* sort - (optional) (default: []) - JSON array following sort syntax, described below
-* r (optional) (default: 1) - integer > 0, read quorum constant
-* w (optional) (default: 2) - integer > 0, write quorum constant
-
-Updates are fairly straightforward other than to mention that the selector (like find) must be satisfiable using an existing index.
-
-For the update field, if the provided JSON object has one or more update operators (described below), they are applied to the existing document (if one exists); otherwise the entire contents are replaced with exactly the value of the `update` field.
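-
-Because the update operator syntax is still marked as a TODO below, here is only a sketch of the replacement form described above (all values are invented): with no operators present, the matched document's contents are replaced wholesale by the value of `update`.
-
-    {"action": "update",
-     "selector": {"_id": "Paul"},
-     "update": {"location": "Boston", "age": 22},
-     "limit": 1}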
-
-
-delete
-------
-
-Remove a document from the database.
-
-Keys:
-
-* action - "delete"
-* selector - JSON object following selector syntax, described below
-* force (optional) (default: false) - Delete all conflicted versions of the document as well
-* limit - (optional) (default: 1) - integer > 0, How many documents to delete from the database. Currently has a maximum value of 100
-* sort - (optional) (default: []) - JSON array following sort syntax, described below
-* r (optional) (default: 1) - integer > 0, read quorum constant
-* w (optional) (default: 2) - integer > 0, write quorum constant
-
-Deletes behave quite similarly to update except they attempt to remove documents from the database. It's important to note that if a document has conflicts, it may "appear" that deletes aren't having an effect. This is because the delete operation by default only removes a single revision. Specify `"force":true` if you would like to attempt to delete all live revisions.
-
-If you wish to delete a specific revision of the document, you can specify it in the selector using the special "\_rev" field.
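-
-A minimal sketch assembled from the keys above (the selector value is invented):
-
-    {"action": "delete",
-     "selector": {"_id": "Paul"},
-     "force": false,
-     "limit": 1}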
-
-
-create\_index
--------------
-
-Create an index on the database
-
-Keys:
-
-* action - "create\_index"
-* index - JSON array following sort syntax, described below
-* type (optional) (default: "json") - string, specifying the index type to create. Currently only "json" indexes are supported but in the future we will provide full-text indexes as well as Geo spatial indexes
-* name (optional) - string, optionally specify a name for the index. If a name is not provided one will be automatically generated
-* ddoc (optional) - Indexes can be grouped into design documents under the hood for efficiency. This is an advanced feature. Don't specify a design document here unless you know the consequences of index invalidation. By default each index is placed in its own separate design document for isolation.
-
-Any operation that needs to locate a document in the database requires an index that can be used to find it. By default the only two indices that exist are for the document "\_id" and the special "\_seq" index.
-
-Indices are created in the background. If you attempt to create an index on a large database and then immediately utilize it, the request may block for a considerable amount of time before the request completes.
-
-Indices can specify multiple fields to index simultaneously. This is roughly analogous to a compound index in SQL with the corresponding tradeoffs. For instance, an index may contain the (ordered set of) fields "foo", "bar", and "baz". If a selector specifying only "bar" is received, it cannot be answered. However, if a selector specifying "foo" and "bar" is received, it can be answered more efficiently than if there were only independent indexes on "foo" and "bar".
-
-NB: while the index definition allows sort directions to be specified, these are currently not supported. The sort direction must currently be specified as "asc" in the JSON. [INTERNAL]: This will require that we patch the view engine as well as the cluster coordinators in Fabric to follow the specified sort orders. The concepts are straightforward but the implementation may need some thought to fit into the current shape of things.
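-
-To make the compound index discussion concrete, a create_index action over the ordered fields "foo", "bar", and "baz" might be sketched as follows (the index name is invented, and per the NB above every direction is given as "asc"):
-
-    {"action": "create_index",
-     "index": [{"foo": "asc"}, {"bar": "asc"}, {"baz": "asc"}],
-     "type": "json",
-     "name": "foo-bar-baz"}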
-
-
-list\_indexes
--------------
-
-List the indexes that exist in a given database.
-
-Keys:
-
-* action - "list\_indexes"
-
-
-delete\_index
--------------
-
-Delete the specified index from the database.
-
-Keys:
-
-* action - "delete\_index"
-* name - string, the index to delete
-* design\_doc - string, the design doc id from which to delete the index. For auto-generated index names and design docs, you can retrieve this information from the `list\_indexes` action
-
-Indexes require resources to maintain. If you find that an index is no longer necessary then it can be beneficial to remove it from the database.
-
-
-describe\_selector
-------------------
-
-Shows debugging information for a given selector
-
-Keys:
-
-* action - "describe\_selector"
-* selector - JSON object in selector syntax, described below
-* extended (optional) (default: false) - Show information on what existing indexes could be used with this selector
-
-This is a useful debugging utility that will show how a given selector is normalized before execution as well as information on what indexes could be used to satisfy it.
-
-If `"extended": true` is included, then the list of existing indices that could be used for this selector is also returned.
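-
-A sketch of such a request (the selector is invented):
-
-    {"action": "describe_selector",
-     "selector": {"age": {"$gt": 21}},
-     "extended": true}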
-
-
-
-JSON Syntax Descriptions
-========================
-
-This API uses a few defined JSON structures for various operations. Here we'll describe each in detail.
-
-
-Selector Syntax
----------------
-
-The Mango query language is expressed as a JSON object describing documents of interest. Within this structure it is also possible to express conditional logic using specially named fields. This is inspired by and intended to maintain a fairly close parity to the existing MongoDB behavior.
-
-As an example, the simplest selector for Mango might look something like this:
-
- {"_id": "Paul"}
-
-Which would match the document named "Paul" (if one exists). Extending this example using other fields might look like this:
-
- {"_id": "Paul", "location": "Boston"}
-
-This would match a document named "Paul" *AND* having a "location" value of "Boston". Seeing as though I'm sitting in my basement in Omaha, this is unlikely.
-
-There are two special syntax elements for the object keys in a selector. The first is that the period (full stop, or simply `.`) character denotes subfields in a document. For instance, here are two equivalent examples:
-
- {"location": {"city": "Omaha"}}
- {"location.city": "Omaha"}
-
-If the object's key itself contains a period, it can be escaped with a backslash, i.e.
-
- {"location\\.city": "Omaha"}
-
-Note that the double backslash here is necessary to encode an actual single backslash.
-
-The second important syntax element is the use of a dollar sign (`$`) prefix to denote operators. For example:
-
- {"age": {"$gt": 21}}
-
-In this example, we have created the boolean expression `age > 21`.
-
-There are two core types of operators in the selector syntax: combination operators and condition operators. In general, combination operators contain groups of condition operators. We'll describe the list of each below.
-
-### Implicit Operators
-
-For the most part, every operator must be of the form `{"$operator": argument}`. There are, however, two implicit operators for selectors.
-
-First, any JSON object that is not the argument to a condition operator is an implicit `$and` operator on each field. For instance, these two examples are identical:
-
- {"foo": "bar", "baz": true}
- {"$and": [{"foo": {"$eq": "bar"}}, {"baz": {"$eq": true}}]}
-
-And as shown, any field that contains a JSON value that has no operators in it is an equality condition. For instance, these are equivalent:
-
- {"foo": "bar"}
- {"foo": {"$eq": "bar"}}
-
-And to be clear, these are also equivalent:
-
- {"foo": {"bar": "baz"}}
- {"foo": {"$eq": {"bar": "baz"}}}
-
-However, the previous example would actually be normalized internally to this:
-
- {"foo.bar": {"$eq": "baz"}}
-
-
-### Combination Operators
-
-These operators are responsible for combining groups of condition operators. Most familiar are the standard boolean operators plus a few extra for working with JSON arrays.
-
-Each of the combination operators takes a single argument that is either a condition operator or an array of condition operators.
-
-The list of combination operators:
-
-* "$and" - array argument
-* "$or" - array argument
-* "$not" - single argument
-* "$nor" - array argument
-* "$all" - array argument (special operator for array values)
-* "$elemMatch" - single argument (special operator for array values)
-* "$allMatch" - single argument (special operator for array values)
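-
-As a small sketch of how these combine (field names and values are invented), combination operators wrap other selectors and may be nested:
-
-    {"$or": [{"location.city": "Omaha"},
-             {"$and": [{"age": {"$gt": 21}},
-                       {"location.city": "Boston"}]}]}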
-
-### Condition Operators
-
-Condition operators are specified on a per field basis and apply to the value indexed for that field. For instance, the basic "$eq" operator matches when the indexed field is equal to its argument. There is currently support for the basic equality and inequality operators as well as a number of meta operators. Some of these operators will accept any JSON argument while some require a specific JSON formatted argument. Each is noted below.
-
-The list of condition operators:
-
-(In)equality operators
-
-* "$lt" - any JSON
-* "$lte" - any JSON
-* "$eq" - any JSON
-* "$ne" - any JSON
-* "$gte" - any JSON
-* "$gt" - any JSON
-
-Object related operators
-
-* "$exists" - boolean, check whether the field exists or not regardless of its value
-* "$type" - string, check the document field's type
-
-Array related operators
-
-* "$in" - array of JSON values, the document field must exist in the list provided
-* "$nin" - array of JSON values, the document field must not exist in the list provided
-* "$size" - integer, special condition to match the length of an array field in a document. Non-array fields cannot match this condition.
-
-Misc related operators
-
-* "$mod" - [Divisor, Remainder], where Divisor and Remainder are both positive integers (i.e., greater than 0). Matches documents where (field % Divisor == Remainder) is true. This is false for any non-integer field.
-* "$regex" - string, a regular expression pattern to match against the document field. Only matches when the field is a string value and matches the supplied regular expression.
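-
-As a sketch tying several of these together (field names and values are invented), each condition operator is attached to the field it constrains:
-
-    {"age": {"$gte": 21, "$lt": 65},
-     "tags": {"$in": ["active", "trial"]},
-     "email": {"$exists": true}}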
-
-
-Update Syntax
--------------
-
-Need to describe the syntax for update operators.
-
-
-Sort Syntax
------------
-
-The sort syntax is a basic array of field name and direction pairs. It looks like this:
-
- [{field1: dir1} | ...]
-
-Where field1 can be any field (dotted notation is available for sub-document fields) and dir1 can be "asc" or "desc".
-
-Note that it is highly recommended that you specify a single key per object in your sort ordering so that the order is not dependent on the combination of JSON libraries between your application and the internals of Mango's indexing engine.
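-
-A concrete sketch of the above, with a single key per object as recommended (field names are invented):
-
-    [{"location.city": "asc"}, {"age": "desc"}]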
-
-
-Fields Syntax
--------------
-
-When retrieving documents from the database you can specify that only a subset of the fields are returned. This allows you to limit your results strictly to the parts of the document that are interesting for the local application logic. The fields returned are specified as an array. Unlike MongoDB, only the fields specified are included; there is no automatic inclusion of the "\_id" or other metadata fields when a field list is included.
-
-A trivial example:
-
- ["foo", "bar", "baz"]
-
-
-HTTP API
-========
-
-Short summary until the full documentation can be brought over.
-
-POST /dbname/\_find
--------------------------
-
-Issue a query.
-
-Request body is a JSON object that has the selector and the various options like limit/skip etc. Or we could post the selector and put the other options into the query string. Though I'd probably prefer to have it all in the body for consistency.
-
-Response is streamed out like a view.
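-
-A sketch of such a request body, with the selector and options carried together as described above (all values are invented):
-
-    {"selector": {"location.city": "Omaha"},
-     "limit": 10,
-     "skip": 0}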
-
-POST /dbname/\_index
---------------------------
-
-Request body contains the index definition.
-
-Response body is empty and the result is returned as the status code (200 OK -> created, 3something for exists).
-
-GET /dbname/\_index
--------------------------
-
-Request body is empty.
-
-Response body is all of the indexes that are available for use by find.
-
-DELETE /dbname/\_index/ddocid/viewname
---------------------------------------------
-
-Remove the specified index.
-
-Request body is empty.
-
-Response body is empty. The status code gives enough information.
diff --git a/src/mango/TODO.md b/src/mango/TODO.md
deleted file mode 100644
index ce2d85f3d..000000000
--- a/src/mango/TODO.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-* Patch the view engine to do alternative sorts. This will include both the lower level couch\_view* modules as well as the fabric coordinators.
-
-* Patch the view engine so we can specify options when returning docs from cursors. We'll want this so that we can delete specific revisions from a document.
-
-* Need to figure out how to do raw collation on some indices because at
-least the _id index uses it forcefully.
-
-* Add lots more to the update API. Mongo appears to be missing some pretty obvious easy functionality here. Things like managing values doing things like multiplying numbers, or common string mutations would be obvious examples. Also it could be interesting to add to the language so that you can do conditional updates based on other document attributes. Definitely not a V1 endeavor. \ No newline at end of file
diff --git a/src/mango/rebar.config.script b/src/mango/rebar.config.script
deleted file mode 100644
index be92bf114..000000000
--- a/src/mango/rebar.config.script
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name}.
-
-if not HaveDreyfus -> CONFIG; true ->
- CurrOpts = case lists:keyfind(erl_opts, 1, CONFIG) of
- {erl_opts, Opts} -> Opts;
- false -> []
- end,
- NewOpts = [
- {d, 'HAVE_DREYFUS'}
- ] ++ CurrOpts,
- lists:keystore(erl_opts, 1, CONFIG, {erl_opts, NewOpts})
-end.
-
diff --git a/src/mango/requirements.txt b/src/mango/requirements.txt
deleted file mode 100644
index 952c40c09..000000000
--- a/src/mango/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-nose2==0.11.0
-requests==2.27.1
-hypothesis==6.31.6
-
diff --git a/src/mango/src/mango.app.src b/src/mango/src/mango.app.src
deleted file mode 100644
index a63f036e0..000000000
--- a/src/mango/src/mango.app.src
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, mango, [
- {description, "MongoDB API compatibility layer for CouchDB"},
- {vsn, git},
- {registered, []},
- {applications, [
- kernel,
- stdlib,
- couch_epi,
- config,
- couch_log,
- fabric
- ]},
- {mod, {mango_app, []}}
-]}.
diff --git a/src/mango/src/mango.hrl b/src/mango/src/mango.hrl
deleted file mode 100644
index 26a9d43b9..000000000
--- a/src/mango/src/mango.hrl
+++ /dev/null
@@ -1,13 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(MANGO_ERROR(R), throw({mango_error, ?MODULE, R})).
diff --git a/src/mango/src/mango_app.erl b/src/mango/src/mango_app.erl
deleted file mode 100644
index 7a0c39db7..000000000
--- a/src/mango/src/mango_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, StartArgs) ->
- mango_sup:start_link(StartArgs).
-
-stop(_State) ->
- ok.
diff --git a/src/mango/src/mango_crud.erl b/src/mango/src/mango_crud.erl
deleted file mode 100644
index c13dbdcb9..000000000
--- a/src/mango/src/mango_crud.erl
+++ /dev/null
@@ -1,171 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_crud).
-
--export([
- insert/3,
- find/5,
- update/4,
- delete/3,
- explain/3
-]).
-
--export([
- collect_cb/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
-
-insert(Db, #doc{} = Doc, Opts) ->
- insert(Db, [Doc], Opts);
-insert(Db, {_} = Doc, Opts) ->
- insert(Db, [Doc], Opts);
-insert(Db, Docs, Opts0) when is_list(Docs) ->
- Opts1 = maybe_add_user_ctx(Db, Opts0),
- Opts2 = maybe_int_to_str(w, Opts1),
- case fabric:update_docs(Db, Docs, Opts2) of
- {ok, Results0} ->
- {ok, lists:zipwith(fun result_to_json/2, Docs, Results0)};
- {accepted, Results0} ->
- {ok, lists:zipwith(fun result_to_json/2, Docs, Results0)};
- {aborted, Errors} ->
- {error, lists:map(fun result_to_json/1, Errors)}
- end.
-
-find(Db, Selector, Callback, UserAcc, Opts0) ->
- Opts1 = maybe_add_user_ctx(Db, Opts0),
- Opts2 = maybe_int_to_str(r, Opts1),
- {ok, Cursor} = mango_cursor:create(Db, Selector, Opts2),
- mango_cursor:execute(Cursor, Callback, UserAcc).
-
-update(Db, Selector, Update, Options) ->
- Upsert = proplists:get_value(upsert, Options),
- case collect_docs(Db, Selector, Options) of
- {ok, []} when Upsert ->
- InitDoc = mango_doc:update_as_insert(Update),
- case mango_doc:has_operators(InitDoc) of
- true ->
- ?MANGO_ERROR(invalid_upsert_with_operators);
- false ->
- % Probably need to catch and rethrow errors from
- % this function.
- Doc = couch_doc:from_json_obj(InitDoc),
- NewDoc =
- case Doc#doc.id of
- <<"">> ->
- Doc#doc{id = couch_uuids:new(), revs = {0, []}};
- _ ->
- Doc
- end,
- insert(Db, NewDoc, Options)
- end;
- {ok, Docs} ->
- NewDocs = lists:map(
- fun(Doc) ->
- mango_doc:apply_update(Doc, Update)
- end,
- Docs
- ),
- insert(Db, NewDocs, Options);
- Else ->
- Else
- end.
-
-delete(Db, Selector, Options) ->
- case collect_docs(Db, Selector, Options) of
- {ok, Docs} ->
- NewDocs = lists:map(
- fun({Props}) ->
- {[
- {<<"_id">>, proplists:get_value(<<"_id">>, Props)},
- {<<"_rev">>, proplists:get_value(<<"_rev">>, Props)},
- {<<"_deleted">>, true}
- ]}
- end,
- Docs
- ),
- insert(Db, NewDocs, Options);
- Else ->
- Else
- end.
-
-explain(Db, Selector, Opts0) ->
- Opts1 = maybe_add_user_ctx(Db, Opts0),
- Opts2 = maybe_int_to_str(r, Opts1),
- {ok, Cursor} = mango_cursor:create(Db, Selector, Opts2),
- mango_cursor:explain(Cursor).
-
-maybe_add_user_ctx(Db, Opts) ->
- case lists:keyfind(user_ctx, 1, Opts) of
- {user_ctx, _} ->
- Opts;
- false ->
- [{user_ctx, couch_db:get_user_ctx(Db)} | Opts]
- end.
-
-maybe_int_to_str(_Key, []) ->
- [];
-maybe_int_to_str(Key, [{Key, Val} | Rest]) when is_integer(Val) ->
- [{Key, integer_to_list(Val)} | maybe_int_to_str(Key, Rest)];
-maybe_int_to_str(Key, [KV | Rest]) ->
- [KV | maybe_int_to_str(Key, Rest)].
-
-result_to_json(#doc{id = Id}, Result) ->
- result_to_json(Id, Result);
-result_to_json({Props}, Result) ->
- Id = couch_util:get_value(<<"_id">>, Props),
- result_to_json(Id, Result);
-result_to_json(DocId, {ok, NewRev}) ->
- {[
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]};
-result_to_json(DocId, {accepted, NewRev}) ->
- {[
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)},
- {accepted, true}
- ]};
-result_to_json(DocId, Error) ->
- % chttpd:error_info/1 because this is coming from fabric
- % and not internal mango operations.
- {_Code, ErrorStr, Reason} = chttpd:error_info(Error),
- {[
- {id, DocId},
- {error, ErrorStr},
- {reason, Reason}
- ]}.
-
-% This is for errors because for some reason we
-% need a different return value for errors? Blargh.
-result_to_json({{Id, Rev}, Error}) ->
- {_Code, ErrorStr, Reason} = chttpd:error_info(Error),
- {[
- {id, Id},
- {rev, couch_doc:rev_to_str(Rev)},
- {error, ErrorStr},
- {reason, Reason}
- ]}.
-
-collect_docs(Db, Selector, Options) ->
- Cb = fun ?MODULE:collect_cb/2,
- case find(Db, Selector, Cb, [], Options) of
- {ok, Docs} ->
- {ok, lists:reverse(Docs)};
- Else ->
- Else
- end.
-
-collect_cb({row, Doc}, Acc) ->
- {ok, [Doc | Acc]}.
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
deleted file mode 100644
index e9db4c3cf..000000000
--- a/src/mango/src/mango_cursor.erl
+++ /dev/null
@@ -1,253 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_cursor).
-
--export([
- create/3,
- explain/1,
- execute/3,
- maybe_filter_indexes_by_ddoc/2,
- remove_indexes_with_partial_filter_selector/1,
- maybe_add_warning/4,
- maybe_noop_range/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
--include("mango_cursor.hrl").
--include("mango_idx.hrl").
-
--ifdef(HAVE_DREYFUS).
--define(CURSOR_MODULES, [
- mango_cursor_view,
- mango_cursor_text,
- mango_cursor_special
-]).
--else.
--define(CURSOR_MODULES, [
- mango_cursor_view,
- mango_cursor_special
-]).
--endif.
-
--define(SUPERVISOR, mango_cursor_sup).
-
-create(Db, Selector0, Opts) ->
- Selector = mango_selector:normalize(Selector0),
- UsableIndexes = mango_idx:get_usable_indexes(Db, Selector, Opts),
- case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of
- [] ->
- % use_index doesn't match a valid index - fall back to a valid one
- create_cursor(Db, UsableIndexes, Selector, Opts);
- UserSpecifiedIndex ->
- create_cursor(Db, UserSpecifiedIndex, Selector, Opts)
- end.
-
-explain(#cursor{} = Cursor) ->
- #cursor{
- index = Idx,
- selector = Selector,
- opts = Opts0,
- limit = Limit,
- skip = Skip,
- fields = Fields
- } = Cursor,
- Mod = mango_idx:cursor_mod(Idx),
- Opts = lists:keydelete(user_ctx, 1, Opts0),
- {
- [
- {dbname, mango_idx:dbname(Idx)},
- {index, mango_idx:to_json(Idx)},
- {partitioned, mango_idx:partitioned(Idx)},
- {selector, Selector},
- {opts, {Opts}},
- {limit, Limit},
- {skip, Skip},
- {fields, Fields}
- ] ++ Mod:explain(Cursor)
- }.
-
-execute(#cursor{index = Idx} = Cursor, UserFun, UserAcc) ->
- Mod = mango_idx:cursor_mod(Idx),
- Mod:execute(Cursor, UserFun, UserAcc).
-
-maybe_filter_indexes_by_ddoc(Indexes, Opts) ->
- case lists:keyfind(use_index, 1, Opts) of
- {use_index, []} ->
- [];
- {use_index, [DesignId]} ->
- filter_indexes(Indexes, DesignId);
- {use_index, [DesignId, ViewName]} ->
- filter_indexes(Indexes, DesignId, ViewName)
- end.
-
-filter_indexes(Indexes, DesignId0) ->
- DesignId =
- case DesignId0 of
- <<"_design/", _/binary>> ->
- DesignId0;
- Else ->
- <<"_design/", Else/binary>>
- end,
- FiltFun = fun(I) -> mango_idx:ddoc(I) == DesignId end,
- lists:filter(FiltFun, Indexes).
-
-filter_indexes(Indexes0, DesignId, ViewName) ->
- Indexes = filter_indexes(Indexes0, DesignId),
- FiltFun = fun(I) -> mango_idx:name(I) == ViewName end,
- lists:filter(FiltFun, Indexes).
-
-remove_indexes_with_partial_filter_selector(Indexes) ->
- FiltFun = fun(Idx) ->
- case mango_idx:get_partial_filter_selector(Idx) of
- undefined -> true;
- _ -> false
- end
- end,
- lists:filter(FiltFun, Indexes).
-
-maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, Stats, UserAcc) ->
- W0 = invalid_index_warning(Index, Opts),
- W1 = no_index_warning(Index),
- W2 = index_scan_warning(Stats),
- Warnings = lists:append([W0, W1, W2]),
- case Warnings of
- [] ->
- UserAcc;
- _ ->
- WarningStr = iolist_to_binary(lists:join(<<"\n">>, Warnings)),
- Arg = {add_key, warning, WarningStr},
- {_Go, UserAcc1} = UserFun(Arg, UserAcc),
- UserAcc1
- end.
-
-create_cursor(Db, Indexes, Selector, Opts) ->
- [{CursorMod, CursorModIndexes} | _] = group_indexes_by_type(Indexes),
- CursorMod:create(Db, CursorModIndexes, Selector, Opts).
-
-group_indexes_by_type(Indexes) ->
- IdxDict = lists:foldl(
- fun(I, D) ->
- dict:append(mango_idx:cursor_mod(I), I, D)
- end,
- dict:new(),
- Indexes
- ),
- % The first cursor module that has indexes will be
- % used to service this query. This is so that we
- % don't suddenly switch indexes for existing client
- % queries.
- lists:flatmap(
- fun(CMod) ->
- case dict:find(CMod, IdxDict) of
- {ok, CModIndexes} ->
- [{CMod, CModIndexes}];
- error ->
- []
- end
- end,
- ?CURSOR_MODULES
- ).
-
-% warn if the _all_docs index was used to fulfil a query
-no_index_warning(#idx{type = Type}) when Type =:= <<"special">> ->
- couch_stats:increment_counter([mango, unindexed_queries]),
- [<<"No matching index found, create an index to optimize query time.">>];
-no_index_warning(_) ->
- [].
-
-% warn if user specified an index which doesn't exist or isn't valid
-% for the selector.
-% In this scenario, Mango will ignore the index hint and auto-select an index.
-invalid_index_warning(Index, Opts) ->
- UseIndex = lists:keyfind(use_index, 1, Opts),
- invalid_index_warning_int(Index, UseIndex).
-
-invalid_index_warning_int(Index, {use_index, [DesignId]}) ->
- Filtered = filter_indexes([Index], DesignId),
- if
- Filtered /= [] ->
- [];
- true ->
- couch_stats:increment_counter([mango, query_invalid_index]),
- Reason = fmt(
- "_design/~s was not used because it does not contain a valid index for this query.",
- [ddoc_name(DesignId)]
- ),
- [Reason]
- end;
-invalid_index_warning_int(Index, {use_index, [DesignId, ViewName]}) ->
- Filtered = filter_indexes([Index], DesignId, ViewName),
- if
- Filtered /= [] ->
- [];
- true ->
- couch_stats:increment_counter([mango, query_invalid_index]),
- Reason = fmt(
- "_design/~s, ~s was not used because it is not a valid index for this query.",
- [ddoc_name(DesignId), ViewName]
- ),
- [Reason]
- end;
-invalid_index_warning_int(_, _) ->
- [].
-
-% warn if a large number of documents needed to be scanned per result
-% returned, implying a lot of in-memory filtering
-index_scan_warning(#execution_stats{
- totalDocsExamined = Docs,
- totalQuorumDocsExamined = DocsQuorum,
- resultsReturned = ResultCount
-}) ->
- % Docs and DocsQuorum are mutually exclusive so it's safe to sum them
- DocsScanned = Docs + DocsQuorum,
- Ratio = calculate_index_scan_ratio(DocsScanned, ResultCount),
- Threshold = config:get_integer("mango", "index_scan_warning_threshold", 10),
- case Threshold > 0 andalso Ratio > Threshold of
- true ->
- couch_stats:increment_counter([mango, too_many_docs_scanned]),
- Reason =
- <<"The number of documents examined is high in proportion to the number of results returned. Consider adding a more specific index to improve this.">>,
- [Reason];
- false ->
- []
- end.
-
-% When there is an empty array for certain operators, we don't actually
-% want to execute the query so we deny it by making the range [empty].
-% To clarify, we don't want this query to execute: {"$or": []}. Results should
-% be empty. We do want this query to execute: {"age": 22, "$or": []}. It should
-% return the same results as {"age": 22}
-maybe_noop_range({[{Op, []}]}, IndexRanges) ->
- Noops = [<<"$all">>, <<"$and">>, <<"$or">>, <<"$in">>],
- case lists:member(Op, Noops) of
- true ->
- [empty];
- false ->
- IndexRanges
- end;
-maybe_noop_range(_, IndexRanges) ->
- IndexRanges.
-
-calculate_index_scan_ratio(DocsScanned, 0) ->
- DocsScanned;
-calculate_index_scan_ratio(DocsScanned, ResultCount) ->
- DocsScanned / ResultCount.
-
-fmt(Format, Args) ->
- iolist_to_binary(io_lib:format(Format, Args)).
-
-ddoc_name(<<"_design/", Name/binary>>) ->
- Name;
-ddoc_name(Name) ->
- Name.
diff --git a/src/mango/src/mango_cursor.hrl b/src/mango/src/mango_cursor.hrl
deleted file mode 100644
index e204c1735..000000000
--- a/src/mango/src/mango_cursor.hrl
+++ /dev/null
@@ -1,31 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include("mango_execution_stats.hrl").
-
-
--record(cursor, {
- db,
- index,
- ranges,
- selector,
- opts,
- limit,
- skip = 0,
- fields = undefined,
- user_fun,
- user_acc,
- execution_stats = #execution_stats{},
- bookmark,
- bookmark_docid,
- bookmark_key
-}).
diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl
deleted file mode 100644
index f20edebd1..000000000
--- a/src/mango/src/mango_cursor_special.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_cursor_special).
-
--export([
- create/4,
- explain/1,
- execute/3
-]).
-
--export([
- handle_message/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include("mango_cursor.hrl").
-
-create(Db, Indexes, Selector, Opts) ->
- InitialRange = mango_idx_view:field_ranges(Selector),
- CatchAll = [{<<"_id">>, {'$gt', null, '$lt', mango_json_max}}],
- % order matters here - we only want to use the catchall index
- % if no other range can fulfill the query (because we know
- % catchall is the most expensive range)
- FieldRanges = InitialRange ++ CatchAll,
- Composited = mango_cursor_view:composite_indexes(Indexes, FieldRanges),
- {Index, IndexRanges} = mango_cursor_view:choose_best_index(Db, Composited),
-
- Limit = couch_util:get_value(limit, Opts, mango_opts:default_limit()),
- Skip = couch_util:get_value(skip, Opts, 0),
- Fields = couch_util:get_value(fields, Opts, all_fields),
- Bookmark = couch_util:get_value(bookmark, Opts),
-
- IndexRanges1 = mango_cursor:maybe_noop_range(Selector, IndexRanges),
-
- {ok, #cursor{
- db = Db,
- index = Index,
- ranges = IndexRanges1,
- selector = Selector,
- opts = Opts,
- limit = Limit,
- skip = Skip,
- fields = Fields,
- bookmark = Bookmark
- }}.
-
-explain(Cursor) ->
- mango_cursor_view:explain(Cursor).
-
-execute(Cursor0, UserFun, UserAcc) ->
- mango_cursor_view:execute(Cursor0, UserFun, UserAcc).
-
-handle_message(Msg, Cursor) ->
- mango_cursor_view:handle_message(Msg, Cursor).
diff --git a/src/mango/src/mango_cursor_text.erl b/src/mango/src/mango_cursor_text.erl
deleted file mode 100644
index 53bf63edb..000000000
--- a/src/mango/src/mango_cursor_text.erl
+++ /dev/null
@@ -1,334 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_cursor_text).
-
--ifdef(HAVE_DREYFUS).
-
--export([
- create/4,
- explain/1,
- execute/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("dreyfus/include/dreyfus.hrl").
--include("mango_cursor.hrl").
--include("mango.hrl").
-
--record(cacc, {
- selector,
- dbname,
- ddocid,
- idx_name,
- query_args,
- bookmark,
- limit,
- skip,
- user_fun,
- user_acc,
- fields,
- execution_stats
-}).
-
-create(Db, Indexes, Selector, Opts0) ->
- Index =
- case Indexes of
- [Index0] ->
- Index0;
- _ ->
- ?MANGO_ERROR(multiple_text_indexes)
- end,
-
- Opts = unpack_bookmark(couch_db:name(Db), Opts0),
-
- DreyfusLimit = get_dreyfus_limit(),
- Limit = erlang:min(DreyfusLimit, couch_util:get_value(limit, Opts, mango_opts:default_limit())),
- Skip = couch_util:get_value(skip, Opts, 0),
- Fields = couch_util:get_value(fields, Opts, all_fields),
-
- {ok, #cursor{
- db = Db,
- index = Index,
- ranges = null,
- selector = Selector,
- opts = Opts,
- limit = Limit,
- skip = Skip,
- fields = Fields
- }}.
-
-explain(Cursor) ->
- #cursor{
- selector = Selector,
- opts = Opts
- } = Cursor,
- [
- {'query', mango_selector_text:convert(Selector)},
- {partition, get_partition(Opts, null)},
- {sort, sort_query(Opts, Selector)}
- ].
-
-execute(Cursor, UserFun, UserAcc) ->
- #cursor{
- db = Db,
- index = Idx,
- limit = Limit,
- skip = Skip,
- selector = Selector,
- opts = Opts,
- execution_stats = Stats
- } = Cursor,
- Query = mango_selector_text:convert(Selector),
- QueryArgs = #index_query_args{
- q = Query,
- partition = get_partition(Opts, nil),
- sort = sort_query(Opts, Selector),
- raw_bookmark = true
- },
- CAcc = #cacc{
- selector = Selector,
- dbname = couch_db:name(Db),
- ddocid = ddocid(Idx),
- idx_name = mango_idx:name(Idx),
- bookmark = get_bookmark(Opts),
- limit = Limit,
- skip = Skip,
- query_args = QueryArgs,
- user_fun = UserFun,
- user_acc = UserAcc,
- fields = Cursor#cursor.fields,
- execution_stats = mango_execution_stats:log_start(Stats)
- },
- try
- case Query of
- <<>> ->
- throw({stop, CAcc});
- _ ->
- execute(CAcc)
- end
- catch
- throw:{stop, FinalCAcc} ->
- #cacc{
- bookmark = FinalBM,
- user_fun = UserFun,
- user_acc = LastUserAcc,
- execution_stats = Stats0
- } = FinalCAcc,
- JsonBM = dreyfus_bookmark:pack(FinalBM),
- Arg = {add_key, bookmark, JsonBM},
- {_Go, FinalUserAcc} = UserFun(Arg, LastUserAcc),
- FinalUserAcc0 = mango_execution_stats:maybe_add_stats(
- Opts, UserFun, Stats0, FinalUserAcc
- ),
- FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, Stats0, FinalUserAcc0),
- {ok, FinalUserAcc1}
- end.
-
-execute(CAcc) ->
- case search_docs(CAcc) of
- {ok, Bookmark, []} ->
- % If we don't have any results from the
- % query it means the request has paged through
- % all possible results and the request is over.
- NewCAcc = CAcc#cacc{bookmark = Bookmark},
- throw({stop, NewCAcc});
- {ok, Bookmark, Hits} ->
- NewCAcc = CAcc#cacc{bookmark = Bookmark},
- HitDocs = get_json_docs(CAcc#cacc.dbname, Hits),
- {ok, FinalCAcc} = handle_hits(NewCAcc, HitDocs),
- execute(FinalCAcc)
- end.
-
-search_docs(CAcc) ->
- #cacc{
- dbname = DbName,
- ddocid = DDocId,
- idx_name = IdxName
- } = CAcc,
- QueryArgs = update_query_args(CAcc),
- case dreyfus_fabric_search:go(DbName, DDocId, IdxName, QueryArgs) of
- {ok, Bookmark, _, Hits, _, _} ->
- {ok, Bookmark, Hits};
- {error, Reason} ->
- ?MANGO_ERROR({text_search_error, {error, Reason}})
- end.
-
-handle_hits(CAcc, []) ->
- {ok, CAcc};
-handle_hits(CAcc0, [{Sort, Doc} | Rest]) ->
- CAcc1 = handle_hit(CAcc0, Sort, Doc),
- handle_hits(CAcc1, Rest).
-
-handle_hit(CAcc0, Sort, not_found) ->
- CAcc1 = update_bookmark(CAcc0, Sort),
- CAcc1;
-handle_hit(CAcc0, Sort, Doc) ->
- #cacc{
- limit = Limit,
- skip = Skip,
- execution_stats = Stats
- } = CAcc0,
- CAcc1 = update_bookmark(CAcc0, Sort),
- Stats1 = mango_execution_stats:incr_docs_examined(Stats),
- couch_stats:increment_counter([mango, docs_examined]),
- CAcc2 = CAcc1#cacc{execution_stats = Stats1},
- case mango_selector:match(CAcc2#cacc.selector, Doc) of
- true when Skip > 0 ->
- CAcc2#cacc{skip = Skip - 1};
- true when Limit == 0 ->
- % We hit this case if the user specified a zero
- % limit. Notice that in this case we need to
- % return the bookmark from before this match.
- throw({stop, CAcc0});
- true when Limit == 1 ->
- NewCAcc = apply_user_fun(CAcc2, Doc),
- throw({stop, NewCAcc});
- true when Limit > 1 ->
- NewCAcc = apply_user_fun(CAcc2, Doc),
- NewCAcc#cacc{limit = Limit - 1};
- false ->
- CAcc2
- end.
-
-apply_user_fun(CAcc, Doc) ->
- FinalDoc = mango_fields:extract(Doc, CAcc#cacc.fields),
- #cacc{
- user_fun = UserFun,
- user_acc = UserAcc,
- execution_stats = Stats
- } = CAcc,
- Stats0 = mango_execution_stats:incr_results_returned(Stats),
- case UserFun({row, FinalDoc}, UserAcc) of
- {ok, NewUserAcc} ->
- CAcc#cacc{user_acc = NewUserAcc, execution_stats = Stats0};
- {stop, NewUserAcc} ->
- throw({stop, CAcc#cacc{user_acc = NewUserAcc, execution_stats = Stats0}})
- end.
-
-%% Convert Query to Dreyfus sort specifications.
-%% Convert <<"Field">>, <<"desc">> to <<"-Field">>
-%% and append it to the Dreyfus query (see the sketch after this function).
-sort_query(Opts, Selector) ->
- {sort, {Sort}} = lists:keyfind(sort, 1, Opts),
- SortList = lists:map(
- fun(SortField) ->
- {Dir, RawSortField} =
- case SortField of
- {Field, <<"asc">>} -> {asc, Field};
- {Field, <<"desc">>} -> {desc, Field};
- Field when is_binary(Field) -> {asc, Field}
- end,
- SField = mango_selector_text:append_sort_type(RawSortField, Selector),
- case Dir of
- asc ->
- SField;
- desc ->
- <<"-", SField/binary>>
- end
- end,
- Sort
- ),
- case SortList of
- [] -> relevance;
- _ -> SortList
- end.
-
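A minimal sketch of the conversion, assuming mango_selector_text:append_sort_type/2 resolves the field to a number type for this selector; the option values are hypothetical:

    Opts = [{sort, {[{<<"year">>, <<"desc">>}]}}],
    sort_query(Opts, Selector).
    %% => [<<"-year:number">>]
    %% An empty sort list yields the atom 'relevance' instead.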
-get_partition(Opts, Default) ->
- case couch_util:get_value(partition, Opts) of
- <<>> -> Default;
- Else -> Else
- end.
-
-get_bookmark(Opts) ->
- case lists:keyfind(bookmark, 1, Opts) of
- {_, BM} when is_list(BM), BM /= [] ->
- BM;
- _ ->
- nil
- end.
-
-update_bookmark(CAcc, Sortable) ->
- BM = CAcc#cacc.bookmark,
- QueryArgs = CAcc#cacc.query_args,
- Sort = QueryArgs#index_query_args.sort,
- NewBM = dreyfus_bookmark:update(Sort, BM, [Sortable]),
- CAcc#cacc{bookmark = NewBM}.
-
-pack_bookmark(Bookmark) ->
- case dreyfus_bookmark:pack(Bookmark) of
- null -> nil;
- Enc -> Enc
- end.
-
-unpack_bookmark(DbName, Opts) ->
- NewBM =
- case lists:keyfind(bookmark, 1, Opts) of
- {_, nil} ->
- [];
- {_, Bin} ->
- try
- dreyfus_bookmark:unpack(DbName, Bin)
- catch
- _:_ ->
- ?MANGO_ERROR({invalid_bookmark, Bin})
- end
- end,
- lists:keystore(bookmark, 1, Opts, {bookmark, NewBM}).
-
-ddocid(Idx) ->
- case mango_idx:ddoc(Idx) of
- <<"_design/", Rest/binary>> ->
- Rest;
- Else ->
- Else
- end.
-
-update_query_args(CAcc) ->
- #cacc{
- bookmark = Bookmark,
- query_args = QueryArgs
- } = CAcc,
- QueryArgs#index_query_args{
- bookmark = pack_bookmark(Bookmark),
- limit = get_limit(CAcc)
- }.
-
-get_limit(CAcc) ->
- erlang:min(get_dreyfus_limit(), CAcc#cacc.limit + CAcc#cacc.skip).
-
-get_dreyfus_limit() ->
- config:get_integer("dreyfus", "max_limit", 200).
-
-get_json_docs(DbName, Hits) ->
- Ids = lists:map(
- fun(#sortable{item = Item}) ->
- couch_util:get_value(<<"_id">>, Item#hit.fields)
- end,
- Hits
- ),
- % TODO: respect R query parameter (same as json indexes)
- {ok, IdDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
- lists:map(
- fun(#sortable{item = Item} = Sort) ->
- Id = couch_util:get_value(<<"_id">>, Item#hit.fields),
- case lists:keyfind(Id, 1, IdDocs) of
- {Id, {doc, Doc}} ->
- {Sort, Doc};
- false ->
- {Sort, not_found}
- end
- end,
- Hits
- ).
-
--endif.
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
deleted file mode 100644
index 5656ffc0b..000000000
--- a/src/mango/src/mango_cursor_view.erl
+++ /dev/null
@@ -1,504 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_cursor_view).
-
--export([
- create/4,
- explain/1,
- execute/3
-]).
-
--export([
- view_cb/2,
- handle_message/2,
- handle_all_docs_message/2,
- composite_indexes/2,
- choose_best_index/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("fabric/include/fabric.hrl").
-
--include("mango_cursor.hrl").
--include("mango_idx_view.hrl").
-
--define(HEARTBEAT_INTERVAL_IN_USEC, 4000000).
-
-create(Db, Indexes, Selector, Opts) ->
- FieldRanges = mango_idx_view:field_ranges(Selector),
- Composited = composite_indexes(Indexes, FieldRanges),
- {Index, IndexRanges} = choose_best_index(Db, Composited),
-
- Limit = couch_util:get_value(limit, Opts, mango_opts:default_limit()),
- Skip = couch_util:get_value(skip, Opts, 0),
- Fields = couch_util:get_value(fields, Opts, all_fields),
- Bookmark = couch_util:get_value(bookmark, Opts),
-
- IndexRanges1 = mango_cursor:maybe_noop_range(Selector, IndexRanges),
-
- {ok, #cursor{
- db = Db,
- index = Index,
- ranges = IndexRanges1,
- selector = Selector,
- opts = Opts,
- limit = Limit,
- skip = Skip,
- fields = Fields,
- bookmark = Bookmark
- }}.
-
-explain(Cursor) ->
- #cursor{
- opts = Opts
- } = Cursor,
-
- BaseArgs = base_args(Cursor),
- Args = apply_opts(Opts, BaseArgs),
-
- [
- {mrargs,
- {[
- {include_docs, Args#mrargs.include_docs},
- {view_type, Args#mrargs.view_type},
- {reduce, Args#mrargs.reduce},
- {partition, couch_mrview_util:get_extra(Args, partition, null)},
- {start_key, maybe_replace_max_json(Args#mrargs.start_key)},
- {end_key, maybe_replace_max_json(Args#mrargs.end_key)},
- {direction, Args#mrargs.direction},
- {stable, Args#mrargs.stable},
- {update, Args#mrargs.update},
- {conflicts, Args#mrargs.conflicts}
- ]}}
- ].
-
-% replace internal values that cannot
-% be represented as a valid UTF-8 string
-% with a token for JSON serialization
-maybe_replace_max_json([]) ->
- [];
-maybe_replace_max_json(?MAX_STR) ->
- <<"<MAX>">>;
-maybe_replace_max_json([H | T] = EndKey) when is_list(EndKey) ->
- H1 =
- if
- H == ?MAX_JSON_OBJ -> <<"<MAX>">>;
- true -> H
- end,
- [H1 | maybe_replace_max_json(T)];
-maybe_replace_max_json(EndKey) ->
- EndKey.
-
-base_args(#cursor{index = Idx, selector = Selector} = Cursor) ->
- {StartKey, EndKey} =
- case Cursor#cursor.ranges of
- [empty] ->
- {null, null};
- _ ->
- {
- mango_idx:start_key(Idx, Cursor#cursor.ranges),
- mango_idx:end_key(Idx, Cursor#cursor.ranges)
- }
- end,
- #mrargs{
- view_type = map,
- reduce = false,
- start_key = StartKey,
- end_key = EndKey,
- include_docs = true,
- extra = [
- {callback, {?MODULE, view_cb}},
- {selector, Selector},
- {ignore_partition_query_limit, true}
- ]
- }.
-
-execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFun, UserAcc) ->
- Cursor = Cursor0#cursor{
- user_fun = UserFun,
- user_acc = UserAcc,
- execution_stats = mango_execution_stats:log_start(Stats)
- },
- case Cursor#cursor.ranges of
- [empty] ->
- % empty indicates unsatisfiable ranges, so don't perform search
- {ok, UserAcc};
- _ ->
- BaseArgs = base_args(Cursor),
- #cursor{opts = Opts, bookmark = Bookmark} = Cursor,
- Args0 = apply_opts(Opts, BaseArgs),
- Args = mango_json_bookmark:update_args(Bookmark, Args0),
- UserCtx = couch_util:get_value(user_ctx, Opts, #user_ctx{}),
- DbOpts = [{user_ctx, UserCtx}],
- Result =
- case mango_idx:def(Idx) of
- all_docs ->
- CB = fun ?MODULE:handle_all_docs_message/2,
- fabric:all_docs(Db, DbOpts, CB, Cursor, Args);
- _ ->
- CB = fun ?MODULE:handle_message/2,
- % Normal view
- DDoc = ddocid(Idx),
- Name = mango_idx:name(Idx),
- fabric:query_view(Db, DbOpts, DDoc, Name, CB, Cursor, Args)
- end,
- case Result of
- {ok, LastCursor} ->
- NewBookmark = mango_json_bookmark:create(LastCursor),
- Arg = {add_key, bookmark, NewBookmark},
- {_Go, FinalUserAcc} = UserFun(Arg, LastCursor#cursor.user_acc),
- Stats0 = LastCursor#cursor.execution_stats,
- FinalUserAcc0 = mango_execution_stats:maybe_add_stats(
- Opts, UserFun, Stats0, FinalUserAcc
- ),
- FinalUserAcc1 = mango_cursor:maybe_add_warning(
- UserFun, Cursor, Stats0, FinalUserAcc0
- ),
- {ok, FinalUserAcc1};
- {error, Reason} ->
- {error, Reason}
- end
- end.
-
-% Any of these indexes may be a composite index. For each
-% index, find the most specific set of fields it can serve.
-% I.e., if an index has columns a, b, c, d, then check
-% FieldRanges for a, b, c, and d and return the longest
-% prefix of columns found.
-composite_indexes(Indexes, FieldRanges) ->
- lists:foldl(
- fun(Idx, Acc) ->
- Cols = mango_idx:columns(Idx),
- Prefix = composite_prefix(Cols, FieldRanges),
- % Calculate the difference between the FieldRanges/Selector
- % and the Prefix. We want to select the index with a prefix
- % that is as close to the FieldRanges as possible
- PrefixDifference = length(FieldRanges) - length(Prefix),
- [{Idx, Prefix, PrefixDifference} | Acc]
- end,
- [],
- Indexes
- ).
-
-composite_prefix([], _) ->
- [];
-composite_prefix([Col | Rest], Ranges) ->
- case lists:keyfind(Col, 1, Ranges) of
- {Col, Range} ->
- [Range | composite_prefix(Rest, Ranges)];
- false ->
- []
- end.
-
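A small sketch of the prefix computation with hypothetical columns and ranges:

    Cols = [<<"a">>, <<"b">>, <<"c">>],
    FieldRanges = [{<<"a">>, range_a}, {<<"b">>, range_b}, {<<"d">>, range_d}],
    composite_prefix(Cols, FieldRanges).
    %% => [range_a, range_b]   (stops at <<"c">>, which has no range)
    %% PrefixDifference for this index is then 3 - 2 = 1.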
-% The query planner
-% First choose the index with the lowest difference between its
-% Prefix and the FieldRanges. If that is equal, then
-% choose the index with the least number of
-% fields in the index. If we still cannot break the tie,
-% then choose alphabetically based on ddocId.
-% Return the first element's Index and IndexRanges.
-%
-% In the future we can look into doing a cached parallel
-% reduce view read on each index with the ranges to find
-% the one that has the fewest number of rows or something.
-choose_best_index(_DbName, IndexRanges) ->
- Cmp = fun({IdxA, _PrefixA, PrefixDifferenceA}, {IdxB, _PrefixB, PrefixDifferenceB}) ->
- case PrefixDifferenceA - PrefixDifferenceB of
- N when N < 0 -> true;
- N when N == 0 ->
- ColsLenA = length(mango_idx:columns(IdxA)),
- ColsLenB = length(mango_idx:columns(IdxB)),
- case ColsLenA - ColsLenB of
- M when M < 0 ->
- true;
- M when M == 0 ->
- % We have no other way to choose, so at this point
- % select the index based on (dbname, ddocid, view_name) triple
- IdxA =< IdxB;
- _ ->
- false
- end;
- _ ->
- false
- end
- end,
- {SelectedIndex, SelectedIndexRanges, _} = hd(lists:sort(Cmp, IndexRanges)),
- {SelectedIndex, SelectedIndexRanges}.
-
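A sketch of the tie-breaking described above, with hypothetical candidate tuples as produced by composite_indexes/2:

    Candidates = [
        {IdxWide, PrefixA, 1},    % covers fewer of the selector's fields
        {IdxNarrow, PrefixB, 0}   % covers all of them
    ],
    choose_best_index(Db, Candidates).
    %% => {IdxNarrow, PrefixB}
    %% Equal prefix differences fall back to the index with fewer columns,
    %% and a remaining tie is broken by comparing the index terms.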
-view_cb({meta, Meta}, Acc) ->
- % Map function starting
- put(mango_docs_examined, 0),
- set_mango_msg_timestamp(),
- ok = rexi:stream2({meta, Meta}),
- {ok, Acc};
-view_cb({row, Row}, #mrargs{extra = Options} = Acc) ->
- ViewRow = #view_row{
- id = couch_util:get_value(id, Row),
- key = couch_util:get_value(key, Row),
- doc = couch_util:get_value(doc, Row)
- },
- case ViewRow#view_row.doc of
- null ->
- maybe_send_mango_ping();
- undefined ->
- % include_docs=false. Use quorum fetch at coordinator
- ok = rexi:stream2(ViewRow),
- set_mango_msg_timestamp();
- Doc ->
- put(mango_docs_examined, get(mango_docs_examined) + 1),
- Selector = couch_util:get_value(selector, Options),
- couch_stats:increment_counter([mango, docs_examined]),
- case mango_selector:match(Selector, Doc) of
- true ->
- ok = rexi:stream2(ViewRow),
- set_mango_msg_timestamp();
- false ->
- maybe_send_mango_ping()
- end
- end,
- {ok, Acc};
-view_cb(complete, Acc) ->
- % Send shard-level execution stats
- ok = rexi:stream2({execution_stats, {docs_examined, get(mango_docs_examined)}}),
- % Finish view output
- ok = rexi:stream_last(complete),
- {ok, Acc};
-view_cb(ok, ddoc_updated) ->
- rexi:reply({ok, ddoc_updated}).
-
-maybe_send_mango_ping() ->
- Current = os:timestamp(),
- LastPing = get(mango_last_msg_timestamp),
- % Fabric will time out if it has not heard from a worker node within
- % 5 seconds. Send a ping every 4 seconds so the timeout doesn't happen.
- case timer:now_diff(Current, LastPing) > ?HEARTBEAT_INTERVAL_IN_USEC of
- false ->
- ok;
- true ->
- rexi:ping(),
- set_mango_msg_timestamp()
- end.
-
-set_mango_msg_timestamp() ->
- put(mango_last_msg_timestamp, os:timestamp()).
-
-handle_message({meta, _}, Cursor) ->
- {ok, Cursor};
-handle_message({row, Props}, Cursor) ->
- case doc_member(Cursor, Props) of
- {ok, Doc, {execution_stats, Stats}} ->
- Cursor1 = Cursor#cursor{
- execution_stats = Stats
- },
- Cursor2 = update_bookmark_keys(Cursor1, Props),
- FinalDoc = mango_fields:extract(Doc, Cursor2#cursor.fields),
- handle_doc(Cursor2, FinalDoc);
- {no_match, _, {execution_stats, Stats}} ->
- Cursor1 = Cursor#cursor{
- execution_stats = Stats
- },
- {ok, Cursor1};
- Error ->
- couch_log:error("~s :: Error loading doc: ~p", [?MODULE, Error]),
- {ok, Cursor}
- end;
-handle_message({execution_stats, ShardStats}, #cursor{execution_stats = Stats} = Cursor) ->
- {docs_examined, DocsExamined} = ShardStats,
- Cursor1 = Cursor#cursor{
- execution_stats = mango_execution_stats:incr_docs_examined(Stats, DocsExamined)
- },
- {ok, Cursor1};
-handle_message(complete, Cursor) ->
- {ok, Cursor};
-handle_message({error, Reason}, _Cursor) ->
- {error, Reason}.
-
-handle_all_docs_message({row, Props}, Cursor) ->
- case is_design_doc(Props) of
- true -> {ok, Cursor};
- false -> handle_message({row, Props}, Cursor)
- end;
-handle_all_docs_message(Message, Cursor) ->
- handle_message(Message, Cursor).
-
-handle_doc(#cursor{skip = S} = C, _) when S > 0 ->
- {ok, C#cursor{skip = S - 1}};
-handle_doc(#cursor{limit = L, execution_stats = Stats} = C, Doc) when L > 0 ->
- UserFun = C#cursor.user_fun,
- UserAcc = C#cursor.user_acc,
- {Go, NewAcc} = UserFun({row, Doc}, UserAcc),
- {Go, C#cursor{
- user_acc = NewAcc,
- limit = L - 1,
- execution_stats = mango_execution_stats:incr_results_returned(Stats)
- }};
-handle_doc(C, _Doc) ->
- {stop, C}.
-
-ddocid(Idx) ->
- case mango_idx:ddoc(Idx) of
- <<"_design/", Rest/binary>> ->
- Rest;
- Else ->
- Else
- end.
-
-apply_opts([], Args) ->
- Args;
-apply_opts([{r, RStr} | Rest], Args) ->
- IncludeDocs =
- case list_to_integer(RStr) of
- 1 ->
- true;
- R when R > 1 ->
- % We don't load the doc in the view query because
- % we have to do a quorum read in the coordinator
- % so there's no point.
- false
- end,
- NewArgs = Args#mrargs{include_docs = IncludeDocs},
- apply_opts(Rest, NewArgs);
-apply_opts([{conflicts, true} | Rest], Args) ->
- NewArgs = Args#mrargs{conflicts = true},
- apply_opts(Rest, NewArgs);
-apply_opts([{conflicts, false} | Rest], Args) ->
- % Ignored because it's the default
- apply_opts(Rest, Args);
-apply_opts([{sort, Sort} | Rest], Args) ->
- % We only support single direction sorts
- % so nothing fancy here.
- case mango_sort:directions(Sort) of
- [] ->
- apply_opts(Rest, Args);
- [<<"asc">> | _] ->
- apply_opts(Rest, Args);
- [<<"desc">> | _] ->
- SK = Args#mrargs.start_key,
- SKDI = Args#mrargs.start_key_docid,
- EK = Args#mrargs.end_key,
- EKDI = Args#mrargs.end_key_docid,
- NewArgs = Args#mrargs{
- direction = rev,
- start_key = EK,
- start_key_docid = EKDI,
- end_key = SK,
- end_key_docid = SKDI
- },
- apply_opts(Rest, NewArgs)
- end;
-apply_opts([{stale, ok} | Rest], Args) ->
- NewArgs = Args#mrargs{
- stable = true,
- update = false
- },
- apply_opts(Rest, NewArgs);
-apply_opts([{stable, true} | Rest], Args) ->
- NewArgs = Args#mrargs{
- stable = true
- },
- apply_opts(Rest, NewArgs);
-apply_opts([{update, false} | Rest], Args) ->
- NewArgs = Args#mrargs{
- update = false
- },
- apply_opts(Rest, NewArgs);
-apply_opts([{partition, <<>>} | Rest], Args) ->
- apply_opts(Rest, Args);
-apply_opts([{partition, Partition} | Rest], Args) when is_binary(Partition) ->
- NewArgs = couch_mrview_util:set_extra(Args, partition, Partition),
- apply_opts(Rest, NewArgs);
-apply_opts([{_, _} | Rest], Args) ->
- % Ignore unknown options
- apply_opts(Rest, Args).
-
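A minimal sketch of the descending-sort branch, assuming the sort option arrives in its normalized {[{Field, Dir}]} form; the keys are hypothetical:

    Args0 = #mrargs{start_key = StartK, end_key = EndK},
    Args1 = apply_opts([{sort, {[{<<"year">>, <<"desc">>}]}}], Args0).
    %% Args1#mrargs.direction is rev, and the start/end keys (and their
    %% docids) are swapped relative to Args0.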
-doc_member(Cursor, RowProps) ->
- Db = Cursor#cursor.db,
- Opts = Cursor#cursor.opts,
- ExecutionStats = Cursor#cursor.execution_stats,
- Selector = Cursor#cursor.selector,
- case couch_util:get_value(doc, RowProps) of
- {DocProps} ->
- % only matching documents are returned; the selector
- % has already been evaluated at the shard level in view_cb({row, Row}, ...)
- {ok, {DocProps}, {execution_stats, ExecutionStats}};
- undefined ->
- % an undefined doc was returned, indicating we should
- % perform a quorum fetch
- ExecutionStats1 = mango_execution_stats:incr_quorum_docs_examined(ExecutionStats),
- couch_stats:increment_counter([mango, quorum_docs_examined]),
- Id = couch_util:get_value(id, RowProps),
- case mango_util:defer(fabric, open_doc, [Db, Id, Opts]) of
- {ok, #doc{} = DocProps} ->
- Doc = couch_doc:to_json_obj(DocProps, []),
- match_doc(Selector, Doc, ExecutionStats1);
- Else ->
- Else
- end;
- _ ->
- % no doc, no match
- {no_match, null, {execution_stats, ExecutionStats}}
- end.
-
-match_doc(Selector, Doc, ExecutionStats) ->
- case mango_selector:match(Selector, Doc) of
- true ->
- {ok, Doc, {execution_stats, ExecutionStats}};
- false ->
- {no_match, Doc, {execution_stats, ExecutionStats}}
- end.
-
-is_design_doc(RowProps) ->
- case couch_util:get_value(id, RowProps) of
- <<"_design/", _/binary>> -> true;
- _ -> false
- end.
-
-update_bookmark_keys(#cursor{limit = Limit} = Cursor, Props) when Limit > 0 ->
- Id = couch_util:get_value(id, Props),
- Key = couch_util:get_value(key, Props),
- Cursor#cursor{
- bookmark_docid = Id,
- bookmark_key = Key
- };
-update_bookmark_keys(Cursor, _Props) ->
- Cursor.
-
-%%%%%%%% module tests below %%%%%%%%
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-does_not_refetch_doc_with_value_test() ->
- Cursor = #cursor{
- db = <<"db">>,
- opts = [],
- execution_stats = #execution_stats{},
- selector = mango_selector:normalize({[{<<"user_id">>, <<"1234">>}]})
- },
- RowProps = [
- {id, <<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {key, <<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {doc,
- {
- [
- {<<"_id">>, <<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {<<"_rev">>, <<"1-a954fe2308f14307756067b0e18c2968">>},
- {<<"user_id">>, 11}
- ]
- }}
- ],
- {Match, _, _} = doc_member(Cursor, RowProps),
- ?assertEqual(Match, ok).
-
--endif.
diff --git a/src/mango/src/mango_doc.erl b/src/mango/src/mango_doc.erl
deleted file mode 100644
index f8cb4c63b..000000000
--- a/src/mango/src/mango_doc.erl
+++ /dev/null
@@ -1,543 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_doc).
-
--export([
- from_bson/1,
-
- apply_update/2,
- update_as_insert/1,
- has_operators/1,
-
- get_field/2,
- get_field/3,
- rem_field/2,
- set_field/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
-
-from_bson({Props}) ->
- DocProps =
- case lists:keytake(<<"_id">>, 1, Props) of
- {value, {<<"_id">>, DocId0}, RestProps} ->
- DocId =
- case DocId0 of
- {[{<<"$id">>, Id}]} ->
- Id;
- Else ->
- Else
- end,
- [{<<"_id">>, DocId} | RestProps];
- false ->
- Props
- end,
- Doc = couch_doc:from_json_obj({DocProps}),
- case Doc#doc.id of
- <<"">> ->
- Doc#doc{id = couch_uuids:new(), revs = {0, []}};
- _ ->
- Doc
- end.
-
-apply_update(#doc{body = {Props}} = Doc, Update) ->
- NewProps = apply_update(Props, Update),
- Doc#doc{body = {NewProps}};
-apply_update({Props}, {Update}) ->
- Result = do_update({Props}, Update),
- case has_operators(Result) of
- true ->
- ?MANGO_ERROR(update_leaves_operators);
- false ->
- ok
- end,
- Result.
-
-update_as_insert({Update}) ->
- NewProps = do_update_to_insert(Update, {[]}),
- apply_update(NewProps, {Update}).
-
-has_operators(#doc{body = Body}) ->
- has_operators(Body);
-has_operators({Props}) when is_list(Props) ->
- has_operators_obj(Props);
-has_operators(Arr) when is_list(Arr) ->
- has_operators_arr(Arr);
-has_operators(Val) when is_atom(Val) ->
- false;
-has_operators(Val) when is_number(Val) ->
- false;
-has_operators(Val) when is_binary(Val) ->
- false.
-
-has_operators_obj([]) ->
- false;
-has_operators_obj([{K, V} | Rest]) ->
- case K of
- <<"$", _/binary>> ->
- true;
- _ ->
- case has_operators(V) of
- true ->
- true;
- false ->
- has_operators_obj(Rest)
- end
- end.
-
-has_operators_arr([]) ->
- false;
-has_operators_arr([V | Rest]) ->
- case has_operators(V) of
- true ->
- true;
- false ->
- has_operators_arr(Rest)
- end.
-
-do_update(Props, []) ->
- Props;
-do_update(Props, [{Op, Value} | Rest]) ->
- UpdateFun = update_operator_fun(Op),
- NewProps =
- case UpdateFun of
- undefined ->
- lists:keystore(Op, 1, Props, {Op, Value});
- Fun when is_function(Fun, 2) ->
- case Value of
- {ValueProps} ->
- Fun(Props, ValueProps);
- _ ->
- ?MANGO_ERROR({invalid_operand, Op, Value})
- end
- end,
- do_update(NewProps, Rest).
-
-update_operator_fun(<<"$", _/binary>> = Op) ->
- OperatorFuns = [
- % Object operators
- {<<"$inc">>, fun do_update_inc/2},
- {<<"$rename">>, fun do_update_rename/2},
- {<<"$setOnInsert">>, fun do_update_set_on_insert/2},
- {<<"$set">>, fun do_update_set/2},
- {<<"$unset">>, fun do_update_unset/2},
-
- % Array operators
- {<<"$addToSet">>, fun do_update_add_to_set/2},
- {<<"$pop">>, fun do_update_pop/2},
- {<<"$pullAll">>, fun do_update_pull_all/2},
- {<<"$pull">>, fun do_update_pull/2},
- {<<"$pushAll">>, fun do_update_push_all/2},
- {<<"$push">>, fun do_update_push/2},
-
- % Bitwise Operators
- {<<"$bit">>, fun do_update_bitwise/2}
- ],
- case lists:keyfind(Op, 1, OperatorFuns) of
- {Op, Fun} ->
- Fun;
- false ->
- ?MANGO_ERROR({update_operator_not_supported, Op})
- end;
-update_operator_fun(_) ->
- undefined.
-
-do_update_inc(Props, []) ->
- Props;
-do_update_inc(Props, [{Field, Incr} | Rest]) ->
- if
- is_number(Incr) -> ok;
- true -> ?MANGO_ERROR({invalid_increment, Incr})
- end,
- NewProps =
- case get_field(Props, Field, fun is_number/1) of
- Value when is_number(Value) ->
- set_field(Props, Field, Value + Incr);
- not_found ->
- set_field(Props, Field, Incr);
- _ ->
- Props
- end,
- do_update_inc(NewProps, Rest).
-
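A small sketch of how the operator functions compose through do_update/2, using a hypothetical document:

    Doc = {[{<<"count">>, 1}]},
    apply_update(Doc, {[{<<"$inc">>, {[{<<"count">>, 2}]}}]}).
    %% => {[{<<"count">>, 3}]}
    %% A non-numeric increment such as <<"2">> raises {invalid_increment, _}.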
-do_update_rename(Props, []) ->
- Props;
-do_update_rename(Props, [{OldField, NewField} | Rest]) ->
- NewProps =
- case rem_field(Props, OldField) of
- {RemProps, OldValue} ->
- set_field(RemProps, NewField, OldValue);
- _ ->
- Props
- end,
- do_update_rename(NewProps, Rest).
-
-do_update_set_on_insert(Props, _) ->
- % This is only called during calls to apply_update/2
- % which means this isn't an insert, so drop it on
- % the floor.
- Props.
-
-do_update_set(Props, []) ->
- Props;
-do_update_set(Props, [{Field, Value} | Rest]) ->
- NewProps = set_field(Props, Field, Value),
- do_update_set(NewProps, Rest).
-
-do_update_unset(Props, []) ->
- Props;
-do_update_unset(Props, [{Field, _} | Rest]) ->
- NewProps =
- case rem_field(Props, Field) of
- {RemProps, _} ->
- RemProps;
- _ ->
- Props
- end,
- do_update_unset(NewProps, Rest).
-
-do_update_add_to_set(Props, []) ->
- Props;
-do_update_add_to_set(Props, [{Field, NewValue} | Rest]) ->
- ToAdd =
- case NewValue of
- {[{<<"$each">>, NewValues}]} when is_list(NewValues) ->
- NewValues;
- {[{<<"$each">>, NewValue}]} ->
- [NewValue];
- Else ->
- [Else]
- end,
- NewProps =
- case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- FinalValues = lists:foldl(
- fun(V, Acc) ->
- lists:append(Acc, [V])
- end,
- OldValues,
- ToAdd
- ),
- set_field(Props, Field, FinalValues);
- _ ->
- Props
- end,
- do_update_add_to_set(NewProps, Rest).
-
-do_update_pop(Props, []) ->
- Props;
-do_update_pop(Props, [{Field, Pos} | Rest]) ->
- NewProps =
- case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues =
- case Pos > 0 of
- true ->
- lists:sublist(OldValues, 1, length(OldValues) - 1);
- false ->
- lists:sublist(OldValues, 2, length(OldValues) - 1)
- end,
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
- do_update_pop(NewProps, Rest).
-
-do_update_pull_all(Props, []) ->
- Props;
-do_update_pull_all(Props, [{Field, Values} | Rest]) ->
- ToRem =
- case is_list(Values) of
- true -> Values;
- false -> [Values]
- end,
- NewProps =
- case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues = lists:foldl(
- fun(ValToRem, Acc) ->
- % The logic in these filter functions is a bit
- % subtle. The way to think of this is that we
- % return true for all elements we want to keep.
- FilterFun =
- case has_operators(ValToRem) of
- true ->
- fun(A) ->
- Sel = mango_selector:normalize(ValToRem),
- not mango_selector:match(Sel, A)
- end;
- false ->
- fun(A) -> A /= ValToRem end
- end,
- lists:filter(FilterFun, Acc)
- end,
- OldValues,
- ToRem
- ),
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
- do_update_pull_all(NewProps, Rest).
-
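A small sketch of pulling plain values, using a hypothetical document:

    Doc = {[{<<"tags">>, [<<"a">>, <<"b">>, <<"a">>]}]},
    do_update_pull_all(Doc, [{<<"tags">>, [<<"a">>]}]).
    %% => {[{<<"tags">>, [<<"b">>]}]}
    %% A value containing operators, e.g. {[{<<"$gt">>, 1}]}, is normalized
    %% as a selector and every matching element is dropped.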
-do_update_pull(Props, []) ->
- Props;
-do_update_pull(Props, [{Field, Value} | Rest]) ->
- ToRem =
- case Value of
- {[{<<"$each">>, Values}]} when is_list(Values) ->
- Values;
- {[{<<"$each">>, Value}]} ->
- [Value];
- Else ->
- [Else]
- end,
- NewProps = do_update_pull_all(Props, [{Field, ToRem}]),
- do_update_pull(NewProps, Rest).
-
-do_update_push_all(Props, []) ->
- Props;
-do_update_push_all(Props, [{Field, Values} | Rest]) ->
- ToAdd =
- case is_list(Values) of
- true -> Values;
- false -> [Values]
- end,
- NewProps =
- case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues = OldValues ++ ToAdd,
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
- do_update_push_all(NewProps, Rest).
-
-do_update_push(Props, []) ->
- Props;
-do_update_push(Props, [{Field, Value} | Rest]) ->
- ToAdd =
- case Value of
- {[{<<"$each">>, Values}]} when is_list(Values) ->
- Values;
- {[{<<"$each">>, Value}]} ->
- [Value];
- Else ->
- [Else]
- end,
- NewProps = do_update_push_all(Props, [{Field, ToAdd}]),
- do_update_push(NewProps, Rest).
-
-do_update_bitwise(Props, []) ->
- Props;
-do_update_bitwise(Props, [{Field, Value} | Rest]) ->
- DoOp =
- case Value of
- {[{<<"and">>, Val}]} when is_integer(Val) ->
- fun(V) -> V band Val end;
- {[{<<"or">>, Val}]} when is_integer(Val) ->
- fun(V) -> V bor Val end;
- _ ->
- fun(V) -> V end
- end,
- NewProps =
- case get_field(Props, Field, fun is_number/1) of
- Current when is_number(Current) ->
- NewValue = DoOp(Current),
- set_field(Props, Field, NewValue);
- _ ->
- Props
- end,
- do_update_bitwise(NewProps, Rest).
-
-do_update_to_insert([], Doc) ->
- Doc;
-do_update_to_insert([{<<"$setOnInsert">>, {FieldProps}}], Doc) ->
- lists:foldl(
- fun({Field, Value}, DocAcc) ->
- set_field(DocAcc, Field, Value)
- end,
- Doc,
- FieldProps
- );
-do_update_to_insert([{_, _} | Rest], Doc) ->
- do_update_to_insert(Rest, Doc).
-
-get_field(Props, Field) ->
- get_field(Props, Field, no_validation).
-
-get_field(Props, Field, Validator) when is_binary(Field) ->
- {ok, Path} = mango_util:parse_field(Field),
- get_field(Props, Path, Validator);
-get_field(Props, [], no_validation) ->
- Props;
-get_field(Props, [], Validator) ->
- case (catch Validator(Props)) of
- true ->
- Props;
- _ ->
- invalid_value
- end;
-get_field({Props}, [Name | Rest], Validator) ->
- case lists:keyfind(Name, 1, Props) of
- {Name, Value} ->
- get_field(Value, Rest, Validator);
- false ->
- not_found
- end;
-get_field(Values, [Name | Rest], Validator) when is_list(Values) ->
- % Name might be an integer index into an array
- try
- Pos = list_to_integer(binary_to_list(Name)),
- case Pos >= 0 andalso Pos < length(Values) of
- true ->
- % +1 because Erlang uses 1 based list indices
- Value = lists:nth(Pos + 1, Values),
- get_field(Value, Rest, Validator);
- false ->
- bad_path
- end
- catch
- error:badarg ->
- bad_path
- end;
-get_field(_, [_ | _], _) ->
- bad_path.
-
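A minimal sketch of path traversal, assuming mango_util:parse_field/1 splits the field on '.'; the document is hypothetical:

    Doc = {[{<<"a">>, {[{<<"b">>, [10, 20, 30]}]}}]},
    get_field(Doc, <<"a.b">>),      %% => [10, 20, 30]
    get_field(Doc, <<"a.b.1">>),    %% => 20 (array indices are 0-based)
    get_field(Doc, <<"a.x">>),      %% => not_found
    get_field(Doc, <<"a.b.9">>).    %% => bad_path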
-rem_field(Props, Field) when is_binary(Field) ->
- {ok, Path} = mango_util:parse_field(Field),
- rem_field(Props, Path);
-rem_field({Props}, [Name]) ->
- case lists:keytake(Name, 1, Props) of
- {value, Value, NewProps} ->
- {NewProps, Value};
- false ->
- not_found
- end;
-rem_field({Props}, [Name | Rest]) ->
- case lists:keyfind(Name, 1, Props) of
- {Name, Value} ->
- case rem_field(Value, Rest) of
- {NewValue, Ret} ->
- NewObj = {lists:keystore(Name, 1, Props, {Name, NewValue})},
- {NewObj, Ret};
- Else ->
- Else
- end;
- false ->
- not_found
- end;
-rem_field(Values, [Name]) when is_list(Values) ->
- % Name might be an integer index into an array
- try
- Pos = list_to_integer(binary_to_list(Name)),
- case Pos >= 0 andalso Pos < length(Values) of
- true ->
- % +1 because Erlang uses 1 based list indices
- rem_elem(Pos + 1, Values);
- false ->
- bad_path
- end
- catch
- error:badarg ->
- bad_path
- end;
-rem_field(Values, [Name | Rest]) when is_list(Values) ->
- % Name might be an integer index into an array
- try
- Pos = list_to_integer(binary_to_list(Name)),
- case Pos >= 0 andalso Pos < length(Values) of
- true ->
- % +1 because Erlang uses 1 based list indices
- Value = lists:nth(Pos + 1, Values),
- case rem_field(Value, Rest) of
- {NewValue, Ret} ->
- {set_elem(Pos + 1, Values, NewValue), Ret};
- Else ->
- Else
- end;
- false ->
- bad_path
- end
- catch
- error:badarg ->
- bad_path
- end;
-rem_field(_, [_ | _]) ->
- bad_path.
-
-set_field(Props, Field, Value) when is_binary(Field) ->
- {ok, Path} = mango_util:parse_field(Field),
- set_field(Props, Path, Value);
-set_field({Props}, [Name], Value) ->
- {lists:keystore(Name, 1, Props, {Name, Value})};
-set_field({Props}, [Name | Rest], Value) ->
- case lists:keyfind(Name, 1, Props) of
- {Name, Elem} ->
- Result = set_field(Elem, Rest, Value),
- {lists:keystore(Name, 1, Props, {Name, Result})};
- false ->
- Nested = make_nested(Rest, Value),
- {lists:keystore(Name, 1, Props, {Name, Nested})}
- end;
-set_field(Values, [Name], Value) when is_list(Values) ->
- % Name might be an integer index into an array
- try
- Pos = list_to_integer(binary_to_list(Name)),
- case Pos >= 0 andalso Pos < length(Values) of
- true ->
- % +1 because Erlang uses 1 based list indices
- set_elem(Pos + 1, Values, Value);
- false ->
- Values
- end
- catch
- error:badarg ->
- Values
- end;
-set_field(Values, [Name | Rest], Value) when is_list(Values) ->
- % Name might be an integer index into an array
- try
- Pos = list_to_integer(binary_to_list(Name)),
- case Pos >= 0 andalso Pos < length(Values) of
- true ->
- % +1 because Erlang uses 1 based list indices
- Elem = lists:nth(Pos + 1, Values),
- Result = set_field(Elem, Rest, Value),
- set_elem(Pos + 1, Values, Result);
- false ->
- Values
- end
- catch
- error:badarg ->
- Values
- end;
-set_field(Value, [_ | _], _) ->
- Value.
-
-make_nested([], Value) ->
- Value;
-make_nested([Name | Rest], Value) ->
- {[{Name, make_nested(Rest, Value)}]}.
-
-rem_elem(1, [Value | Rest]) ->
- {Rest, Value};
-rem_elem(I, [Item | Rest]) when I > 1 ->
- {Tail, Value} = rem_elem(I - 1, Rest),
- {[Item | Tail], Value}.
-
-set_elem(1, [_ | Rest], Value) ->
- [Value | Rest];
-set_elem(I, [Item | Rest], Value) when I > 1 ->
- [Item | set_elem(I - 1, Rest, Value)].
diff --git a/src/mango/src/mango_epi.erl b/src/mango/src/mango_epi.erl
deleted file mode 100644
index b7ee68857..000000000
--- a/src/mango/src/mango_epi.erl
+++ /dev/null
@@ -1,48 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- mango.
-
-providers() ->
- [
- {chttpd_handlers, mango_httpd_handlers}
- ].
-
-services() ->
- [].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
deleted file mode 100644
index d8ae3fcbf..000000000
--- a/src/mango/src/mango_error.erl
+++ /dev/null
@@ -1,380 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_error).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([
- info/2
-]).
-
-info(mango_idx, {no_usable_index, missing_sort_index}) ->
- {
- 400,
- <<"no_usable_index">>,
- <<
- "No index exists for this sort, "
- "try indexing by the sort fields."
- >>
- };
-info(mango_idx, {no_usable_index, missing_sort_index_partitioned}) ->
- {
- 400,
- <<"no_usable_index">>,
- <<
- "No partitioned index exists for this sort, "
- "try indexing by the sort fields."
- >>
- };
-info(mango_idx, {no_usable_index, missing_sort_index_global}) ->
- {
- 400,
- <<"no_usable_index">>,
- <<"No global index exists for this sort, try indexing by the sort fields.">>
- };
-info(mango_json_bookmark, {invalid_bookmark, BadBookmark}) ->
- {
- 400,
- <<"invalid_bookmark">>,
- fmt("Invalid bookmark value: ~s", [?JSON_ENCODE(BadBookmark)])
- };
-info(mango_cursor_text, {invalid_bookmark, BadBookmark}) ->
- {
- 400,
- <<"invalid_bookmark">>,
- fmt("Invalid bookmark value: ~s", [?JSON_ENCODE(BadBookmark)])
- };
-info(mango_cursor_text, multiple_text_indexes) ->
- {
- 400,
- <<"multiple_text_indexes">>,
- <<"You must specify an index with the `use_index` parameter.">>
- };
-info(mango_cursor_text, {text_search_error, {error, {bad_request, Msg}}}) when
- is_binary(Msg)
-->
- {
- 400,
- <<"text_search_error">>,
- Msg
- };
-info(mango_cursor_text, {text_search_error, {error, Error}}) ->
- {
- 500,
- <<"text_search_error">>,
- fmt("~p", [Error])
- };
-info(mango_fields, {invalid_fields_json, BadFields}) ->
- {
- 400,
- <<"invalid_fields">>,
- fmt("Fields must be an array of strings, not: ~w", [BadFields])
- };
-info(mango_fields, {invalid_field_json, BadField}) ->
- {
- 400,
- <<"invalid_field">>,
- fmt("Invalid JSON for field spec: ~w", [BadField])
- };
-info(mango_httpd, error_saving_ddoc) ->
- {
- 500,
- <<"error_saving_ddoc">>,
- <<"Unknown error while saving the design document.">>
- };
-info(mango_httpd, {error_saving_ddoc, <<"conflict">>}) ->
- {
- 500,
- <<"error_saving_ddoc">>,
- <<"Encountered a conflict while saving the design document.">>
- };
-info(mango_httpd, {error_saving_ddoc, Reason}) ->
- {
- 500,
- <<"error_saving_ddoc">>,
- fmt("Unknown error while saving the design document: ~s", [Reason])
- };
-info(mango_httpd, invalid_list_index_params) ->
- {
- 500,
- <<"invalid_list_index_params">>,
- <<"Index parameter ranges: limit > 1, skip > 0">>
- };
-info(mango_idx, {invalid_index_type, BadType}) ->
- {
- 400,
- <<"invalid_index">>,
- fmt("Invalid type for index: ~s", [BadType])
- };
-info(mango_idx, {partitioned_option_mismatch, BadDDoc}) ->
- {
- 400,
- <<"invalid_partitioned_option">>,
- fmt(
- "Requested partitioned option does not match existing value on"
- " design document ~s",
- [BadDDoc]
- )
- };
-info(mango_idx, invalid_query_ddoc_language) ->
- {
- 400,
- <<"invalid_index">>,
- <<"Invalid design document query language.">>
- };
-info(mango_idx, no_index_definition) ->
- {
- 400,
- <<"invalid_index">>,
- <<"Index is missing its definition.">>
- };
-info(mango_idx, {index_not_implemented, IndexName}) ->
- {
- 501,
- <<"index_not_implemented">>,
- fmt("~s", [IndexName])
- };
-info(mango_idx, {index_service_unavailable, IndexName}) ->
- {
- 503,
- <<"required index service unavailable">>,
- fmt("~s", [IndexName])
- };
-info(mango_idx_view, {invalid_index_json, BadIdx}) ->
- {
- 400,
- <<"invalid_index">>,
- fmt("JSON indexes must be an object, not: ~w", [BadIdx])
- };
-info(mango_idx_text, {invalid_index_fields_definition, Def}) ->
- {
- 400,
- <<"invalid_index_fields_definition">>,
- fmt(
- "Text Index field definitions must be of the form\n"
- " {\"name\": \"non-empty fieldname\", \"type\":\n"
- " \"boolean,number, or string\"}. Def: ~p",
- [Def]
- )
- };
-info(mango_idx_view, {index_not_found, BadIdx}) ->
- {
- 404,
- <<"invalid_index">>,
- fmt("JSON index ~s not found in this design doc.", [BadIdx])
- };
-info(mango_idx_text, {invalid_index_text, BadIdx}) ->
- {
- 400,
- <<"invalid_index">>,
- fmt("Text indexes must be an object, not: ~w", [BadIdx])
- };
-info(mango_idx_text, {index_not_found, BadIdx}) ->
- {
- 404,
- <<"index_not_found">>,
- fmt("Text index ~s not found in this design doc.", [BadIdx])
- };
-info(mango_idx_text, index_all_disabled) ->
- {
- 403,
- <<"index_all_disabled">>,
- <<"New text indexes are forbidden to index all fields.">>
- };
-info(mango_opts, {invalid_bulk_docs, Val}) ->
- {
- 400,
- <<"invalid_bulk_docs">>,
- fmt(
- "Bulk Delete requires an array of non-null docids. Docids: ~w",
- [Val]
- )
- };
-info(mango_opts, {invalid_ejson, Val}) ->
- {
- 400,
- <<"invalid_ejson">>,
- fmt("Invalid JSON value: ~w", [Val])
- };
-info(mango_opts, {invalid_key, Key}) ->
- {
- 400,
- <<"invalid_key">>,
- fmt("Invalid key ~s for this request.", [Key])
- };
-info(mango_opts, {missing_required_key, Key}) ->
- {
- 400,
- <<"missing_required_key">>,
- fmt("Missing required key: ~s", [Key])
- };
-info(mango_opts, {invalid_value, Name, Expect, Found}) ->
- {
- 400,
- <<"invalid_value">>,
- fmt("Value for ~s is ~w, should be ~w", [Name, Found, Expect])
- };
-info(mango_opts, {invalid_value, Name, Value}) ->
- {
- 400,
- <<"invalid_value">>,
- fmt("Invalid value for ~s: ~w", [Name, Value])
- };
-info(mango_opts, {invalid_string, Val}) ->
- {
- 400,
- <<"invalid_string">>,
- fmt("Invalid string: ~w", [Val])
- };
-info(mango_opts, {invalid_boolean, Val}) ->
- {
- 400,
- <<"invalid_boolean">>,
- fmt("Invalid boolean value: ~w", [Val])
- };
-info(mango_opts, {invalid_pos_integer, Val}) ->
- {
- 400,
- <<"invalid_pos_integer">>,
- fmt("~w is not an integer greater than zero", [Val])
- };
-info(mango_opts, {invalid_non_neg_integer, Val}) ->
- {
- 400,
- <<"invalid_non_neg_integer">>,
- fmt("~w is not an integer greater than or equal to zero", [Val])
- };
-info(mango_opts, {invalid_object, BadObj}) ->
- {
- 400,
- <<"invalid_object">>,
- fmt("~w is not a JSON object", [BadObj])
- };
-info(mango_opts, {invalid_selector_json, BadSel}) ->
- {
- 400,
- <<"invalid_selector_json">>,
- fmt("Selector must be a JSON object, not: ~w", [BadSel])
- };
-info(mango_opts, invalid_empty_string) ->
- {
- 400,
- <<"invalid_empty_string">>,
- <<"Index name or ddoc cannot be empty string">>
- };
-info(mango_opts, {multiple_text_operator, {invalid_selector, BadSel}}) ->
- {
- 400,
- <<"multiple_text_selector">>,
- fmt(
- "Selector cannot contain more than one $text operator: ~w",
- [BadSel]
- )
- };
-info(mango_selector, {invalid_selector, missing_field_name}) ->
- {
- 400,
- <<"invalid_selector">>,
- <<"One or more conditions is missing a field name.">>
- };
-info(mango_selector, {bad_arg, Op, Arg}) ->
- {
- 400,
- <<"bad_arg">>,
- fmt("Bad argument for operator ~s: ~w", [Op, Arg])
- };
-info(mango_selector, {not_supported, Op}) ->
- {
- 400,
- <<"not_supported">>,
- fmt("Unsupported operator: ~s", [Op])
- };
-info(mango_selector, {invalid_operator, Op}) ->
- {
- 400,
- <<"invalid_operator">>,
- fmt("Invalid operator: ~s", [Op])
- };
-info(mango_selector, {bad_field, BadSel}) ->
- {
- 400,
- <<"bad_field">>,
- fmt("Invalid field normalization on selector: ~w", [BadSel])
- };
-info(mango_selector_text, {invalid_operator, Op}) ->
- {
- 400,
- <<"invalid_operator">>,
- fmt("Invalid text operator: ~s", [Op])
- };
-info(mango_selector_text, {text_sort_error, Field}) ->
- S = binary_to_list(Field),
- Msg =
- "Unspecified or ambiguous sort type. Try appending :number or"
- " :string to the sort field. ~s",
- {
- 400,
- <<"text_sort_error">>,
- fmt(Msg, [S])
- };
-info(mango_sort, {invalid_sort_json, BadSort}) ->
- {
- 400,
- <<"invalid_sort_json">>,
- fmt("Sort must be an array of sort specs, not: ~p", [BadSort])
- };
-info(mango_sort, {invalid_sort_dir, BadSpec}) ->
- {
- 400,
- <<"invalid_sort_dir">>,
- fmt("Invalid sort direction: ~w", BadSpec)
- };
-info(mango_sort, {invalid_sort_field, BadField}) ->
- {
- 400,
- <<"invalid_sort_field">>,
- fmt("Invalid sort field: ~p", [BadField])
- };
-info(mango_sort, {unsupported, mixed_sort}) ->
- {
- 400,
- <<"unsupported_mixed_sort">>,
- <<"Sorts currently only support a single direction for all fields.">>
- };
-info(mango_util, {error_loading_doc, DocId}) ->
- {
- 500,
- <<"internal_error">>,
- fmt("Error loading doc: ~s", [DocId])
- };
-info(mango_util, error_loading_ddocs) ->
- {
- 500,
- <<"internal_error">>,
- <<"Error loading design documents">>
- };
-info(mango_util, {invalid_ddoc_lang, Lang}) ->
- {
- 400,
- <<"invalid_ddoc_lang">>,
- fmt("Existing design doc has an invalid language: ~w", [Lang])
- };
-info(Module, Reason) ->
- {
- 500,
- <<"unknown_error">>,
- fmt("Unknown Error: ~s :: ~w", [Module, Reason])
- }.
-
-fmt(Format, Args) ->
- iolist_to_binary(io_lib:format(Format, Args)).
diff --git a/src/mango/src/mango_execution_stats.erl b/src/mango/src/mango_execution_stats.erl
deleted file mode 100644
index 0db3edf5f..000000000
--- a/src/mango/src/mango_execution_stats.erl
+++ /dev/null
@@ -1,86 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_execution_stats).
-
--export([
- to_json/1,
- incr_keys_examined/1,
- incr_docs_examined/1,
- incr_docs_examined/2,
- incr_quorum_docs_examined/1,
- incr_results_returned/1,
- log_start/1,
- log_end/1,
- maybe_add_stats/4
-]).
-
--include("mango_cursor.hrl").
-
-to_json(Stats) ->
- {[
- {total_keys_examined, Stats#execution_stats.totalKeysExamined},
- {total_docs_examined, Stats#execution_stats.totalDocsExamined},
- {total_quorum_docs_examined, Stats#execution_stats.totalQuorumDocsExamined},
- {results_returned, Stats#execution_stats.resultsReturned},
- {execution_time_ms, Stats#execution_stats.executionTimeMs}
- ]}.
-
-incr_keys_examined(Stats) ->
- Stats#execution_stats{
- totalKeysExamined = Stats#execution_stats.totalKeysExamined + 1
- }.
-
-incr_docs_examined(Stats) ->
- incr_docs_examined(Stats, 1).
-
-incr_docs_examined(Stats, N) ->
- Stats#execution_stats{
- totalDocsExamined = Stats#execution_stats.totalDocsExamined + N
- }.
-
-incr_quorum_docs_examined(Stats) ->
- Stats#execution_stats{
- totalQuorumDocsExamined = Stats#execution_stats.totalQuorumDocsExamined + 1
- }.
-
-incr_results_returned(Stats) ->
- couch_stats:increment_counter([mango, results_returned]),
- Stats#execution_stats{
- resultsReturned = Stats#execution_stats.resultsReturned + 1
- }.
-
-log_start(Stats) ->
- Stats#execution_stats{
- executionStartTime = os:timestamp()
- }.
-
-log_end(Stats) ->
- End = os:timestamp(),
- Diff = timer:now_diff(End, Stats#execution_stats.executionStartTime) / 1000,
- Stats#execution_stats{
- executionTimeMs = Diff
- }.
-
-maybe_add_stats(Opts, UserFun, Stats0, UserAcc) ->
- Stats1 = log_end(Stats0),
- couch_stats:update_histogram([mango, query_time], Stats1#execution_stats.executionTimeMs),
-
- case couch_util:get_value(execution_stats, Opts) of
- true ->
- JSONValue = to_json(Stats1),
- Arg = {add_key, execution_stats, JSONValue},
- {_Go, FinalUserAcc} = UserFun(Arg, UserAcc),
- FinalUserAcc;
- _ ->
- UserAcc
- end.
diff --git a/src/mango/src/mango_execution_stats.hrl b/src/mango/src/mango_execution_stats.hrl
deleted file mode 100644
index ea5ed5ee8..000000000
--- a/src/mango/src/mango_execution_stats.hrl
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(execution_stats, {
- totalKeysExamined = 0,
- totalDocsExamined = 0,
- totalQuorumDocsExamined = 0,
- resultsReturned = 0,
- executionStartTime,
- executionTimeMs
-}).
diff --git a/src/mango/src/mango_fields.erl b/src/mango/src/mango_fields.erl
deleted file mode 100644
index 1745cf9dd..000000000
--- a/src/mango/src/mango_fields.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_fields).
-
--export([
- new/1,
- extract/2
-]).
-
--include("mango.hrl").
-
-new([]) ->
- {ok, all_fields};
-new(Fields) when is_list(Fields) ->
- {ok, [field(F) || F <- Fields]};
-new(Else) ->
- ?MANGO_ERROR({invalid_fields_json, Else}).
-
-extract(Doc, undefined) ->
- Doc;
-extract(Doc, all_fields) ->
- Doc;
-extract(Doc, Fields) ->
- lists:foldl(
- fun(F, NewDoc) ->
- {ok, Path} = mango_util:parse_field(F),
- case mango_doc:get_field(Doc, Path) of
- not_found ->
- NewDoc;
- bad_path ->
- NewDoc;
- Value ->
- mango_doc:set_field(NewDoc, Path, Value)
- end
- end,
- {[]},
- Fields
- ).
-
-field(Val) when is_binary(Val) ->
- Val;
-field({Val}) when is_list(Val) ->
- {Val};
-field(Else) ->
- ?MANGO_ERROR({invalid_field_json, Else}).
diff --git a/src/mango/src/mango_httpd.erl b/src/mango/src/mango_httpd.erl
deleted file mode 100644
index 002c45b2f..000000000
--- a/src/mango/src/mango_httpd.erl
+++ /dev/null
@@ -1,336 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_httpd).
-
--export([
- handle_req/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
--include("mango_idx.hrl").
--include("mango_execution_stats.hrl").
-
--record(vacc, {
- resp,
- prepend,
- kvs,
- buffer = [],
- bufsize = 0,
- threshold = 1490
-}).
-
-handle_req(#httpd{} = Req, Db0) ->
- try
- Db = set_user_ctx(Req, Db0),
- handle_req_int(Req, Db)
- catch
- ?STACKTRACE(throw, {mango_error, Module, Reason}, Stack)
- case mango_error:info(Module, Reason) of
- {500, ErrorStr, ReasonStr} ->
- chttpd:send_error(Req, {ErrorStr, ReasonStr, Stack});
- {Code, ErrorStr, ReasonStr} ->
- chttpd:send_error(Req, Code, ErrorStr, ReasonStr)
- end
- end.
-
-handle_req_int(#httpd{path_parts = [_, <<"_index">> | _]} = Req, Db) ->
- handle_index_req(Req, Db);
-handle_req_int(#httpd{path_parts = [_, <<"_explain">> | _]} = Req, Db) ->
- handle_explain_req(Req, Db);
-handle_req_int(#httpd{path_parts = [_, <<"_find">> | _]} = Req, Db) ->
- handle_find_req(Req, Db);
-handle_req_int(_, _) ->
- throw({not_found, missing}).
-
-handle_index_req(#httpd{method = 'GET', path_parts = [_, _]} = Req, Db) ->
- Params = lists:flatmap(
- fun({K, V}) -> parse_index_param(K, V) end,
- chttpd:qs(Req)
- ),
- Idxs = lists:sort(mango_idx:list(Db)),
- JsonIdxs0 = lists:map(fun mango_idx:to_json/1, Idxs),
- TotalRows = length(JsonIdxs0),
- Limit =
- case couch_util:get_value(limit, Params, TotalRows) of
- Limit0 when Limit0 < 1 ->
- ?MANGO_ERROR(invalid_list_index_params);
- Limit0 ->
- Limit0
- end,
- Skip =
- case couch_util:get_value(skip, Params, 0) of
- Skip0 when Skip0 < 0 ->
- ?MANGO_ERROR(invalid_list_index_params);
- Skip0 when Skip0 > TotalRows ->
- TotalRows;
- Skip0 ->
- Skip0
- end,
- JsonIdxs = lists:sublist(JsonIdxs0, Skip + 1, Limit),
- chttpd:send_json(Req, {[{total_rows, TotalRows}, {indexes, JsonIdxs}]});
-handle_index_req(#httpd{method = 'POST', path_parts = [_, _]} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- {ok, Opts} = mango_opts:validate_idx_create(chttpd:json_body_obj(Req)),
- {ok, Idx0} = mango_idx:new(Db, Opts),
- {ok, Idx} = mango_idx:validate_new(Idx0, Db),
- DbOpts = [{user_ctx, Req#httpd.user_ctx}, deleted, ejson_body],
- {ok, DDoc} = mango_util:load_ddoc(Db, mango_idx:ddoc(Idx), DbOpts),
- Id = Idx#idx.ddoc,
- Name = Idx#idx.name,
- Status =
- case mango_idx:add(DDoc, Idx) of
- {ok, DDoc} ->
- <<"exists">>;
- {ok, NewDDoc} ->
- CreateOpts = get_idx_w_opts(Opts),
- case mango_crud:insert(Db, NewDDoc, CreateOpts) of
- {ok, [{RespProps}]} ->
- case lists:keyfind(error, 1, RespProps) of
- {error, Reason} ->
- ?MANGO_ERROR({error_saving_ddoc, Reason});
- _ ->
- <<"created">>
- end;
- _ ->
- ?MANGO_ERROR(error_saving_ddoc)
- end
- end,
- chttpd:send_json(Req, {[{result, Status}, {id, Id}, {name, Name}]});
-handle_index_req(#httpd{path_parts = [_, _]} = Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET,POST");
-%% Essentially we just iterate through the list of ddoc ids passed in and
-%% delete them one by one. If an error occurs, all previous documents will be

-%% deleted, but an error will be thrown for the current ddoc id.
-handle_index_req(
- #httpd{
- method = 'POST',
- path_parts = [
- _,
- <<"_index">>,
- <<"_bulk_delete">>
- ]
- } = Req,
- Db
-) ->
- chttpd:validate_ctype(Req, "application/json"),
- {ok, Opts} = mango_opts:validate_bulk_delete(chttpd:json_body_obj(Req)),
- Idxs = mango_idx:list(Db),
- DDocs = get_bulk_delete_ddocs(Opts),
- DelOpts = get_idx_w_opts(Opts),
- {Success, Fail} = lists:foldl(
- fun(DDocId0, {Success0, Fail0}) ->
- DDocId = convert_to_design_id(DDocId0),
- Filt = fun(Idx) -> mango_idx:ddoc(Idx) == DDocId end,
- Id = {<<"id">>, DDocId},
- case mango_idx:delete(Filt, Db, Idxs, DelOpts) of
- {ok, true} ->
- {[{[Id, {<<"ok">>, true}]} | Success0], Fail0};
- {error, Error} ->
- {Success0, [{[Id, {<<"error">>, Error}]} | Fail0]}
- end
- end,
- {[], []},
- DDocs
- ),
- chttpd:send_json(Req, {[{<<"success">>, Success}, {<<"fail">>, Fail}]});
-handle_index_req(
- #httpd{
- path_parts = [
- _,
- <<"_index">>,
- <<"_bulk_delete">>
- ]
- } = Req,
- _Db
-) ->
- chttpd:send_method_not_allowed(Req, "POST");
-handle_index_req(
- #httpd{
- method = 'DELETE',
- path_parts = [A, B, <<"_design">>, DDocId0, Type, Name]
- } = Req,
- Db
-) ->
- PathParts = [A, B, <<"_design/", DDocId0/binary>>, Type, Name],
- handle_index_req(Req#httpd{path_parts = PathParts}, Db);
-handle_index_req(
- #httpd{
- method = 'DELETE',
- path_parts = [_, _, DDocId0, Type, Name]
- } = Req,
- Db
-) ->
- Idxs = mango_idx:list(Db),
- DDocId = convert_to_design_id(DDocId0),
- DelOpts = get_idx_del_opts(Req),
- Filt = fun(Idx) ->
- IsDDoc = mango_idx:ddoc(Idx) == DDocId,
- IsType = mango_idx:type(Idx) == Type,
- IsName = mango_idx:name(Idx) == Name,
- IsDDoc andalso IsType andalso IsName
- end,
- case mango_idx:delete(Filt, Db, Idxs, DelOpts) of
- {ok, true} ->
- chttpd:send_json(Req, {[{ok, true}]});
- {error, not_found} ->
- throw({not_found, missing});
- {error, Error} ->
- ?MANGO_ERROR({error_saving_ddoc, Error})
- end;
-handle_index_req(#httpd{path_parts = [_, _, _DDocId0, _Type, _Name]} = Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "DELETE").
-
-handle_explain_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Body = maybe_set_partition(Req),
- {ok, Opts0} = mango_opts:validate_find(Body),
- {value, {selector, Sel}, Opts} = lists:keytake(selector, 1, Opts0),
- Resp = mango_crud:explain(Db, Sel, Opts),
- chttpd:send_json(Req, Resp);
-handle_explain_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-handle_find_req(#httpd{method = 'POST'} = Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Body = maybe_set_partition(Req),
- {ok, Opts0} = mango_opts:validate_find(Body),
- {value, {selector, Sel}, Opts} = lists:keytake(selector, 1, Opts0),
- {ok, Resp0} = start_find_resp(Req),
- case run_find(Resp0, Db, Sel, Opts) of
- {ok, AccOut} ->
- end_find_resp(AccOut);
- {error, Error} ->
- chttpd:send_error(Req, Error)
- end;
-handle_find_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-set_user_ctx(#httpd{user_ctx = Ctx}, Db) ->
- {ok, NewDb} = couch_db:set_user_ctx(Db, Ctx),
- NewDb.
-
-get_idx_w_opts(Opts) ->
- case lists:keyfind(w, 1, Opts) of
- {w, N} when is_integer(N), N > 0 ->
- [{w, integer_to_list(N)}];
- _ ->
- [{w, "2"}]
- end.
-
-get_bulk_delete_ddocs(Opts) ->
- case lists:keyfind(docids, 1, Opts) of
- {docids, DDocs} when is_list(DDocs) ->
- DDocs;
- _ ->
- []
- end.
-
-get_idx_del_opts(Req) ->
- try
- WStr = chttpd:qs_value(Req, "w", "2"),
- _ = list_to_integer(WStr),
- [{w, WStr}]
- catch
- _:_ ->
- [{w, "2"}]
- end.
-
-maybe_set_partition(Req) ->
- {Props} = chttpd:json_body_obj(Req),
- case chttpd:qs_value(Req, "partition", undefined) of
- undefined ->
- {Props};
- Partition ->
- case couch_util:get_value(<<"partition">>, Props) of
- undefined ->
- {[{<<"partition">>, ?l2b(Partition)} | Props]};
- Partition ->
- {Props};
- OtherPartition ->
- ?MANGO_ERROR({bad_partition, OtherPartition})
- end
- end.
-
-convert_to_design_id(DDocId) ->
- case DDocId of
- <<"_design/", _/binary>> -> DDocId;
- _ -> <<"_design/", DDocId/binary>>
- end.
-
-start_find_resp(Req) ->
- chttpd:start_delayed_json_response(Req, 200, [], "{\"docs\":[").
-
-end_find_resp(Acc0) ->
- #vacc{resp = Resp00, buffer = Buf, kvs = KVs, threshold = Max} = Acc0,
- {ok, Resp0} = chttpd:close_delayed_json_object(Resp00, Buf, "\r\n]", Max),
- FinalAcc = lists:foldl(
- fun({K, V}, Acc) ->
- JK = ?JSON_ENCODE(K),
- JV = ?JSON_ENCODE(V),
- [JV, ": ", JK, ",\r\n" | Acc]
- end,
- [],
- KVs
- ),
- Chunk = lists:reverse(FinalAcc, ["}\r\n"]),
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
- chttpd:end_delayed_json_response(Resp1).
-
-run_find(Resp, Db, Sel, Opts) ->
- Acc0 = #vacc{
- resp = Resp,
- prepend = "\r\n",
- kvs = [],
- threshold = chttpd:chunked_response_buffer_size()
- },
- mango_crud:find(Db, Sel, fun handle_doc/2, Acc0, Opts).
-
-handle_doc({add_key, Key, Value}, Acc0) ->
- #vacc{kvs = KVs} = Acc0,
- NewKVs = lists:keystore(Key, 1, KVs, {Key, Value}),
- {ok, Acc0#vacc{kvs = NewKVs}};
-handle_doc({row, Doc}, Acc0) ->
- #vacc{prepend = Prepend} = Acc0,
- Chunk = [Prepend, ?JSON_ENCODE(Doc)],
- maybe_flush_response(Acc0, Chunk, iolist_size(Chunk)).
-
-maybe_flush_response(#vacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
- Size > 0 andalso (Size + Len) > Max
-->
- #vacc{buffer = Buffer, resp = Resp} = Acc,
- {ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
- {ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
-maybe_flush_response(Acc0, Data, Len) ->
- #vacc{buffer = Buf, bufsize = Size} = Acc0,
- Acc = Acc0#vacc{
- prepend = ",\r\n",
- buffer = [Buf | Data],
- bufsize = Size + Len
- },
- {ok, Acc}.
-
-parse_index_param("limit", Value) ->
- [{limit, parse_val(Value)}];
-parse_index_param("skip", Value) ->
- [{skip, parse_val(Value)}];
-parse_index_param(_Key, _Value) ->
- [].
-
-parse_val(Value) ->
- case (catch list_to_integer(Value)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- ?MANGO_ERROR(invalid_list_index_params)
- end.
diff --git a/src/mango/src/mango_httpd_handlers.erl b/src/mango/src/mango_httpd_handlers.erl
deleted file mode 100644
index feb693e94..000000000
--- a/src/mango/src/mango_httpd_handlers.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1]).
-
-url_handler(_) -> no_match.
-
-db_handler(<<"_index">>) -> fun mango_httpd:handle_req/2;
-db_handler(<<"_explain">>) -> fun mango_httpd:handle_req/2;
-db_handler(<<"_find">>) -> fun mango_httpd:handle_req/2;
-db_handler(_) -> no_match.
-
-design_handler(_) -> no_match.
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
deleted file mode 100644
index a20d730a2..000000000
--- a/src/mango/src/mango_idx.erl
+++ /dev/null
@@ -1,513 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% This module is for the "index object" as in, the data structure
-% representing an index. Not to be confused with mango_index which
-% contains APIs for managing indexes.
-
--module(mango_idx).
-
--export([
- list/1,
- recover/1,
-
- new/2,
- validate_new/2,
- add/2,
- remove/2,
- from_ddoc/2,
- special/1,
-
- dbname/1,
- ddoc/1,
- name/1,
- type/1,
- def/1,
- partitioned/1,
- opts/1,
- columns/1,
- is_usable/3,
- start_key/2,
- end_key/2,
- cursor_mod/1,
- idx_mod/1,
- to_json/1,
- delete/4,
- get_usable_indexes/3,
- get_partial_filter_selector/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
--include("mango_idx.hrl").
-
-list(Db) ->
- {ok, Indexes} = ddoc_cache:open(db_to_name(Db), ?MODULE),
- Indexes.
-
-get_usable_indexes(Db, Selector, Opts) ->
- ExistingIndexes = mango_idx:list(Db),
- GlobalIndexes = mango_cursor:remove_indexes_with_partial_filter_selector(
- ExistingIndexes
- ),
- UserSpecifiedIndex = mango_cursor:maybe_filter_indexes_by_ddoc(ExistingIndexes, Opts),
- UsableIndexes0 = lists:usort(GlobalIndexes ++ UserSpecifiedIndex),
- UsableIndexes1 = filter_partition_indexes(UsableIndexes0, Opts),
-
- SortFields = get_sort_fields(Opts),
- UsableFilter = fun(I) -> is_usable(I, Selector, SortFields) end,
-
- case lists:filter(UsableFilter, UsableIndexes1) of
- [] ->
- mango_sort_error(Db, Opts);
- UsableIndexes ->
- UsableIndexes
- end.
-
-mango_sort_error(Db, Opts) ->
- case {fabric_util:is_partitioned(Db), is_opts_partitioned(Opts)} of
- {false, _} ->
- ?MANGO_ERROR({no_usable_index, missing_sort_index});
- {true, true} ->
- ?MANGO_ERROR({no_usable_index, missing_sort_index_partitioned});
- {true, false} ->
- ?MANGO_ERROR({no_usable_index, missing_sort_index_global})
- end.
-
-recover(Db) ->
- {ok, DDocs0} = mango_util:open_ddocs(Db),
- Pred = fun({Props}) ->
- case proplists:get_value(<<"language">>, Props) of
- <<"query">> -> true;
- _ -> false
- end
- end,
- DDocs = lists:filter(Pred, DDocs0),
- Special = special(Db),
- {ok,
- Special ++
- lists:flatmap(
- fun(Doc) ->
- from_ddoc(Db, Doc)
- end,
- DDocs
- )}.
-
-get_sort_fields(Opts) ->
- case lists:keyfind(sort, 1, Opts) of
- {sort, Sort} ->
- mango_sort:fields(Sort);
- _ ->
- []
- end.
-
-new(Db, Opts) ->
- Def = get_idx_def(Opts),
- Type = get_idx_type(Opts),
- IdxName = get_idx_name(Def, Opts),
- DDoc = get_idx_ddoc(Def, Opts),
- {ok, #idx{
- dbname = db_to_name(Db),
- ddoc = DDoc,
- name = IdxName,
- type = Type,
- def = Def,
- partitioned = get_idx_partitioned(Opts),
- opts = filter_opts(Opts)
- }}.
-
-validate_new(Idx, Db) ->
- Mod = idx_mod(Idx),
- Mod:validate_new(Idx, Db).
-
-add(DDoc, Idx) ->
- Mod = idx_mod(Idx),
- {ok, NewDDoc1} = Mod:add(DDoc, Idx),
- NewDDoc2 = set_ddoc_partitioned(NewDDoc1, Idx),
- % Round trip through JSON for normalization
- Body = ?JSON_DECODE(?JSON_ENCODE(NewDDoc2#doc.body)),
- {ok, NewDDoc2#doc{body = Body}}.
-
-remove(DDoc, Idx) ->
- Mod = idx_mod(Idx),
- {ok, NewDDoc} = Mod:remove(DDoc, Idx),
- % Round trip through JSON for normalization
- Body = ?JSON_DECODE(?JSON_ENCODE(NewDDoc#doc.body)),
- {ok, NewDDoc#doc{body = Body}}.
-
-delete(Filt, Db, Indexes, DelOpts) ->
- case lists:filter(Filt, Indexes) of
- [Idx] ->
- {ok, DDoc} = mango_util:load_ddoc(Db, mango_idx:ddoc(Idx)),
- {ok, NewDDoc} = mango_idx:remove(DDoc, Idx),
- FinalDDoc =
- case NewDDoc#doc.body of
- {[{<<"language">>, <<"query">>}]} ->
- NewDDoc#doc{deleted = true, body = {[]}};
- _ ->
- NewDDoc
- end,
- case mango_crud:insert(Db, FinalDDoc, DelOpts) of
- {ok, _} ->
- {ok, true};
- Error ->
- {error, Error}
- end;
- [] ->
- {error, not_found}
- end.
-
-from_ddoc(Db, {Props}) ->
- DbName = db_to_name(Db),
- DDoc = proplists:get_value(<<"_id">>, Props),
-
- case proplists:get_value(<<"language">>, Props) of
- <<"query">> -> ok;
- _ -> ?MANGO_ERROR(invalid_query_ddoc_language)
- end,
- IdxMods =
- case dreyfus:available() of
- true ->
- [mango_idx_view, mango_idx_text];
- false ->
- [mango_idx_view]
- end,
- Idxs = lists:flatmap(fun(Mod) -> Mod:from_ddoc({Props}) end, IdxMods),
- lists:map(
- fun(Idx) ->
- Idx#idx{
- dbname = DbName,
- ddoc = DDoc,
- partitioned = get_idx_partitioned(Db, Props)
- }
- end,
- Idxs
- ).
-
-special(Db) ->
- AllDocs = #idx{
- dbname = db_to_name(Db),
- name = <<"_all_docs">>,
- type = <<"special">>,
- def = all_docs,
- opts = []
- },
- % Add one for _update_seq
- [AllDocs].
-
-dbname(#idx{dbname = DbName}) ->
- DbName.
-
-ddoc(#idx{ddoc = DDoc}) ->
- DDoc.
-
-name(#idx{name = Name}) ->
- Name.
-
-type(#idx{type = Type}) ->
- Type.
-
-def(#idx{def = Def}) ->
- Def.
-
-partitioned(#idx{partitioned = Partitioned}) ->
- Partitioned.
-
-opts(#idx{opts = Opts}) ->
- Opts.
-
-to_json(#idx{} = Idx) ->
- Mod = idx_mod(Idx),
- Mod:to_json(Idx).
-
-columns(#idx{} = Idx) ->
- Mod = idx_mod(Idx),
- Mod:columns(Idx).
-
-is_usable(#idx{} = Idx, Selector, SortFields) ->
- Mod = idx_mod(Idx),
- Mod:is_usable(Idx, Selector, SortFields).
-
-start_key(#idx{} = Idx, Ranges) ->
- Mod = idx_mod(Idx),
- Mod:start_key(Ranges).
-
-end_key(#idx{} = Idx, Ranges) ->
- Mod = idx_mod(Idx),
- Mod:end_key(Ranges).
-
-cursor_mod(#idx{type = <<"json">>}) ->
- mango_cursor_view;
-cursor_mod(#idx{def = all_docs, type = <<"special">>}) ->
- mango_cursor_special;
-cursor_mod(#idx{type = <<"text">>}) ->
- case dreyfus:available() of
- true ->
- mango_cursor_text;
- false ->
- ?MANGO_ERROR({index_service_unavailable, <<"text">>})
- end.
-
-idx_mod(#idx{type = <<"json">>}) ->
- mango_idx_view;
-idx_mod(#idx{type = <<"special">>}) ->
- mango_idx_special;
-idx_mod(#idx{type = <<"text">>}) ->
- case dreyfus:available() of
- true ->
- mango_idx_text;
- false ->
- ?MANGO_ERROR({index_service_unavailable, <<"text">>})
- end.
-
-db_to_name(Name) when is_binary(Name) ->
- Name;
-db_to_name(Name) when is_list(Name) ->
- iolist_to_binary(Name);
-db_to_name(Db) ->
- couch_db:name(Db).
-
-get_idx_def(Opts) ->
- case proplists:get_value(def, Opts) of
- undefined ->
- ?MANGO_ERROR(no_index_definition);
- Def ->
- Def
- end.
-
-get_idx_type(Opts) ->
- case proplists:get_value(type, Opts) of
- <<"json">> ->
- <<"json">>;
- <<"text">> ->
- case dreyfus:available() of
- true ->
- <<"text">>;
- false ->
- ?MANGO_ERROR({index_service_unavailable, <<"text">>})
- end;
- %<<"geo">> -> <<"geo">>;
- undefined ->
- <<"json">>;
- BadType ->
- ?MANGO_ERROR({invalid_index_type, BadType})
- end.
-
-get_idx_ddoc(Idx, Opts) ->
- case proplists:get_value(ddoc, Opts) of
- <<"_design/", _Rest/binary>> = Name ->
- Name;
- Name when is_binary(Name) ->
- <<"_design/", Name/binary>>;
- _ ->
- Bin = gen_name(Idx, Opts),
- <<"_design/", Bin/binary>>
- end.
-
-get_idx_name(Idx, Opts) ->
- case proplists:get_value(name, Opts) of
- Name when is_binary(Name) ->
- Name;
- _ ->
- gen_name(Idx, Opts)
- end.
-
-gen_name(Idx, Opts0) ->
- Opts = lists:usort(Opts0),
- TermBin = term_to_binary({Idx, Opts}),
- Sha = crypto:hash(sha, TermBin),
- mango_util:enc_hex(Sha).
-
-get_idx_partitioned(Opts) ->
- case proplists:get_value(partitioned, Opts) of
- B when is_boolean(B) ->
- B;
- db_default ->
- % Default to the partitioned setting on
- % the database.
- undefined
- end.
-
-set_ddoc_partitioned(DDoc, Idx) ->
- % We have to verify that the new index being added
- % to this design document either matches the current
- % ddoc's design options *or* this is a new design doc
- #doc{
- id = DDocId,
- revs = Revs,
- body = {BodyProps}
- } = DDoc,
- OldDOpts = couch_util:get_value(<<"options">>, BodyProps),
- OldOpt =
- case OldDOpts of
- {OldDOptProps} when is_list(OldDOptProps) ->
- couch_util:get_value(<<"partitioned">>, OldDOptProps);
- _ ->
- undefined
- end,
- % If new matches old we're done
- if
- Idx#idx.partitioned == OldOpt ->
- DDoc;
- true ->
- % If we're creating a ddoc then we can set the options
- case Revs == {0, []} of
- true when Idx#idx.partitioned /= undefined ->
- set_ddoc_partitioned_option(DDoc, Idx#idx.partitioned);
- true when Idx#idx.partitioned == undefined ->
- DDoc;
- false ->
- ?MANGO_ERROR({partitioned_option_mismatch, DDocId})
- end
- end.
-
-set_ddoc_partitioned_option(DDoc, Partitioned) ->
- #doc{
- body = {BodyProps}
- } = DDoc,
- NewProps =
- case couch_util:get_value(<<"options">>, BodyProps) of
- {Existing} when is_list(Existing) ->
- Opt = {<<"partitioned">>, Partitioned},
- New = lists:keystore(<<"partitioned">>, 1, Existing, Opt),
- lists:keystore(<<"options">>, 1, BodyProps, {<<"options">>, New});
- undefined ->
- New = {<<"options">>, {[{<<"partitioned">>, Partitioned}]}},
- lists:keystore(<<"options">>, 1, BodyProps, New)
- end,
- DDoc#doc{body = {NewProps}}.
-
-get_idx_partitioned(Db, DDocProps) ->
- Default = fabric_util:is_partitioned(Db),
- case couch_util:get_value(<<"options">>, DDocProps) of
- {DesignOpts} ->
- case couch_util:get_value(<<"partitioned">>, DesignOpts) of
- P when is_boolean(P) ->
- P;
- undefined ->
- Default
- end;
- undefined ->
- Default
- end.
-
-is_opts_partitioned(Opts) ->
- case couch_util:get_value(partition, Opts, <<>>) of
- <<>> ->
- false;
- Partition when is_binary(Partition) ->
- true
- end.
-
-filter_partition_indexes(Indexes, Opts) ->
- PFilt =
- case is_opts_partitioned(Opts) of
- false ->
- fun(#idx{partitioned = P}) -> not P end;
- true ->
- fun(#idx{partitioned = P}) -> P end
- end,
- Filt = fun(Idx) -> type(Idx) == <<"special">> orelse PFilt(Idx) end,
- lists:filter(Filt, Indexes).
-
-filter_opts([]) ->
- [];
-filter_opts([{user_ctx, _} | Rest]) ->
- filter_opts(Rest);
-filter_opts([{ddoc, _} | Rest]) ->
- filter_opts(Rest);
-filter_opts([{name, _} | Rest]) ->
- filter_opts(Rest);
-filter_opts([{type, _} | Rest]) ->
- filter_opts(Rest);
-filter_opts([{w, _} | Rest]) ->
- filter_opts(Rest);
-filter_opts([{partitioned, _} | Rest]) ->
- filter_opts(Rest);
-filter_opts([Opt | Rest]) ->
- [Opt | filter_opts(Rest)].
-
-get_partial_filter_selector(#idx{def = Def}) when Def =:= all_docs; Def =:= undefined ->
- undefined;
-get_partial_filter_selector(#idx{def = {Def}}) ->
- case proplists:get_value(<<"partial_filter_selector">>, Def) of
- undefined -> get_legacy_selector(Def);
- {[]} -> undefined;
- Selector -> Selector
- end.
-
-% Partial filter selectors are supported in text indexes via the selector field.
-% This adds backwards compatibility for existing indexes that might have a selector in them.
-get_legacy_selector(Def) ->
- case proplists:get_value(<<"selector">>, Def) of
- undefined -> undefined;
- {[]} -> undefined;
- Selector -> Selector
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-index(SelectorName, Selector) ->
- {
- idx,
- <<"mango_test_46418cd02081470d93290dc12306ebcb">>,
- <<"_design/57e860dee471f40a2c74ea5b72997b81dda36a24">>,
- <<"Selected">>,
- <<"json">>,
- {[
- {<<"fields">>, {[{<<"location">>, <<"asc">>}]}},
- {SelectorName, {Selector}}
- ]},
- false,
- [{<<"def">>, {[{<<"fields">>, [<<"location">>]}]}}]
- }.
-
-get_partial_filter_all_docs_test() ->
- Idx = #idx{def = all_docs},
- ?assertEqual(undefined, get_partial_filter_selector(Idx)).
-
-get_partial_filter_undefined_def_test() ->
- Idx = #idx{def = undefined},
- ?assertEqual(undefined, get_partial_filter_selector(Idx)).
-
-get_partial_filter_selector_default_test() ->
- Idx = index(<<"partial_filter_selector">>, []),
- ?assertEqual(undefined, get_partial_filter_selector(Idx)).
-
-get_partial_filter_selector_missing_test() ->
- Idx = index(<<"partial_filter_selector">>, []),
- ?assertEqual(undefined, get_partial_filter_selector(Idx)).
-
-get_partial_filter_selector_with_selector_test() ->
- Selector = [{<<"location">>, {[{<<"$gt">>, <<"FRA">>}]}}],
- Idx = index(<<"partial_filter_selector">>, Selector),
- ?assertEqual({Selector}, get_partial_filter_selector(Idx)).
-
-get_partial_filter_selector_with_legacy_selector_test() ->
- Selector = [{<<"location">>, {[{<<"$gt">>, <<"FRA">>}]}}],
- Idx = index(<<"selector">>, Selector),
- ?assertEqual({Selector}, get_partial_filter_selector(Idx)).
-
-get_partial_filter_selector_with_legacy_default_selector_test() ->
- Idx = index(<<"selector">>, []),
- ?assertEqual(undefined, get_partial_filter_selector(Idx)).
-
-get_idx_ddoc_name_only_test() ->
- Opts = [{ddoc, <<"foo">>}],
- ?assertEqual(<<"_design/foo">>, get_idx_ddoc({}, Opts)).
-
-get_idx_ddoc_design_slash_name_test() ->
- Opts = [{ddoc, <<"_design/foo">>}],
- ?assertEqual(<<"_design/foo">>, get_idx_ddoc({}, Opts)).
-
--endif.
diff --git a/src/mango/src/mango_idx.hrl b/src/mango/src/mango_idx.hrl
deleted file mode 100644
index 97259500b..000000000
--- a/src/mango/src/mango_idx.hrl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(idx, {
- dbname,
- ddoc,
- name,
- type,
- def,
- partitioned,
- opts
-}).
diff --git a/src/mango/src/mango_idx_special.erl b/src/mango/src/mango_idx_special.erl
deleted file mode 100644
index 4c4001c80..000000000
--- a/src/mango/src/mango_idx_special.erl
+++ /dev/null
@@ -1,98 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_idx_special).
-
--export([
- validate/1,
- add/2,
- remove/2,
- from_ddoc/1,
- to_json/1,
- columns/1,
- is_usable/3,
- start_key/1,
- end_key/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango_idx.hrl").
-
-validate(_) ->
- erlang:exit(invalid_call).
-
-add(_, _) ->
- erlang:exit(invalid_call).
-
-remove(_, _) ->
- erlang:exit(invalid_call).
-
-from_ddoc(_) ->
- erlang:exit(invalid_call).
-
-to_json(#idx{def = all_docs}) ->
- {[
- {ddoc, null},
- {name, <<"_all_docs">>},
- {type, <<"special">>},
- {def,
- {[
- {<<"fields">>, [
- {[
- {<<"_id">>, <<"asc">>}
- ]}
- ]}
- ]}}
- ]}.
-
-columns(#idx{def = all_docs}) ->
- [<<"_id">>].
-
-is_usable(#idx{def = all_docs}, _Selector, []) ->
- true;
-is_usable(#idx{def = all_docs} = Idx, Selector, SortFields) ->
- Fields = mango_idx_view:indexable_fields(Selector),
- lists:member(<<"_id">>, Fields) and can_use_sort(Idx, SortFields, Selector).
-
-start_key([{'$gt', Key, _, _}]) ->
- case mango_json:special(Key) of
- true ->
- ?MIN_STR;
- false ->
- Key
- end;
-start_key([{'$gte', Key, _, _}]) ->
- false = mango_json:special(Key),
- Key;
-start_key([{'$eq', Key, '$eq', Key}]) ->
- false = mango_json:special(Key),
- Key.
-
-end_key([{_, _, '$lt', Key}]) ->
- case mango_json:special(Key) of
- true ->
- ?MAX_STR;
- false ->
- Key
- end;
-end_key([{_, _, '$lte', Key}]) ->
- false = mango_json:special(Key),
- Key;
-end_key([{'$eq', Key, '$eq', Key}]) ->
- false = mango_json:special(Key),
- Key.
-
-can_use_sort(_Idx, [], _Selector) ->
- true;
-can_use_sort(Idx, SortFields, _Selector) ->
- Cols = columns(Idx),
- lists:prefix(SortFields, Cols).
diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
deleted file mode 100644
index b4a46d688..000000000
--- a/src/mango/src/mango_idx_text.erl
+++ /dev/null
@@ -1,459 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_idx_text).
-
--export([
- validate_new/2,
- validate_fields/1,
- validate_index_def/1,
- add/2,
- remove/2,
- from_ddoc/1,
- to_json/1,
- columns/1,
- is_usable/3,
- get_default_field_options/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
--include("mango_idx.hrl").
-
-validate_new(#idx{} = Idx, Db) ->
- {ok, Def} = do_validate(Idx#idx.def),
- maybe_reject_index_all_req(Def, Db),
- {ok, Idx#idx{def = Def}}.
-
-validate_index_def(IndexInfo) ->
- do_validate(IndexInfo).
-
-add(#doc{body = {Props0}} = DDoc, Idx) ->
- Texts1 =
- case proplists:get_value(<<"indexes">>, Props0) of
- {Texts0} -> Texts0;
- _ -> []
- end,
- NewText = make_text(Idx),
- Texts2 = lists:keystore(element(1, NewText), 1, Texts1, NewText),
- Props1 = lists:keystore(<<"indexes">>, 1, Props0, {<<"indexes">>, {Texts2}}),
- {ok, DDoc#doc{body = {Props1}}}.
-
-remove(#doc{body = {Props0}} = DDoc, Idx) ->
- Texts1 =
- case proplists:get_value(<<"indexes">>, Props0) of
- {Texts0} ->
- Texts0;
- _ ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
- Texts2 = lists:keydelete(Idx#idx.name, 1, Texts1),
- if
- Texts2 /= Texts1 -> ok;
- true -> ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
- Props1 =
- case Texts2 of
- [] ->
- lists:keydelete(<<"indexes">>, 1, Props0);
- _ ->
- lists:keystore(<<"indexes">>, 1, Props0, {<<"indexes">>, {Texts2}})
- end,
- {ok, DDoc#doc{body = {Props1}}}.
-
-from_ddoc({Props}) ->
- case lists:keyfind(<<"indexes">>, 1, Props) of
- {<<"indexes">>, {Texts}} when is_list(Texts) ->
- lists:flatmap(
- fun({Name, {VProps}}) ->
- case validate_ddoc(VProps) of
- invalid_ddoc ->
- [];
- Def ->
- I = #idx{
- type = <<"text">>,
- name = Name,
- def = Def
- },
- [I]
- end
- end,
- Texts
- );
- _ ->
- []
- end.
-
-to_json(Idx) ->
- {[
- {ddoc, Idx#idx.ddoc},
- {name, Idx#idx.name},
- {type, Idx#idx.type},
- {partitioned, Idx#idx.partitioned},
- {def, {def_to_json(Idx#idx.def)}}
- ]}.
-
-columns(Idx) ->
- {Props} = Idx#idx.def,
- {<<"fields">>, Fields} = lists:keyfind(<<"fields">>, 1, Props),
- case Fields of
- <<"all_fields">> ->
- all_fields;
- _ ->
- {DFProps} = couch_util:get_value(<<"default_field">>, Props, {[]}),
- Enabled = couch_util:get_value(<<"enabled">>, DFProps, true),
- Default =
- case Enabled of
- true -> [<<"$default">>];
- false -> []
- end,
- Default ++
- lists:map(
- fun({FProps}) ->
- {_, Name} = lists:keyfind(<<"name">>, 1, FProps),
- {_, Type} = lists:keyfind(<<"type">>, 1, FProps),
- iolist_to_binary([Name, ":", Type])
- end,
- Fields
- )
- end.
-
-is_usable(_, Selector, _) when Selector =:= {[]} ->
- false;
-is_usable(Idx, Selector, _) ->
- case columns(Idx) of
- all_fields ->
- true;
- Cols ->
- Fields = indexable_fields(Selector),
- sets:is_subset(sets:from_list(Fields), sets:from_list(Cols))
- end.
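As a sketch of what columns/1 above produces, assume an index whose #idx.def is the EJSON term below, i.e. a single string field with the default field left enabled; the field name is invented.

    %% With this definition, columns/1 returns
    %% [<<"$default">>, <<"location:string">>], and is_usable/3 then requires
    %% the selector's indexable fields to be a subset of that list (any
    %% selector is accepted when the definition indexes "all_fields").
    Def = {[
        {<<"fields">>, [
            {[{<<"name">>, <<"location">>}, {<<"type">>, <<"string">>}]}
        ]},
        {<<"default_field">>, {[{<<"enabled">>, true}]}}
    ]}.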
-
-do_validate({Props}) ->
- {ok, Opts} = mango_opts:validate(Props, opts()),
- {ok, {Opts}};
-do_validate(Else) ->
- ?MANGO_ERROR({invalid_index_text, Else}).
-
-def_to_json({Props}) ->
- def_to_json(Props);
-def_to_json([]) ->
- [];
-def_to_json([{<<"fields">>, <<"all_fields">>} | Rest]) ->
- [{<<"fields">>, []} | def_to_json(Rest)];
-def_to_json([{fields, Fields} | Rest]) ->
- [{<<"fields">>, fields_to_json(Fields)} | def_to_json(Rest)];
-def_to_json([{<<"fields">>, Fields} | Rest]) ->
- [{<<"fields">>, fields_to_json(Fields)} | def_to_json(Rest)];
-% Don't include partial_filter_selector in the json conversion
-% if it's the default value
-def_to_json([{<<"partial_filter_selector">>, {[]}} | Rest]) ->
- def_to_json(Rest);
-def_to_json([{Key, Value} | Rest]) ->
- [{Key, Value} | def_to_json(Rest)].
-
-fields_to_json([]) ->
- [];
-fields_to_json([{[{<<"name">>, Name}, {<<"type">>, Type0}]} | Rest]) ->
- ok = validate_field_name(Name),
- Type = validate_field_type(Type0),
- [{[{Name, Type}]} | fields_to_json(Rest)];
-fields_to_json([{[{<<"type">>, Type0}, {<<"name">>, Name}]} | Rest]) ->
- ok = validate_field_name(Name),
- Type = validate_field_type(Type0),
- [{[{Name, Type}]} | fields_to_json(Rest)].
-
-%% In the future, we can possibly add more restrictive validation.
-%% For now, let's make sure the field name is not blank.
-validate_field_name(<<"">>) ->
- throw(invalid_field_name);
-validate_field_name(Else) when is_binary(Else) ->
- ok;
-validate_field_name(_) ->
- throw(invalid_field_name).
-
-validate_field_type(<<"string">>) ->
- <<"string">>;
-validate_field_type(<<"number">>) ->
- <<"number">>;
-validate_field_type(<<"boolean">>) ->
- <<"boolean">>.
-
-validate_fields(<<"all_fields">>) ->
- {ok, all_fields};
-validate_fields(Fields) ->
- try fields_to_json(Fields) of
- _ ->
- mango_fields:new(Fields)
- catch
- error:function_clause ->
- ?MANGO_ERROR({invalid_index_fields_definition, Fields});
- throw:invalid_field_name ->
- ?MANGO_ERROR({invalid_index_fields_definition, Fields})
- end.
-
-validate_ddoc(VProps) ->
- try
- Def = proplists:get_value(<<"index">>, VProps),
- validate_index_def(Def),
- Def
- catch
- Error:Reason ->
- couch_log:error(
- "Invalid Index Def ~p: Error. ~p, Reason: ~p",
- [VProps, Error, Reason]
- ),
- invalid_ddoc
- end.
-
-opts() ->
- [
- {<<"default_analyzer">>, [
- {tag, default_analyzer},
- {optional, true},
- {default, <<"keyword">>}
- ]},
- {<<"default_field">>, [
- {tag, default_field},
- {optional, true},
- {default, {[]}}
- ]},
- {<<"partial_filter_selector">>, [
- {tag, partial_filter_selector},
- {optional, true},
- {default, {[]}},
- {validator, fun mango_opts:validate_selector/1}
- ]},
- {<<"selector">>, [
- {tag, selector},
- {optional, true},
- {default, {[]}},
- {validator, fun mango_opts:validate_selector/1}
- ]},
- {<<"fields">>, [
- {tag, fields},
- {optional, true},
- {default, []},
- {validator, fun ?MODULE:validate_fields/1}
- ]},
- {<<"index_array_lengths">>, [
- {tag, index_array_lengths},
- {optional, true},
- {default, true},
- {validator, fun mango_opts:is_boolean/1}
- ]}
- ].
-
-make_text(Idx) ->
- Text =
- {[
- {<<"index">>, Idx#idx.def},
- {<<"analyzer">>, construct_analyzer(Idx#idx.def)}
- ]},
- {Idx#idx.name, Text}.
-
-get_default_field_options(Props) ->
- Default = couch_util:get_value(default_field, Props, {[]}),
- case Default of
- Bool when is_boolean(Bool) ->
- {Bool, <<"standard">>};
- {[]} ->
- {true, <<"standard">>};
- {Opts} ->
- Enabled = couch_util:get_value(<<"enabled">>, Opts, true),
- Analyzer = couch_util:get_value(
- <<"analyzer">>,
- Opts,
- <<"standard">>
- ),
- {Enabled, Analyzer}
- end.
-
-construct_analyzer({Props}) ->
- DefaultAnalyzer = couch_util:get_value(
- default_analyzer,
- Props,
- <<"keyword">>
- ),
- {DefaultField, DefaultFieldAnalyzer} = get_default_field_options(Props),
- DefaultAnalyzerDef =
- case DefaultField of
- true ->
- [{<<"$default">>, DefaultFieldAnalyzer}];
- _ ->
- []
- end,
- case DefaultAnalyzerDef of
- [] ->
- <<"keyword">>;
- _ ->
- {[
- {<<"name">>, <<"perfield">>},
- {<<"default">>, DefaultAnalyzer},
- {<<"fields">>, {DefaultAnalyzerDef}}
- ]}
- end.
-
-indexable_fields(Selector) ->
- TupleTree = mango_selector_text:convert([], Selector),
- indexable_fields([], TupleTree).
-
-indexable_fields(Fields, {op_and, Args}) when is_list(Args) ->
- lists:foldl(
- fun(Arg, Fields0) -> indexable_fields(Fields0, Arg) end,
- Fields,
- Args
- );
-%% For queries that use array element access or $in operations, two
-%% fields get generated by mango_selector_text:convert. At index
-%% definition time, only one field gets defined. In this situation, we
-%% remove the extra generated field so that the index can be used. For
-%% all other situations, we include the fields as normal.
-indexable_fields(
- Fields,
- {op_or, [
- {op_field, Field0},
- {op_field, {[Name | _], _}} = Field1
- ]}
-) ->
- case lists:member(<<"[]">>, Name) of
- true ->
- indexable_fields(Fields, {op_field, Field0});
- false ->
- Fields1 = indexable_fields(Fields, {op_field, Field0}),
- indexable_fields(Fields1, Field1)
- end;
-indexable_fields(Fields, {op_or, Args}) when is_list(Args) ->
- lists:foldl(
- fun(Arg, Fields0) -> indexable_fields(Fields0, Arg) end,
- Fields,
- Args
- );
-indexable_fields(Fields, {op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) ->
- Fields0 = indexable_fields(Fields, ExistsQuery),
- indexable_fields(Fields0, Arg);
-% forces "$exists" : false to use _all_docs
-indexable_fields(_, {op_not, {_, false}}) ->
- [];
-indexable_fields(Fields, {op_insert, Arg}) when is_binary(Arg) ->
- Fields;
-%% fieldname.[]:length is not a user defined field.
-indexable_fields(Fields, {op_field, {[_, <<":length">>], _}}) ->
- Fields;
-indexable_fields(Fields, {op_field, {Name, _}}) ->
- [iolist_to_binary(Name) | Fields];
-%% In this particular case, the Lucene index is doing a field_exists query
-%% so it is looking at all sorts of combinations of field:* and field.*
-%% We don't add the field because we cannot pre-determine what field will exist.
-%% Hence we just return Fields and make it less restrictive.
-indexable_fields(Fields, {op_fieldname, {_, _}}) ->
- Fields;
-%% Similar idea to op_fieldname but with fieldname:null
-indexable_fields(Fields, {op_null, {_, _}}) ->
- Fields;
-indexable_fields(Fields, {op_default, _}) ->
- [<<"$default">> | Fields].
-
-maybe_reject_index_all_req({Def}, Db) ->
- DbName = couch_db:name(Db),
- #user_ctx{name = User} = couch_db:get_user_ctx(Db),
- Fields = couch_util:get_value(fields, Def),
- case {Fields, forbid_index_all()} of
- {all_fields, "true"} ->
- ?MANGO_ERROR(index_all_disabled);
- {all_fields, "warn"} ->
- couch_log:warning(
- "User ~p is indexing all fields in db ~p",
- [User, DbName]
- );
- _ ->
- ok
- end.
-
-forbid_index_all() ->
- config:get("mango", "index_all_disabled", "false").
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- meck:expect(
- couch_log,
- warning,
- 2,
- fun(_, _) ->
- throw({test_error, logged_warning})
- end
- ),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-setup() ->
-    % default index-all def that generates {fields, all_fields}
- Index = #idx{def = {[]}},
- DbName = <<"testdb">>,
- UserCtx = #user_ctx{name = <<"u1">>},
- {ok, Db} = couch_db:clustered_db(DbName, UserCtx),
- {Index, Db}.
-
-teardown(_) ->
- ok.
-
-index_all_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun forbid_index_all/1,
- fun default_and_false_index_all/1,
- fun warn_index_all/1
- ]
- }
- }.
-
-forbid_index_all({Idx, Db}) ->
- ?_test(begin
- ok = config:set("mango", "index_all_disabled", "true", false),
- ?assertThrow(
- {mango_error, ?MODULE, index_all_disabled},
- validate_new(Idx, Db)
- )
- end).
-
-default_and_false_index_all({Idx, Db}) ->
- ?_test(begin
- config:delete("mango", "index_all_disabled", false),
- {ok, #idx{def = {Def}}} = validate_new(Idx, Db),
- Fields = couch_util:get_value(fields, Def),
- ?assertEqual(all_fields, Fields),
- ok = config:set("mango", "index_all_disabled", "false", false),
- {ok, #idx{def = {Def2}}} = validate_new(Idx, Db),
- Fields2 = couch_util:get_value(fields, Def2),
- ?assertEqual(all_fields, Fields2)
- end).
-
-warn_index_all({Idx, Db}) ->
- ?_test(begin
- ok = config:set("mango", "index_all_disabled", "warn", false),
- ?assertThrow({test_error, logged_warning}, validate_new(Idx, Db))
- end).
-
--endif.
diff --git a/src/mango/src/mango_idx_view.erl b/src/mango/src/mango_idx_view.erl
deleted file mode 100644
index ff8f6c6bb..000000000
--- a/src/mango/src/mango_idx_view.erl
+++ /dev/null
@@ -1,523 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_idx_view).
-
--export([
- validate_new/2,
- validate_index_def/1,
- add/2,
- remove/2,
- from_ddoc/1,
- to_json/1,
- is_usable/3,
- columns/1,
- start_key/1,
- end_key/1,
-
- indexable_fields/1,
- field_ranges/1,
- field_ranges/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
--include("mango_idx.hrl").
--include("mango_idx_view.hrl").
-
-validate_new(#idx{} = Idx, _Db) ->
- {ok, Def} = do_validate(Idx#idx.def),
- {ok, Idx#idx{def = Def}}.
-
-validate_index_def(Def) ->
- def_to_json(Def).
-
-add(#doc{body = {Props0}} = DDoc, Idx) ->
- Views1 =
- case proplists:get_value(<<"views">>, Props0) of
- {Views0} -> Views0;
- _ -> []
- end,
- NewView = make_view(Idx),
- Views2 = lists:keystore(element(1, NewView), 1, Views1, NewView),
- Props1 = lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}}),
- {ok, DDoc#doc{body = {Props1}}}.
-
-remove(#doc{body = {Props0}} = DDoc, Idx) ->
- Views1 =
- case proplists:get_value(<<"views">>, Props0) of
- {Views0} ->
- Views0;
- _ ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
- Views2 = lists:keydelete(Idx#idx.name, 1, Views1),
- if
- Views2 /= Views1 -> ok;
- true -> ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
- Props1 =
- case Views2 of
- [] ->
- lists:keydelete(<<"views">>, 1, Props0);
- _ ->
- lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}})
- end,
- {ok, DDoc#doc{body = {Props1}}}.
-
-from_ddoc({Props}) ->
- case lists:keyfind(<<"views">>, 1, Props) of
- {<<"views">>, {Views}} when is_list(Views) ->
- lists:flatmap(
- fun({Name, {VProps}}) ->
- case validate_ddoc(VProps) of
- invalid_view ->
- [];
- {Def, Opts} ->
- I = #idx{
- type = <<"json">>,
- name = Name,
- def = Def,
- opts = Opts
- },
- [I]
- end
- end,
- Views
- );
- _ ->
- []
- end.
-
-to_json(Idx) ->
- {[
- {ddoc, Idx#idx.ddoc},
- {name, Idx#idx.name},
- {type, Idx#idx.type},
- {partitioned, Idx#idx.partitioned},
- {def, {def_to_json(Idx#idx.def)}}
- ]}.
-
-columns(Idx) ->
- {Props} = Idx#idx.def,
- {<<"fields">>, {Fields}} = lists:keyfind(<<"fields">>, 1, Props),
- [Key || {Key, _} <- Fields].
-
-is_usable(Idx, Selector, SortFields) ->
- % This index is usable if all of the columns are
- % restricted by the selector such that they are required to exist
- % and the selector is not a text search (so requires a text index)
- RequiredFields = columns(Idx),
-
- % sort fields are required to exist in the results so
- % we don't need to check the selector for these
- RequiredFields1 = ordsets:subtract(lists:usort(RequiredFields), lists:usort(SortFields)),
-
- % _id and _rev are implicitly in every document so
- % we don't need to check the selector for these either
- RequiredFields2 = ordsets:subtract(
- RequiredFields1,
- [<<"_id">>, <<"_rev">>]
- ),
-
- mango_selector:has_required_fields(Selector, RequiredFields2) andalso
- not is_text_search(Selector) andalso
- can_use_sort(RequiredFields, SortFields, Selector).
-
-is_text_search({[]}) ->
- false;
-is_text_search({[{<<"$default">>, _}]}) ->
- true;
-is_text_search({[{_Field, Cond}]}) when is_list(Cond) ->
- lists:foldl(
- fun(C, Exists) ->
- Exists orelse is_text_search(C)
- end,
- false,
- Cond
- );
-is_text_search({[{_Field, Cond}]}) when is_tuple(Cond) ->
- is_text_search(Cond);
-is_text_search({[{_Field, _Cond}]}) ->
- false;
-%% we reached values, which should always be false
-is_text_search(Val) when
- is_number(Val); is_boolean(Val); is_binary(Val)
-->
- false.
-
-start_key([]) ->
- [];
-start_key([{'$gt', Key, _, _} | Rest]) ->
- case mango_json:special(Key) of
- true ->
- [];
- false ->
- [Key | start_key(Rest)]
- end;
-start_key([{'$gte', Key, _, _} | Rest]) ->
- false = mango_json:special(Key),
- [Key | start_key(Rest)];
-start_key([{'$eq', Key, '$eq', Key} | Rest]) ->
- false = mango_json:special(Key),
- [Key | start_key(Rest)].
-
-end_key([]) ->
- [?MAX_JSON_OBJ];
-end_key([{_, _, '$lt', Key} | Rest]) ->
- case mango_json:special(Key) of
- true ->
- [?MAX_JSON_OBJ];
- false ->
- [Key | end_key(Rest)]
- end;
-end_key([{_, _, '$lte', Key} | Rest]) ->
- false = mango_json:special(Key),
- [Key | end_key(Rest)];
-end_key([{'$eq', Key, '$eq', Key} | Rest]) ->
- false = mango_json:special(Key),
- [Key | end_key(Rest)].
-
-do_validate({Props}) ->
- {ok, Opts} = mango_opts:validate(Props, opts()),
- {ok, {Opts}};
-do_validate(Else) ->
- ?MANGO_ERROR({invalid_index_json, Else}).
-
-def_to_json({Props}) ->
- def_to_json(Props);
-def_to_json([]) ->
- [];
-def_to_json([{fields, Fields} | Rest]) ->
- [{<<"fields">>, mango_sort:to_json(Fields)} | def_to_json(Rest)];
-def_to_json([{<<"fields">>, Fields} | Rest]) ->
- [{<<"fields">>, mango_sort:to_json(Fields)} | def_to_json(Rest)];
-% Don't include partial_filter_selector in the json conversion
-% if it's the default value
-def_to_json([{<<"partial_filter_selector">>, {[]}} | Rest]) ->
- def_to_json(Rest);
-def_to_json([{Key, Value} | Rest]) ->
- [{Key, Value} | def_to_json(Rest)].
-
-opts() ->
- [
- {<<"fields">>, [
- {tag, fields},
- {validator, fun mango_opts:validate_sort/1}
- ]},
- {<<"partial_filter_selector">>, [
- {tag, partial_filter_selector},
- {optional, true},
- {default, {[]}},
- {validator, fun mango_opts:validate_selector/1}
- ]}
- ].
-
-make_view(Idx) ->
- View =
- {[
- {<<"map">>, Idx#idx.def},
- {<<"reduce">>, <<"_count">>},
- {<<"options">>, {Idx#idx.opts}}
- ]},
- {Idx#idx.name, View}.
-
-validate_ddoc(VProps) ->
- try
- Def = proplists:get_value(<<"map">>, VProps),
- validate_index_def(Def),
- {Opts0} = proplists:get_value(<<"options">>, VProps),
- Opts = lists:keydelete(<<"sort">>, 1, Opts0),
- {Def, Opts}
- catch
- Error:Reason ->
- couch_log:error(
- "Invalid Index Def ~p. Error: ~p, Reason: ~p",
- [VProps, Error, Reason]
- ),
- invalid_view
- end.
-
-% This function returns a list of fields that
-% can be used to restrict this query. This works by
-% searching the selector looking for field names that
-% can be "seen".
-%
-% Operators that can be seen through are '$and' and any of
-% the logical comparisons ('$lt', '$eq', etc). Things like
-% '$regex', '$in', '$nin', and '$or' can't be serviced by
-% a single index scan so we disallow them. In the future
-% we may become more clever and increase our ken such that
-% we will be able to see through these with crafty indexes
-% or new uses for existing indexes. For instance, I could
-% see an '$or' between comparisons on the same field becoming
-% the equivalent of a multi-query. But that's for another
-% day.
-
-% We can see through '$and' trivially
-indexable_fields({[{<<"$and">>, Args}]}) ->
- lists:usort(lists:flatten([indexable_fields(A) || A <- Args]));
-% So far we can't see through any other operator
-indexable_fields({[{<<"$", _/binary>>, _}]}) ->
- [];
-% If we have a field with a terminator that is locatable
-% using an index then the field is a possible index
-indexable_fields({[{Field, Cond}]}) ->
- case indexable(Cond) of
- true ->
- [Field];
- false ->
- []
- end;
-% An empty selector
-indexable_fields({[]}) ->
- [].
-
-% Check if a condition is indexable. The logical
-% comparisons are mostly straightforward. We
-% currently don't understand '$in' which is
-% theoretically supportable. '$nin' and '$ne'
-% aren't currently supported because they require
-% multiple index scans.
-indexable({[{<<"$lt">>, _}]}) ->
- true;
-indexable({[{<<"$lte">>, _}]}) ->
- true;
-indexable({[{<<"$eq">>, _}]}) ->
- true;
-indexable({[{<<"$gt">>, _}]}) ->
- true;
-indexable({[{<<"$gte">>, _}]}) ->
- true;
-% All other operators are currently not indexable.
-% This is also a subtle assertion that we don't
-% call indexable/1 on a field name.
-indexable({[{<<"$", _/binary>>, _}]}) ->
- false.
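A small worked example of the two functions above on an invented selector, assuming it is already in the normalized form the comments describe: '$and' is transparent, '$gt' and '$eq' are indexable, and '$regex' is not, so only two fields survive.

    Selector = {[{<<"$and">>, [
        {[{<<"age">>, {[{<<"$gt">>, 21}]}}]},
        {[{<<"name">>, {[{<<"$eq">>, <<"ann">>}]}}]},
        {[{<<"bio">>, {[{<<"$regex">>, <<"^a">>}]}}]}
    ]}]},
    [<<"age">>, <<"name">>] = mango_idx_view:indexable_fields(Selector).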
-
-% For each field, return {Field, Range}
-field_ranges(Selector) ->
- Fields = indexable_fields(Selector),
- field_ranges(Selector, Fields).
-
-field_ranges(Selector, Fields) ->
- field_ranges(Selector, Fields, []).
-
-field_ranges(_Selector, [], Acc) ->
- lists:reverse(Acc);
-field_ranges(Selector, [Field | Rest], Acc) ->
- case range(Selector, Field) of
- empty ->
- [{Field, empty}];
- Range ->
- field_ranges(Selector, Rest, [{Field, Range} | Acc])
- end.
-
-% Find the complete range for a given index in this
-% selector. This works by AND'ing logical comparisons
-% together so that we can define the start and end
-% keys for a given index.
-%
-% Selector must have been normalized before calling
-% this function.
-range(Selector, Index) ->
- range(Selector, Index, '$gt', mango_json:min(), '$lt', mango_json:max()).
-
-% Adjust Low and High based on values found for the
-% given Index in Selector.
-range({[{<<"$and">>, Args}]}, Index, LCmp, Low, HCmp, High) ->
- lists:foldl(
- fun
- (Arg, {LC, L, HC, H}) ->
- range(Arg, Index, LC, L, HC, H);
- (_Arg, empty) ->
- empty
- end,
- {LCmp, Low, HCmp, High},
- Args
- );
-% We can currently only traverse '$and' operators
-range({[{<<"$", _/binary>>}]}, _Index, LCmp, Low, HCmp, High) ->
- {LCmp, Low, HCmp, High};
-% If the field name matches the index see if we can narrow
-% the acceptable range.
-range({[{Index, Cond}]}, Index, LCmp, Low, HCmp, High) ->
- range(Cond, LCmp, Low, HCmp, High);
-% Else we have a field unrelated to this index so just
-% return the current values.
-range(_, _, LCmp, Low, HCmp, High) ->
- {LCmp, Low, HCmp, High}.
-
-% The comments below are a bit cryptic at first, but they show
-% where the Arg can land in the current range.
-%
-% For instance, given:
-%
-% {$lt: N}
-% Low = 1
-% High = 5
-%
-% Depending on the value of N we can have one of five locations
-% in regards to a given Low/High pair:
-%
-% min low mid high max
-%
-% That is:
-% min = (N < Low)
-% low = (N == Low)
-% mid = (Low < N < High)
-% high = (N == High)
-% max = (High < N)
-%
-% If N < 1, (min) then the effective range is empty.
-%
-% If N == 1, (low) then we have to set the range to empty because
-% N < 1 && N >= 1 is an empty set. If the operator had been '$lte'
-% and LCmp was '$gte' or '$eq' then we could keep around the equality
-% check on Arg by setting LCmp == HCmp = '$eq' and Low == High == Arg.
-%
-% If 1 < N < 5 (mid), then we set High to Arg and Arg has just
-% narrowed our range. HCmp is set to the '$lt' operator that was
-% part of the input.
-%
-% If N == 5 (high), we just set HCmp to '$lt' since it's guaranteed
-% to be equally or more restrictive than the current possible values
-% of '$lt' or '$lte'.
-%
-% If N > 5 (max), nothing changes as our current range is already
-% more narrow than the current condition.
-%
-% Obviously all of that logic gets tweaked for the other logical
-% operators, but it's all straightforward once you see how
-% we're basically just narrowing our logical ranges.
-
-range({[{<<"$lt">>, Arg}]}, LCmp, Low, HCmp, High) ->
- case range_pos(Low, Arg, High) of
- min ->
- empty;
- low ->
- empty;
- mid ->
- {LCmp, Low, '$lt', Arg};
- high ->
- {LCmp, Low, '$lt', Arg};
- max ->
- {LCmp, Low, HCmp, High}
- end;
-range({[{<<"$lte">>, Arg}]}, LCmp, Low, HCmp, High) ->
- case range_pos(Low, Arg, High) of
- min ->
- empty;
- low when LCmp == '$gte'; LCmp == '$eq' ->
- {'$eq', Arg, '$eq', Arg};
- low ->
- empty;
- mid ->
- {LCmp, Low, '$lte', Arg};
- high ->
- {LCmp, Low, HCmp, High};
- max ->
- {LCmp, Low, HCmp, High}
- end;
-range({[{<<"$eq">>, Arg}]}, LCmp, Low, HCmp, High) ->
- case range_pos(Low, Arg, High) of
- min ->
- empty;
- low when LCmp == '$gte'; LCmp == '$eq' ->
- {'$eq', Arg, '$eq', Arg};
- low ->
- empty;
- mid ->
- {'$eq', Arg, '$eq', Arg};
- high when HCmp == '$lte'; HCmp == '$eq' ->
- {'$eq', Arg, '$eq', Arg};
- high ->
- empty;
- max ->
- empty
- end;
-range({[{<<"$gte">>, Arg}]}, LCmp, Low, HCmp, High) ->
- case range_pos(Low, Arg, High) of
- min ->
- {LCmp, Low, HCmp, High};
- low ->
- {LCmp, Low, HCmp, High};
- mid ->
- {'$gte', Arg, HCmp, High};
- high when HCmp == '$lte'; HCmp == '$eq' ->
- {'$eq', Arg, '$eq', Arg};
- high ->
- empty;
- max ->
- empty
- end;
-range({[{<<"$gt">>, Arg}]}, LCmp, Low, HCmp, High) ->
- case range_pos(Low, Arg, High) of
- min ->
- {LCmp, Low, HCmp, High};
- low ->
- {'$gt', Arg, HCmp, High};
- mid ->
- {'$gt', Arg, HCmp, High};
- high ->
- empty;
- max ->
- empty
- end;
-% There's some other un-indexable restriction on the index
-% that will be applied as a post-filter. Ignore it and
-% carry on our merry way.
-range({[{<<"$", _/binary>>, _}]}, LCmp, Low, HCmp, High) ->
- {LCmp, Low, HCmp, High}.
-
-% Returns the value min | low | mid | high | max depending
-% on how Arg compares to Low and High.
-range_pos(Low, Arg, High) ->
- case mango_json:cmp(Arg, Low) of
- N when N < 0 -> min;
- N when N == 0 -> low;
- _ ->
- case mango_json:cmp(Arg, High) of
- X when X < 0 ->
- mid;
- X when X == 0 ->
- high;
- _ ->
- max
- end
- end.
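To ground the narrowing rules spelled out above, a sketch of field_ranges/1 on an invented selector: the two bounds on "age" fold into a single range, while "name" keeps the open upper sentinel.

    Selector = {[{<<"$and">>, [
        {[{<<"age">>, {[{<<"$gte">>, 21}]}}]},
        {[{<<"age">>, {[{<<"$lt">>, 65}]}}]},
        {[{<<"name">>, {[{<<"$gt">>, <<"m">>}]}}]}
    ]}]},
    %% Roughly:
    %% [{<<"age">>, {'$gte', 21, '$lt', 65}},
    %%  {<<"name">>, {'$gt', <<"m">>, '$lt', mango_json:max()}}]
    Ranges = mango_idx_view:field_ranges(Selector).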
-
-% Can_use_sort works as follows:
-%
-% * If there are no sort fields, we can use this index
-% * If we run out of index columns, we can't use this index
-% * If the current column is the start of the sort, return whether the sort is a prefix of the columns
-% * If the current column is constant, drop it and continue, else return false
-%
-% A constant column is one that won't affect the sort,
-% for example {"A": {"$eq": 21}} (see the worked sketch after this function)
-%
-% Currently we only look at constant fields that are prefixes to the sort fields
-% set by the user. We considered adding in constant fields after sort fields
-% but were not 100% sure that it would not affect the sorting of the query.
-
-can_use_sort(_Cols, [], _Selector) ->
- true;
-can_use_sort([], _SortFields, _Selector) ->
- false;
-can_use_sort([Col | _] = Cols, [Col | _] = SortFields, _Selector) ->
- lists:prefix(SortFields, Cols);
-can_use_sort([Col | RestCols], SortFields, Selector) ->
- case mango_selector:is_constant_field(Selector, Col) of
- true -> can_use_sort(RestCols, SortFields, Selector);
- false -> false
- end.
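A worked instance of the constant-prefix rule: can_use_sort/3 is not exported, so this trace is purely illustrative, and it assumes mango_selector:is_constant_field/2 recognizes the '$eq' condition below on a normalized selector; the column and field names are invented.

    %% Index columns, requested sort, and a selector that pins the first column:
    Cols = [<<"company">>, <<"hired">>, <<"name">>],
    SortFields = [<<"hired">>, <<"name">>],
    Selector = {[{<<"company">>, {[{<<"$eq">>, <<"acme">>}]}}]}.
    %% can_use_sort(Cols, SortFields, Selector) would drop <<"company">>
    %% (constant under the selector), find <<"hired">> at the head of both the
    %% remaining columns and the sort, and lists:prefix(SortFields,
    %% [<<"hired">>, <<"name">>]) is true, so the index can serve this sort.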
diff --git a/src/mango/src/mango_idx_view.hrl b/src/mango/src/mango_idx_view.hrl
deleted file mode 100644
index 0d213e56e..000000000
--- a/src/mango/src/mango_idx_view.hrl
+++ /dev/null
@@ -1,13 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(MAX_JSON_OBJ, {<<255, 255, 255, 255>>}).
\ No newline at end of file
diff --git a/src/mango/src/mango_json.erl b/src/mango/src/mango_json.erl
deleted file mode 100644
index ca18d8898..000000000
--- a/src/mango/src/mango_json.erl
+++ /dev/null
@@ -1,112 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_json).
-
--export([
- min/0,
- max/0,
- cmp/2,
- cmp_raw/2,
- type/1,
- special/1,
- to_binary/1
-]).
-
--define(MIN_VAL, mango_json_min).
--define(MAX_VAL, mango_json_max).
-
-min() ->
- ?MIN_VAL.
-
-max() ->
- ?MAX_VAL.
-
-cmp(?MIN_VAL, ?MIN_VAL) ->
- 0;
-cmp(?MIN_VAL, _) ->
- -1;
-cmp(_, ?MIN_VAL) ->
- 1;
-cmp(?MAX_VAL, ?MAX_VAL) ->
- 0;
-cmp(?MAX_VAL, _) ->
- 1;
-cmp(_, ?MAX_VAL) ->
- -1;
-cmp(A, B) ->
- couch_ejson_compare:less(A, B).
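These sentinels exist so that open-ended range endpoints compare cleanly against any EJSON value; a quick shell-style sketch of the ordering defined above.

    -1 = mango_json:cmp(mango_json:min(), null),
    1  = mango_json:cmp(mango_json:max(), {[{<<"a">>, 1}]}),
    0  = mango_json:cmp(mango_json:max(), mango_json:max()).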
-
-cmp_raw(?MIN_VAL, ?MIN_VAL) ->
- 0;
-cmp_raw(?MIN_VAL, _) ->
- -1;
-cmp_raw(_, ?MIN_VAL) ->
- 1;
-cmp_raw(?MAX_VAL, ?MAX_VAL) ->
- 0;
-cmp_raw(?MAX_VAL, _) ->
- 1;
-cmp_raw(_, ?MAX_VAL) ->
- -1;
-cmp_raw(A, B) ->
- case A < B of
- true ->
- -1;
- false ->
- case A > B of
- true ->
- 1;
- false ->
- 0
- end
- end.
-
-type(null) ->
- <<"null">>;
-type(Bool) when is_boolean(Bool) ->
- <<"boolean">>;
-type(Num) when is_number(Num) ->
- <<"number">>;
-type(Str) when is_binary(Str) ->
- <<"string">>;
-type({Props}) when is_list(Props) ->
- <<"object">>;
-type(Vals) when is_list(Vals) ->
- <<"array">>.
-
-special(?MIN_VAL) ->
- true;
-special(?MAX_VAL) ->
- true;
-special(_) ->
- false.
-
-to_binary({Props}) ->
- Pred = fun({Key, Value}) ->
- {to_binary(Key), to_binary(Value)}
- end,
- {lists:map(Pred, Props)};
-to_binary(Data) when is_list(Data) ->
- [to_binary(D) || D <- Data];
-to_binary(null) ->
- null;
-to_binary(true) ->
- true;
-to_binary(false) ->
- false;
-to_binary(Data) when is_atom(Data) ->
- list_to_binary(atom_to_list(Data));
-to_binary(Data) when is_number(Data) ->
- Data;
-to_binary(Data) when is_binary(Data) ->
- Data.
diff --git a/src/mango/src/mango_json_bookmark.erl b/src/mango/src/mango_json_bookmark.erl
deleted file mode 100644
index 8446e0c8a..000000000
--- a/src/mango/src/mango_json_bookmark.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_json_bookmark).
-
--export([
- update_args/2,
- create/1
-]).
-
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include("mango_cursor.hrl").
--include("mango.hrl").
-
-update_args(EncodedBookmark, #mrargs{skip = Skip} = Args) ->
- Bookmark = unpack(EncodedBookmark),
- case is_list(Bookmark) of
- true ->
- {startkey, Startkey} = lists:keyfind(startkey, 1, Bookmark),
- {startkey_docid, StartkeyDocId} = lists:keyfind(startkey_docid, 1, Bookmark),
- Args#mrargs{
- start_key = Startkey,
- start_key_docid = StartkeyDocId,
- skip = 1 + Skip
- };
- false ->
- Args
- end.
-
-create(#cursor{bookmark_docid = BookmarkDocId, bookmark_key = BookmarkKey}) when
- BookmarkKey =/= undefined
-->
- QueryArgs = [
- {startkey_docid, BookmarkDocId},
- {startkey, BookmarkKey}
- ],
- Bin = term_to_binary(QueryArgs, [compressed, {minor_version, 1}]),
- couch_util:encodeBase64Url(Bin);
-create(#cursor{bookmark = Bookmark}) ->
- Bookmark.
-
-unpack(nil) ->
- nil;
-unpack(Packed) ->
- try
- Bookmark = binary_to_term(couch_util:decodeBase64Url(Packed), [safe]),
- verify(Bookmark)
- catch
- _:_ ->
- ?MANGO_ERROR({invalid_bookmark, Packed})
- end.
-
-verify(Bookmark) when is_list(Bookmark) ->
- case
- lists:keymember(startkey, 1, Bookmark) andalso lists:keymember(startkey_docid, 1, Bookmark)
- of
- true -> Bookmark;
- _ -> throw(invalid_bookmark)
- end;
-verify(_Bookmark) ->
- throw(invalid_bookmark).
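
For context, a sketch of the round trip: create/1 packs the bookmark key and doc id with term_to_binary and base64url-encodes them, and update_args/2 unpacks that into start_key / start_key_docid while bumping skip by one so the bookmarked row itself is not returned again. The record values are made up for illustration, and the hypothetical module is assumed to be compiled alongside the mango application so the includes resolve.

-module(mango_bookmark_sketch).   %% hypothetical, for illustration only
-export([roundtrip/0]).

-include_lib("couch_mrview/include/couch_mrview.hrl").
-include("mango_cursor.hrl").

roundtrip() ->
    Cursor = #cursor{bookmark_key = [<<"age">>, 21], bookmark_docid = <<"doc-1">>},
    Encoded = mango_json_bookmark:create(Cursor),
    %% Feeding the encoded bookmark back in moves the start key forward and
    %% skips the row the bookmark points at.
    Args = mango_json_bookmark:update_args(Encoded, #mrargs{skip = 0}),
    {[<<"age">>, 21], <<"doc-1">>, 1} =
        {Args#mrargs.start_key, Args#mrargs.start_key_docid, Args#mrargs.skip},
    ok.
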
diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl
deleted file mode 100644
index d3d200517..000000000
--- a/src/mango/src/mango_native_proc.erl
+++ /dev/null
@@ -1,346 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_native_proc).
--behavior(gen_server).
-
--include("mango_idx.hrl").
-
--export([
- start_link/0,
- set_timeout/2,
- prompt/2
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--record(st, {
- indexes = [],
- timeout = 5000
-}).
-
--record(tacc, {
- index_array_lengths = true,
- fields = all_fields,
- path = []
-}).
-
-start_link() ->
- gen_server:start_link(?MODULE, [], []).
-
-set_timeout(Pid, TimeOut) when is_integer(TimeOut), TimeOut > 0 ->
- gen_server:call(Pid, {set_timeout, TimeOut}).
-
-prompt(Pid, Data) ->
- gen_server:call(Pid, {prompt, Data}).
-
-init(_) ->
- {ok, #st{}}.
-
-terminate(_Reason, _St) ->
- ok.
-
-handle_call({set_timeout, TimeOut}, _From, St) ->
- {reply, ok, St#st{timeout = TimeOut}};
-handle_call({prompt, [<<"reset">>]}, _From, St) ->
- {reply, true, St#st{indexes = []}};
-handle_call({prompt, [<<"reset">>, _QueryConfig]}, _From, St) ->
- {reply, true, St#st{indexes = []}};
-handle_call({prompt, [<<"add_fun">>, IndexInfo]}, _From, St) ->
- Indexes =
- case validate_index_info(IndexInfo) of
- true ->
- St#st.indexes ++ [IndexInfo];
- false ->
- couch_log:error("No Valid Indexes For: ~p", [IndexInfo]),

- St#st.indexes
- end,
- NewSt = St#st{indexes = Indexes},
- {reply, true, NewSt};
-handle_call({prompt, [<<"map_doc">>, Doc]}, _From, St) ->
- {reply, map_doc(St, mango_json:to_binary(Doc)), St};
-handle_call({prompt, [<<"reduce">>, RedSrcs, _]}, _From, St) ->
- {reply, [true, [null || _ <- RedSrcs]], St};
-handle_call({prompt, [<<"rereduce">>, RedSrcs, _]}, _From, St) ->
- {reply, [true, [null || _ <- RedSrcs]], St};
-handle_call({prompt, [<<"index_doc">>, Doc]}, _From, St) ->
- Vals =
- case index_doc(St, mango_json:to_binary(Doc)) of
- [] ->
- [[]];
- Else ->
- Else
- end,
- {reply, Vals, St};
-handle_call(Msg, _From, St) ->
- {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-handle_cast(garbage_collect, St) ->
- erlang:garbage_collect(),
- {noreply, St};
-handle_cast(stop, St) ->
- {stop, normal, St};
-handle_cast(Msg, St) ->
- {stop, {invalid_cast, Msg}, St}.
-
-handle_info(Msg, St) ->
- {stop, {invalid_info, Msg}, St}.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-map_doc(#st{indexes = Indexes}, Doc) ->
- lists:map(fun(Idx) -> get_index_entries(Idx, Doc) end, Indexes).
-
-index_doc(#st{indexes = Indexes}, Doc) ->
- lists:map(fun(Idx) -> get_text_entries(Idx, Doc) end, Indexes).
-
-get_index_entries({IdxProps}, Doc) ->
- {Fields} = couch_util:get_value(<<"fields">>, IdxProps),
- Selector = get_index_partial_filter_selector(IdxProps),
- case should_index(Selector, Doc) of
- false ->
- [];
- true ->
- Values = get_index_values(Fields, Doc),
- case lists:member(not_found, Values) of
- true -> [];
- false -> [[Values, null]]
- end
- end.
-
-get_index_values(Fields, Doc) ->
- lists:map(
- fun({Field, _Dir}) ->
- case mango_doc:get_field(Doc, Field) of
- not_found -> not_found;
- bad_path -> not_found;
- Value -> Value
- end
- end,
- Fields
- ).
-
-get_text_entries({IdxProps}, Doc) ->
- Selector = get_index_partial_filter_selector(IdxProps),
- case should_index(Selector, Doc) of
- true ->
- get_text_entries0(IdxProps, Doc);
- false ->
- []
- end.
-
-get_index_partial_filter_selector(IdxProps) ->
- case couch_util:get_value(<<"partial_filter_selector">>, IdxProps, {[]}) of
- {[]} ->
- % this is to support legacy text indexes that had the partial_filter_selector
- % set as selector
- couch_util:get_value(<<"selector">>, IdxProps, {[]});
- Else ->
- Else
- end.
-
-get_text_entries0(IdxProps, Doc) ->
- DefaultEnabled = get_default_enabled(IdxProps),
- IndexArrayLengths = get_index_array_lengths(IdxProps),
- FieldsList = get_text_field_list(IdxProps),
- TAcc = #tacc{
- index_array_lengths = IndexArrayLengths,
- fields = FieldsList
- },
- Fields0 = get_text_field_values(Doc, TAcc),
- Fields =
- if
- not DefaultEnabled -> Fields0;
- true -> add_default_text_field(Fields0)
- end,
- FieldNames = get_field_names(Fields),
- Converted = convert_text_fields(Fields),
- FieldNames ++ Converted.
-
-get_text_field_values({Props}, TAcc) when is_list(Props) ->
- get_text_field_values_obj(Props, TAcc, []);
-get_text_field_values(Values, TAcc) when is_list(Values) ->
- IndexArrayLengths = TAcc#tacc.index_array_lengths,
- NewPath = ["[]" | TAcc#tacc.path],
- NewTAcc = TAcc#tacc{path = NewPath},
- case IndexArrayLengths of
- true ->
- % We bypass make_text_field and directly call make_text_field_name
- % because the length field name is not part of the path.
- LengthFieldName = make_text_field_name(NewTAcc#tacc.path, <<"length">>),
- LengthField = [{LengthFieldName, <<"length">>, length(Values)}],
- get_text_field_values_arr(Values, NewTAcc, LengthField);
- _ ->
- get_text_field_values_arr(Values, NewTAcc, [])
- end;
-get_text_field_values(Bin, TAcc) when is_binary(Bin) ->
- make_text_field(TAcc, <<"string">>, Bin);
-get_text_field_values(Num, TAcc) when is_number(Num) ->
- make_text_field(TAcc, <<"number">>, Num);
-get_text_field_values(Bool, TAcc) when is_boolean(Bool) ->
- make_text_field(TAcc, <<"boolean">>, Bool);
-get_text_field_values(null, TAcc) ->
- make_text_field(TAcc, <<"null">>, true).
-
-get_text_field_values_obj([], _, FAcc) ->
- FAcc;
-get_text_field_values_obj([{Key, Val} | Rest], TAcc, FAcc) ->
- NewPath = [Key | TAcc#tacc.path],
- NewTAcc = TAcc#tacc{path = NewPath},
- Fields = get_text_field_values(Val, NewTAcc),
- get_text_field_values_obj(Rest, TAcc, Fields ++ FAcc).
-
-get_text_field_values_arr([], _, FAcc) ->
- FAcc;
-get_text_field_values_arr([Value | Rest], TAcc, FAcc) ->
- Fields = get_text_field_values(Value, TAcc),
- get_text_field_values_arr(Rest, TAcc, Fields ++ FAcc).
-
-get_default_enabled(Props) ->
- case couch_util:get_value(<<"default_field">>, Props, {[]}) of
- Bool when is_boolean(Bool) ->
- Bool;
- {[]} ->
- true;
- {Opts} ->
- couch_util:get_value(<<"enabled">>, Opts, true)
- end.
-
-get_index_array_lengths(Props) ->
- couch_util:get_value(<<"index_array_lengths">>, Props, true).
-
-add_default_text_field(Fields) ->
- DefaultFields = add_default_text_field(Fields, []),
- DefaultFields ++ Fields.
-
-add_default_text_field([], Acc) ->
- Acc;
-add_default_text_field([{_Name, <<"string">>, Value} | Rest], Acc) ->
- NewAcc = [{<<"$default">>, <<"string">>, Value} | Acc],
- add_default_text_field(Rest, NewAcc);
-add_default_text_field([_ | Rest], Acc) ->
- add_default_text_field(Rest, Acc).
-
-%% index of all field names
-get_field_names(Fields) ->
- FieldNameSet = lists:foldl(
- fun({Name, _, _}, Set) ->
- gb_sets:add([<<"$fieldnames">>, Name, []], Set)
- end,
- gb_sets:new(),
- Fields
- ),
- gb_sets:to_list(FieldNameSet).
-
-convert_text_fields([]) ->
- [];
-convert_text_fields([{Name, _Type, Value} | Rest]) ->
- [[Name, Value, []] | convert_text_fields(Rest)].
-
-should_index(Selector, Doc) ->
- % Normalize the selector, match it against the doc, and never index design docs.
- NormSelector = mango_selector:normalize(Selector),
- Matches = mango_selector:match(NormSelector, Doc),
- IsDesign =
- case mango_doc:get_field(Doc, <<"_id">>) of
- <<"_design/", _/binary>> -> true;
- _ -> false
- end,
- Matches and not IsDesign.
-
-get_text_field_list(IdxProps) ->
- case couch_util:get_value(<<"fields">>, IdxProps) of
- Fields when is_list(Fields) ->
- RawList = lists:flatmap(fun get_text_field_info/1, Fields),
- [mango_util:lucene_escape_user(Field) || Field <- RawList];
- _ ->
- all_fields
- end.
-
-get_text_field_info({Props}) ->
- Name = couch_util:get_value(<<"name">>, Props),
- Type0 = couch_util:get_value(<<"type">>, Props),
- if
- not is_binary(Name) ->
- [];
- true ->
- Type = get_text_field_type(Type0),
- [iolist_to_binary([Name, ":", Type])]
- end.
-
-get_text_field_type(<<"number">>) ->
- <<"number">>;
-get_text_field_type(<<"boolean">>) ->
- <<"boolean">>;
-get_text_field_type(_) ->
- <<"string">>.
-
-make_text_field(TAcc, Type, Value) ->
- FieldName = make_text_field_name(TAcc#tacc.path, Type),
- Fields = TAcc#tacc.fields,
- case Fields == all_fields orelse lists:member(FieldName, Fields) of
- true ->
- [{FieldName, Type, Value}];
- false ->
- []
- end.
-
-make_text_field_name([P | Rest], Type) ->
- Parts = lists:reverse(Rest, [iolist_to_binary([P, ":", Type])]),
- Escaped = [mango_util:lucene_escape_field(N) || N <- Parts],
- iolist_to_binary(mango_util:join(".", Escaped)).
-
-validate_index_info(IndexInfo) ->
- IdxTypes =
- case dreyfus:available() of
- true ->
- [mango_idx_view, mango_idx_text];
- false ->
- [mango_idx_view]
- end,
- Results = lists:foldl(
- fun(IdxType, Results0) ->
- try
- IdxType:validate_index_def(IndexInfo),
- [valid_index | Results0]
- catch
- _:_ ->
- [invalid_index | Results0]
- end
- end,
- [],
- IdxTypes
- ),
- lists:member(valid_index, Results).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-handle_garbage_collect_cast_test() ->
- ?assertEqual({noreply, []}, handle_cast(garbage_collect, [])).
-
-handle_stop_cast_test() ->
- ?assertEqual({stop, normal, []}, handle_cast(stop, [])).
-
-handle_invalid_cast_test() ->
- ?assertEqual({stop, {invalid_cast, random}, []}, handle_cast(random, [])).
-
--endif.
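
The gen_server above is the native "query server" used for Mango indexes, driven by the same prompt protocol as the JavaScript view server. A rough sketch of that conversation, with the index definition and document left as caller-supplied parameters since their exact EJSON shape depends on the index type (json vs. text):

-module(mango_native_proc_sketch).   %% hypothetical, for illustration only
-export([map_with_index/2]).

%% IndexInfo is an EJSON index definition and Doc is an EJSON document;
%% both are assumptions of the caller.
map_with_index(IndexInfo, Doc) ->
    {ok, Pid} = mango_native_proc:start_link(),
    ok = mango_native_proc:set_timeout(Pid, 5000),
    %% "reset" clears any registered indexes, "add_fun" registers one.
    true = mango_native_proc:prompt(Pid, [<<"reset">>]),
    true = mango_native_proc:prompt(Pid, [<<"add_fun">>, IndexInfo]),
    %% "map_doc" returns one list of [Keys, Value] rows per registered index.
    mango_native_proc:prompt(Pid, [<<"map_doc">>, Doc]).
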
diff --git a/src/mango/src/mango_opts.erl b/src/mango/src/mango_opts.erl
deleted file mode 100644
index 96aa0eb42..000000000
--- a/src/mango/src/mango_opts.erl
+++ /dev/null
@@ -1,360 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_opts).
-
--export([
- validate_idx_create/1,
- validate_find/1
-]).
-
--export([
- validate/2,
-
- is_string/1,
- is_boolean/1,
- is_pos_integer/1,
- is_non_neg_integer/1,
- is_object/1,
- is_ok_or_false/1,
-
- validate_non_empty_string/1,
- validate_selector/1,
- validate_use_index/1,
- validate_bookmark/1,
- validate_sort/1,
- validate_fields/1,
- validate_bulk_delete/1,
- validate_partitioned/1,
-
- default_limit/0
-]).
-
--include("mango.hrl").
-
-validate_idx_create({Props}) ->
- Opts = [
- {<<"index">>, [
- {tag, def}
- ]},
- {<<"type">>, [
- {tag, type},
- {optional, true},
- {default, <<"json">>},
- {validator, fun is_string/1}
- ]},
- {<<"name">>, [
- {tag, name},
- {optional, true},
- {default, auto_name},
- {validator, fun validate_non_empty_string/1}
- ]},
- {<<"ddoc">>, [
- {tag, ddoc},
- {optional, true},
- {default, auto_name},
- {validator, fun validate_non_empty_string/1}
- ]},
- {<<"w">>, [
- {tag, w},
- {optional, true},
- {default, 2},
- {validator, fun is_pos_integer/1}
- ]},
- {<<"partitioned">>, [
- {tag, partitioned},
- {optional, true},
- {default, db_default},
- {validator, fun validate_partitioned/1}
- ]}
- ],
- validate(Props, Opts).
-
-validate_find({Props}) ->
- Opts = [
- {<<"selector">>, [
- {tag, selector},
- {validator, fun validate_selector/1}
- ]},
- {<<"use_index">>, [
- {tag, use_index},
- {optional, true},
- {default, []},
- {validator, fun validate_use_index/1}
- ]},
- {<<"bookmark">>, [
- {tag, bookmark},
- {optional, true},
- {default, <<>>},
- {validator, fun validate_bookmark/1}
- ]},
- {<<"limit">>, [
- {tag, limit},
- {optional, true},
- {default, default_limit()},
- {validator, fun is_non_neg_integer/1}
- ]},
- {<<"skip">>, [
- {tag, skip},
- {optional, true},
- {default, 0},
- {validator, fun is_non_neg_integer/1}
- ]},
- {<<"sort">>, [
- {tag, sort},
- {optional, true},
- {default, []},
- {validator, fun validate_sort/1}
- ]},
- {<<"fields">>, [
- {tag, fields},
- {optional, true},
- {default, []},
- {validator, fun validate_fields/1}
- ]},
- {<<"partition">>, [
- {tag, partition},
- {optional, true},
- {default, <<>>},
- {validator, fun validate_partition/1}
- ]},
- {<<"r">>, [
- {tag, r},
- {optional, true},
- {default, 1},
- {validator, fun mango_opts:is_pos_integer/1}
- ]},
- {<<"conflicts">>, [
- {tag, conflicts},
- {optional, true},
- {default, false},
- {validator, fun mango_opts:is_boolean/1}
- ]},
- {<<"stale">>, [
- {tag, stale},
- {optional, true},
- {default, false},
- {validator, fun mango_opts:is_ok_or_false/1}
- ]},
- {<<"update">>, [
- {tag, update},
- {optional, true},
- {default, true},
- {validator, fun mango_opts:is_boolean/1}
- ]},
- {<<"stable">>, [
- {tag, stable},
- {optional, true},
- {default, false},
- {validator, fun mango_opts:is_boolean/1}
- ]},
- {<<"execution_stats">>, [
- {tag, execution_stats},
- {optional, true},
- {default, false},
- {validator, fun mango_opts:is_boolean/1}
- ]}
- ],
- validate(Props, Opts).
-
-validate_bulk_delete({Props}) ->
- Opts = [
- {<<"docids">>, [
- {tag, docids},
- {validator, fun validate_bulk_docs/1}
- ]},
- {<<"w">>, [
- {tag, w},
- {optional, true},
- {default, 2},
- {validator, fun is_pos_integer/1}
- ]}
- ],
- validate(Props, Opts).
-
-validate(Props, Opts) ->
- case mango_util:assert_ejson({Props}) of
- true ->
- ok;
- false ->
- ?MANGO_ERROR({invalid_ejson, {Props}})
- end,
- {Rest, Acc} = validate_opts(Opts, Props, []),
- case Rest of
- [] ->
- ok;
- [{BadKey, _} | _] ->
- ?MANGO_ERROR({invalid_key, BadKey})
- end,
- {ok, Acc}.
-
-is_string(Val) when is_binary(Val) ->
- {ok, Val};
-is_string(Else) ->
- ?MANGO_ERROR({invalid_string, Else}).
-
-is_boolean(true) ->
- {ok, true};
-is_boolean(false) ->
- {ok, false};
-is_boolean(Else) ->
- ?MANGO_ERROR({invalid_boolean, Else}).
-
-is_pos_integer(V) when is_integer(V), V > 0 ->
- {ok, V};
-is_pos_integer(Else) ->
- ?MANGO_ERROR({invalid_pos_integer, Else}).
-
-is_non_neg_integer(V) when is_integer(V), V >= 0 ->
- {ok, V};
-is_non_neg_integer(Else) ->
- ?MANGO_ERROR({invalid_non_neg_integer, Else}).
-
-is_object({Props}) ->
- true = mango_util:assert_ejson({Props}),
- {ok, {Props}};
-is_object(Else) ->
- ?MANGO_ERROR({invalid_object, Else}).
-
-is_ok_or_false(<<"ok">>) ->
- {ok, ok};
-% convenience
-is_ok_or_false(<<"false">>) ->
- {ok, false};
-is_ok_or_false(false) ->
- {ok, false};
-is_ok_or_false(Else) ->
- ?MANGO_ERROR({invalid_ok_or_false_value, Else}).
-
-validate_non_empty_string(<<>>) ->
- ?MANGO_ERROR(invalid_empty_string);
-validate_non_empty_string(auto_name) ->
- {ok, auto_name};
-validate_non_empty_string(Else) ->
- is_string(Else).
-
-validate_selector({Props}) ->
- Norm = mango_selector:normalize({Props}),
- {ok, Norm};
-validate_selector(Else) ->
- ?MANGO_ERROR({invalid_selector_json, Else}).
-
-%% We re-use validate_use_index to make sure the index names are valid
-validate_bulk_docs(Docs) when is_list(Docs) ->
- lists:foreach(fun validate_use_index/1, Docs),
- {ok, Docs};
-validate_bulk_docs(Else) ->
- ?MANGO_ERROR({invalid_bulk_docs, Else}).
-
-validate_use_index(IndexName) when is_binary(IndexName) ->
- case binary:split(IndexName, <<"/">>) of
- [DesignId] ->
- {ok, [DesignId]};
- [<<"_design">>, DesignId] ->
- {ok, [DesignId]};
- [DesignId, ViewName] ->
- {ok, [DesignId, ViewName]};
- [<<"_design">>, DesignId, ViewName] ->
- {ok, [DesignId, ViewName]};
- _ ->
- ?MANGO_ERROR({invalid_index_name, IndexName})
- end;
-validate_use_index(null) ->
- {ok, []};
-validate_use_index([]) ->
- {ok, []};
-validate_use_index([DesignId]) when is_binary(DesignId) ->
- {ok, [DesignId]};
-validate_use_index([DesignId, ViewName]) when
- is_binary(DesignId), is_binary(ViewName)
-->
- {ok, [DesignId, ViewName]};
-validate_use_index(Else) ->
- ?MANGO_ERROR({invalid_index_name, Else}).
-
-validate_bookmark(null) ->
- {ok, nil};
-validate_bookmark(<<>>) ->
- {ok, nil};
-validate_bookmark(Bin) when is_binary(Bin) ->
- {ok, Bin};
-validate_bookmark(Else) ->
- ?MANGO_ERROR({invalid_bookmark, Else}).
-
-validate_sort(Value) ->
- mango_sort:new(Value).
-
-validate_fields(Value) ->
- mango_fields:new(Value).
-
-validate_partitioned(true) ->
- {ok, true};
-validate_partitioned(false) ->
- {ok, false};
-validate_partitioned(db_default) ->
- {ok, db_default};
-validate_partitioned(Else) ->
- ?MANGO_ERROR({invalid_partitioned_value, Else}).
-
-validate_partition(<<>>) ->
- {ok, <<>>};
-validate_partition(Partition) ->
- couch_partition:validate_partition(Partition),
- {ok, Partition}.
-
-validate_opts([], Props, Acc) ->
- {Props, lists:reverse(Acc)};
-validate_opts([{Name, Desc} | Rest], Props, Acc) ->
- {tag, Tag} = lists:keyfind(tag, 1, Desc),
- case lists:keytake(Name, 1, Props) of
- {value, {Name, Prop}, RestProps} ->
- NewAcc = [{Tag, validate_opt(Name, Desc, Prop)} | Acc],
- validate_opts(Rest, RestProps, NewAcc);
- false ->
- NewAcc = [{Tag, validate_opt(Name, Desc, undefined)} | Acc],
- validate_opts(Rest, Props, NewAcc)
- end.
-
-validate_opt(_Name, [], Value) ->
- Value;
-validate_opt(Name, Desc0, undefined) ->
- case lists:keytake(optional, 1, Desc0) of
- {value, {optional, true}, Desc1} ->
- {value, {default, Value}, Desc2} = lists:keytake(default, 1, Desc1),
- false = (Value == undefined),
- validate_opt(Name, Desc2, Value);
- _ ->
- ?MANGO_ERROR({missing_required_key, Name})
- end;
-validate_opt(Name, [{tag, _} | Rest], Value) ->
- % Tags aren't really validated
- validate_opt(Name, Rest, Value);
-validate_opt(Name, [{optional, _} | Rest], Value) ->
- % A value was specified for an optional value
- validate_opt(Name, Rest, Value);
-validate_opt(Name, [{default, _} | Rest], Value) ->
- % A value was specified for an optional value
- validate_opt(Name, Rest, Value);
-validate_opt(Name, [{assert, Value} | Rest], Value) ->
- validate_opt(Name, Rest, Value);
-validate_opt(Name, [{assert, Expect} | _], Found) ->
- ?MANGO_ERROR({invalid_value, Name, Expect, Found});
-validate_opt(Name, [{validator, Fun} | Rest], Value) ->
- case Fun(Value) of
- {ok, Validated} ->
- validate_opt(Name, Rest, Validated);
- false ->
- ?MANGO_ERROR({invalid_value, Name, Value})
- end.
-
-default_limit() ->
- config:get_integer("mango", "default_limit", 25).
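
validate/2 walks an option spec, applying defaults and per-option validators, and returns the options as tagged pairs. A small sketch of what that looks like for a find body, assuming a running CouchDB node so config:get_integer/3 and the selector normalization it calls into are available; the expected values simply mirror the defaults in the spec above.

-module(mango_opts_sketch).   %% hypothetical, for illustration only
-export([defaults/0]).

defaults() ->
    Body = {[{<<"selector">>, {[{<<"age">>, {[{<<"$gt">>, 21}]}}]}}]},
    {ok, Opts} = mango_opts:validate_find(Body),
    %% Omitted options come back with their defaults from the spec above:
    %% skip = 0, conflicts = false, limit = config "mango.default_limit".
    {skip, 0} = lists:keyfind(skip, 1, Opts),
    {conflicts, false} = lists:keyfind(conflicts, 1, Opts),
    {limit, _Limit} = lists:keyfind(limit, 1, Opts),
    ok.
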
diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl
deleted file mode 100644
index be2616ff5..000000000
--- a/src/mango/src/mango_selector.erl
+++ /dev/null
@@ -1,985 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_selector).
-
--export([
- normalize/1,
- match/2,
- has_required_fields/2,
- is_constant_field/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
-
-% Validate and normalize each operator. This translates
-% every selector operator into a consistent version that
-% we can then rely on for all other selector functions.
-% See the definition of each step below for more information
-% on what each one does.
-normalize({[]}) ->
- {[]};
-normalize(Selector) ->
- Steps = [
- fun norm_ops/1,
- fun norm_fields/1,
- fun norm_negations/1
- ],
- {NProps} = lists:foldl(fun(Step, Sel) -> Step(Sel) end, Selector, Steps),
- FieldNames = [Name || {Name, _} <- NProps],
- case lists:member(<<>>, FieldNames) of
- true ->
- ?MANGO_ERROR({invalid_selector, missing_field_name});
- false ->
- ok
- end,
- {NProps}.
-
-% Match a selector against a #doc{} or EJSON value.
-% This assumes that the Selector has been normalized.
-% Returns true or false.
-match(Selector, D) ->
- couch_stats:increment_counter([mango, evaluate_selector]),
- match_int(Selector, D).
-
-% An empty selector matches any value.
-match_int({[]}, _) ->
- true;
-match_int(Selector, #doc{body = Body}) ->
- match(Selector, Body, fun mango_json:cmp/2);
-match_int(Selector, {Props}) ->
- match(Selector, {Props}, fun mango_json:cmp/2).
-
-% Convert each operator into a normalized version as well
-% as convert any implicit operators into their explicit
-% versions.
-norm_ops({[{<<"$and">>, Args}]}) when is_list(Args) ->
- {[{<<"$and">>, [norm_ops(A) || A <- Args]}]};
-norm_ops({[{<<"$and">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$and', Arg});
-norm_ops({[{<<"$or">>, Args}]}) when is_list(Args) ->
- {[{<<"$or">>, [norm_ops(A) || A <- Args]}]};
-norm_ops({[{<<"$or">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$or', Arg});
-norm_ops({[{<<"$not">>, {_} = Arg}]}) ->
- {[{<<"$not">>, norm_ops(Arg)}]};
-norm_ops({[{<<"$not">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$not', Arg});
-norm_ops({[{<<"$nor">>, Args}]}) when is_list(Args) ->
- {[{<<"$nor">>, [norm_ops(A) || A <- Args]}]};
-norm_ops({[{<<"$nor">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$nor', Arg});
-norm_ops({[{<<"$in">>, Args}]} = Cond) when is_list(Args) ->
- Cond;
-norm_ops({[{<<"$in">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$in', Arg});
-norm_ops({[{<<"$nin">>, Args}]} = Cond) when is_list(Args) ->
- Cond;
-norm_ops({[{<<"$nin">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$nin', Arg});
-norm_ops({[{<<"$exists">>, Arg}]} = Cond) when is_boolean(Arg) ->
- Cond;
-norm_ops({[{<<"$exists">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$exists', Arg});
-norm_ops({[{<<"$type">>, Arg}]} = Cond) when is_binary(Arg) ->
- Cond;
-norm_ops({[{<<"$type">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$type', Arg});
-norm_ops({[{<<"$mod">>, [D, R]}]} = Cond) when is_integer(D), is_integer(R) ->
- Cond;
-norm_ops({[{<<"$mod">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$mod', Arg});
-norm_ops({[{<<"$regex">>, Regex}]} = Cond) when is_binary(Regex) ->
- case re:compile(Regex) of
- {ok, _} ->
- Cond;
- _ ->
- ?MANGO_ERROR({bad_arg, '$regex', Regex})
- end;
-norm_ops({[{<<"$all">>, Args}]}) when is_list(Args) ->
- {[{<<"$all">>, Args}]};
-norm_ops({[{<<"$all">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$all', Arg});
-norm_ops({[{<<"$elemMatch">>, {_} = Arg}]}) ->
- {[{<<"$elemMatch">>, norm_ops(Arg)}]};
-norm_ops({[{<<"$elemMatch">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$elemMatch', Arg});
-norm_ops({[{<<"$allMatch">>, {_} = Arg}]}) ->
- {[{<<"$allMatch">>, norm_ops(Arg)}]};
-norm_ops({[{<<"$allMatch">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$allMatch', Arg});
-norm_ops({[{<<"$keyMapMatch">>, {_} = Arg}]}) ->
- {[{<<"$keyMapMatch">>, norm_ops(Arg)}]};
-norm_ops({[{<<"$keyMapMatch">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$keyMapMatch', Arg});
-norm_ops({[{<<"$size">>, Arg}]}) when is_integer(Arg), Arg >= 0 ->
- {[{<<"$size">>, Arg}]};
-norm_ops({[{<<"$size">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$size', Arg});
-norm_ops({[{<<"$text">>, Arg}]}) when
- is_binary(Arg);
- is_number(Arg);
- is_boolean(Arg)
-->
- {[{<<"$default">>, {[{<<"$text">>, Arg}]}}]};
-norm_ops({[{<<"$text">>, Arg}]}) ->
- ?MANGO_ERROR({bad_arg, '$text', Arg});
-% Not technically an operator but we pass it through here
-% so that this function accepts its own output. This exists
-% so that $text can have a field name value which simplifies
-% logic elsewhere.
-norm_ops({[{<<"$default">>, _}]} = Selector) ->
- Selector;
-% Terminals where we can't perform any validation
-% on the value because any value is acceptable.
-norm_ops({[{<<"$lt">>, _}]} = Cond) ->
- Cond;
-norm_ops({[{<<"$lte">>, _}]} = Cond) ->
- Cond;
-norm_ops({[{<<"$eq">>, _}]} = Cond) ->
- Cond;
-norm_ops({[{<<"$ne">>, _}]} = Cond) ->
- Cond;
-norm_ops({[{<<"$gte">>, _}]} = Cond) ->
- Cond;
-norm_ops({[{<<"$gt">>, _}]} = Cond) ->
- Cond;
-% Known but unsupported operators
-norm_ops({[{<<"$where">>, _}]}) ->
- ?MANGO_ERROR({not_supported, '$where'});
-norm_ops({[{<<"$geoWithin">>, _}]}) ->
- ?MANGO_ERROR({not_supported, '$geoWithin'});
-norm_ops({[{<<"$geoIntersects">>, _}]}) ->
- ?MANGO_ERROR({not_supported, '$geoIntersects'});
-norm_ops({[{<<"$near">>, _}]}) ->
- ?MANGO_ERROR({not_supported, '$near'});
-norm_ops({[{<<"$nearSphere">>, _}]}) ->
- ?MANGO_ERROR({not_supported, '$nearSphere'});
-% Unknown operator
-norm_ops({[{<<"$", _/binary>> = Op, _}]}) ->
- ?MANGO_ERROR({invalid_operator, Op});
-% A {Field: Cond} pair
-norm_ops({[{Field, Cond}]}) ->
- {[{Field, norm_ops(Cond)}]};
-% An implicit $and
-norm_ops({[_, _ | _] = Props}) ->
- {[{<<"$and">>, [norm_ops({[P]}) || P <- Props]}]};
-% A bare value condition means equality
-norm_ops(Value) ->
- {[{<<"$eq">>, Value}]}.
-
-% This takes a selector and normalizes all of the
-% field names as far as possible. For instance:
-%
-% Unnormalized:
-% {foo: {$and: [{$gt: 5}, {$lt: 10}]}}
-%
-% Normalized:
-% {$and: [{foo: {$gt: 5}}, {foo: {$lt: 10}}]}
-%
-% And another example:
-%
-% Unnormalized:
-% {foo: {bar: {$gt: 10}}}
-%
-% Normalized:
-% {"foo.bar": {$gt: 10}}
-%
-% It's important to note that we can only normalize
-% field names like this through boolean operators where
-% we can guarantee commutativity. We can't necessarily
-% do the same through the '$elemMatch' or '$allMatch'
-% operators but we can apply the same algorithm to its
-% arguments.
-norm_fields({[]}) ->
- {[]};
-norm_fields(Selector) ->
- norm_fields(Selector, <<>>).
-
-% Operators where we can push the field names further
-% down the operator tree
-norm_fields({[{<<"$and">>, Args}]}, Path) ->
- {[{<<"$and">>, [norm_fields(A, Path) || A <- Args]}]};
-norm_fields({[{<<"$or">>, Args}]}, Path) ->
- {[{<<"$or">>, [norm_fields(A, Path) || A <- Args]}]};
-norm_fields({[{<<"$not">>, Arg}]}, Path) ->
- {[{<<"$not">>, norm_fields(Arg, Path)}]};
-norm_fields({[{<<"$nor">>, Args}]}, Path) ->
- {[{<<"$nor">>, [norm_fields(A, Path) || A <- Args]}]};
-% Fields where we can normalize fields in the
-% operator arguments independently.
-norm_fields({[{<<"$elemMatch">>, Arg}]}, Path) ->
- Cond = {[{<<"$elemMatch">>, norm_fields(Arg)}]},
- {[{Path, Cond}]};
-norm_fields({[{<<"$allMatch">>, Arg}]}, Path) ->
- Cond = {[{<<"$allMatch">>, norm_fields(Arg)}]},
- {[{Path, Cond}]};
-norm_fields({[{<<"$keyMapMatch">>, Arg}]}, Path) ->
- Cond = {[{<<"$keyMapMatch">>, norm_fields(Arg)}]},
- {[{Path, Cond}]};
-% The text operator operates against the internal
-% $default field. This also asserts that the $default
-% field is at the root as well as that it only has
-% a $text operator applied.
-norm_fields({[{<<"$default">>, {[{<<"$text">>, _Arg}]}}]} = Sel, <<>>) ->
- Sel;
-norm_fields({[{<<"$default">>, _}]} = Selector, _) ->
- ?MANGO_ERROR({bad_field, Selector});
-% Any other operator is a terminal below which no
-% field names should exist. Set the path to this
-% terminal and return it.
-norm_fields({[{<<"$", _/binary>>, _}]} = Cond, Path) ->
- {[{Path, Cond}]};
-% We've found a field name. Append it to the path
-% and skip this node as we unroll the stack as
-% the full path will be further down the branch.
-norm_fields({[{Field, Cond}]}, <<>>) ->
- % Don't include the '.' for the first element of
- % the path.
- norm_fields(Cond, Field);
-norm_fields({[{Field, Cond}]}, Path) ->
- norm_fields(Cond, <<Path/binary, ".", Field/binary>>);
-% An empty selector
-norm_fields({[]}, Path) ->
- {Path, {[]}};
-% Else we have an invalid selector
-norm_fields(BadSelector, _) ->
- ?MANGO_ERROR({bad_field, BadSelector}).
-
-% Take all the negation operators and move the logic
-% as far down the branch as possible. This does things
-% like:
-%
-% Unnormalized:
-% {$not: {foo: {$gt: 10}}}
-%
-% Normalized:
-% {foo: {$lte: 10}}
-%
-% And we also apply DeMorgan's laws
-%
-% Unnormalized:
-% {$not: {$and: [{foo: {$gt: 10}}, {foo: {$lt: 5}}]}}
-%
-% Normalized:
-% {$or: [{foo: {$lte: 10}}, {foo: {$gte: 5}}]}
-%
-% This logic is important because we can't "see" through
-% a '$not' operator to be able to locate indices that may
-% service a specific query. Though if we move the negations
-% down to the terminals we may be able to negate specific
-% operators which allows us to find usable indices.
-
-% Operators that cause a negation
-norm_negations({[{<<"$not">>, Arg}]}) ->
- negate(Arg);
-norm_negations({[{<<"$nor">>, Args}]}) ->
- {[{<<"$and">>, [negate(A) || A <- Args]}]};
-% Operators that we merely seek through as we look for
-% negations.
-norm_negations({[{<<"$and">>, Args}]}) ->
- {[{<<"$and">>, [norm_negations(A) || A <- Args]}]};
-norm_negations({[{<<"$or">>, Args}]}) ->
- {[{<<"$or">>, [norm_negations(A) || A <- Args]}]};
-norm_negations({[{<<"$elemMatch">>, Arg}]}) ->
- {[{<<"$elemMatch">>, norm_negations(Arg)}]};
-norm_negations({[{<<"$allMatch">>, Arg}]}) ->
- {[{<<"$allMatch">>, norm_negations(Arg)}]};
-norm_negations({[{<<"$keyMapMatch">>, Arg}]}) ->
- {[{<<"$keyMapMatch">>, norm_negations(Arg)}]};
-% All other conditions can't introduce negations anywhere
-% further down the operator tree.
-norm_negations(Cond) ->
- Cond.
-
-% Actually negate an expression. Make sure to read up
-% on DeMorgan's laws if you're trying to read this, but
-% in a nutshell:
-%
-% NOT(a AND b) == NOT(a) OR NOT(b)
-% NOT(a OR b) == NOT(a) AND NOT(b)
-%
-% Also notice that if a negation hits another negation
-% operator, we just nullify the combination. It's
-% possible that below the nullification we have more
-% negations so we have to recurse back to norm_negations/1.
-
-% Negating negation, nullify but recurse to
-% norm_negations/1
-negate({[{<<"$not">>, Arg}]}) ->
- norm_negations(Arg);
-negate({[{<<"$nor">>, Args}]}) ->
- {[{<<"$or">>, [norm_negations(A) || A <- Args]}]};
-% DeMorgan Negations
-negate({[{<<"$and">>, Args}]}) ->
- {[{<<"$or">>, [negate(A) || A <- Args]}]};
-negate({[{<<"$or">>, Args}]}) ->
- {[{<<"$and">>, [negate(A) || A <- Args]}]};
-negate({[{<<"$default">>, _}]} = Arg) ->
- ?MANGO_ERROR({bad_arg, '$not', Arg});
-% Negating comparison operators is straightforward
-negate({[{<<"$lt">>, Arg}]}) ->
- {[{<<"$gte">>, Arg}]};
-negate({[{<<"$lte">>, Arg}]}) ->
- {[{<<"$gt">>, Arg}]};
-negate({[{<<"$eq">>, Arg}]}) ->
- {[{<<"$ne">>, Arg}]};
-negate({[{<<"$ne">>, Arg}]}) ->
- {[{<<"$eq">>, Arg}]};
-negate({[{<<"$gte">>, Arg}]}) ->
- {[{<<"$lt">>, Arg}]};
-negate({[{<<"$gt">>, Arg}]}) ->
- {[{<<"$lte">>, Arg}]};
-negate({[{<<"$in">>, Args}]}) ->
- {[{<<"$nin">>, Args}]};
-negate({[{<<"$nin">>, Args}]}) ->
- {[{<<"$in">>, Args}]};
-% We can also trivially negate the exists operator
-negate({[{<<"$exists">>, Arg}]}) ->
- {[{<<"$exists">>, not Arg}]};
-% Anything else we have to just terminate the
-% negation by reinserting the negation operator
-negate({[{<<"$", _/binary>>, _}]} = Cond) ->
- {[{<<"$not">>, Cond}]};
-% Finally, negating a field just means we negate its
-% condition.
-negate({[{Field, Cond}]}) ->
- {[{Field, negate(Cond)}]}.
-
-% Empty argument lists are special-cased below: $and, $or and $nin match
-% everything, while $all and $in match nothing.
-match({[{<<"$and">>, []}]}, _, _) ->
- true;
-match({[{<<"$and">>, Args}]}, Value, Cmp) ->
- Pred = fun(SubSel) -> match(SubSel, Value, Cmp) end,
- lists:all(Pred, Args);
-match({[{<<"$or">>, []}]}, _, _) ->
- true;
-match({[{<<"$or">>, Args}]}, Value, Cmp) ->
- Pred = fun(SubSel) -> match(SubSel, Value, Cmp) end,
- lists:any(Pred, Args);
-match({[{<<"$not">>, Arg}]}, Value, Cmp) ->
- not match(Arg, Value, Cmp);
-match({[{<<"$all">>, []}]}, _, _) ->
- false;
-% All of the values in Args must exist in Values or
-% Values == hd(Args) if Args is a single element list
-% that contains a list.
-match({[{<<"$all">>, Args}]}, Values, _Cmp) when is_list(Values) ->
- Pred = fun(A) -> lists:member(A, Values) end,
- HasArgs = lists:all(Pred, Args),
- IsArgs =
- case Args of
- [A] when is_list(A) ->
- A == Values;
- _ ->
- false
- end,
- HasArgs orelse IsArgs;
-match({[{<<"$all">>, _Args}]}, _Values, _Cmp) ->
- false;
-%% This is for $elemMatch, $allMatch, and possibly $in because of our normalizer.
-%% A selector such as {"field_name": {"$elemMatch": {"$gte": 80, "$lt": 85}}}
-%% gets normalized to:
-%% {[{<<"field_name">>,
-%% {[{<<"$elemMatch">>,
-%% {[{<<"$and">>, [
-%% {[{<<>>,{[{<<"$gte">>,80}]}}]},
-%% {[{<<>>,{[{<<"$lt">>,85}]}}]}
-%% ]}]}
-%% }]}
-%% }]}.
-%% So we filter out the <<>>.
-match({[{<<>>, Arg}]}, Values, Cmp) ->
- match(Arg, Values, Cmp);
-% Matches when any element in values matches the
-% sub-selector Arg.
-match({[{<<"$elemMatch">>, Arg}]}, Values, Cmp) when is_list(Values) ->
- try
- lists:foreach(
- fun(V) ->
- case match(Arg, V, Cmp) of
- true -> throw(matched);
- _ -> ok
- end
- end,
- Values
- ),
- false
- catch
- throw:matched ->
- true;
- _:_ ->
- false
- end;
-match({[{<<"$elemMatch">>, _Arg}]}, _Value, _Cmp) ->
- false;
-% Matches when all elements in values match the
-% sub-selector Arg.
-match({[{<<"$allMatch">>, Arg}]}, [_ | _] = Values, Cmp) ->
- try
- lists:foreach(
- fun(V) ->
- case match(Arg, V, Cmp) of
- false -> throw(unmatched);
- _ -> ok
- end
- end,
- Values
- ),
- true
- catch
- _:_ ->
- false
- end;
-match({[{<<"$allMatch">>, _Arg}]}, _Value, _Cmp) ->
- false;
-% Matches when any key in the map value matches the
-% sub-selector Arg.
-match({[{<<"$keyMapMatch">>, Arg}]}, Value, Cmp) when is_tuple(Value) ->
- try
- lists:foreach(
- fun(V) ->
- case match(Arg, V, Cmp) of
- true -> throw(matched);
- _ -> ok
- end
- end,
- [Key || {Key, _} <- element(1, Value)]
- ),
- false
- catch
- throw:matched ->
- true;
- _:_ ->
- false
- end;
-match({[{<<"$keyMapMatch">>, _Arg}]}, _Value, _Cmp) ->
- false;
-% Our comparison operators are fairly straightforward
-match({[{<<"$lt">>, Arg}]}, Value, Cmp) ->
- Cmp(Value, Arg) < 0;
-match({[{<<"$lte">>, Arg}]}, Value, Cmp) ->
- Cmp(Value, Arg) =< 0;
-match({[{<<"$eq">>, Arg}]}, Value, Cmp) ->
- Cmp(Value, Arg) == 0;
-match({[{<<"$ne">>, Arg}]}, Value, Cmp) ->
- Cmp(Value, Arg) /= 0;
-match({[{<<"$gte">>, Arg}]}, Value, Cmp) ->
- Cmp(Value, Arg) >= 0;
-match({[{<<"$gt">>, Arg}]}, Value, Cmp) ->
- Cmp(Value, Arg) > 0;
-match({[{<<"$in">>, []}]}, _, _) ->
- false;
-match({[{<<"$in">>, Args}]}, Values, Cmp) when is_list(Values) ->
- Pred = fun(Arg) ->
- lists:foldl(
- fun(Value, Match) ->
- (Cmp(Value, Arg) == 0) or Match
- end,
- false,
- Values
- )
- end,
- lists:any(Pred, Args);
-match({[{<<"$in">>, Args}]}, Value, Cmp) ->
- Pred = fun(Arg) -> Cmp(Value, Arg) == 0 end,
- lists:any(Pred, Args);
-match({[{<<"$nin">>, []}]}, _, _) ->
- true;
-match({[{<<"$nin">>, Args}]}, Values, Cmp) when is_list(Values) ->
- not match({[{<<"$in">>, Args}]}, Values, Cmp);
-match({[{<<"$nin">>, Args}]}, Value, Cmp) ->
- Pred = fun(Arg) -> Cmp(Value, Arg) /= 0 end,
- lists:all(Pred, Args);
-% This logic is a bit subtle. Basically, if value is
-% not undefined, then it exists.
-match({[{<<"$exists">>, ShouldExist}]}, Value, _Cmp) ->
- Exists = Value /= undefined,
- ShouldExist andalso Exists;
-match({[{<<"$type">>, Arg}]}, Value, _Cmp) when is_binary(Arg) ->
- Arg == mango_json:type(Value);
-match({[{<<"$mod">>, [D, R]}]}, Value, _Cmp) when is_integer(Value) ->
- Value rem D == R;
-match({[{<<"$mod">>, _}]}, _Value, _Cmp) ->
- false;
-match({[{<<"$regex">>, Regex}]}, Value, _Cmp) when is_binary(Value) ->
- try
- match == re:run(Value, Regex, [{capture, none}])
- catch
- _:_ ->
- false
- end;
-match({[{<<"$regex">>, _}]}, _Value, _Cmp) ->
- false;
-match({[{<<"$size">>, Arg}]}, Values, _Cmp) when is_list(Values) ->
- length(Values) == Arg;
-match({[{<<"$size">>, _}]}, _Value, _Cmp) ->
- false;
-% We don't have any choice but to believe that the text
-% index returned valid matches
-match({[{<<"$default">>, _}]}, _Value, _Cmp) ->
- true;
-% All other operators are internal assertion errors for
-% matching because we either should've removed them during
-% normalization or something else broke.
-match({[{<<"$", _/binary>> = Op, _}]}, _, _) ->
- ?MANGO_ERROR({invalid_operator, Op});
-% We need to traverse value to find field. The call to
-% mango_doc:get_field/2 may return either not_found or
-% bad_path in which case matching fails.
-match({[{Field, Cond}]}, Value, Cmp) ->
- case mango_doc:get_field(Value, Field) of
- not_found when Cond == {[{<<"$exists">>, false}]} ->
- true;
- not_found ->
- false;
- bad_path ->
- false;
- SubValue when Field == <<"_id">> ->
- match(Cond, SubValue, fun mango_json:cmp_raw/2);
- SubValue ->
- match(Cond, SubValue, Cmp)
- end;
-match({[_, _ | _] = _Props} = Sel, _Value, _Cmp) ->
- erlang:error({unnormalized_selector, Sel}).
-
-% Returns true if Selector requires all
-% fields in RequiredFields to exist in any matching documents.
-
-% For each condition in the selector, check
-% whether the field is in RequiredFields.
-% If it is, remove it from RequiredFields and continue
-% until we have matched them all or run out of selector to
-% match against.
-
-has_required_fields(Selector, RequiredFields) ->
- Remainder = has_required_fields_int(Selector, RequiredFields),
- Remainder == [].
-
-% Empty selector
-has_required_fields_int({[]}, Remainder) ->
- Remainder;
-% No more required fields
-has_required_fields_int(_, []) ->
- [];
-% No more selector
-has_required_fields_int([], Remainder) ->
- Remainder;
-has_required_fields_int(Selector, RequiredFields) when not is_list(Selector) ->
- has_required_fields_int([Selector], RequiredFields);
-% We can "see" through $and operator. Iterate
-% through the list of child operators.
-has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields) when
- is_list(Args)
-->
- has_required_fields_int(Args, RequiredFields);
-% We can "see" through $or operator. Required fields
-% must be covered by all children.
-has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields) when
- is_list(Args)
-->
- Remainder0 = lists:foldl(
- fun(Arg, Acc) ->
- % for each child test coverage against the full
- % set of required fields
- Remainder = has_required_fields_int(Arg, RequiredFields),
-
- % collect the remaining fields across all children
- Acc ++ Remainder
- end,
- [],
- Args
- ),
-
- % remove duplicate fields
- Remainder1 = lists:usort(Remainder0),
- has_required_fields_int(Rest, Remainder1);
-% Handle $and operator where it has peers. Required fields
-% can be covered by any child.
-has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields) when
- is_list(Args)
-->
- Remainder = has_required_fields_int(Args, RequiredFields),
- has_required_fields_int(Rest, Remainder);
-has_required_fields_int([{[{Field, Cond}]} | Rest], RequiredFields) ->
- case Cond of
- % $exists:false is a special case - this is the only operator
- % that explicitly does not require a field to exist
- {[{<<"$exists">>, false}]} ->
- has_required_fields_int(Rest, RequiredFields);
- _ ->
- has_required_fields_int(Rest, lists:delete(Field, RequiredFields))
- end.
-
-% Returns true if a field in the selector is a constant value e.g. {a: {$eq: 1}}
-is_constant_field({[]}, _Field) ->
- false;
-is_constant_field(Selector, Field) when not is_list(Selector) ->
- is_constant_field([Selector], Field);
-is_constant_field([], _Field) ->
- false;
-is_constant_field([{[{<<"$and">>, Args}]}], Field) when is_list(Args) ->
- lists:any(fun(Arg) -> is_constant_field(Arg, Field) end, Args);
-is_constant_field([{[{<<"$and">>, Args}]}], Field) ->
- is_constant_field(Args, Field);
-is_constant_field([{[{Field, {[{Cond, _Val}]}}]} | _Rest], Field) ->
- Cond =:= <<"$eq">>;
-is_constant_field([{[{_UnMatched, _}]} | Rest], Field) ->
- is_constant_field(Rest, Field).
-
-%%%%%%%% module tests below %%%%%%%%
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-is_constant_field_basic_test() ->
- Selector = normalize({[{<<"A">>, <<"foo">>}]}),
- Field = <<"A">>,
- ?assertEqual(true, is_constant_field(Selector, Field)).
-
-is_constant_field_basic_two_test() ->
- Selector = normalize(
- {[
- {<<"$and">>, [
- {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
- {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
- ]}
- ]}
- ),
- Field = <<"cars">>,
- ?assertEqual(true, is_constant_field(Selector, Field)).
-
-is_constant_field_not_eq_test() ->
- Selector = normalize(
- {[
- {<<"$and">>, [
- {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
- {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
- ]}
- ]}
- ),
- Field = <<"age">>,
- ?assertEqual(false, is_constant_field(Selector, Field)).
-
-is_constant_field_missing_field_test() ->
- Selector = normalize(
- {[
- {<<"$and">>, [
- {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
- {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
- ]}
- ]}
- ),
- Field = <<"wrong">>,
- ?assertEqual(false, is_constant_field(Selector, Field)).
-
-is_constant_field_or_field_test() ->
- Selector =
- {[
- {<<"$or">>, [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]}
- ]},
- Normalized = normalize(Selector),
- Field = <<"A">>,
- ?assertEqual(false, is_constant_field(Normalized, Field)).
-
-is_constant_field_empty_selector_test() ->
- Selector = normalize({[]}),
- Field = <<"wrong">>,
- ?assertEqual(false, is_constant_field(Selector, Field)).
-
-is_constant_nested_and_test() ->
- Selector1 =
- {[
- {<<"$and">>, [
- {[{<<"A">>, <<"foo">>}]}
- ]}
- ]},
- Selector2 =
- {[
- {<<"$and">>, [
- {[{<<"B">>, {[{<<"$gt">>, 10}]}}]}
- ]}
- ]},
- Selector =
- {[
- {<<"$and">>, [
- Selector1,
- Selector2
- ]}
- ]},
-
- Normalized = normalize(Selector),
- ?assertEqual(true, is_constant_field(Normalized, <<"A">>)),
- ?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
-
-is_constant_combined_or_and_equals_test() ->
- Selector =
- {[
- {<<"A">>, "foo"},
- {<<"$or">>, [
- {[{<<"B">>, <<"bar">>}]},
- {[{<<"B">>, <<"baz">>}]}
- ]},
- {<<"C">>, "qux"}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(true, is_constant_field(Normalized, <<"C">>)),
- ?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
-
-has_required_fields_basic_test() ->
- RequiredFields = [<<"A">>],
- Selector = {[{<<"A">>, <<"foo">>}]},
- Normalized = normalize(Selector),
- ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_basic_failure_test() ->
- RequiredFields = [<<"B">>],
- Selector = {[{<<"A">>, <<"foo">>}]},
- Normalized = normalize(Selector),
- ?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_empty_selector_test() ->
- RequiredFields = [<<"A">>],
- Selector = {[]},
- Normalized = normalize(Selector),
- ?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_exists_false_test() ->
- RequiredFields = [<<"A">>],
- Selector = {[{<<"A">>, {[{<<"$exists">>, false}]}}]},
- Normalized = normalize(Selector),
- ?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_and_true_test() ->
- RequiredFields = [<<"A">>],
- Selector =
- {[
- {<<"$and">>, [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_nested_and_true_test() ->
- RequiredFields = [<<"A">>, <<"B">>],
- Selector1 =
- {[
- {<<"$and">>, [
- {[{<<"A">>, <<"foo">>}]}
- ]}
- ]},
- Selector2 =
- {[
- {<<"$and">>, [
- {[{<<"B">>, <<"foo">>}]}
- ]}
- ]},
- Selector =
- {[
- {<<"$and">>, [
- Selector1,
- Selector2
- ]}
- ]},
-
- Normalized = normalize(Selector),
- ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_and_false_test() ->
- RequiredFields = [<<"A">>, <<"C">>],
- Selector =
- {[
- {<<"$and">>, [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_or_false_test() ->
- RequiredFields = [<<"A">>],
- Selector =
- {[
- {<<"$or">>, [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_or_true_test() ->
- RequiredFields = [<<"A">>, <<"B">>, <<"C">>],
- Selector =
- {[
- {<<"A">>, "foo"},
- {<<"$or">>, [
- {[{<<"B">>, <<"bar">>}]},
- {[{<<"B">>, <<"baz">>}]}
- ]},
- {<<"C">>, "qux"}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_and_nested_or_true_test() ->
- RequiredFields = [<<"A">>, <<"B">>],
- Selector1 =
- {[
- {<<"$and">>, [
- {[{<<"A">>, <<"foo">>}]}
- ]}
- ]},
- Selector2 =
- {[
- {<<"$or">>, [
- {[{<<"B">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]}
- ]},
- Selector =
- {[
- {<<"$and">>, [
- Selector1,
- Selector2
- ]}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(true, has_required_fields(Normalized, RequiredFields)),
-
- SelectorReverse =
- {[
- {<<"$and">>, [
- Selector2,
- Selector1
- ]}
- ]},
- NormalizedReverse = normalize(SelectorReverse),
- ?assertEqual(true, has_required_fields(NormalizedReverse, RequiredFields)).
-
-has_required_fields_and_nested_or_false_test() ->
- RequiredFields = [<<"A">>, <<"B">>],
- Selector1 =
- {[
- {<<"$and">>, [
- {[{<<"A">>, <<"foo">>}]}
- ]}
- ]},
- Selector2 =
- {[
- {<<"$or">>, [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]}
- ]},
- Selector =
- {[
- {<<"$and">>, [
- Selector1,
- Selector2
- ]}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(false, has_required_fields(Normalized, RequiredFields)),
-
- SelectorReverse =
- {[
- {<<"$and">>, [
- Selector2,
- Selector1
- ]}
- ]},
-
- NormalizedReverse = normalize(SelectorReverse),
- ?assertEqual(false, has_required_fields(NormalizedReverse, RequiredFields)).
-
-has_required_fields_or_nested_and_true_test() ->
- RequiredFields = [<<"A">>],
- Selector1 =
- {[
- {<<"$and">>, [
- {[{<<"A">>, <<"foo">>}]}
- ]}
- ]},
- Selector2 =
- {[
- {<<"$and">>, [
- {[{<<"A">>, <<"foo">>}]}
- ]}
- ]},
- Selector =
- {[
- {<<"$or">>, [
- Selector1,
- Selector2
- ]}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_or_nested_or_true_test() ->
- RequiredFields = [<<"A">>],
- Selector1 =
- {[
- {<<"$or">>, [
- {[{<<"A">>, <<"foo">>}]}
- ]}
- ]},
- Selector2 =
- {[
- {<<"$or">>, [
- {[{<<"A">>, <<"bar">>}]}
- ]}
- ]},
- Selector =
- {[
- {<<"$or">>, [
- Selector1,
- Selector2
- ]}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
-
-has_required_fields_or_nested_or_false_test() ->
- RequiredFields = [<<"A">>],
- Selector1 =
- {[
- {<<"$or">>, [
- {[{<<"A">>, <<"foo">>}]}
- ]}
- ]},
- Selector2 =
- {[
- {<<"$or">>, [
- {[{<<"B">>, <<"bar">>}]}
- ]}
- ]},
- Selector =
- {[
- {<<"$or">>, [
- Selector1,
- Selector2
- ]}
- ]},
- Normalized = normalize(Selector),
- ?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
-
--endif.
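
Tying the pieces above together: normalize/1 rewrites implicit operators and nested field names into the canonical $and form (the same transformation the norm_fields comment illustrates), and match/2 then evaluates the normalized selector against an EJSON document. A short sketch, assuming a running node since match/2 bumps a couch_stats counter and compares values through couch_ejson_compare:

-module(mango_selector_sketch).   %% hypothetical, for illustration only
-export([demo/0]).

demo() ->
    %% {"foo": {"$gt": 5, "$lt": 10}} ...
    Selector = {[{<<"foo">>, {[{<<"$gt">>, 5}, {<<"$lt">>, 10}]}}]},
    %% ... normalizes to {"$and": [{"foo": {"$gt": 5}}, {"foo": {"$lt": 10}}]}.
    {[{<<"$and">>, _}]} = Norm = mango_selector:normalize(Selector),
    true = mango_selector:match(Norm, {[{<<"foo">>, 7}]}),
    false = mango_selector:match(Norm, {[{<<"foo">>, 12}]}),
    %% has_required_fields/2 reports whether every listed field must exist in a
    %% matching doc; is_constant_field/2 whether a field is pinned by an $eq.
    true = mango_selector:has_required_fields(Norm, [<<"foo">>]),
    false = mango_selector:is_constant_field(Norm, <<"foo">>),
    ok.
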
diff --git a/src/mango/src/mango_selector_text.erl b/src/mango/src/mango_selector_text.erl
deleted file mode 100644
index aaa1e3329..000000000
--- a/src/mango/src/mango_selector_text.erl
+++ /dev/null
@@ -1,423 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_selector_text).
-
--export([
- convert/1,
- convert/2,
-
- append_sort_type/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
-
-%% Regex for <<"\\.">>
--define(PERIOD, "\\.").
-
-convert(Object) ->
- TupleTree = convert([], Object),
- iolist_to_binary(to_query(TupleTree)).
-
-convert(Path, {[{<<"$and">>, Args}]}) ->
- Parts = [convert(Path, Arg) || Arg <- Args],
- {op_and, Parts};
-convert(Path, {[{<<"$or">>, Args}]}) ->
- Parts = [convert(Path, Arg) || Arg <- Args],
- {op_or, Parts};
-convert(Path, {[{<<"$not">>, Arg}]}) ->
- {op_not, {field_exists_query(Path), convert(Path, Arg)}};
-convert(Path, {[{<<"$default">>, Arg}]}) ->
- {op_field, {_, Query}} = convert(Path, Arg),
- {op_default, Query};
-% The $text operator specifies a Lucene syntax query
-% so we just pull it in directly.
-convert(Path, {[{<<"$text">>, Query}]}) when is_binary(Query) ->
- {op_field, {make_field(Path, Query), value_str(Query)}};
-% The MongoDB docs for $all are super confusing and read more
-% like they screwed up the implementation of this operator
-% and then just documented it as a feature.
-%
-% This implementation will match the behavior as closely as
-% possible based on the available docs but we'll need to have
-% the testing team validate how MongoDB handles edge conditions
-convert(Path, {[{<<"$all">>, Args}]}) ->
- case Args of
- [Values] when is_list(Values) ->
- % If Args is a single element array then we have to
- % either match if Path is that array or if it contains
- % the array as an element of an array (which isn't at all
- % confusing). For Lucene to return us all possible matches
- % that means we just need to search for each value in
- % Path.[] and Path.[].[] and rely on our filtering to limit
- % the results properly.
- Fields1 = convert(Path, {[{<<"$eq">>, Values}]}),
- Fields2 = convert([<<"[]">> | Path], {[{<<"$eq">>, Values}]}),
- {op_or, [Fields1, Fields2]};
- _ ->
- % Otherwise the $all operator is equivalent to an $and
- % operator so we treat it as such.
- convert([<<"[]">> | Path], {[{<<"$and">>, Args}]})
- end;
-% The $elemMatch Lucene query is not an exact translation
-% as we can't enforce that the matches are all for the same
-% item in an array. We just rely on the final selector match
-% to filter out anything that doesn't match. The only trick
-% is that we have to add the `[]` path element since the docs
-% say this has to match against an array.
-convert(Path, {[{<<"$elemMatch">>, Arg}]}) ->
- convert([<<"[]">> | Path], Arg);
-convert(Path, {[{<<"$allMatch">>, Arg}]}) ->
- convert([<<"[]">> | Path], Arg);
-% Our comparison operators are fairly straightforward
-convert(Path, {[{<<"$lt">>, Arg}]}) when
- is_list(Arg);
- is_tuple(Arg);
- Arg =:= null
-->
- field_exists_query(Path);
-convert(Path, {[{<<"$lt">>, Arg}]}) ->
- {op_field, {make_field(Path, Arg), range(lt, Arg)}};
-convert(Path, {[{<<"$lte">>, Arg}]}) when
- is_list(Arg);
- is_tuple(Arg);
- Arg =:= null
-->
- field_exists_query(Path);
-convert(Path, {[{<<"$lte">>, Arg}]}) ->
- {op_field, {make_field(Path, Arg), range(lte, Arg)}};
-%% This is for indexable_fields
-convert(Path, {[{<<"$eq">>, Arg}]}) when Arg =:= null ->
- {op_null, {make_field(Path, Arg), value_str(Arg)}};
-convert(Path, {[{<<"$eq">>, Args}]}) when is_list(Args) ->
- Path0 = [<<"[]">> | Path],
- LPart = {op_field, {make_field(Path0, length), value_str(length(Args))}},
- Parts0 = [convert(Path0, {[{<<"$eq">>, Arg}]}) || Arg <- Args],
- Parts = [LPart | Parts0],
- {op_and, Parts};
-convert(Path, {[{<<"$eq">>, {_} = Arg}]}) ->
- convert(Path, Arg);
-convert(Path, {[{<<"$eq">>, Arg}]}) ->
- {op_field, {make_field(Path, Arg), value_str(Arg)}};
-convert(Path, {[{<<"$ne">>, Arg}]}) ->
- {op_not, {field_exists_query(Path), convert(Path, {[{<<"$eq">>, Arg}]})}};
-convert(Path, {[{<<"$gte">>, Arg}]}) when
- is_list(Arg);
- is_tuple(Arg);
- Arg =:= null
-->
- field_exists_query(Path);
-convert(Path, {[{<<"$gte">>, Arg}]}) ->
- {op_field, {make_field(Path, Arg), range(gte, Arg)}};
-convert(Path, {[{<<"$gt">>, Arg}]}) when
- is_list(Arg);
- is_tuple(Arg);
- Arg =:= null
-->
- field_exists_query(Path);
-convert(Path, {[{<<"$gt">>, Arg}]}) ->
- {op_field, {make_field(Path, Arg), range(gt, Arg)}};
-convert(Path, {[{<<"$in">>, Args}]}) ->
- {op_or, convert_in(Path, Args)};
-convert(Path, {[{<<"$nin">>, Args}]}) ->
- {op_not, {field_exists_query(Path), convert(Path, {[{<<"$in">>, Args}]})}};
-convert(Path, {[{<<"$exists">>, ShouldExist}]}) ->
- FieldExists = field_exists_query(Path),
- case ShouldExist of
- true -> FieldExists;
- false -> {op_not, {FieldExists, false}}
- end;
-% We're not checking the actual type here, just looking for
-% anything that has a possibility of matching by checking
-% for the field name. We use the same logic for $exists on
-% the actual query.
-convert(Path, {[{<<"$type">>, _}]}) ->
- field_exists_query(Path);
-convert(Path, {[{<<"$mod">>, _}]}) ->
- field_exists_query(Path, "number");
-% The Lucene regular expression engine does not use Java's regex engine but
-% instead a custom implementation. The syntax is therefore different, so we
-% would get different behavior than our view indexes. To be consistent, we will
-% simply return docs for fields that exist and then run our match filter.
-convert(Path, {[{<<"$regex">>, _}]}) ->
- field_exists_query(Path, "string");
-convert(Path, {[{<<"$size">>, Arg}]}) ->
- {op_field, {make_field([<<"[]">> | Path], length), value_str(Arg)}};
-% All other operators are internal assertion errors for
-% matching because we either should've removed them during
-% normalization or something else broke.
-convert(_Path, {[{<<"$", _/binary>> = Op, _}]}) ->
- ?MANGO_ERROR({invalid_operator, Op});
-% We've hit a field name specifier. Check if the field name is accessing
-% arrays. Convert occurrences of element position references to .[]. Then we
-% need to break the name into path parts and continue our conversion.
-convert(Path, {[{Field0, Cond}]}) ->
- {ok, PP0} =
- case Field0 of
- <<>> ->
- {ok, []};
- _ ->
- mango_util:parse_field(Field0)
- end,
- % Later on, we perform a lucene_escape_user call on the
- % final Path, which calls parse_field again. Calling the function
- % twice converts <<"a\\.b">> to [<<"a">>,<<"b">>]. This leads to
- % an incorrect query since we need [<<"a.b">>]. Without breaking
- % our escaping mechanism, we simply revert this first parse_field
- % effect and replace instances of "." to "\\.".
- MP = mango_util:cached_re(mango_period, ?PERIOD),
- PP1 = [
- re:replace(
- P,
- MP,
- <<"\\\\.">>,
- [global, {return, binary}]
- )
- || P <- PP0
- ],
- {PP2, HasInteger} = replace_array_indexes(PP1, [], false),
- NewPath = PP2 ++ Path,
- case HasInteger of
- true ->
- OldPath = lists:reverse(PP1, Path),
- OldParts = convert(OldPath, Cond),
- NewParts = convert(NewPath, Cond),
- {op_or, [OldParts, NewParts]};
- false ->
- convert(NewPath, Cond)
- end;
-%% For $in
-convert(Path, Val) when is_binary(Val); is_number(Val); is_boolean(Val) ->
- {op_field, {make_field(Path, Val), value_str(Val)}};
-% Anything else is a bad selector.
-convert(_Path, {Props} = Sel) when length(Props) > 1 ->
- erlang:error({unnormalized_selector, Sel}).
-
-to_query_nested(Args) ->
- QueryArgs = lists:map(fun to_query/1, Args),
- % removes empty queries that result from selectors with empty arrays
- FilterFun = fun(A) -> A =/= [] andalso A =/= "()" end,
- lists:filter(FilterFun, QueryArgs).
-
-to_query({op_and, []}) ->
- [];
-to_query({op_and, Args}) when is_list(Args) ->
- case to_query_nested(Args) of
- [] -> [];
- QueryArgs -> ["(", mango_util:join(<<" AND ">>, QueryArgs), ")"]
- end;
-to_query({op_or, []}) ->
- [];
-to_query({op_or, Args}) when is_list(Args) ->
- case to_query_nested(Args) of
- [] -> [];
- QueryArgs -> ["(", mango_util:join(" OR ", QueryArgs), ")"]
- end;
-to_query({op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) ->
- case to_query(Arg) of
- [] -> ["(", to_query(ExistsQuery), ")"];
- Query -> ["(", to_query(ExistsQuery), " AND NOT (", Query, "))"]
- end;
-%% For $exists:false
-to_query({op_not, {ExistsQuery, false}}) ->
- ["($fieldnames:/.*/ ", " AND NOT (", to_query(ExistsQuery), "))"];
-to_query({op_insert, Arg}) when is_binary(Arg) ->
- ["(", Arg, ")"];
-%% We escape : and / for now for values and all lucene chars for fieldnames
-%% This needs to be resolved.
-to_query({op_field, {Name, Value}}) ->
- NameBin = iolist_to_binary(Name),
- ["(", mango_util:lucene_escape_user(NameBin), ":", Value, ")"];
-%% This is for indexable_fields
-to_query({op_null, {Name, Value}}) ->
- NameBin = iolist_to_binary(Name),
- ["(", mango_util:lucene_escape_user(NameBin), ":", Value, ")"];
-to_query({op_fieldname, {Name, Wildcard}}) ->
- NameBin = iolist_to_binary(Name),
- ["($fieldnames:", mango_util:lucene_escape_user(NameBin), Wildcard, ")"];
-to_query({op_default, Value}) ->
- ["($default:", Value, ")"].
-
-%% We match on fieldname and fieldname.[]
-convert_in(Path, Args) ->
- Path0 = [<<"[]">> | Path],
- lists:map(
- fun(Arg) ->
- case Arg of
- {Object} ->
- Parts = lists:map(
- fun(SubObject) ->
- Fields1 = convert(Path, {[SubObject]}),
- Fields2 = convert(Path0, {[SubObject]}),
- {op_or, [Fields1, Fields2]}
- end,
- Object
- ),
- {op_or, Parts};
- SingleVal ->
- Fields1 = {op_field, {make_field(Path, SingleVal), value_str(SingleVal)}},
- Fields2 = {op_field, {make_field(Path0, SingleVal), value_str(SingleVal)}},
- {op_or, [Fields1, Fields2]}
- end
- end,
- Args
- ).
-
-make_field(Path, length) ->
- [path_str(Path), <<":length">>];
-make_field(Path, Arg) ->
- [path_str(Path), <<":">>, type_str(Arg)].
-
-range(lt, Arg) ->
- Min = get_range(min, Arg),
- [<<"[", Min/binary, " TO ">>, value_str(Arg), <<"}">>];
-range(lte, Arg) ->
- Min = get_range(min, Arg),
- [<<"[", Min/binary, " TO ">>, value_str(Arg), <<"]">>];
-range(gte, Arg) ->
- Max = get_range(max, Arg),
- [<<"[">>, value_str(Arg), <<" TO ", Max/binary, "]">>];
-range(gt, Arg) ->
- Max = get_range(max, Arg),
- [<<"{">>, value_str(Arg), <<" TO ", Max/binary, "]">>].
-
-get_range(min, Arg) when is_number(Arg) ->
- <<"-Infinity">>;
-get_range(min, _Arg) ->
- <<"\"\"">>;
-get_range(max, Arg) when is_number(Arg) ->
- <<"Infinity">>;
-get_range(max, _Arg) ->
- <<"\u0x10FFFF">>.
-
-field_exists_query(Path) ->
- % We specify two here for :* and .* so that we don't incorrectly
- % match a path foo.name against foo.name_first (if we were to just
- % append * instead).
- Parts = [
- % We need to remove the period from the path list to indicate that it is
- % a path separator. We escape the colon because it is not used as a
- % separator and we escape colons in field names.
- {op_fieldname, {[path_str(Path), ":"], "*"}},
- {op_fieldname, {[path_str(Path)], ".*"}}
- ],
- {op_or, Parts}.
-
-field_exists_query(Path, Type) ->
- {op_fieldname, {[path_str(Path), ":"], Type}}.
-
-path_str(Path) ->
- path_str(Path, []).
-
-path_str([], Acc) ->
- Acc;
-path_str([Part], Acc) ->
- % No reverse because Path is backwards
- % during recursion of convert.
- [Part | Acc];
-path_str([Part | Rest], Acc) ->
- case Part of
- % do not append a period if Part is blank
- <<>> ->
- path_str(Rest, [Acc]);
- _ ->
- path_str(Rest, [<<".">>, Part | Acc])
- end.
-
-type_str(Value) when is_number(Value) ->
- <<"number">>;
-type_str(Value) when is_boolean(Value) ->
- <<"boolean">>;
-type_str(Value) when is_binary(Value) ->
- <<"string">>;
-type_str(null) ->
- <<"null">>.
-
-value_str(Value) when is_binary(Value) ->
- case mango_util:is_number_string(Value) of
- true ->
- <<"\"", Value/binary, "\"">>;
- false ->
- Escaped = mango_util:lucene_escape_query_value(Value),
- <<"\"", Escaped/binary, "\"">>
- end;
-value_str(Value) when is_integer(Value) ->
- list_to_binary(integer_to_list(Value));
-value_str(Value) when is_float(Value) ->
- list_to_binary(float_to_list(Value));
-value_str(true) ->
- <<"true">>;
-value_str(false) ->
- <<"false">>;
-value_str(null) ->
- <<"true">>.
-
-append_sort_type(RawSortField, Selector) ->
- EncodeField = mango_util:lucene_escape_user(RawSortField),
- String = mango_util:has_suffix(EncodeField, <<"_3astring">>),
- Number = mango_util:has_suffix(EncodeField, <<"_3anumber">>),
- case {String, Number} of
- {true, _} ->
- <<EncodeField/binary, "<string>">>;
- {_, true} ->
- <<EncodeField/binary, "<number>">>;
- _ ->
- Type = get_sort_type(RawSortField, Selector),
- <<EncodeField/binary, Type/binary>>
- end.
-
-get_sort_type(Field, Selector) ->
- Types = get_sort_types(Field, Selector, []),
- case lists:usort(Types) of
- [str] -> <<"_3astring<string>">>;
- [num] -> <<"_3anumber<number>">>;
- _ -> ?MANGO_ERROR({text_sort_error, Field})
- end.
-
-get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc) when
- is_binary(Cond)
-->
- [str | Acc];
-get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc) when
- is_number(Cond)
-->
- [num | Acc];
-get_sort_types(Field, {[{_, Cond}]}, Acc) when is_list(Cond) ->
- lists:foldl(
- fun(Arg, InnerAcc) ->
- get_sort_types(Field, Arg, InnerAcc)
- end,
- Acc,
- Cond
- );
-get_sort_types(Field, {[{_, Cond}]}, Acc) when is_tuple(Cond) ->
- get_sort_types(Field, Cond, Acc);
-get_sort_types(_Field, _, Acc) ->
- Acc.
-
-replace_array_indexes([], NewPartsAcc, HasIntAcc) ->
- {NewPartsAcc, HasIntAcc};
-replace_array_indexes([Part | Rest], NewPartsAcc, HasIntAcc) ->
- {NewPart, HasInt} =
- try
- _ = list_to_integer(binary_to_list(Part)),
- {<<"[]">>, true}
- catch
- _:_ ->
- {Part, false}
- end,
- replace_array_indexes(
- Rest,
- [NewPart | NewPartsAcc],
- HasInt or HasIntAcc
- ).
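The clauses above finish the selector-to-Lucene translation: make_field/2 joins the dotted path with a type suffix, value_str/1 quotes and escapes the value, and range/2 picks inclusive or exclusive interval brackets. A small illustrative Python sketch of that output format follows; the helper names are invented for the example, and only the produced strings mirror the Erlang code.

# Illustrative sketch (not the implementation) of what make_field/2, range/2
# and value_str/1 above produce for a numeric range condition.

def type_str(value):
    # Mirrors type_str/1: each indexed entry is keyed by path and JSON type.
    if isinstance(value, bool):
        return "boolean"
    if isinstance(value, (int, float)):
        return "number"
    if isinstance(value, str):
        return "string"
    if value is None:
        return "null"
    raise TypeError("not an indexable scalar: %r" % (value,))

def make_field(path, value):
    # Mirrors make_field/2: the path ["age"] with a number becomes "age:number".
    return "%s:%s" % (".".join(path), type_str(value))

def lucene_range(op, value):
    # Mirrors range/2 for numbers: { and } are exclusive bounds, [ and ]
    # inclusive, with -Infinity/Infinity as the open end of the interval.
    templates = {
        "$lt":  "[-Infinity TO {v}}}",
        "$lte": "[-Infinity TO {v}]",
        "$gt":  "{{{v} TO Infinity]",
        "$gte": "[{v} TO Infinity]",
    }
    return templates[op].format(v=value)

# {"age": {"$gt": 21}} yields the field "age:number" and the range value
# "{21 TO Infinity]"; to_query/1 then hex-escapes the field name (":" -> "_3a"),
# giving the Lucene clause "(age_3anumber:{21 TO Infinity])".
print(make_field(["age"], 21), lucene_range("$gt", 21))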
diff --git a/src/mango/src/mango_sort.erl b/src/mango/src/mango_sort.erl
deleted file mode 100644
index 808b6e7f2..000000000
--- a/src/mango/src/mango_sort.erl
+++ /dev/null
@@ -1,68 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_sort).
-
--export([
- new/1,
- to_json/1,
- fields/1,
- directions/1
-]).
-
--include("mango.hrl").
-
-new(Fields) when is_list(Fields) ->
- Sort = {[sort_field(Field) || Field <- Fields]},
- validate(Sort),
- {ok, Sort};
-new(Else) ->
- ?MANGO_ERROR({invalid_sort_json, Else}).
-
-to_json({Fields}) ->
- to_json(Fields);
-to_json([]) ->
- [];
-to_json([{Name, Dir} | Rest]) ->
- [{[{Name, Dir}]} | to_json(Rest)].
-
-fields({Props}) ->
- [Name || {Name, _Dir} <- Props].
-
-directions({Props}) ->
- [Dir || {_Name, Dir} <- Props].
-
-sort_field(<<"">>) ->
- ?MANGO_ERROR({invalid_sort_field, <<"">>});
-sort_field(Field) when is_binary(Field) ->
- {Field, <<"asc">>};
-sort_field({[{Name, <<"asc">>}]}) when is_binary(Name) ->
- {Name, <<"asc">>};
-sort_field({[{Name, <<"desc">>}]}) when is_binary(Name) ->
- {Name, <<"desc">>};
-sort_field({Name, BadDir}) when is_binary(Name) ->
- ?MANGO_ERROR({invalid_sort_dir, BadDir});
-sort_field(Else) ->
- ?MANGO_ERROR({invalid_sort_field, Else}).
-
-validate({Props}) ->
- % Assert each field is in the same direction
- % until we support mixed direction sorts.
- Dirs = [D || {_, D} <- Props],
- case lists:usort(Dirs) of
- [] ->
- ok;
- [_] ->
- ok;
- _ ->
- ?MANGO_ERROR({unsupported, mixed_sort})
- end.
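mango_sort normalizes every sort entry to a {Field, Direction} pair, defaulting bare field names to "asc" and rejecting mixed directions. Below is a rough Python equivalent of that normalization, assuming the sort spec arrives as JSON-decoded values; it is a sketch of the rule, not the module's API.

# Rough Python equivalent of the deleted sort_field/1 and validate/1 above,
# for sort specs given as bare strings or single-key {"field": "asc"|"desc"} dicts.

def normalize_sort(fields):
    pairs = []
    for field in fields:
        if isinstance(field, str) and field:
            pairs.append((field, "asc"))  # bare name defaults to ascending
        elif isinstance(field, dict) and len(field) == 1:
            (name, direction), = field.items()
            if direction not in ("asc", "desc"):
                raise ValueError("invalid_sort_dir: %r" % (direction,))
            pairs.append((name, direction))
        else:
            raise ValueError("invalid_sort_field: %r" % (field,))
    # Mixed-direction sorts are rejected, mirroring validate/1.
    if len({d for _, d in pairs}) > 1:
        raise ValueError("unsupported: mixed_sort")
    return pairs

print(normalize_sort(["name", {"age": "asc"}]))
# -> [('name', 'asc'), ('age', 'asc')]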
diff --git a/src/mango/src/mango_sup.erl b/src/mango/src/mango_sup.erl
deleted file mode 100644
index c0b04d9c9..000000000
--- a/src/mango/src/mango_sup.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_sup).
--behaviour(supervisor).
--export([init/1]).
-
--export([start_link/1]).
-
-start_link(Args) ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
-
-init([]) ->
- {ok, {{one_for_one, 3, 10}, couch_epi:register_service(mango_epi, [])}}.
diff --git a/src/mango/src/mango_util.erl b/src/mango/src/mango_util.erl
deleted file mode 100644
index 609a9dbc0..000000000
--- a/src/mango/src/mango_util.erl
+++ /dev/null
@@ -1,405 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_util).
-
--export([
- open_doc/2,
- open_ddocs/1,
- load_ddoc/2,
- load_ddoc/3,
-
- defer/3,
- do_defer/3,
-
- assert_ejson/1,
-
- to_lower/1,
-
- enc_dbname/1,
- dec_dbname/1,
-
- enc_hex/1,
- dec_hex/1,
-
- lucene_escape_field/1,
- lucene_escape_query_value/1,
- lucene_escape_user/1,
- is_number_string/1,
-
- has_suffix/2,
-
- join/2,
-
- parse_field/1,
-
- cached_re/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
-
--define(DIGITS, "(\\p{N}+)").
--define(HEXDIGITS, "([0-9a-fA-F]+)").
--define(EXP, "[eE][+-]?" ++ ?DIGITS).
--define(NUMSTRING,
- "[\\x00-\\x20]*" ++ "[+-]?(" ++ "NaN|" ++
- "Infinity|" ++ "(((" ++
- ?DIGITS ++
- "(\\.)?(" ++
- ?DIGITS ++
- "?)(" ++
- ?EXP ++
- ")?)|" ++
- "(\\.(" ++
- ?DIGITS ++
- ")(" ++
- ?EXP ++
- ")?)|" ++
- "((" ++
- "(0[xX]" ++
- ?HEXDIGITS ++
- "(\\.)?)|" ++
- "(0[xX]" ++
- ?HEXDIGITS ++
- "?(\\.)" ++
- ?HEXDIGITS ++
- ")" ++
- ")[pP][+-]?" ++ ?DIGITS ++ "))" ++ "[fFdD]?))" ++ "[\\x00-\\x20]*"
-).
-
-open_doc(Db, DocId) ->
- open_doc(Db, DocId, [deleted, ejson_body]).
-
-open_doc(Db, DocId, Options) ->
- case mango_util:defer(fabric, open_doc, [Db, DocId, Options]) of
- {ok, Doc} ->
- {ok, Doc};
- {not_found, _} ->
- not_found;
- _ ->
- ?MANGO_ERROR({error_loading_doc, DocId})
- end.
-
-open_ddocs(Db) ->
- case mango_util:defer(fabric, design_docs, [Db]) of
- {ok, Docs} ->
- {ok, Docs};
- _ ->
- ?MANGO_ERROR(error_loading_ddocs)
- end.
-
-load_ddoc(Db, DDocId) ->
- load_ddoc(Db, DDocId, [deleted, ejson_body]).
-
-load_ddoc(Db, DDocId, DbOpts) ->
- case open_doc(Db, DDocId, DbOpts) of
- {ok, Doc} ->
- {ok, check_lang(Doc)};
- not_found ->
- Body =
- {[
- {<<"language">>, <<"query">>}
- ]},
- {ok, #doc{id = DDocId, body = Body}}
- end.
-
-defer(Mod, Fun, Args) ->
- {Pid, Ref} = erlang:spawn_monitor(?MODULE, do_defer, [Mod, Fun, Args]),
- receive
- {'DOWN', Ref, process, Pid, {mango_defer_ok, Value}} ->
- Value;
- {'DOWN', Ref, process, Pid, {mango_defer_throw, Value}} ->
- erlang:throw(Value);
- {'DOWN', Ref, process, Pid, {mango_defer_error, Value}} ->
- erlang:error(Value);
- {'DOWN', Ref, process, Pid, {mango_defer_exit, Value}} ->
- erlang:exit(Value)
- end.
-
-do_defer(Mod, Fun, Args) ->
- try erlang:apply(Mod, Fun, Args) of
- Resp ->
- erlang:exit({mango_defer_ok, Resp})
- catch
- ?STACKTRACE(throw, Error, Stack)
- couch_log:error("Defered error: ~w~n ~p", [{throw, Error}, Stack]),
- erlang:exit({mango_defer_throw, Error});
- ?STACKTRACE(error, Error, Stack)
- couch_log:error("Defered error: ~w~n ~p", [{error, Error}, Stack]),
- erlang:exit({mango_defer_error, Error});
- ?STACKTRACE(exit, Error, Stack)
- couch_log:error("Defered error: ~w~n ~p", [{exit, Error}, Stack]),
- erlang:exit({mango_defer_exit, Error})
- end.
-
-assert_ejson({Props}) ->
- assert_ejson_obj(Props);
-assert_ejson(Vals) when is_list(Vals) ->
- assert_ejson_arr(Vals);
-assert_ejson(null) ->
- true;
-assert_ejson(true) ->
- true;
-assert_ejson(false) ->
- true;
-assert_ejson(String) when is_binary(String) ->
- true;
-assert_ejson(Number) when is_number(Number) ->
- true;
-assert_ejson(_Else) ->
- false.
-
-assert_ejson_obj([]) ->
- true;
-assert_ejson_obj([{Key, Val} | Rest]) when is_binary(Key) ->
- case assert_ejson(Val) of
- true ->
- assert_ejson_obj(Rest);
- false ->
- false
- end;
-assert_ejson_obj(_Else) ->
- false.
-
-assert_ejson_arr([]) ->
- true;
-assert_ejson_arr([Val | Rest]) ->
- case assert_ejson(Val) of
- true ->
- assert_ejson_arr(Rest);
- false ->
- false
- end.
-
-check_lang(#doc{id = Id, deleted = true}) ->
- Body =
- {[
- {<<"language">>, <<"query">>}
- ]},
- #doc{id = Id, body = Body};
-check_lang(#doc{body = {Props}} = Doc) ->
- case lists:keyfind(<<"language">>, 1, Props) of
- {<<"language">>, <<"query">>} ->
- Doc;
- Else ->
- ?MANGO_ERROR({invalid_ddoc_lang, Else})
- end.
-
-to_lower(Key) when is_binary(Key) ->
- KStr = binary_to_list(Key),
- KLower = string:to_lower(KStr),
- list_to_binary(KLower).
-
-enc_dbname(<<>>) ->
- <<>>;
-enc_dbname(<<A:8/integer, Rest/binary>>) ->
- Bytes = enc_db_byte(A),
- Tail = enc_dbname(Rest),
- <<Bytes/binary, Tail/binary>>.
-
-enc_db_byte(N) when N >= $a, N =< $z -> <<N>>;
-enc_db_byte(N) when N >= $0, N =< $9 -> <<N>>;
-enc_db_byte(N) when N == $/; N == $_; N == $- -> <<N>>;
-enc_db_byte(N) ->
- H = enc_hex_byte(N div 16),
- L = enc_hex_byte(N rem 16),
- <<$$, H:8/integer, L:8/integer>>.
-
-dec_dbname(<<>>) ->
- <<>>;
-dec_dbname(<<$$, _:8/integer>>) ->
- throw(invalid_dbname_encoding);
-dec_dbname(<<$$, H:8/integer, L:8/integer, Rest/binary>>) ->
- Byte = (dec_hex_byte(H) bsl 4) bor dec_hex_byte(L),
- Tail = dec_dbname(Rest),
- <<Byte:8/integer, Tail/binary>>;
-dec_dbname(<<N:8/integer, Rest/binary>>) ->
- Tail = dec_dbname(Rest),
- <<N:8/integer, Tail/binary>>.
-
-enc_hex(<<>>) ->
- <<>>;
-enc_hex(<<V:8/integer, Rest/binary>>) ->
- H = enc_hex_byte(V div 16),
- L = enc_hex_byte(V rem 16),
- Tail = enc_hex(Rest),
- <<H:8/integer, L:8/integer, Tail/binary>>.
-
-enc_hex_byte(N) when N >= 0, N < 10 -> $0 + N;
-enc_hex_byte(N) when N >= 10, N < 16 -> $a + (N - 10);
-enc_hex_byte(N) -> throw({invalid_hex_value, N}).
-
-dec_hex(<<>>) ->
- <<>>;
-dec_hex(<<_:8/integer>>) ->
- throw(invalid_hex_string);
-dec_hex(<<H:8/integer, L:8/integer, Rest/binary>>) ->
- Byte = (dec_hex_byte(H) bsl 4) bor dec_hex_byte(L),
- Tail = dec_hex(Rest),
- <<Byte:8/integer, Tail/binary>>.
-
-dec_hex_byte(N) when N >= $0, N =< $9 -> (N - $0);
-dec_hex_byte(N) when N >= $a, N =< $f -> (N - $a) + 10;
-dec_hex_byte(N) when N >= $A, N =< $F -> (N - $A) + 10;
-dec_hex_byte(N) -> throw({invalid_hex_character, N}).
-
-lucene_escape_field(Bin) when is_binary(Bin) ->
- Str = binary_to_list(Bin),
- Enc = lucene_escape_field(Str),
- iolist_to_binary(Enc);
-lucene_escape_field([H | T]) when is_number(H), H >= 0, H =< 255 ->
- if
- H >= $a, $z >= H ->
- [H | lucene_escape_field(T)];
- H >= $A, $Z >= H ->
- [H | lucene_escape_field(T)];
- H >= $0, $9 >= H ->
- [H | lucene_escape_field(T)];
- true ->
- Hi = enc_hex_byte(H div 16),
- Lo = enc_hex_byte(H rem 16),
- [$_, Hi, Lo | lucene_escape_field(T)]
- end;
-lucene_escape_field([]) ->
- [].
-
-lucene_escape_query_value(IoList) when is_list(IoList) ->
- lucene_escape_query_value(iolist_to_binary(IoList));
-lucene_escape_query_value(Bin) when is_binary(Bin) ->
- IoList = lucene_escape_qv(Bin),
- iolist_to_binary(IoList).
-
-% This escapes the special Lucene query characters
-% listed below as well as any whitespace.
-%
-% + - && || ! ( ) { } [ ] ^ ~ * ? : \ " /
-%
-
-lucene_escape_qv(<<>>) ->
- [];
-lucene_escape_qv(<<"&&", Rest/binary>>) ->
- ["\\&&" | lucene_escape_qv(Rest)];
-lucene_escape_qv(<<"||", Rest/binary>>) ->
- ["\\||" | lucene_escape_qv(Rest)];
-lucene_escape_qv(<<C, Rest/binary>>) ->
- NeedsEscape = "+-(){}[]!^~*?:/\\\" \t\r\n",
- Out =
- case lists:member(C, NeedsEscape) of
- true -> ["\\", C];
- false -> [C]
- end,
- Out ++ lucene_escape_qv(Rest).
-
-lucene_escape_user(Field) ->
- {ok, Path} = parse_field(Field),
- Escaped = [mango_util:lucene_escape_field(P) || P <- Path],
- iolist_to_binary(join(".", Escaped)).
-
-has_suffix(Bin, Suffix) when is_binary(Bin), is_binary(Suffix) ->
- SBin = size(Bin),
- SSuffix = size(Suffix),
- if
- SBin < SSuffix ->
- false;
- true ->
- PSize = SBin - SSuffix,
- case Bin of
- <<_:PSize/binary, Suffix/binary>> ->
- true;
- _ ->
- false
- end
- end.
-
-join(_Sep, []) ->
- [];
-join(_Sep, [Item]) ->
- [Item];
-join(Sep, [Item | Rest]) ->
- [Item, Sep | join(Sep, Rest)].
-
-is_number_string(Value) when is_binary(Value) ->
- is_number_string(binary_to_list(Value));
-is_number_string(Value) when is_list(Value) ->
- MP = cached_re(mango_numstring_re, ?NUMSTRING),
- case re:run(Value, MP) of
- nomatch ->
- false;
- _ ->
- true
- end.
-
-cached_re(Name, RE) ->
- case mochiglobal:get(Name) of
- undefined ->
- {ok, MP} = re:compile(RE),
- ok = mochiglobal:put(Name, MP),
- MP;
- MP ->
- MP
- end.
-
-parse_field(Field) ->
- case binary:match(Field, <<"\\">>, []) of
- nomatch ->
- % Fast path, no regex required
- {ok, check_non_empty(Field, binary:split(Field, <<".">>, [global]))};
- _ ->
- parse_field_slow(Field)
- end.
-
-parse_field_slow(Field) ->
- Path = lists:map(
- fun
- (P) when P =:= <<>> ->
- ?MANGO_ERROR({invalid_field_name, Field});
- (P) ->
- re:replace(P, <<"\\\\">>, <<>>, [global, {return, binary}])
- end,
- re:split(Field, <<"(?<!\\\\)\\.">>)
- ),
- {ok, Path}.
-
-check_non_empty(Field, Parts) ->
- case lists:member(<<>>, Parts) of
- true ->
- ?MANGO_ERROR({invalid_field_name, Field});
- false ->
- Parts
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-parse_field_test() ->
- ?assertEqual({ok, [<<"ab">>]}, parse_field(<<"ab">>)),
- ?assertEqual({ok, [<<"a">>, <<"b">>]}, parse_field(<<"a.b">>)),
- ?assertEqual({ok, [<<"a.b">>]}, parse_field(<<"a\\.b">>)),
- ?assertEqual({ok, [<<"a">>, <<"b">>, <<"c">>]}, parse_field(<<"a.b.c">>)),
- ?assertEqual({ok, [<<"a">>, <<"b.c">>]}, parse_field(<<"a.b\\.c">>)),
- Exception = {mango_error, ?MODULE, {invalid_field_name, <<"a..b">>}},
- ?assertThrow(Exception, parse_field(<<"a..b">>)).
-
-is_number_string_test() ->
- ?assert(is_number_string("0")),
- ?assert(is_number_string("1")),
- ?assert(is_number_string("1.0")),
- ?assert(is_number_string("1.0E10")),
- ?assert(is_number_string("0d")),
- ?assert(is_number_string("-1")),
- ?assert(is_number_string("-1.0")),
- ?assertNot(is_number_string("hello")),
- ?assertNot(is_number_string("")),
- ?assertMatch({match, _}, re:run("1.0", mochiglobal:get(mango_numstring_re))).
-
--endif.
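parse_field/1 splits a dotted field name on periods that are not escaped with a backslash, so "a.b" addresses a nested field while "a\.b" addresses a single field literally named "a.b", exactly what the EUnit tests above assert. The same splitting rule can be sketched in Python with a lookbehind regex; this is an illustration of the rule, not the Erlang code.

import re

# Sketch of the splitting rule implemented by parse_field/1 above: split on
# periods not preceded by a backslash, reject empty parts, then drop the escape.
def parse_field(field):
    parts = re.split(r"(?<!\\)\.", field)
    if "" in parts:
        raise ValueError("invalid_field_name: %r" % (field,))
    return [p.replace("\\.", ".") for p in parts]

assert parse_field("a.b") == ["a", "b"]
assert parse_field("a\\.b") == ["a.b"]        # literal key "a.b"
assert parse_field("a.b\\.c") == ["a", "b.c"]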
diff --git a/src/mango/test/01-index-crud-test.py b/src/mango/test/01-index-crud-test.py
deleted file mode 100644
index dd70e7eea..000000000
--- a/src/mango/test/01-index-crud-test.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import random
-
-import mango
-import copy
-import unittest
-
-DOCS = [
- {"_id": "1", "name": "Jimi", "age": 10, "cars": 1},
- {"_id": "2", "name": "kate", "age": 8, "cars": 0},
-]
-
-
-class IndexCrudTests(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
-
- def test_bad_fields(self):
- bad_fields = [
- None,
- True,
- False,
- "bing",
- 2.0,
- {"foo": "bar"},
- [{"foo": 2}],
- [{"foo": "asc", "bar": "desc"}],
- [{"foo": "asc"}, {"bar": "desc"}],
- [""],
- ]
- for fields in bad_fields:
- try:
- self.db.create_index(fields)
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("bad create index")
-
- def test_bad_types(self):
- bad_types = [
- None,
- True,
- False,
- 1.5,
- "foo", # Future support
- "geo", # Future support
- {"foo": "bar"},
- ["baz", 3.0],
- ]
- for bt in bad_types:
- try:
- self.db.create_index(["foo"], idx_type=bt)
- except Exception as e:
- self.assertEqual(
- e.response.status_code, 400, (bt, e.response.status_code)
- )
- else:
- raise AssertionError("bad create index")
-
- def test_bad_names(self):
- bad_names = ["", True, False, 1.5, {"foo": "bar"}, [None, False]]
- for bn in bad_names:
- try:
- self.db.create_index(["foo"], name=bn)
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("bad create index")
-
- def test_bad_ddocs(self):
- bad_ddocs = ["", True, False, 1.5, {"foo": "bar"}, [None, False]]
- for bd in bad_ddocs:
- try:
- self.db.create_index(["foo"], ddoc=bd)
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("bad create index")
-
- def test_create_idx_01(self):
- fields = ["foo", "bar"]
- ret = self.db.create_index(fields, name="idx_01")
- assert ret is True
- for idx in self.db.list_indexes():
- if idx["name"] != "idx_01":
- continue
- self.assertEqual(idx["def"]["fields"], [{"foo": "asc"}, {"bar": "asc"}])
- return
- raise AssertionError("index not created")
-
- def test_create_idx_01_exists(self):
- fields = ["foo", "bar"]
- ret = self.db.create_index(fields, name="idx_01")
- assert ret is True
- ret = self.db.create_index(fields, name="idx_01")
- assert ret is False
-
- def test_create_idx_02(self):
- fields = ["baz", "foo"]
- ret = self.db.create_index(fields, name="idx_02")
- assert ret is True
- for idx in self.db.list_indexes():
- if idx["name"] != "idx_02":
- continue
- self.assertEqual(idx["def"]["fields"], [{"baz": "asc"}, {"foo": "asc"}])
- return
- raise AssertionError("index not created")
-
- def test_read_idx_doc(self):
- self.db.create_index(["foo", "bar"], name="idx_01")
- self.db.create_index(["hello", "bar"])
- for idx in self.db.list_indexes():
- if idx["type"] == "special":
- continue
- ddocid = idx["ddoc"]
- doc = self.db.open_doc(ddocid)
- self.assertEqual(doc["_id"], ddocid)
- info = self.db.ddoc_info(ddocid)
- self.assertEqual(info["name"], ddocid.split("_design/")[-1])
-
- def test_delete_idx_escaped(self):
- self.db.create_index(["foo", "bar"], name="idx_01")
- pre_indexes = self.db.list_indexes()
- ret = self.db.create_index(["bing"], name="idx_del_1")
- assert ret is True
- for idx in self.db.list_indexes():
- if idx["name"] != "idx_del_1":
- continue
- self.assertEqual(idx["def"]["fields"], [{"bing": "asc"}])
- self.db.delete_index(idx["ddoc"].replace("/", "%2F"), idx["name"])
- post_indexes = self.db.list_indexes()
- self.assertEqual(pre_indexes, post_indexes)
-
- def test_delete_idx_unescaped(self):
- pre_indexes = self.db.list_indexes()
- ret = self.db.create_index(["bing"], name="idx_del_2")
- assert ret is True
- for idx in self.db.list_indexes():
- if idx["name"] != "idx_del_2":
- continue
- self.assertEqual(idx["def"]["fields"], [{"bing": "asc"}])
- self.db.delete_index(idx["ddoc"], idx["name"])
- post_indexes = self.db.list_indexes()
- self.assertEqual(pre_indexes, post_indexes)
-
- def test_delete_idx_no_design(self):
- pre_indexes = self.db.list_indexes()
- ret = self.db.create_index(["bing"], name="idx_del_3")
- assert ret is True
- for idx in self.db.list_indexes():
- if idx["name"] != "idx_del_3":
- continue
- self.assertEqual(idx["def"]["fields"], [{"bing": "asc"}])
- self.db.delete_index(idx["ddoc"].split("/")[-1], idx["name"])
- post_indexes = self.db.list_indexes()
- self.assertEqual(pre_indexes, post_indexes)
-
- def test_bulk_delete(self):
- fields = ["field1"]
- ret = self.db.create_index(fields, name="idx_01")
- assert ret is True
-
- fields = ["field2"]
- ret = self.db.create_index(fields, name="idx_02")
- assert ret is True
-
- fields = ["field3"]
- ret = self.db.create_index(fields, name="idx_03")
- assert ret is True
-
- docids = []
-
- for idx in self.db.list_indexes():
- if idx["ddoc"] is not None:
- docids.append(idx["ddoc"])
-
- docids.append("_design/this_is_not_an_index_name")
-
- ret = self.db.bulk_delete(docids)
-
- self.assertEqual(ret["fail"][0]["id"], "_design/this_is_not_an_index_name")
- self.assertEqual(len(ret["success"]), 3)
-
- for idx in self.db.list_indexes():
- assert idx["type"] != "json"
- assert idx["type"] != "text"
-
- def test_recreate_index(self):
- pre_indexes = self.db.list_indexes()
- for i in range(5):
- ret = self.db.create_index(["bing"], name="idx_recreate")
- assert ret is True
- for idx in self.db.list_indexes():
- if idx["name"] != "idx_recreate":
- continue
- self.assertEqual(idx["def"]["fields"], [{"bing": "asc"}])
- self.db.delete_index(idx["ddoc"], idx["name"])
- break
- post_indexes = self.db.list_indexes()
- self.assertEqual(pre_indexes, post_indexes)
-
- def test_delete_missing(self):
- # Missing design doc
- try:
- self.db.delete_index("this_is_not_a_design_doc_id", "foo")
- except Exception as e:
- self.assertEqual(e.response.status_code, 404)
- else:
- raise AssertionError("bad index delete")
-
- # Missing view name
- ret = self.db.create_index(["fields"], name="idx_01")
- indexes = self.db.list_indexes()
- not_special = [idx for idx in indexes if idx["type"] != "special"]
- idx = random.choice(not_special)
- ddocid = idx["ddoc"].split("/")[-1]
- try:
- self.db.delete_index(ddocid, "this_is_not_an_index_name")
- except Exception as e:
- self.assertEqual(e.response.status_code, 404)
- else:
- raise AssertionError("bad index delete")
-
- # Bad view type
- try:
- self.db.delete_index(ddocid, idx["name"], idx_type="not_a_real_type")
- except Exception as e:
- self.assertEqual(e.response.status_code, 404)
- else:
- raise AssertionError("bad index delete")
-
- def test_limit_skip_index(self):
- fields = ["field1"]
- ret = self.db.create_index(fields, name="idx_01")
- assert ret is True
-
- fields = ["field2"]
- ret = self.db.create_index(fields, name="idx_02")
- assert ret is True
-
- fields = ["field3"]
- ret = self.db.create_index(fields, name="idx_03")
- assert ret is True
-
- fields = ["field4"]
- ret = self.db.create_index(fields, name="idx_04")
- assert ret is True
-
- fields = ["field5"]
- ret = self.db.create_index(fields, name="idx_05")
- assert ret is True
-
- self.assertEqual(len(self.db.list_indexes(limit=2)), 2)
- self.assertEqual(len(self.db.list_indexes(limit=5, skip=4)), 2)
- self.assertEqual(len(self.db.list_indexes(skip=5)), 1)
- self.assertEqual(len(self.db.list_indexes(skip=6)), 0)
- self.assertEqual(len(self.db.list_indexes(skip=100)), 0)
- self.assertEqual(len(self.db.list_indexes(limit=10000000)), 6)
-
- try:
- self.db.list_indexes(skip=-1)
- except Exception as e:
- self.assertEqual(e.response.status_code, 500)
-
- try:
- self.db.list_indexes(limit=0)
- except Exception as e:
- self.assertEqual(e.response.status_code, 500)
-
- def test_out_of_sync(self):
- self.db.save_docs(copy.deepcopy(DOCS))
- self.db.create_index(["age"], name="age")
-
- selector = {"age": {"$gt": 0}}
- docs = self.db.find(
- selector, use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4"
- )
- self.assertEqual(len(docs), 2)
-
- self.db.delete_doc("1")
-
- docs1 = self.db.find(
- selector,
- update="False",
- use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4",
- )
- self.assertEqual(len(docs1), 1)
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class IndexCrudTextTests(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
-
- def test_create_text_idx(self):
- fields = [
- {"name": "stringidx", "type": "string"},
- {"name": "booleanidx", "type": "boolean"},
- ]
- ret = self.db.create_text_index(fields=fields, name="text_idx_01")
- assert ret is True
- for idx in self.db.list_indexes():
- if idx["name"] != "text_idx_01":
- continue
- self.assertEqual(
- idx["def"]["fields"],
- [{"stringidx": "string"}, {"booleanidx": "boolean"}],
- )
- return
- raise AssertionError("index not created")
-
- def test_create_bad_text_idx(self):
- bad_fields = [
- True,
- False,
- "bing",
- 2.0,
- ["foo", "bar"],
- [{"name": "foo2"}],
- [{"name": "foo3", "type": "garbage"}],
- [{"type": "number"}],
- [{"name": "age", "type": "number"}, {"name": "bad"}],
- [{"name": "age", "type": "number"}, "bla"],
- [{"name": "", "type": "number"}, "bla"],
- ]
- for fields in bad_fields:
- try:
- self.db.create_text_index(fields=fields)
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("bad create text index")
-
- def test_limit_skip_index(self):
- fields = ["field1"]
- ret = self.db.create_index(fields, name="idx_01")
- assert ret is True
-
- fields = ["field2"]
- ret = self.db.create_index(fields, name="idx_02")
- assert ret is True
-
- fields = ["field3"]
- ret = self.db.create_index(fields, name="idx_03")
- assert ret is True
-
- fields = ["field4"]
- ret = self.db.create_index(fields, name="idx_04")
- assert ret is True
-
- fields = [
- {"name": "stringidx", "type": "string"},
- {"name": "booleanidx", "type": "boolean"},
- ]
- ret = self.db.create_text_index(fields=fields, name="idx_05")
- assert ret is True
-
- self.assertEqual(len(self.db.list_indexes(limit=2)), 2)
- self.assertEqual(len(self.db.list_indexes(limit=5, skip=4)), 2)
- self.assertEqual(len(self.db.list_indexes(skip=5)), 1)
- self.assertEqual(len(self.db.list_indexes(skip=6)), 0)
- self.assertEqual(len(self.db.list_indexes(skip=100)), 0)
- self.assertEqual(len(self.db.list_indexes(limit=10000000)), 6)
-
- try:
- self.db.list_indexes(skip=-1)
- except Exception as e:
- self.assertEqual(e.response.status_code, 500)
-
- try:
- self.db.list_indexes(limit=0)
- except Exception as e:
- self.assertEqual(e.response.status_code, 500)
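The CRUD tests above drive Mango through the harness's create_index/list_indexes/delete_index helpers, which wrap the _index HTTP endpoint. A bare-bones sketch of the equivalent raw requests follows; the base URL, database name, and adm:pass credentials are assumptions matching a typical local dev cluster, not values taken from this diff.

import requests

# Sketch of the HTTP calls behind create_index/list_indexes.
BASE = "http://127.0.0.1:15984"   # assumed dev-cluster node
AUTH = ("adm", "pass")            # assumed admin credentials
DB = "mango_demo"

requests.put(f"{BASE}/{DB}", auth=AUTH)

# POST /{db}/_index creates (or reuses) a JSON index.
resp = requests.post(
    f"{BASE}/{DB}/_index",
    auth=AUTH,
    json={"index": {"fields": ["foo", "bar"]}, "name": "idx_01", "type": "json"},
)
print(resp.json())  # e.g. {"result": "created", "id": "_design/...", "name": "idx_01"}

# GET /{db}/_index lists indexes, including the built-in "special" _all_docs one.
print(requests.get(f"{BASE}/{DB}/_index", auth=AUTH).json()["indexes"])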
diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py
deleted file mode 100644
index afdba03a2..000000000
--- a/src/mango/test/02-basic-find-test.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# -*- coding: latin-1 -*-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import mango
-import user_docs
-
-
-class BasicFindTests(mango.UserDocsTests):
- def test_bad_selector(self):
- bad_selectors = [
- None,
- True,
- False,
- 1.0,
- "foobarbaz",
- {"foo": {"$not_an_op": 2}},
- {"$gt": 2},
- [None, "bing"],
- ]
- for bs in bad_selectors:
- try:
- self.db.find(bs)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad find")
-
- def test_bad_limit(self):
- bad_limits = [None, True, False, -1, 1.2, "no limit!", {"foo": "bar"}, [2]]
- for bl in bad_limits:
- try:
- self.db.find({"int": {"$gt": 2}}, limit=bl)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad find")
-
- def test_bad_skip(self):
- bad_skips = [None, True, False, -3, 1.2, "no limit!", {"foo": "bar"}, [2]]
- for bs in bad_skips:
- try:
- self.db.find({"int": {"$gt": 2}}, skip=bs)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad find")
-
- def test_bad_sort(self):
- bad_sorts = [
- None,
- True,
- False,
- 1.2,
- "no limit!",
- {"foo": "bar"},
- [2],
- [{"foo": "asc", "bar": "asc"}],
- [{"foo": "asc"}, {"bar": "desc"}],
- ]
- for bs in bad_sorts:
- try:
- self.db.find({"int": {"$gt": 2}}, sort=bs)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad find")
-
- def test_bad_fields(self):
- bad_fields = [
- None,
- True,
- False,
- 1.2,
- "no limit!",
- {"foo": "bar"},
- [2],
- [[]],
- ["foo", 2.0],
- ]
- for bf in bad_fields:
- try:
- self.db.find({"int": {"$gt": 2}}, fields=bf)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad find")
-
- def test_bad_r(self):
- bad_rs = [None, True, False, 1.2, "no limit!", {"foo": "bar"}, [2]]
- for br in bad_rs:
- try:
- self.db.find({"int": {"$gt": 2}}, r=br)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad find")
-
- def test_bad_conflicts(self):
- bad_conflicts = [None, 1.2, "no limit!", {"foo": "bar"}, [2]]
- for bc in bad_conflicts:
- try:
- self.db.find({"int": {"$gt": 2}}, conflicts=bc)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad find")
-
- def test_simple_find(self):
- docs = self.db.find({"age": {"$lt": 35}})
- assert len(docs) == 3
- assert docs[0]["user_id"] == 9
- assert docs[1]["user_id"] == 1
- assert docs[2]["user_id"] == 7
-
- def test_multi_cond_and(self):
- docs = self.db.find({"manager": True, "location.city": "Longbranch"})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 7
-
- def test_multi_cond_duplicate_field(self):
- # need to write the JSON body by hand, since a Python dict won't allow duplicate keys
- body = (
- '{"selector":{"location.city":{"$regex": "^L+"},'
- '"location.city":{"$exists":true}}}'
- )
- r = self.db.sess.post(self.db.path("_find"), data=body)
- r.raise_for_status()
- docs = r.json()["docs"]
-
- # expectation is that only the second instance
- # of the "location.city" field is used
- self.assertEqual(len(docs), 15)
-
- def test_multi_cond_or(self):
- docs = self.db.find(
- {
- "$and": [
- {"age": {"$gte": 75}},
- {"$or": [{"name.first": "Mathis"}, {"name.first": "Whitley"}]},
- ]
- }
- )
- assert len(docs) == 2
- assert docs[0]["user_id"] == 11
- assert docs[1]["user_id"] == 13
-
- def test_multi_col_idx(self):
- docs = self.db.find(
- {
- "location.state": {"$and": [{"$gt": "Hawaii"}, {"$lt": "Maine"}]},
- "location.city": {"$lt": "Longbranch"},
- }
- )
- assert len(docs) == 1
- assert docs[0]["user_id"] == 6
-
- def test_missing_not_indexed(self):
- docs = self.db.find({"favorites.3": "C"})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 6
-
- docs = self.db.find({"favorites.3": None})
- assert len(docs) == 0
-
- docs = self.db.find({"twitter": {"$gt": None}})
- assert len(docs) == 4
- assert docs[0]["user_id"] == 1
- assert docs[1]["user_id"] == 4
- assert docs[2]["user_id"] == 0
- assert docs[3]["user_id"] == 13
-
- def test_limit(self):
- docs = self.db.find({"age": {"$gt": 0}})
- assert len(docs) == 15
- for l in [0, 1, 5, 14]:
- docs = self.db.find({"age": {"$gt": 0}}, limit=l)
- assert len(docs) == l
-
- def test_skip(self):
- docs = self.db.find({"age": {"$gt": 0}})
- assert len(docs) == 15
- for s in [0, 1, 5, 14]:
- docs = self.db.find({"age": {"$gt": 0}}, skip=s)
- assert len(docs) == (15 - s)
-
- def test_sort(self):
- docs1 = self.db.find({"age": {"$gt": 0}}, sort=[{"age": "asc"}])
- docs2 = list(sorted(docs1, key=lambda d: d["age"]))
- assert docs1 is not docs2 and docs1 == docs2
-
- docs1 = self.db.find({"age": {"$gt": 0}}, sort=[{"age": "desc"}])
- docs2 = list(reversed(sorted(docs1, key=lambda d: d["age"])))
- assert docs1 is not docs2 and docs1 == docs2
-
- def test_sort_desc_complex(self):
- docs = self.db.find(
- {
- "company": {"$lt": "M"},
- "$or": [{"company": "Dreamia"}, {"manager": True}],
- },
- sort=[{"company": "desc"}, {"manager": "desc"}],
- )
-
- companies_returned = list(d["company"] for d in docs)
- desc_companies = sorted(companies_returned, reverse=True)
- self.assertEqual(desc_companies, companies_returned)
-
- def test_sort_with_primary_sort_not_in_selector(self):
- try:
- docs = self.db.find(
- {"name.last": {"$lt": "M"}}, sort=[{"name.first": "desc"}]
- )
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- resp = e.response.json()
- self.assertEqual(resp["error"], "no_usable_index")
- else:
- raise AssertionError("expected find error")
-
- def test_sort_exists_true(self):
- docs1 = self.db.find(
- {"age": {"$gt": 0, "$exists": True}}, sort=[{"age": "asc"}]
- )
- docs2 = list(sorted(docs1, key=lambda d: d["age"]))
- assert docs1 is not docs2 and docs1 == docs2
-
- def test_sort_desc_complex_error(self):
- try:
- self.db.find(
- {
- "company": {"$lt": "M"},
- "$or": [{"company": "Dreamia"}, {"manager": True}],
- },
- sort=[{"company": "desc"}],
- )
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- resp = e.response.json()
- self.assertEqual(resp["error"], "no_usable_index")
- else:
- raise AssertionError("expected find error")
-
- def test_fields(self):
- selector = {"age": {"$gt": 0}}
- docs = self.db.find(selector, fields=["user_id", "location.address"])
- for d in docs:
- assert sorted(d.keys()) == ["location", "user_id"]
- assert sorted(d["location"].keys()) == ["address"]
-
- def test_r(self):
- for r in [1, 2, 3]:
- docs = self.db.find({"age": {"$gt": 0}}, r=r)
- assert len(docs) == 15
-
- def test_empty(self):
- docs = self.db.find({})
- # 15 users
- assert len(docs) == 15
-
- def test_empty_subsel(self):
- docs = self.db.find({"_id": {"$gt": None}, "location": {}})
- assert len(docs) == 0
-
- def test_empty_subsel_match(self):
- self.db.save_docs([{"user_id": "eo", "empty_obj": {}}])
- docs = self.db.find({"_id": {"$gt": None}, "empty_obj": {}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == "eo"
-
- def test_unsatisfiable_range(self):
- docs = self.db.find({"$and": [{"age": {"$gt": 0}}, {"age": {"$lt": 0}}]})
- assert len(docs) == 0
-
- def test_explain_view_args(self):
- explain = self.db.find({"age": {"$gt": 0}}, fields=["manager"], explain=True)
- assert explain["mrargs"]["stable"] == False
- assert explain["mrargs"]["update"] == True
- assert explain["mrargs"]["reduce"] == False
- assert explain["mrargs"]["start_key"] == [0]
- assert explain["mrargs"]["end_key"] == ["<MAX>"]
- assert explain["mrargs"]["include_docs"] == True
-
- def test_sort_with_all_docs(self):
- explain = self.db.find(
- {"_id": {"$gt": 0}, "age": {"$gt": 0}}, sort=["_id"], explain=True
- )
- self.assertEqual(explain["index"]["type"], "special")
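All of the find tests above go through db.find, a thin wrapper around POST /{db}/_find whose body carries the selector plus the options exercised here (fields, limit, skip, sort, r, conflicts, explain). A minimal sketch of the raw request follows, reusing the assumed dev-cluster URL and credentials from the previous sketch; adding a "sort" key would additionally require a matching index.

import requests

# Minimal sketch of the raw _find request that db.find() wraps.
body = {
    "selector": {"age": {"$lt": 35}},
    "fields": ["user_id", "location.address"],
    "limit": 5,
}
resp = requests.post(
    "http://127.0.0.1:15984/mango_demo/_find",   # assumed dev-cluster URL
    auth=("adm", "pass"),                        # assumed credentials
    json=body,
)
print(resp.json()["docs"])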
diff --git a/src/mango/test/03-operator-test.py b/src/mango/test/03-operator-test.py
deleted file mode 100644
index a67ef91f3..000000000
--- a/src/mango/test/03-operator-test.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import unittest
-
-
-class OperatorTests:
- def assertUserIds(self, user_ids, docs):
- user_ids_returned = list(d["user_id"] for d in docs)
- user_ids.sort()
- user_ids_returned.sort()
- self.assertEqual(user_ids, user_ids_returned)
-
- def test_all(self):
- docs = self.db.find(
- {"manager": True, "favorites": {"$all": ["Lisp", "Python"]}}
- )
- self.assertEqual(len(docs), 3)
- user_ids = [2, 12, 9]
- self.assertUserIds(user_ids, docs)
-
- def test_all_non_array(self):
- docs = self.db.find({"manager": True, "location": {"$all": ["Ohai"]}})
- self.assertEqual(len(docs), 0)
-
- def test_elem_match(self):
- emdocs = [
- {"user_id": "a", "bang": [{"foo": 1, "bar": 2}]},
- {"user_id": "b", "bang": [{"foo": 2, "bam": True}]},
- ]
- self.db.save_docs(emdocs, w=3)
- docs = self.db.find(
- {
- "_id": {"$gt": None},
- "bang": {"$elemMatch": {"foo": {"$gte": 1}, "bam": True}},
- }
- )
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["user_id"], "b")
-
- def test_all_match(self):
- amdocs = [
- {"user_id": "a", "bang": [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}]},
- {"user_id": "b", "bang": [{"foo": 1, "bar": 2}, {"foo": 4, "bar": 4}]},
- ]
- self.db.save_docs(amdocs, w=3)
- docs = self.db.find(
- {"bang": {"$allMatch": {"foo": {"$mod": [2, 1]}, "bar": {"$mod": [2, 0]}}}}
- )
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["user_id"], "a")
-
- def test_empty_all_match(self):
- amdocs = [{"bad_doc": "a", "emptybang": []}]
- self.db.save_docs(amdocs, w=3)
- docs = self.db.find({"emptybang": {"$allMatch": {"foo": {"$eq": 2}}}})
- self.assertEqual(len(docs), 0)
-
- def test_keymap_match(self):
- amdocs = [
- {"foo": {"aa": "bar", "bb": "bang"}},
- {"foo": {"cc": "bar", "bb": "bang"}},
- ]
- self.db.save_docs(amdocs, w=3)
- docs = self.db.find({"foo": {"$keyMapMatch": {"$eq": "aa"}}})
- self.assertEqual(len(docs), 1)
-
- def test_in_operator_array(self):
- docs = self.db.find({"manager": True, "favorites": {"$in": ["Ruby", "Python"]}})
- self.assertUserIds([2, 6, 7, 9, 11, 12], docs)
-
- def test_nin_operator_array(self):
- docs = self.db.find(
- {"manager": True, "favorites": {"$nin": ["Erlang", "Python"]}}
- )
- self.assertEqual(len(docs), 4)
- for doc in docs:
- if isinstance(doc["favorites"], list):
- self.assertNotIn("Erlang", doc["favorites"])
- self.assertNotIn("Python", doc["favorites"])
-
- def test_regex(self):
- docs = self.db.find(
- {"age": {"$gt": 40}, "location.state": {"$regex": "(?i)new.*"}}
- )
- self.assertEqual(len(docs), 2)
- self.assertUserIds([2, 10], docs)
-
- def test_exists_false(self):
- docs = self.db.find({"age": {"$gt": 0}, "twitter": {"$exists": False}})
- user_ids = [2, 3, 5, 6, 7, 8, 10, 11, 12, 14]
- self.assertUserIds(user_ids, docs)
- for d in docs:
- self.assertNotIn("twitter", d)
-
- def test_eq_null_does_not_include_missing(self):
- docs = self.db.find({"age": {"$gt": 0}, "twitter": None})
- user_ids = [9]
- self.assertUserIds(user_ids, docs)
- for d in docs:
- self.assertEqual(d["twitter"], None)
-
- def test_ne_includes_null_but_not_missing(self):
- docs = self.db.find({"twitter": {"$ne": "notamatch"}})
- user_ids = [0, 1, 4, 9, 13]
- self.assertUserIds(user_ids, docs)
- for d in docs:
- self.assertIn("twitter", d)
-
- # ideally this would be consistent across index types but, alas, it is not
- @unittest.skipUnless(
- not mango.has_text_service(),
- "text indexes do not support range queries across type boundaries",
- )
- def test_lt_includes_null_but_not_missing(self):
- docs = self.db.find({"twitter": {"$lt": 1}})
- user_ids = [9]
- self.assertUserIds(user_ids, docs)
- for d in docs:
- self.assertEqual(d["twitter"], None)
-
- @unittest.skipUnless(
- not mango.has_text_service(),
- "text indexes do not support range queries across type boundaries",
- )
- def test_lte_includes_null_but_not_missing(self):
- docs = self.db.find({"twitter": {"$lt": 1}})
- user_ids = [9]
- self.assertUserIds(user_ids, docs)
- for d in docs:
- self.assertEqual(d["twitter"], None)
-
- def test_lte_null_includes_null_but_not_missing(self):
- docs = self.db.find({"twitter": {"$lte": None}})
- user_ids = [9]
- self.assertUserIds(user_ids, docs)
- for d in docs:
- self.assertEqual(d["twitter"], None)
-
- def test_lte_at_z_except_null_excludes_null_and_missing(self):
- docs = self.db.find({"twitter": {"$and": [{"$lte": "@z"}, {"$ne": None}]}})
- user_ids = [0, 1, 4, 13]
- self.assertUserIds(user_ids, docs)
- for d in docs:
- self.assertNotEqual(d["twitter"], None)
-
- def test_range_gte_null_includes_null_but_not_missing(self):
- docs = self.db.find({"twitter": {"$gte": None}})
- self.assertGreater(len(docs), 0)
- for d in docs:
- self.assertIn("twitter", d)
-
- def test_exists_false_returns_missing_but_not_null(self):
- docs = self.db.find({"twitter": {"$exists": False}})
- self.assertGreater(len(docs), 0)
- for d in docs:
- self.assertNotIn("twitter", d)
-
- @unittest.skipUnless(
- not mango.has_text_service(),
- "text indexes do not support range queries across type boundaries",
- )
- def test_lte_respsects_unicode_collation(self):
- docs = self.db.find({"ordered": {"$lte": "a"}})
- user_ids = [7, 8, 9, 10, 11, 12]
- self.assertUserIds(user_ids, docs)
-
- @unittest.skipUnless(
- not mango.has_text_service(),
- "text indexes do not support range queries across type boundaries",
- )
- def test_gte_respsects_unicode_collation(self):
- docs = self.db.find({"ordered": {"$gte": "a"}})
- user_ids = [12, 13, 14]
- self.assertUserIds(user_ids, docs)
-
-
-class OperatorJSONTests(mango.UserDocsTests, OperatorTests):
- pass
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class OperatorTextTests(mango.UserDocsTextTests, OperatorTests):
- pass
-
-
-class OperatorAllDocsTests(mango.UserDocsTestsNoIndexes, OperatorTests):
- def test_range_id_eq(self):
- doc_id = "8e1c90c0-ac18-4832-8081-40d14325bde0"
- r = self.db.find({"_id": doc_id}, explain=True, return_raw=True)
-
- self.assertEqual(r["mrargs"]["end_key"], doc_id)
- self.assertEqual(r["mrargs"]["start_key"], doc_id)
diff --git a/src/mango/test/04-key-tests.py b/src/mango/test/04-key-tests.py
deleted file mode 100644
index a9551c6f8..000000000
--- a/src/mango/test/04-key-tests.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# -*- coding: latin-1 -*-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import mango
-import unittest
-
-TEST_DOCS = [
- {"type": "complex_key", "title": "normal key"},
- {
- "type": "complex_key",
- "title": "key with dot",
- "dot.key": "dot's value",
- "none": {"dot": "none dot's value"},
- "name.first": "Kvothe",
- },
- {
- "type": "complex_key",
- "title": "key with peso",
- "$key": "peso",
- "deep": {"$key": "deep peso"},
- "name": {"first": "Master Elodin"},
- },
- {"type": "complex_key", "title": "unicode key", "": "apple"},
- {
- "title": "internal_fields_format",
- "utf8-1[]:string": "string",
- "utf8-2[]:boolean[]": True,
- "utf8-3[]:number": 9,
- "utf8-3[]:null": None,
- },
-]
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class KeyTests(mango.DbPerClass):
- @classmethod
- def setUpClass(klass):
- super(KeyTests, klass).setUpClass()
- klass.db.save_docs(TEST_DOCS, w=3)
- klass.db.create_index(["type"], ddoc="view")
- if mango.has_text_service():
- klass.db.create_text_index(ddoc="text")
-
- def run_check(self, query, check, fields=None, indexes=None):
- if indexes is None:
- indexes = ["view", "text"]
- for idx in indexes:
- docs = self.db.find(query, fields=fields, use_index=idx)
- check(docs)
-
- def test_dot_key(self):
- query = {"type": "complex_key"}
- fields = ["title", "dot\\.key", "none.dot"]
-
- def check(docs):
- assert len(docs) == 4
- assert "dot.key" in docs[1]
- assert docs[1]["dot.key"] == "dot's value"
- assert "none" in docs[1]
- assert docs[1]["none"]["dot"] == "none dot's value"
-
- self.run_check(query, check, fields=fields)
-
- def test_peso_key(self):
- query = {"type": "complex_key"}
- fields = ["title", "$key", "deep.$key"]
-
- def check(docs):
- assert len(docs) == 4
- assert "$key" in docs[2]
- assert docs[2]["$key"] == "peso"
- assert "deep" in docs[2]
- assert docs[2]["deep"]["$key"] == "deep peso"
-
- self.run_check(query, check, fields=fields)
-
- def test_unicode_in_fieldname(self):
- query = {"type": "complex_key"}
- fields = ["title", ""]
-
- def check(docs):
- assert len(docs) == 4
- # note:  == \uf8ff
- assert "\uf8ff" in docs[3]
- assert docs[3]["\uf8ff"] == "apple"
-
- self.run_check(query, check, fields=fields)
-
- # The rest of these tests are only run against the text
- # indexes because view indexes don't have to worry about
- # field *name* escaping in the index.
-
- def test_unicode_in_selector_field(self):
- query = {"": "apple"}
-
- def check(docs):
- assert len(docs) == 1
- assert docs[0]["\uf8ff"] == "apple"
-
- self.run_check(query, check, indexes=["text"])
-
- def test_internal_field_tests(self):
- queries = [
- {"utf8-1[]:string": "string"},
- {"utf8-2[]:boolean[]": True},
- {"utf8-3[]:number": 9},
- {"utf8-3[]:null": None},
- ]
-
- def check(docs):
- assert len(docs) == 1
- assert docs[0]["title"] == "internal_fields_format"
-
- for query in queries:
- self.run_check(query, check, indexes=["text"])
-
- def test_escape_period(self):
- query = {"name\\.first": "Kvothe"}
-
- def check(docs):
- assert len(docs) == 1
- assert docs[0]["name.first"] == "Kvothe"
-
- self.run_check(query, check, indexes=["text"])
-
- query = {"name.first": "Kvothe"}
-
- def check_empty(docs):
- assert len(docs) == 0
-
- self.run_check(query, check_empty, indexes=["text"])
-
- def test_object_period(self):
- query = {"name.first": "Master Elodin"}
-
- def check(docs):
- assert len(docs) == 1
- assert docs[0]["title"] == "key with peso"
-
- self.run_check(query, check, indexes=["text"])
-
- query = {"name\\.first": "Master Elodin"}
-
- def check_empty(docs):
- assert len(docs) == 0
-
- self.run_check(query, check_empty, indexes=["text"])
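These key tests exist because text indexes store field names inside the Lucene index, so dots, "$" and non-ASCII characters in keys must be escaped; the deleted lucene_escape_field/1 earlier in this diff keeps ASCII letters and digits and rewrites every other byte as "_" plus two lowercase hex digits. A tiny Python rendering of that byte-level encoding follows, operating on the UTF-8 bytes much as the Erlang walks the binary.

# Tiny rendering of the escaping performed by the deleted lucene_escape_field/1:
# keep ASCII letters and digits, encode every other byte as "_" + two hex digits.

def lucene_escape_field(name):
    out = []
    for byte in name.encode("utf-8"):
        ch = chr(byte)
        if ch.isascii() and ch.isalnum():
            out.append(ch)
        else:
            out.append("_%02x" % byte)
    return "".join(out)

print(lucene_escape_field("name.first"))   # name_2efirst
print(lucene_escape_field("$key"))         # _24key
print(lucene_escape_field("age:number"))   # age_3anumber (cf. append_sort_type)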
diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py
deleted file mode 100644
index cb4d32986..000000000
--- a/src/mango/test/05-index-selection-test.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import user_docs
-import unittest
-
-
-class IndexSelectionTests:
- def test_basic(self):
- resp = self.db.find({"age": 123}, explain=True)
- self.assertEqual(resp["index"]["type"], "json")
-
- def test_with_and(self):
- resp = self.db.find(
- {
- "name.first": "Stephanie",
- "name.last": "This doesn't have to match anything.",
- },
- explain=True,
- )
- self.assertEqual(resp["index"]["type"], "json")
-
- def test_with_nested_and(self):
- resp = self.db.find(
- {"name.first": {"$gt": "a", "$lt": "z"}, "name.last": "Foo"}, explain=True
- )
- self.assertEqual(resp["index"]["type"], "json")
-
- def test_with_or(self):
- ddocid = "_design/company_and_manager"
-
- resp = self.db.find(
- {
- "company": {"$gt": "a", "$lt": "z"},
- "$or": [{"manager": "Foo"}, {"manager": "Bar"}],
- },
- explain=True,
- )
- self.assertEqual(resp["index"]["ddoc"], ddocid)
-
- def test_use_most_columns(self):
- ddocid = "_design/age"
- resp = self.db.find(
- {
- "name.first": "Stephanie",
- "name.last": "Something or other",
- "age": {"$gt": 1},
- },
- explain=True,
- )
- self.assertNotEqual(resp["index"]["ddoc"], ddocid)
-
- resp = self.db.find(
- {
- "name.first": "Stephanie",
- "name.last": "Something or other",
- "age": {"$gt": 1},
- },
- use_index=ddocid,
- explain=True,
- )
- self.assertEqual(resp["index"]["ddoc"], ddocid)
-
- def test_no_valid_sort_index(self):
- try:
- self.db.find({"_id": {"$gt": None}}, sort=["name"], return_raw=True)
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("bad find")
-
- def test_invalid_use_index(self):
- # ddoc id for the age index
- ddocid = "_design/age"
- r = self.db.find({}, use_index=ddocid, return_raw=True)
- self.assertEqual(
- r["warning"].split("\n")[0].lower(),
- "{0} was not used because it does not contain a valid index for this query.".format(
- ddocid
- ),
- )
-
- def test_uses_index_when_no_range_or_equals(self):
- # index on ["manager"] should be valid because
- # selector requires "manager" to exist. The
- # selector doesn't narrow the keyrange so it's
- # a full index scan
- selector = {"manager": {"$exists": True}}
- docs = self.db.find(selector)
- self.assertEqual(len(docs), 14)
-
- resp_explain = self.db.find(selector, explain=True)
- self.assertEqual(resp_explain["index"]["type"], "json")
-
- def test_reject_use_index_invalid_fields(self):
- ddocid = "_design/company_and_manager"
- selector = {"company": "Pharmex"}
- r = self.db.find(selector, use_index=ddocid, return_raw=True)
- self.assertEqual(
- r["warning"].split("\n")[0].lower(),
- "{0} was not used because it does not contain a valid index for this query.".format(
- ddocid
- ),
- )
-
- # should still return a correct result
- for d in r["docs"]:
- self.assertEqual(d["company"], "Pharmex")
-
- def test_reject_use_index_ddoc_and_name_invalid_fields(self):
- ddocid = "_design/company_and_manager"
- name = "company_and_manager"
- selector = {"company": "Pharmex"}
-
- resp = self.db.find(selector, use_index=[ddocid, name], return_raw=True)
- self.assertEqual(
- resp["warning"].split("\n")[0].lower(),
- "{0}, {1} was not used because it is not a valid index for this query.".format(
- ddocid, name
- ),
- )
-
- # should still return a correct result
- for d in resp["docs"]:
- self.assertEqual(d["company"], "Pharmex")
-
- def test_reject_use_index_sort_order(self):
- # index on ["company","manager"] which should not be valid
- # and there is no valid fallback (i.e. an index on ["company"])
- ddocid = "_design/company_and_manager"
- selector = {"company": {"$gt": None}}
- try:
- self.db.find(selector, use_index=ddocid, sort=[{"company": "desc"}])
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("did not reject bad use_index")
-
- def test_use_index_fallback_if_valid_sort(self):
- ddocid_valid = "_design/fallbackfoo"
- ddocid_invalid = "_design/fallbackfoobar"
- self.db.create_index(fields=["foo"], ddoc=ddocid_invalid)
- self.db.create_index(fields=["foo", "bar"], ddoc=ddocid_valid)
- selector = {"foo": {"$gt": None}}
-
- resp_explain = self.db.find(
- selector, sort=["foo", "bar"], use_index=ddocid_invalid, explain=True
- )
- self.assertEqual(resp_explain["index"]["ddoc"], ddocid_valid)
-
- resp = self.db.find(
- selector, sort=["foo", "bar"], use_index=ddocid_invalid, return_raw=True
- )
- self.assertEqual(
- resp["warning"].split("\n")[0].lower(),
- "{0} was not used because it does not contain a valid index for this query.".format(
- ddocid_invalid
- ),
- )
- self.assertEqual(len(resp["docs"]), 0)
-
- def test_prefer_use_index_over_optimal_index(self):
- # index on ["company"] even though index on ["company", "manager"] is better
- ddocid_preferred = "_design/testsuboptimal"
- self.db.create_index(fields=["baz"], ddoc=ddocid_preferred)
- self.db.create_index(fields=["baz", "bar"])
- selector = {"baz": {"$gt": None}, "bar": {"$gt": None}}
- resp = self.db.find(selector, use_index=ddocid_preferred, return_raw=True)
- self.assertTrue("warning" not in resp)
-
- resp_explain = self.db.find(selector, use_index=ddocid_preferred, explain=True)
- self.assertEqual(resp_explain["index"]["ddoc"], ddocid_preferred)
-
- # This doc will not be saved given the new ddoc validation code
- # in couch_mrview
- def test_manual_bad_view_idx01(self):
- design_doc = {
- "_id": "_design/bad_view_index",
- "language": "query",
- "views": {
- "queryidx1": {
- "map": {"fields": {"age": "asc"}},
- "reduce": "_count",
- "options": {"def": {"fields": [{"age": "asc"}]}, "w": 2},
- }
- },
- "views": {
- "views001": {
- "map": "function(employee){if(employee.training)"
- + "{emit(employee.number, employee.training);}}"
- }
- },
- }
- with self.assertRaises(KeyError):
- self.db.save_doc(design_doc)
-
- def test_explain_sort_reverse(self):
- selector = {"manager": {"$gt": None}}
- resp_explain = self.db.find(
- selector, fields=["manager"], sort=[{"manager": "desc"}], explain=True
- )
- self.assertEqual(resp_explain["index"]["type"], "json")
-
-
-class JSONIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests):
- @classmethod
- def setUpClass(klass):
- super(JSONIndexSelectionTests, klass).setUpClass()
-
- def test_uses_all_docs_when_fields_do_not_match_selector(self):
- # index exists on ["company", "manager"] but not ["company"]
- # so we should fall back to all docs (so we include docs
- # with no "manager" field)
- selector = {"company": "Pharmex"}
- docs = self.db.find(selector)
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["company"], "Pharmex")
- self.assertNotIn("manager", docs[0])
-
- resp_explain = self.db.find(selector, explain=True)
-
- self.assertEqual(resp_explain["index"]["type"], "special")
-
- def test_uses_all_docs_when_selector_doesnt_require_fields_to_exist(self):
- # as in test above, use a selector that doesn't overlap with the index
- # due to an explicit exists clause
- selector = {"company": "Pharmex", "manager": {"$exists": False}}
- docs = self.db.find(selector)
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["company"], "Pharmex")
- self.assertNotIn("manager", docs[0])
-
- resp_explain = self.db.find(selector, explain=True)
- self.assertEqual(resp_explain["index"]["type"], "special")
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class TextIndexSelectionTests(mango.UserDocsTests):
- @classmethod
- def setUpClass(klass):
- super(TextIndexSelectionTests, klass).setUpClass()
- if mango.has_text_service():
- user_docs.add_text_indexes(klass.db, {})
-
- def test_with_text(self):
- resp = self.db.find(
- {
- "$text": "Stephanie",
- "name.first": "Stephanie",
- "name.last": "This doesn't have to match anything.",
- },
- explain=True,
- )
- self.assertEqual(resp["index"]["type"], "text")
-
- def test_no_view_index(self):
- resp = self.db.find({"name.first": "Ohai!"}, explain=True)
- self.assertEqual(resp["index"]["type"], "text")
-
- def test_with_or(self):
- resp = self.db.find(
- {
- "$or": [
- {"name.first": "Stephanie"},
- {"name.last": "This doesn't have to match anything."},
- ]
- },
- explain=True,
- )
- self.assertEqual(resp["index"]["type"], "text")
-
-    def test_manual_bad_text_idx(self):
-        # Note: the duplicate "indexes" key below means only the second
-        # ("st_index") definition is actually part of the design doc sent.
-        design_doc = {
- "_id": "_design/bad_text_index",
- "language": "query",
- "indexes": {
- "text_index": {
- "default_analyzer": "keyword",
- "default_field": {},
- "selector": {},
- "fields": "all_fields",
- "analyzer": {
- "name": "perfield",
- "default": "keyword",
- "fields": {"$default": "standard"},
- },
- }
- },
- "indexes": {
- "st_index": {
- "analyzer": "standard",
- "index": 'function(doc){\n index("st_index", doc.geometry);\n}',
- }
- },
- }
- self.db.save_doc(design_doc)
- docs = self.db.find({"age": 48})
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["name"]["first"], "Stephanie")
- self.assertEqual(docs[0]["age"], 48)
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class MultiTextIndexSelectionTests(mango.UserDocsTests):
- @classmethod
- def setUpClass(klass):
- super(MultiTextIndexSelectionTests, klass).setUpClass()
- if mango.has_text_service():
- klass.db.create_text_index(ddoc="foo", analyzer="keyword")
- klass.db.create_text_index(ddoc="bar", analyzer="email")
-
- def test_fallback_to_json_with_multi_text(self):
- resp = self.db.find(
- {"name.first": "A first name", "name.last": "A last name"}, explain=True
- )
- self.assertEqual(resp["index"]["type"], "json")
-
- def test_multi_text_index_is_error(self):
-        try:
-            self.db.find({"$text": "a query"}, explain=True)
-        except Exception as e:
-            self.assertEqual(e.response.status_code, 400)
-        else:
-            raise AssertionError("did not reject query against multiple text indexes")
-
- def test_use_index_works(self):
- resp = self.db.find({"$text": "a query"}, use_index="foo", explain=True)
- self.assertEqual(resp["index"]["ddoc"], "_design/foo")
diff --git a/src/mango/test/06-basic-text-test.py b/src/mango/test/06-basic-text-test.py
deleted file mode 100644
index a3fe383d6..000000000
--- a/src/mango/test/06-basic-text-test.py
+++ /dev/null
@@ -1,602 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import json
-import mango
-import unittest
-import user_docs
-import math
-from hypothesis import given, assume, example
-import hypothesis.strategies as st
-
-
-@unittest.skipIf(mango.has_text_service(), "text service exists")
-class TextIndexCheckTests(mango.DbPerClass):
- def test_create_text_index(self):
- body = json.dumps({"index": {}, "type": "text"})
- resp = self.db.sess.post(self.db.path("_index"), data=body)
- assert resp.status_code == 503, resp
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class BasicTextTests(mango.UserDocsTextTests):
- def test_simple(self):
- docs = self.db.find({"$text": "Stephanie"})
- assert len(docs) == 1
- assert docs[0]["name"]["first"] == "Stephanie"
-
- def test_with_integer(self):
- docs = self.db.find({"name.first": "Stephanie", "age": 48})
- assert len(docs) == 1
- assert docs[0]["name"]["first"] == "Stephanie"
- assert docs[0]["age"] == 48
-
- def test_with_boolean(self):
- docs = self.db.find({"name.first": "Stephanie", "manager": False})
- assert len(docs) == 1
- assert docs[0]["name"]["first"] == "Stephanie"
- assert docs[0]["manager"] == False
-
- def test_with_array(self):
- faves = ["Ruby", "C", "Python"]
- docs = self.db.find({"name.first": "Stephanie", "favorites": faves})
- assert docs[0]["name"]["first"] == "Stephanie"
- assert docs[0]["favorites"] == faves
-
- def test_array_ref(self):
- docs = self.db.find({"favorites.1": "Python"})
- assert len(docs) == 4
- for d in docs:
- assert "Python" in d["favorites"]
-
- # Nested Level
- docs = self.db.find({"favorites.0.2": "Python"})
- assert len(docs) == 1
- for d in docs:
- assert "Python" in d["favorites"][0][2]
-
- def test_number_ref(self):
- docs = self.db.find({"11111": "number_field"})
- assert len(docs) == 1
- assert docs[0]["11111"] == "number_field"
-
- docs = self.db.find({"22222.33333": "nested_number_field"})
- assert len(docs) == 1
- assert docs[0]["22222"]["33333"] == "nested_number_field"
-
- def test_lt(self):
- docs = self.db.find({"age": {"$lt": 22}})
- assert len(docs) == 0
-
- docs = self.db.find({"age": {"$lt": 23}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"age": {"$lt": 33}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (1, 9)
-
- docs = self.db.find({"age": {"$lt": 34}})
- assert len(docs) == 3
- for d in docs:
- assert d["user_id"] in (1, 7, 9)
-
- docs = self.db.find({"company": {"$lt": "Dreamia"}})
- assert len(docs) == 1
- assert docs[0]["company"] == "Affluex"
-
- docs = self.db.find({"foo": {"$lt": "bar car apple"}})
- assert len(docs) == 0
-
- def test_lte(self):
- docs = self.db.find({"age": {"$lte": 21}})
- assert len(docs) == 0
-
- docs = self.db.find({"age": {"$lte": 22}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"age": {"$lte": 33}})
- assert len(docs) == 3
- for d in docs:
- assert d["user_id"] in (1, 7, 9)
-
- docs = self.db.find({"company": {"$lte": "Dreamia"}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (0, 11)
-
- docs = self.db.find({"foo": {"$lte": "bar car apple"}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 14
-
- def test_eq(self):
- docs = self.db.find({"age": 21})
- assert len(docs) == 0
-
- docs = self.db.find({"age": 22})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"age": {"$eq": 22}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"age": 33})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 7
-
- def test_ne(self):
- docs = self.db.find({"age": {"$ne": 22}})
- assert len(docs) == len(user_docs.DOCS) - 1
- for d in docs:
- assert d["age"] != 22
-
- docs = self.db.find({"$not": {"age": 22}})
- assert len(docs) == len(user_docs.DOCS) - 1
- for d in docs:
- assert d["age"] != 22
-
- def test_gt(self):
- docs = self.db.find({"age": {"$gt": 77}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (3, 13)
-
- docs = self.db.find({"age": {"$gt": 78}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 3
-
- docs = self.db.find({"age": {"$gt": 79}})
- assert len(docs) == 0
-
- docs = self.db.find({"company": {"$gt": "Zialactic"}})
- assert len(docs) == 0
-
- docs = self.db.find({"foo": {"$gt": "bar car apple"}})
- assert len(docs) == 0
-
- docs = self.db.find({"foo": {"$gt": "bar car"}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 14
-
- def test_gte(self):
- docs = self.db.find({"age": {"$gte": 77}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (3, 13)
-
- docs = self.db.find({"age": {"$gte": 78}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (3, 13)
-
- docs = self.db.find({"age": {"$gte": 79}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 3
-
- docs = self.db.find({"age": {"$gte": 80}})
- assert len(docs) == 0
-
- docs = self.db.find({"company": {"$gte": "Zialactic"}})
- assert len(docs) == 1
- assert docs[0]["company"] == "Zialactic"
-
- docs = self.db.find({"foo": {"$gte": "bar car apple"}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 14
-
- def test_and(self):
- docs = self.db.find({"age": 22, "manager": True})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"age": 22, "manager": False})
- assert len(docs) == 0
-
- docs = self.db.find({"$and": [{"age": 22}, {"manager": True}]})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"$and": [{"age": 22}, {"manager": False}]})
- assert len(docs) == 0
-
- docs = self.db.find({"$text": "Ramona", "age": 22})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"$and": [{"$text": "Ramona"}, {"age": 22}]})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"$and": [{"$text": "Ramona"}, {"$text": "Floyd"}]})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- def test_or(self):
- docs = self.db.find({"$or": [{"age": 22}, {"age": 33}]})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (7, 9)
-
- q = {"$or": [{"$text": "Ramona"}, {"$text": "Stephanie"}]}
- docs = self.db.find(q)
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (0, 9)
-
- q = {"$or": [{"$text": "Ramona"}, {"age": 22}]}
- docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- def test_and_or(self):
- q = {"age": 22, "$or": [{"manager": False}, {"location.state": "Missouri"}]}
- docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- q = {"$or": [{"age": 22}, {"age": 43, "manager": True}]}
- docs = self.db.find(q)
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (9, 10)
-
- q = {"$or": [{"$text": "Ramona"}, {"age": 43, "manager": True}]}
- docs = self.db.find(q)
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (9, 10)
-
- def test_nor(self):
- docs = self.db.find({"$nor": [{"age": 22}, {"age": 33}]})
- assert len(docs) == 13
- for d in docs:
- assert d["user_id"] not in (7, 9)
-
- def test_in_with_value(self):
- docs = self.db.find({"age": {"$in": [1, 5]}})
- assert len(docs) == 0
-
- docs = self.db.find({"age": {"$in": [1, 5, 22]}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"age": {"$in": [1, 5, 22, 31]}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (1, 9)
-
- docs = self.db.find({"age": {"$in": [22, 31]}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (1, 9)
-
- # Limits on boolean clauses?
- docs = self.db.find({"age": {"$in": list(range(1000))}})
- assert len(docs) == 15
-
- def test_in_with_array(self):
- vals = ["Random Garbage", 52, {"Versions": {"Alpha": "Beta"}}]
- docs = self.db.find({"favorites": {"$in": vals}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 1
-
- vals = ["Lisp", "Python"]
- docs = self.db.find({"favorites": {"$in": vals}})
- assert len(docs) == 10
-
- vals = [{"val1": 1, "val2": "val2"}]
- docs = self.db.find({"test_in": {"$in": vals}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 2
-
- def test_nin_with_value(self):
- docs = self.db.find({"age": {"$nin": [1, 5]}})
- assert len(docs) == len(user_docs.DOCS)
-
- docs = self.db.find({"age": {"$nin": [1, 5, 22]}})
- assert len(docs) == len(user_docs.DOCS) - 1
- for d in docs:
- assert d["user_id"] != 9
-
- docs = self.db.find({"age": {"$nin": [1, 5, 22, 31]}})
- assert len(docs) == len(user_docs.DOCS) - 2
- for d in docs:
- assert d["user_id"] not in (1, 9)
-
- docs = self.db.find({"age": {"$nin": [22, 31]}})
- assert len(docs) == len(user_docs.DOCS) - 2
- for d in docs:
- assert d["user_id"] not in (1, 9)
-
- # Limits on boolean clauses?
- docs = self.db.find({"age": {"$nin": list(range(1000))}})
- assert len(docs) == 0
-
- def test_nin_with_array(self):
- vals = ["Random Garbage", 52, {"Versions": {"Alpha": "Beta"}}]
- docs = self.db.find({"favorites": {"$nin": vals}})
- assert len(docs) == len(user_docs.DOCS) - 1
- for d in docs:
- assert d["user_id"] != 1
-
- vals = ["Lisp", "Python"]
- docs = self.db.find({"favorites": {"$nin": vals}})
- assert len(docs) == 5
-
- vals = [{"val1": 1, "val2": "val2"}]
- docs = self.db.find({"test_in": {"$nin": vals}})
- assert len(docs) == 0
-
- def test_all(self):
- vals = ["Ruby", "C", "Python", {"Versions": {"Alpha": "Beta"}}]
- docs = self.db.find({"favorites": {"$all": vals}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 1
-
- # This matches where favorites either contains
- # the nested array, or is the nested array. This is
- # notably different than the non-nested array in that
- # it does not match a re-ordered version of the array.
- # The fact that user_id 14 isn't included demonstrates
- # this behavior.
- vals = [["Lisp", "Erlang", "Python"]]
- docs = self.db.find({"favorites": {"$all": vals}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (3, 9)
-
- def test_exists_field(self):
- docs = self.db.find({"exists_field": {"$exists": True}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (7, 8)
-
- docs = self.db.find({"exists_field": {"$exists": False}})
- assert len(docs) == len(user_docs.DOCS) - 2
- for d in docs:
- assert d["user_id"] not in (7, 8)
-
- def test_exists_array(self):
- docs = self.db.find({"exists_array": {"$exists": True}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (9, 10)
-
- docs = self.db.find({"exists_array": {"$exists": False}})
- assert len(docs) == len(user_docs.DOCS) - 2
- for d in docs:
- assert d["user_id"] not in (9, 10)
-
- def test_exists_object(self):
- docs = self.db.find({"exists_object": {"$exists": True}})
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (11, 12)
-
- docs = self.db.find({"exists_object": {"$exists": False}})
- assert len(docs) == len(user_docs.DOCS) - 2
- for d in docs:
- assert d["user_id"] not in (11, 12)
-
- def test_exists_object_member(self):
- docs = self.db.find({"exists_object.should": {"$exists": True}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 11
-
- docs = self.db.find({"exists_object.should": {"$exists": False}})
- assert len(docs) == len(user_docs.DOCS) - 1
- for d in docs:
- assert d["user_id"] != 11
-
- def test_exists_and(self):
- q = {
- "$and": [
- {"manager": {"$exists": True}},
- {"exists_object.should": {"$exists": True}},
- ]
- }
- docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["user_id"] == 11
-
- q = {
- "$and": [
- {"manager": {"$exists": False}},
- {"exists_object.should": {"$exists": True}},
- ]
- }
- docs = self.db.find(q)
- assert len(docs) == 0
-
- # Translates to manager exists or exists_object.should doesn't
- # exist, which will match all docs
- q = {"$not": q}
- docs = self.db.find(q)
- assert len(docs) == len(user_docs.DOCS)
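-
-        # Illustrative sketch (hypothetical): by De Morgan's law the $not above is
-        # equivalent to this explicit $or, which should match the same documents.
-        q_or = {
-            "$or": [
-                {"manager": {"$exists": True}},
-                {"exists_object.should": {"$exists": False}},
-            ]
-        }
-        assert len(self.db.find(q_or)) == len(docs)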
-
- def test_value_chars(self):
- q = {"complex_field_value": '+-(){}[]^~&&*||"\\/?:!'}
- docs = self.db.find(q)
- assert len(docs) == 1
-
- def test_regex(self):
- docs = self.db.find(
- {"age": {"$gt": 40}, "location.state": {"$regex": "(?i)new.*"}}
- )
- assert len(docs) == 2
- assert docs[0]["user_id"] == 2
- assert docs[1]["user_id"] == 10
-
-        # TODO: test Lucene syntax in $text
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class ElemMatchTests(mango.FriendDocsTextTests):
-    def test_elem_match_non_object(self):
-        # Note: duplicate "$eq" keys in a Python dict literal collapse to the
-        # last one, so the selector sent is {"$elemMatch": {"$eq": "Cyclops"}}.
-        q = {"bestfriends": {"$elemMatch": {"$eq": "Wolverine", "$eq": "Cyclops"}}}
- docs = self.db.find(q)
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["bestfriends"], ["Wolverine", "Cyclops"])
-
- q = {"results": {"$elemMatch": {"$gte": 80, "$lt": 85}}}
-
- docs = self.db.find(q)
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["results"], [82, 85, 88])
-
- def test_elem_match(self):
- q = {"friends": {"$elemMatch": {"name.first": "Vargas"}}}
- docs = self.db.find(q)
- self.assertEqual(len(docs), 2)
- for d in docs:
- self.assertIn(d["user_id"], (0, 1))
-
- q = {"friends": {"$elemMatch": {"name.first": "Ochoa", "name.last": "Burch"}}}
- docs = self.db.find(q)
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["user_id"], 4)
-
- # Check that we can do logic in elemMatch
- q = {"friends": {"$elemMatch": {"name.first": "Ochoa", "type": "work"}}}
- docs = self.db.find(q)
- self.assertEqual(len(docs), 2)
- for d in docs:
- self.assertIn(d["user_id"], (1, 15))
-
- q = {
- "friends": {
- "$elemMatch": {
- "name.first": "Ochoa",
- "$or": [{"type": "work"}, {"type": "personal"}],
- }
- }
- }
- docs = self.db.find(q)
- self.assertEqual(len(docs), 3)
- for d in docs:
- self.assertIn(d["user_id"], (1, 4, 15))
-
- # Same as last, but using $in
- q = {
- "friends": {
- "$elemMatch": {
- "name.first": "Ochoa",
- "type": {"$in": ["work", "personal"]},
- }
- }
- }
- docs = self.db.find(q)
- self.assertEqual(len(docs), 3)
- for d in docs:
- self.assertIn(d["user_id"], (1, 4, 15))
-
- q = {
- "$and": [
- {"friends": {"$elemMatch": {"id": 0, "name": {"$exists": True}}}},
- {
- "friends": {
- "$elemMatch": {
- "$or": [
- {"name": {"first": "Campos", "last": "Freeman"}},
- {
- "name": {
- "$in": [
- {"first": "Gibbs", "last": "Mccarty"},
- {"first": "Wilkins", "last": "Chang"},
- ]
- }
- },
- ]
- }
- }
- },
- ]
- }
- docs = self.db.find(q)
- self.assertEqual(len(docs), 3)
- for d in docs:
- self.assertIn(d["user_id"], (10, 11, 12))
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class AllMatchTests(mango.FriendDocsTextTests):
- def test_all_match(self):
- q = {"friends": {"$allMatch": {"type": "personal"}}}
- docs = self.db.find(q)
- assert len(docs) == 2
- for d in docs:
- assert d["user_id"] in (8, 5)
-
- # Check that we can do logic in allMatch
- q = {
- "friends": {
- "$allMatch": {
- "name.first": "Ochoa",
- "$or": [{"type": "work"}, {"type": "personal"}],
- }
- }
- }
- docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["user_id"] == 15
-
- # Same as last, but using $in
- q = {
- "friends": {
- "$allMatch": {
- "name.first": "Ochoa",
- "type": {"$in": ["work", "personal"]},
- }
- }
- }
- docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["user_id"] == 15
-
-
-# Test numeric strings for $text
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class NumStringTests(mango.DbPerClass):
- @classmethod
- def setUpClass(klass):
- super(NumStringTests, klass).setUpClass()
- klass.db.recreate()
- if mango.has_text_service():
- klass.db.create_text_index()
-
-    # math.isfinite() is not available on Python 2.7.x, so roll our own
-    def isFinite(num):
-        return not (math.isinf(num) or math.isnan(num))
-
- @given(f=st.floats().filter(isFinite).map(str) | st.floats().map(lambda f: f.hex()))
- @example("NaN")
- @example("Infinity")
- def test_floating_point_val(self, f):
- doc = {"number_string": f}
- self.db.save_doc(doc)
- q = {"$text": f}
- docs = self.db.find(q)
- if len(docs) == 1:
- assert docs[0]["number_string"] == f
- if len(docs) == 2:
- if docs[0]["number_string"] != f:
- assert docs[1]["number_string"] == f
- q = {"number_string": f}
- docs = self.db.find(q)
- if len(docs) == 1:
- assert docs[0]["number_string"] == f
- if len(docs) == 2:
- if docs[0]["number_string"] != f:
- assert docs[1]["number_string"] == f
diff --git a/src/mango/test/06-text-default-field-test.py b/src/mango/test/06-text-default-field-test.py
deleted file mode 100644
index 7fdbd747d..000000000
--- a/src/mango/test/06-text-default-field-test.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import unittest
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class NoDefaultFieldTest(mango.UserDocsTextTests):
-
- DEFAULT_FIELD = False
-
- def test_basic(self):
- docs = self.db.find({"$text": "Ramona"})
- # Or should this throw an error?
- assert len(docs) == 0
-
- def test_other_fields_exist(self):
- docs = self.db.find({"age": 22})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class NoDefaultFieldWithAnalyzer(mango.UserDocsTextTests):
-
- DEFAULT_FIELD = {"enabled": False, "analyzer": "keyword"}
-
- def test_basic(self):
- docs = self.db.find({"$text": "Ramona"})
- assert len(docs) == 0
-
- def test_other_fields_exist(self):
- docs = self.db.find({"age": 22})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class DefaultFieldWithCustomAnalyzer(mango.UserDocsTextTests):
-
- DEFAULT_FIELD = {"enabled": True, "analyzer": "keyword"}
-
- def test_basic(self):
- docs = self.db.find({"$text": "Ramona"})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- def test_not_analyzed(self):
- docs = self.db.find({"$text": "Lott Place"})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"$text": "Lott"})
- assert len(docs) == 0
-
- docs = self.db.find({"$text": "Place"})
- assert len(docs) == 0
diff --git a/src/mango/test/07-text-custom-field-list-test.py b/src/mango/test/07-text-custom-field-list-test.py
deleted file mode 100644
index 8514111c4..000000000
--- a/src/mango/test/07-text-custom-field-list-test.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import unittest
-import user_docs
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class CustomFieldsTest(mango.UserDocsTextTests):
-
- FIELDS = [
- {"name": "favorites.[]", "type": "string"},
- {"name": "manager", "type": "boolean"},
- {"name": "age", "type": "number"},
- # These two are to test the default analyzer for
- # each field.
- {"name": "location.state", "type": "string"},
- {"name": "location.address.street", "type": "string"},
- {"name": "name\\.first", "type": "string"},
- ]
-
- def test_basic(self):
- docs = self.db.find({"age": 22})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- def test_multi_field(self):
- docs = self.db.find({"age": 22, "manager": True})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 9
-
- docs = self.db.find({"age": 22, "manager": False})
- assert len(docs) == 0
-
-    def test_element_access(self):
- docs = self.db.find({"favorites.0": "Ruby"})
- assert len(docs) == 3
- for d in docs:
- assert "Ruby" in d["favorites"]
-
- # This should throw an exception because we only index the array
- # favorites.[], and not the string field favorites
-    def test_index_selection(self):
-        try:
-            self.db.find({"$or": [{"favorites": "Ruby"}, {"favorites.0": "Ruby"}]})
-        except Exception as e:
-            assert e.response.status_code == 400
-
- def test_in_with_array(self):
- vals = ["Lisp", "Python"]
- docs = self.db.find({"favorites": {"$in": vals}})
- assert len(docs) == 10
-
- def test_in_with_array_not_explicit(self):
- agelist = [22, 51]
- statelist = ["New Hampshire"]
- docs = self.db.find({"age": {"$in": agelist}})
- docs2 = self.db.find({"location.state": {"$in": statelist}})
- docs3 = self.db.find({"age": {"$in": statelist}})
- assert len(docs) == 2
- assert len(docs2) == 1
- assert len(docs3) == 0
-
- # This should also throw an error because we only indexed
- # favorites.[] of type string. For the following query to work, the
- # user has to index favorites.[] of type number, and also
- # favorites.[].Versions.Alpha of type string.
- def test_in_different_types(self):
- vals = ["Random Garbage", 52, {"Versions": {"Alpha": "Beta"}}]
- try:
- self.db.find({"favorites": {"$in": vals}})
- except Exception as e:
- assert e.response.status_code == 400
-
- def test_nin_with_array(self):
- vals = ["Lisp", "Python"]
- docs = self.db.find({"favorites": {"$nin": vals}})
- assert len(docs) == 5
-
- def test_missing(self):
- self.db.find({"location.state": "Nevada"})
-
- def test_missing_type(self):
-        # A string value against the number-typed "age" field should be rejected
-        try:
-            self.db.find({"age": "foo"})
-        except Exception:
-            pass
-        else:
-            raise AssertionError("Should have thrown an HTTPError")
-
- def test_field_analyzer_is_keyword(self):
- docs = self.db.find({"location.state": "New"})
- assert len(docs) == 0
-
- docs = self.db.find({"location.state": "New Hampshire"})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 10
-
- # Since our FIELDS list only includes "name\\.first", we should
- # get an error when we try to search for "name.first", since the index
- # for that field does not exist.
- def test_escaped_field(self):
- docs = self.db.find({"name\\.first": "name dot first"})
- assert len(docs) == 1
- assert docs[0]["name.first"] == "name dot first"
-
-        try:
-            self.db.find({"name.first": "name dot first"})
-        except Exception:
-            pass
-        else:
-            raise AssertionError("Should have thrown an HTTPError")
-
- def test_filtered_search_fields(self):
- docs = self.db.find({"age": 22}, fields=["age", "location.state"])
- assert len(docs) == 1
- assert docs == [{"age": 22, "location": {"state": "Missouri"}}]
-
- docs = self.db.find({"age": 22}, fields=["age", "Random Garbage"])
- assert len(docs) == 1
- assert docs == [{"age": 22}]
-
- docs = self.db.find({"age": 22}, fields=["favorites"])
- assert len(docs) == 1
- assert docs == [{"favorites": ["Lisp", "Erlang", "Python"]}]
-
- docs = self.db.find({"age": 22}, fields=["favorites.[]"])
- assert len(docs) == 1
- assert docs == [{}]
-
- docs = self.db.find({"age": 22}, fields=["all_fields"])
- assert len(docs) == 1
- assert docs == [{}]
-
- def test_two_or(self):
- docs = self.db.find(
- {
- "$or": [
- {"location.state": "New Hampshire"},
- {"location.state": "Don't Exist"},
- ]
- }
- )
- assert len(docs) == 1
- assert docs[0]["user_id"] == 10
-
- def test_all_match(self):
- docs = self.db.find({"favorites": {"$allMatch": {"$eq": "Erlang"}}})
- assert len(docs) == 1
- assert docs[0]["user_id"] == 10
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class CustomFieldsExistsTest(mango.UserDocsTextTests):
-
- FIELDS = [
- {"name": "exists_field", "type": "string"},
- {"name": "exists_array.[]", "type": "string"},
- {"name": "exists_object.should", "type": "string"},
- {"name": "twitter", "type": "string"},
- ]
-
- def test_exists_field(self):
- docs = self.db.find({"exists_field": {"$exists": True}})
- self.assertEqual(len(docs), 2)
- for d in docs:
- self.assertIn(d["user_id"], (7, 8))
-
- docs = self.db.find({"exists_field": {"$exists": False}})
- self.assertEqual(len(docs), len(user_docs.DOCS) - 2)
- for d in docs:
- self.assertNotIn(d["user_id"], (7, 8))
-
- def test_exists_array(self):
- docs = self.db.find({"exists_array": {"$exists": True}})
- self.assertEqual(len(docs), 2)
- for d in docs:
- self.assertIn(d["user_id"], (9, 10))
-
- docs = self.db.find({"exists_array": {"$exists": False}})
- self.assertEqual(len(docs), len(user_docs.DOCS) - 2)
- for d in docs:
- self.assertNotIn(d["user_id"], (9, 10))
-
- def test_exists_object_member(self):
- docs = self.db.find({"exists_object.should": {"$exists": True}})
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["user_id"], 11)
-
- docs = self.db.find({"exists_object.should": {"$exists": False}})
- self.assertEqual(len(docs), len(user_docs.DOCS) - 1)
- for d in docs:
- self.assertNotEqual(d["user_id"], 11)
-
- def test_exists_false_same_as_views(self):
- docs = self.db.find({"twitter": {"$exists": False}})
- for d in docs:
- self.assertNotIn(d["user_id"], (0, 1, 4, 13))
diff --git a/src/mango/test/08-text-limit-test.py b/src/mango/test/08-text-limit-test.py
deleted file mode 100644
index ae827813d..000000000
--- a/src/mango/test/08-text-limit-test.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import limit_docs
-import unittest
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class LimitTests(mango.LimitDocsTextTests):
- def test_limit_field(self):
- q = {"$or": [{"user_id": {"$lt": 10}}, {"filtered_array.[]": 1}]}
- docs = self.db.find(q, limit=10)
- assert len(docs) == 8
- for d in docs:
- assert d["user_id"] < 10
-
- def test_limit_field2(self):
- q = {"$or": [{"user_id": {"$lt": 20}}, {"filtered_array.[]": 1}]}
- docs = self.db.find(q, limit=10)
- assert len(docs) == 10
- for d in docs:
- assert d["user_id"] < 20
-
- def test_limit_field3(self):
- q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]}
- docs = self.db.find(q, limit=1)
- assert len(docs) == 1
- for d in docs:
- assert d["user_id"] < 100
-
- def test_limit_field4(self):
- q = {"$or": [{"user_id": {"$lt": 0}}, {"filtered_array.[]": 1}]}
- docs = self.db.find(q, limit=35)
- assert len(docs) == 0
-
-    # The limit here exceeds the number of matching docs, so all are returned
- def test_limit_field5(self):
- q = {"age": {"$exists": True}}
- docs = self.db.find(q, limit=250)
- assert len(docs) == 75
- for d in docs:
- assert d["age"] < 100
-
- def test_limit_skip_field1(self):
- q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]}
- docs = self.db.find(q, limit=10, skip=20)
- assert len(docs) == 10
- for d in docs:
- assert d["user_id"] > 20
-
- def test_limit_skip_field2(self):
- q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]}
- docs = self.db.find(q, limit=100, skip=100)
- assert len(docs) == 0
-
- def test_limit_skip_field3(self):
- q = {"$or": [{"user_id": {"$lt": 20}}, {"filtered_array.[]": 1}]}
- docs = self.db.find(q, limit=1, skip=30)
- assert len(docs) == 0
-
- def test_limit_skip_field4(self):
- q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]}
- docs = self.db.find(q, limit=0, skip=0)
- assert len(docs) == 0
-
- def test_limit_skip_field5(self):
- q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]}
- try:
- self.db.find(q, limit=-1)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("Should have thrown error for negative limit")
-
- def test_limit_skip_field6(self):
- q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]}
- try:
- self.db.find(q, skip=-1)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("Should have thrown error for negative skip")
-
- # Basic test to ensure we can iterate through documents with a bookmark
- def test_limit_bookmark(self):
- for i in range(1, len(limit_docs.DOCS), 5):
- self.run_bookmark_check(i)
-
- for i in range(1, len(limit_docs.DOCS), 5):
- self.run_bookmark_sort_check(i)
-
- def run_bookmark_check(self, size):
- q = {"age": {"$gt": 0}}
- seen_docs = set()
- bm = None
- while True:
- json = self.db.find(q, limit=size, bookmark=bm, return_raw=True)
- for doc in json["docs"]:
- assert doc["_id"] not in seen_docs
- seen_docs.add(doc["_id"])
- if not len(json["docs"]):
- break
- assert json["bookmark"] != bm
- bm = json["bookmark"]
- assert len(seen_docs) == len(limit_docs.DOCS)
-
- def run_bookmark_sort_check(self, size):
- q = {"age": {"$gt": 0}}
- seen_docs = set()
- bm = None
- age = 0
- while True:
- json = self.db.find(
- q, limit=size, bookmark=bm, sort=["age"], return_raw=True
- )
- for doc in json["docs"]:
- assert doc["_id"] not in seen_docs
- assert doc["age"] >= age
- age = doc["age"]
- seen_docs.add(doc["_id"])
- if not len(json["docs"]):
- break
- assert json["bookmark"] != bm
- bm = json["bookmark"]
- assert len(seen_docs) == len(limit_docs.DOCS)
diff --git a/src/mango/test/09-text-sort-test.py b/src/mango/test/09-text-sort-test.py
deleted file mode 100644
index c0c36ccd0..000000000
--- a/src/mango/test/09-text-sort-test.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import unittest
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class SortTests(mango.UserDocsTextTests):
- def test_number_sort(self):
- q = {"age": {"$gt": 0}}
- docs = self.db.find(q, sort=["age:number"])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["age"], 22)
-
- def test_number_sort_desc(self):
- q = {"age": {"$gt": 0}}
- docs = self.db.find(q, sort=[{"age": "desc"}])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["age"], 79)
-
- q = {"manager": True}
- docs = self.db.find(q, sort=[{"age:number": "desc"}])
- self.assertEqual(len(docs), 10)
- self.assertEqual(docs[0]["age"], 79)
-
- def test_string_sort(self):
- q = {"email": {"$gt": None}}
- docs = self.db.find(q, sort=["email:string"])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["email"], "abbottwatson@talkola.com")
-
- def test_notype_sort(self):
- q = {"email": {"$gt": None}}
- try:
- self.db.find(q, sort=["email"])
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("Should have thrown error for sort")
-
- def test_array_sort(self):
- q = {"favorites": {"$exists": True}}
- docs = self.db.find(q, sort=["favorites.[]:string"])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["user_id"], 8)
-
- def test_multi_sort(self):
- q = {"name": {"$exists": True}}
- docs = self.db.find(q, sort=["name.last:string", "age:number"])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["name"], {"last": "Ewing", "first": "Shelly"})
- self.assertEqual(docs[1]["age"], 22)
-
- def test_guess_type_sort(self):
- q = {"$or": [{"age": {"$gt": 0}}, {"email": {"$gt": None}}]}
- docs = self.db.find(q, sort=["age"])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["age"], 22)
-
- def test_guess_dup_type_sort(self):
- q = {
- "$and": [
- {"age": {"$gt": 0}},
- {"email": {"$gt": None}},
- {"age": {"$lte": 100}},
- ]
- }
- docs = self.db.find(q, sort=["age"])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["age"], 22)
-
- def test_ambiguous_type_sort(self):
- q = {"$or": [{"age": {"$gt": 0}}, {"email": {"$gt": None}}, {"age": "34"}]}
- try:
- self.db.find(q, sort=["age"])
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("Should have thrown error for sort")
-
- def test_guess_multi_sort(self):
- q = {
- "$or": [
- {"age": {"$gt": 0}},
- {"email": {"$gt": None}},
- {"name.last": "Harvey"},
- ]
- }
- docs = self.db.find(q, sort=["name.last", "age"])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["name"], {"last": "Ewing", "first": "Shelly"})
- self.assertEqual(docs[1]["age"], 22)
-
- def test_guess_mix_sort(self):
- q = {
- "$or": [
- {"age": {"$gt": 0}},
- {"email": {"$gt": None}},
- {"name.last": "Harvey"},
- ]
- }
- docs = self.db.find(q, sort=["name.last:string", "age"])
- self.assertEqual(len(docs), 15)
- self.assertEqual(docs[0]["name"], {"last": "Ewing", "first": "Shelly"})
- self.assertEqual(docs[1]["age"], 22)
diff --git a/src/mango/test/10-disable-array-length-field-test.py b/src/mango/test/10-disable-array-length-field-test.py
deleted file mode 100644
index ea3279b55..000000000
--- a/src/mango/test/10-disable-array-length-field-test.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import unittest
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class DisableIndexArrayLengthsTest(mango.UserDocsTextTests):
- def setUp(self):
- self.db.recreate()
- self.db.create_text_index(
- ddoc="disable_index_array_lengths",
- analyzer="keyword",
- index_array_lengths=False,
- )
- self.db.create_text_index(
- ddoc="explicit_enable_index_array_lengths",
- analyzer="keyword",
- index_array_lengths=True,
- )
-
- def test_disable_index_array_length(self):
- docs = self.db.find(
- {"favorites": {"$size": 4}}, use_index="disable_index_array_lengths"
- )
- for d in docs:
- assert len(d["favorites"]) == 0
-
- def test_enable_index_array_length(self):
- docs = self.db.find(
- {"favorites": {"$size": 4}}, use_index="explicit_enable_index_array_lengths"
- )
- for d in docs:
- assert len(d["favorites"]) == 4
diff --git a/src/mango/test/11-ignore-design-docs-test.py b/src/mango/test/11-ignore-design-docs-test.py
deleted file mode 100644
index f31dcc5d1..000000000
--- a/src/mango/test/11-ignore-design-docs-test.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import unittest
-
-DOCS = [
- {"_id": "_design/my-design-doc"},
- {"_id": "54af50626de419f5109c962f", "user_id": 0, "age": 10, "name": "Jimi"},
- {"_id": "54af50622071121b25402dc3", "user_id": 1, "age": 11, "name": "Eddie"},
-]
-
-
-class IgnoreDesignDocsForAllDocsIndexTests(mango.DbPerClass):
- def test_should_not_return_design_docs(self):
- self.db.save_docs(DOCS)
- docs = self.db.find({"_id": {"$gte": None}})
- assert len(docs) == 2
diff --git a/src/mango/test/12-use-correct-index-test.py b/src/mango/test/12-use-correct-index-test.py
deleted file mode 100644
index c21ad6c5e..000000000
--- a/src/mango/test/12-use-correct-index-test.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import copy
-
-DOCS = [
- {"_id": "_design/my-design-doc"},
- {
- "_id": "54af50626de419f5109c962f",
- "user_id": 0,
- "age": 10,
- "name": "Jimi",
- "location": "UK",
- "number": 4,
- },
- {
- "_id": "54af50622071121b25402dc3",
- "user_id": 1,
- "age": 12,
- "name": "Eddie",
- "location": "ZAR",
- "number": 2,
- },
- {
- "_id": "54af50622071121b25402dc6",
- "user_id": 1,
- "age": 6,
- "name": "Harry",
- "location": "US",
- "number": 8,
- },
- {
- "_id": "54af50622071121b25402dc9",
- "name": "Eddie",
- "occupation": "engineer",
- "number": 7,
- },
-]
-
-
-class ChooseCorrectIndexForDocs(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- self.db.save_docs(copy.deepcopy(DOCS))
-
- def test_choose_index_with_one_field_in_index(self):
- self.db.create_index(["name", "age", "user_id"], ddoc="aaa")
- self.db.create_index(["name"], ddoc="zzz")
- explain = self.db.find({"name": "Eddie"}, explain=True)
- self.assertEqual(explain["index"]["ddoc"], "_design/zzz")
-
- def test_choose_index_with_two(self):
- self.db.create_index(["name", "age", "user_id"], ddoc="aaa")
- self.db.create_index(["name", "age"], ddoc="bbb")
- self.db.create_index(["name"], ddoc="zzz")
- explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True)
- self.assertEqual(explain["index"]["ddoc"], "_design/bbb")
-
- def test_choose_index_alphabetically(self):
- self.db.create_index(["name"], ddoc="aaa")
- self.db.create_index(["name"], ddoc="bbb")
- self.db.create_index(["name"], ddoc="zzz")
- explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True)
- self.assertEqual(explain["index"]["ddoc"], "_design/aaa")
-
- def test_choose_index_most_accurate(self):
- self.db.create_index(["name", "age", "user_id"], ddoc="aaa")
- self.db.create_index(["name", "age"], ddoc="bbb")
- self.db.create_index(["name"], ddoc="zzz")
- explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True)
- self.assertEqual(explain["index"]["ddoc"], "_design/bbb")
-
- def test_choose_index_most_accurate_in_memory_selector(self):
- self.db.create_index(["name", "location", "user_id"], ddoc="aaa")
- self.db.create_index(["name", "age", "user_id"], ddoc="bbb")
- self.db.create_index(["name"], ddoc="zzz")
- explain = self.db.find({"name": "Eddie", "number": {"$lte": 12}}, explain=True)
- self.assertEqual(explain["index"]["ddoc"], "_design/zzz")
-
- def test_warn_on_full_db_scan(self):
- selector = {"not_indexed": "foo"}
- explain_resp = self.db.find(selector, explain=True, return_raw=True)
- self.assertEqual(explain_resp["index"]["type"], "special")
- resp = self.db.find(selector, return_raw=True)
- self.assertEqual(
- resp["warning"].split("\n")[0].lower(),
- "no matching index found, create an index to optimize query time.",
- )
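-
-        # Illustrative sketch (hypothetical): once an index covering the field
-        # exists, the same query should run without the warning.
-        self.db.create_index(["not_indexed"], ddoc="not_indexed_sketch")
-        resp = self.db.find(selector, return_raw=True)
-        self.assertNotIn("warning", resp)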
-
- def test_chooses_idxA(self):
- DOCS2 = [{"a": 1, "b": 1, "c": 1}, {"a": 1000, "d": 1000, "e": 1000}]
- self.db.save_docs(copy.deepcopy(DOCS2))
- self.db.create_index(["a", "b", "c"])
- self.db.create_index(["a", "d", "e"])
- explain = self.db.find(
- {"a": {"$gt": 0}, "b": {"$gt": 0}, "c": {"$gt": 0}}, explain=True
- )
- self.assertEqual(
- explain["index"]["def"]["fields"],
- [{"a": "asc"}, {"b": "asc"}, {"c": "asc"}],
- )
-
- def test_can_query_with_range_on_secondary_column(self):
- self.db.create_index(["age", "name"], ddoc="bbb")
- selector = {"age": 10, "name": {"$gte": 0}}
- docs = self.db.find(selector)
- self.assertEqual(len(docs), 1)
- explain = self.db.find(selector, explain=True)
- self.assertEqual(explain["index"]["ddoc"], "_design/bbb")
- self.assertEqual(explain["mrargs"]["end_key"], [10, "<MAX>"])
-
-    # All documents contain _id and _rev fields, so those fields should not
-    # be used to restrict which indexes are eligible based on the fields
-    # required by the selector.
- def test_choose_index_with_id(self):
- self.db.create_index(["name", "_id"], ddoc="aaa")
- explain = self.db.find({"name": "Eddie"}, explain=True)
- self.assertEqual(explain["index"]["ddoc"], "_design/aaa")
-
- def test_choose_index_with_rev(self):
- self.db.create_index(["name", "_rev"], ddoc="aaa")
- explain = self.db.find({"name": "Eddie"}, explain=True)
- self.assertEqual(explain["index"]["ddoc"], "_design/aaa")
diff --git a/src/mango/test/13-stable-update-test.py b/src/mango/test/13-stable-update-test.py
deleted file mode 100644
index 303f3fab1..000000000
--- a/src/mango/test/13-stable-update-test.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import copy
-import mango
-
-DOCS1 = [
- {
- "_id": "54af50626de419f5109c962f",
- "user_id": 0,
- "age": 10,
- "name": "Jimi",
- "location": "UK",
- "number": 4,
- },
- {
- "_id": "54af50622071121b25402dc3",
- "user_id": 1,
- "age": 12,
- "name": "Eddie",
- "location": "ZAR",
- "number": 2,
- },
-]
-
-
-class SupportStableAndUpdate(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- # Hack to prevent auto-indexer from foiling update=False test
- # https://github.com/apache/couchdb/issues/2313
- self.db.save_doc(
- {"_id": "_design/foo", "language": "query", "autoupdate": False}
- )
- self.db.create_index(["name"], ddoc="foo")
- self.db.save_docs(copy.deepcopy(DOCS1))
-
- def test_update_updates_view_when_specified(self):
- docs = self.db.find({"name": "Eddie"}, update=False)
- assert len(docs) == 0
- docs = self.db.find({"name": "Eddie"}, update=True)
- assert len(docs) == 1
diff --git a/src/mango/test/13-users-db-find-test.py b/src/mango/test/13-users-db-find-test.py
deleted file mode 100644
index 73d15ea1a..000000000
--- a/src/mango/test/13-users-db-find-test.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# -*- coding: latin-1 -*-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import mango, requests
-
-
-class UsersDbFindTests(mango.UsersDbTests):
- def test_simple_find(self):
- docs = self.db.find({"name": {"$eq": "demo02"}})
- assert len(docs) == 1
- assert docs[0]["_id"] == "org.couchdb.user:demo02"
-
- def test_multi_cond_and(self):
- self.db.create_index(["type", "roles"])
- docs = self.db.find({"type": "user", "roles": {"$eq": ["reader"]}})
- assert len(docs) == 1
- assert docs[0]["_id"] == "org.couchdb.user:demo02"
-
- def test_multi_cond_or(self):
- docs = self.db.find(
- {"$and": [{"type": "user"}, {"$or": [{"order": 1}, {"order": 3}]}]}
- )
- assert len(docs) == 2
- assert docs[0]["_id"] == "org.couchdb.user:demo01"
- assert docs[1]["_id"] == "org.couchdb.user:demo03"
-
- def test_sort(self):
- self.db.create_index(["order", "name"])
- selector = {"name": {"$gt": "demo01"}}
- docs1 = self.db.find(selector, sort=[{"order": "asc"}])
- docs2 = list(sorted(docs1, key=lambda d: d["order"]))
- assert docs1 is not docs2 and docs1 == docs2
-
- docs1 = self.db.find(selector, sort=[{"order": "desc"}])
- docs2 = list(reversed(sorted(docs1, key=lambda d: d["order"])))
- assert docs1 is not docs2 and docs1 == docs2
-
- def test_fields(self):
- selector = {"name": {"$eq": "demo02"}}
- docs = self.db.find(selector, fields=["name", "order"])
- assert len(docs) == 1
- assert sorted(docs[0].keys()) == ["name", "order"]
-
- def test_empty(self):
- docs = self.db.find({})
- assert len(docs) == 3
-
-
-class UsersDbIndexFindTests(UsersDbFindTests):
- def setUp(self):
- self.db.create_index(["name"])
-
- def test_multi_cond_and(self):
- self.db.create_index(["type", "roles"])
- super(UsersDbIndexFindTests, self).test_multi_cond_and()
-
- def test_multi_cond_or(self):
- self.db.create_index(["type", "order"])
- super(UsersDbIndexFindTests, self).test_multi_cond_or()
-
- def test_sort(self):
- self.db.create_index(["order", "name"])
- super(UsersDbIndexFindTests, self).test_sort()
diff --git a/src/mango/test/14-json-pagination-test.py b/src/mango/test/14-json-pagination-test.py
deleted file mode 100644
index 2d2430152..000000000
--- a/src/mango/test/14-json-pagination-test.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import copy
-
-DOCS = [
- {"_id": "100", "name": "Jimi", "location": "AUS", "user_id": 1, "same": "value"},
- {"_id": "200", "name": "Eddie", "location": "BRA", "user_id": 2, "same": "value"},
- {"_id": "300", "name": "Harry", "location": "CAN", "user_id": 3, "same": "value"},
- {"_id": "400", "name": "Eddie", "location": "DEN", "user_id": 4, "same": "value"},
- {"_id": "500", "name": "Jones", "location": "ETH", "user_id": 5, "same": "value"},
- {
- "_id": "600",
- "name": "Winnifried",
- "location": "FRA",
- "user_id": 6,
- "same": "value",
- },
- {"_id": "700", "name": "Marilyn", "location": "GHA", "user_id": 7, "same": "value"},
- {"_id": "800", "name": "Sandra", "location": "ZAR", "user_id": 8, "same": "value"},
-]
-
-
-class PaginateJsonDocs(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- self.db.save_docs(copy.deepcopy(DOCS))
-
- def test_all_docs_paginate_to_end(self):
- selector = {"_id": {"$gt": 0}}
- # Page 1
- resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True)
- bookmark = resp["bookmark"]
- docs = resp["docs"]
- assert docs[0]["_id"] == "100"
- assert len(docs) == 5
-
- # Page 2
- resp = self.db.find(
- selector, fields=["_id"], bookmark=bookmark, limit=5, return_raw=True
- )
- bookmark = resp["bookmark"]
- docs = resp["docs"]
- assert docs[0]["_id"] == "600"
- assert len(docs) == 3
-
- # Page 3
- resp = self.db.find(selector, bookmark=bookmark, limit=5, return_raw=True)
- bookmark = resp["bookmark"]
- docs = resp["docs"]
- assert len(docs) == 0
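-
-    # Illustrative sketch (hypothetical): the same walk written as a generic
-    # "follow the bookmark until a page comes back empty" loop.
-    def test_paginate_loop_sketch(self):
-        selector = {"_id": {"$gt": 0}}
-        seen, bookmark = [], None
-        while True:
-            resp = self.db.find(
-                selector, fields=["_id"], limit=3, bookmark=bookmark, return_raw=True
-            )
-            if not resp["docs"]:
-                break
-            seen.extend(d["_id"] for d in resp["docs"])
-            bookmark = resp["bookmark"]
-        assert len(seen) == len(DOCS)
-        assert len(seen) == len(set(seen))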
-
- def test_return_previous_bookmark_for_empty(self):
- selector = {"_id": {"$gt": 0}}
- # Page 1
- resp = self.db.find(selector, fields=["_id"], return_raw=True)
- bookmark1 = resp["bookmark"]
- docs = resp["docs"]
- assert len(docs) == 8
-
- resp = self.db.find(
- selector, fields=["_id"], return_raw=True, bookmark=bookmark1
- )
- bookmark2 = resp["bookmark"]
- docs = resp["docs"]
- assert len(docs) == 0
-
- resp = self.db.find(
- selector, fields=["_id"], return_raw=True, bookmark=bookmark2
- )
- bookmark3 = resp["bookmark"]
- docs = resp["docs"]
- assert bookmark3 == bookmark2
- assert len(docs) == 0
-
- def test_all_docs_with_skip(self):
- selector = {"_id": {"$gt": 0}}
- # Page 1
- resp = self.db.find(selector, fields=["_id"], skip=2, limit=5, return_raw=True)
- bookmark = resp["bookmark"]
- docs = resp["docs"]
- assert docs[0]["_id"] == "300"
- assert len(docs) == 5
-
- # Page 2
- resp = self.db.find(
- selector, fields=["_id"], bookmark=bookmark, limit=5, return_raw=True
- )
- bookmark = resp["bookmark"]
- docs = resp["docs"]
- assert docs[0]["_id"] == "800"
- assert len(docs) == 1
- resp = self.db.find(selector, bookmark=bookmark, limit=5, return_raw=True)
- bookmark = resp["bookmark"]
- docs = resp["docs"]
- assert len(docs) == 0
-
- def test_all_docs_reverse(self):
- selector = {"_id": {"$gt": 0}}
- resp = self.db.find(
- selector, fields=["_id"], sort=[{"_id": "desc"}], limit=5, return_raw=True
- )
- docs = resp["docs"]
- bookmark1 = resp["bookmark"]
- assert len(docs) == 5
- assert docs[0]["_id"] == "800"
-
- resp = self.db.find(
- selector,
- fields=["_id"],
- sort=[{"_id": "desc"}],
- limit=5,
- return_raw=True,
- bookmark=bookmark1,
- )
- docs = resp["docs"]
- bookmark2 = resp["bookmark"]
- assert len(docs) == 3
- assert docs[0]["_id"] == "300"
-
- resp = self.db.find(
- selector,
- fields=["_id"],
- sort=[{"_id": "desc"}],
- limit=5,
- return_raw=True,
- bookmark=bookmark2,
- )
- docs = resp["docs"]
- assert len(docs) == 0
-
- def test_bad_bookmark(self):
- try:
- self.db.find({"_id": {"$gt": 0}}, bookmark="bad-bookmark")
- except Exception as e:
- resp = e.response.json()
- assert resp["error"] == "invalid_bookmark"
- assert resp["reason"] == 'Invalid bookmark value: "bad-bookmark"'
- assert e.response.status_code == 400
- else:
- raise AssertionError("Should have thrown error for bad bookmark")
-
- def test_throws_error_on_text_bookmark(self):
- bookmark = (
- "g2wAAAABaANkABFub2RlMUBjb3VjaGRiLm5ldGwAAAACYQBiP____2poAkY_8AAAAAAAAGEHag"
- )
- try:
- self.db.find({"_id": {"$gt": 0}}, bookmark=bookmark)
- except Exception as e:
- resp = e.response.json()
- assert resp["error"] == "invalid_bookmark"
- assert e.response.status_code == 400
- else:
- raise AssertionError("Should have thrown error for bad bookmark")
-
- def test_index_pagination(self):
- self.db.create_index(["location"])
- selector = {"location": {"$gt": "A"}}
- resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True)
- docs = resp["docs"]
- bookmark1 = resp["bookmark"]
- assert len(docs) == 5
- assert docs[0]["_id"] == "100"
-
- resp = self.db.find(
- selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1
- )
- docs = resp["docs"]
- bookmark2 = resp["bookmark"]
- assert len(docs) == 3
- assert docs[0]["_id"] == "600"
-
- resp = self.db.find(
- selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2
- )
- docs = resp["docs"]
- assert len(docs) == 0
-
- def test_index_pagination_two_keys(self):
- self.db.create_index(["location", "user_id"])
- selector = {"location": {"$gt": "A"}, "user_id": {"$gte": 1}}
- resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True)
- docs = resp["docs"]
- bookmark1 = resp["bookmark"]
- assert len(docs) == 5
- assert docs[0]["_id"] == "100"
-
- resp = self.db.find(
- selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1
- )
- docs = resp["docs"]
- bookmark2 = resp["bookmark"]
- assert len(docs) == 3
- assert docs[0]["_id"] == "600"
-
- resp = self.db.find(
- selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2
- )
- docs = resp["docs"]
- assert len(docs) == 0
-
- def test_index_pagination_reverse(self):
- self.db.create_index(["location", "user_id"])
- selector = {"location": {"$gt": "A"}, "user_id": {"$gte": 1}}
- sort = [{"location": "desc"}, {"user_id": "desc"}]
- resp = self.db.find(
- selector, fields=["_id"], sort=sort, limit=5, return_raw=True
- )
- docs = resp["docs"]
- bookmark1 = resp["bookmark"]
- assert len(docs) == 5
- assert docs[0]["_id"] == "800"
-
- resp = self.db.find(
- selector,
- fields=["_id"],
- limit=5,
- sort=sort,
- return_raw=True,
- bookmark=bookmark1,
- )
- docs = resp["docs"]
- bookmark2 = resp["bookmark"]
- assert len(docs) == 3
- assert docs[0]["_id"] == "300"
-
- resp = self.db.find(
- selector,
- fields=["_id"],
- limit=5,
- sort=sort,
- return_raw=True,
- bookmark=bookmark2,
- )
- docs = resp["docs"]
- assert len(docs) == 0
-
- def test_index_pagination_same_emitted_key(self):
- self.db.create_index(["same"])
- selector = {"same": {"$gt": ""}}
- resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True)
- docs = resp["docs"]
- bookmark1 = resp["bookmark"]
- assert len(docs) == 5
- assert docs[0]["_id"] == "100"
-
- resp = self.db.find(
- selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1
- )
- docs = resp["docs"]
- bookmark2 = resp["bookmark"]
- assert len(docs) == 3
- assert docs[0]["_id"] == "600"
-
- resp = self.db.find(
- selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2
- )
- docs = resp["docs"]
- assert len(docs) == 0
diff --git a/src/mango/test/15-execution-stats-test.py b/src/mango/test/15-execution-stats-test.py
deleted file mode 100644
index 537a19add..000000000
--- a/src/mango/test/15-execution-stats-test.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-
-import mango
-import os
-import unittest
-
-
-class ExecutionStatsTests(mango.UserDocsTests):
- def test_simple_json_index(self):
- resp = self.db.find({"age": {"$lt": 35}}, return_raw=True, executionStats=True)
- self.assertEqual(len(resp["docs"]), 3)
- self.assertEqual(resp["execution_stats"]["total_keys_examined"], 0)
- self.assertEqual(resp["execution_stats"]["total_docs_examined"], 3)
- self.assertEqual(resp["execution_stats"]["total_quorum_docs_examined"], 0)
- self.assertEqual(resp["execution_stats"]["results_returned"], 3)
- # See https://github.com/apache/couchdb/issues/1732
- # Erlang os:timestamp() only has ms accuracy on Windows!
- if os.name != "nt":
- self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0)
-
- def test_no_execution_stats(self):
- resp = self.db.find({"age": {"$lt": 35}}, return_raw=True, executionStats=False)
- assert "execution_stats" not in resp
-
- def test_quorum_json_index(self):
- resp = self.db.find(
- {"age": {"$lt": 35}}, return_raw=True, r=3, executionStats=True
- )
- self.assertEqual(len(resp["docs"]), 3)
- self.assertEqual(resp["execution_stats"]["total_keys_examined"], 0)
- self.assertEqual(resp["execution_stats"]["total_docs_examined"], 0)
- self.assertEqual(resp["execution_stats"]["total_quorum_docs_examined"], 3)
- self.assertEqual(resp["execution_stats"]["results_returned"], 3)
- # See https://github.com/apache/couchdb/issues/1732
- # Erlang os:timestamp() only has ms accuracy on Windows!
- if os.name != "nt":
- self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0)
-
- def test_results_returned_limit(self):
- resp = self.db.find(
- {"age": {"$lt": 35}}, limit=2, return_raw=True, executionStats=True
- )
- self.assertEqual(resp["execution_stats"]["results_returned"], len(resp["docs"]))
-
- def test_no_matches_index_scan(self):
- resp = self.db.find(
- {"age": {"$lt": 35}, "nomatch": "me"}, return_raw=True, executionStats=True
- )
- self.assertEqual(resp["execution_stats"]["total_docs_examined"], 3)
- self.assertEqual(resp["execution_stats"]["results_returned"], 0)
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class ExecutionStatsTests_Text(mango.UserDocsTextTests):
- def test_simple_text_index(self):
- resp = self.db.find(
- {"$text": "Stephanie"}, return_raw=True, executionStats=True
- )
- self.assertEqual(len(resp["docs"]), 1)
- self.assertEqual(resp["execution_stats"]["total_keys_examined"], 0)
- self.assertEqual(resp["execution_stats"]["total_docs_examined"], 1)
- self.assertEqual(resp["execution_stats"]["total_quorum_docs_examined"], 0)
- self.assertEqual(resp["execution_stats"]["results_returned"], 1)
- self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0)
-
- def test_no_execution_stats(self):
- resp = self.db.find({"$text": "Stephanie"}, return_raw=True)
- self.assertNotIn("execution_stats", resp)
diff --git a/src/mango/test/16-index-selectors-test.py b/src/mango/test/16-index-selectors-test.py
deleted file mode 100644
index 4510065f5..000000000
--- a/src/mango/test/16-index-selectors-test.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import copy
-import mango
-import unittest
-
-DOCS = [
- {"_id": "100", "name": "Jimi", "location": "AUS", "user_id": 1, "same": "value"},
- {"_id": "200", "name": "Eddie", "location": "BRA", "user_id": 2, "same": "value"},
- {"_id": "300", "name": "Harry", "location": "CAN", "user_id": 3, "same": "value"},
- {"_id": "400", "name": "Eddie", "location": "DEN", "user_id": 4, "same": "value"},
- {"_id": "500", "name": "Jones", "location": "ETH", "user_id": 5, "same": "value"},
- {
- "_id": "600",
- "name": "Winnifried",
- "location": "FRA",
- "user_id": 6,
- "same": "value",
- },
- {"_id": "700", "name": "Marilyn", "location": "GHA", "user_id": 7, "same": "value"},
- {"_id": "800", "name": "Sandra", "location": "ZAR", "user_id": 8, "same": "value"},
-]
-
-oldschoolnoselectorddoc = {
- "_id": "_design/oldschoolnoselector",
- "language": "query",
- "views": {
- "oldschoolnoselector": {
- "map": {"fields": {"location": "asc"}},
- "reduce": "_count",
- "options": {"def": {"fields": ["location"]}},
- }
- },
-}
-
-oldschoolddoc = {
- "_id": "_design/oldschool",
- "language": "query",
- "views": {
- "oldschool": {
- "map": {
- "fields": {"location": "asc"},
- "selector": {"location": {"$gte": "FRA"}},
- },
- "reduce": "_count",
- "options": {"def": {"fields": ["location"]}},
- }
- },
-}
-
-oldschoolddoctext = {
- "_id": "_design/oldschooltext",
- "language": "query",
- "indexes": {
- "oldschooltext": {
- "index": {
- "default_analyzer": "keyword",
- "default_field": {},
- "selector": {"location": {"$gte": "FRA"}},
- "fields": [{"name": "location", "type": "string"}],
- "index_array_lengths": True,
- },
- "analyzer": {
- "name": "perfield",
- "default": "keyword",
- "fields": {"$default": "standard"},
- },
- }
- },
-}
-
-
-class IndexSelectorJson(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- self.db.save_docs(copy.deepcopy(DOCS))
-
- def test_saves_partial_filter_selector_in_index(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_index(["location"], partial_filter_selector=selector)
- indexes = self.db.list_indexes()
- self.assertEqual(indexes[1]["def"]["partial_filter_selector"], selector)
-
- def test_partial_filter_only_in_return_if_not_default(self):
- self.db.create_index(["location"])
- index = self.db.list_indexes()[1]
- self.assertEqual("partial_filter_selector" in index["def"], False)
-
- def test_saves_selector_in_index_throws(self):
- selector = {"location": {"$gte": "FRA"}}
- try:
- self.db.create_index(["location"], selector=selector)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad index creation")
-
- def test_uses_partial_index_for_query_selector(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_index(
- ["location"],
- partial_filter_selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- resp = self.db.find(selector, explain=True, use_index="Selected")
- self.assertEqual(resp["index"]["name"], "Selected")
- docs = self.db.find(selector, use_index="Selected")
- self.assertEqual(len(docs), 3)
-
- def test_uses_partial_index_with_different_selector(self):
- selector = {"location": {"$gte": "FRA"}}
- selector2 = {"location": {"$gte": "A"}}
- self.db.create_index(
- ["location"],
- partial_filter_selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- resp = self.db.find(selector2, explain=True, use_index="Selected")
- self.assertEqual(resp["index"]["name"], "Selected")
- docs = self.db.find(selector2, use_index="Selected")
- self.assertEqual(len(docs), 3)
-
- def test_doesnot_use_selector_when_not_specified(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_index(
- ["location"],
- partial_filter_selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- resp = self.db.find(selector, explain=True)
- self.assertEqual(resp["index"]["name"], "_all_docs")
-
- def test_doesnot_use_selector_when_not_specified_with_index(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_index(
- ["location"],
- partial_filter_selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- self.db.create_index(["location"], name="NotSelected")
- resp = self.db.find(selector, explain=True)
- self.assertEqual(resp["index"]["name"], "NotSelected")
-
- def test_old_selector_with_no_selector_still_supported(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.save_doc(oldschoolnoselectorddoc)
- resp = self.db.find(selector, explain=True, use_index="oldschoolnoselector")
- self.assertEqual(resp["index"]["name"], "oldschoolnoselector")
- docs = self.db.find(selector, use_index="oldschoolnoselector")
- self.assertEqual(len(docs), 3)
-
- def test_old_selector_still_supported(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.save_doc(oldschoolddoc)
- resp = self.db.find(selector, explain=True, use_index="oldschool")
- self.assertEqual(resp["index"]["name"], "oldschool")
- docs = self.db.find(selector, use_index="oldschool")
- self.assertEqual(len(docs), 3)
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_text_saves_partialfilterselector_in_index(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_text_index(
- fields=[{"name": "location", "type": "string"}],
- partial_filter_selector=selector,
- )
- indexes = self.db.list_indexes()
- self.assertEqual(indexes[1]["def"]["partial_filter_selector"], selector)
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_text_uses_partial_index_for_query_selector(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_text_index(
- fields=[{"name": "location", "type": "string"}],
- partial_filter_selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- resp = self.db.find(selector, explain=True, use_index="Selected")
- self.assertEqual(resp["index"]["name"], "Selected")
- docs = self.db.find(selector, use_index="Selected", fields=["_id", "location"])
- self.assertEqual(len(docs), 3)
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_text_uses_partial_index_with_different_selector(self):
- selector = {"location": {"$gte": "FRA"}}
- selector2 = {"location": {"$gte": "A"}}
- self.db.create_text_index(
- fields=[{"name": "location", "type": "string"}],
- partial_filter_selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- resp = self.db.find(selector2, explain=True, use_index="Selected")
- self.assertEqual(resp["index"]["name"], "Selected")
- docs = self.db.find(selector2, use_index="Selected")
- self.assertEqual(len(docs), 3)
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_text_doesnot_use_selector_when_not_specified(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_text_index(
- fields=[{"name": "location", "type": "string"}],
- partial_filter_selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- resp = self.db.find(selector, explain=True)
- self.assertEqual(resp["index"]["name"], "_all_docs")
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_text_doesnot_use_selector_when_not_specified_with_index(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_text_index(
- fields=[{"name": "location", "type": "string"}],
- partial_filter_selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- self.db.create_text_index(
- fields=[{"name": "location", "type": "string"}], name="NotSelected"
- )
- resp = self.db.find(selector, explain=True)
- self.assertEqual(resp["index"]["name"], "NotSelected")
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_text_old_selector_still_supported(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.save_doc(oldschoolddoctext)
- resp = self.db.find(selector, explain=True, use_index="oldschooltext")
- self.assertEqual(resp["index"]["name"], "oldschooltext")
- docs = self.db.find(selector, use_index="oldschooltext")
- self.assertEqual(len(docs), 3)
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_text_old_selector_still_supported_via_api(self):
- selector = {"location": {"$gte": "FRA"}}
- self.db.create_text_index(
- fields=[{"name": "location", "type": "string"}],
- selector=selector,
- ddoc="Selected",
- name="Selected",
- )
- docs = self.db.find({"location": {"$exists": True}}, use_index="Selected")
- self.assertEqual(len(docs), 3)
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_text_partial_filter_only_in_return_if_not_default(self):
- self.db.create_text_index(fields=[{"name": "location", "type": "string"}])
- index = self.db.list_indexes()[1]
- self.assertEqual("partial_filter_selector" in index["def"], False)
diff --git a/src/mango/test/17-multi-type-value-test.py b/src/mango/test/17-multi-type-value-test.py
deleted file mode 100644
index 21e7afda4..000000000
--- a/src/mango/test/17-multi-type-value-test.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import copy
-import mango
-import unittest
-
-DOCS = [
- {"_id": "1", "name": "Jimi", "age": 10},
- {"_id": "2", "name": {"forename": "Eddie"}, "age": 20},
- {"_id": "3", "name": None, "age": 30},
- {"_id": "4", "name": 1, "age": 40},
- {"_id": "5", "forename": "Sam", "age": 50},
-]
-
-
-class MultiValueFieldTests:
- def test_can_query_with_name(self):
- docs = self.db.find({"name": {"$exists": True}})
- self.assertEqual(len(docs), 4)
- for d in docs:
- self.assertIn("name", d)
-
- def test_can_query_with_name_subfield(self):
- docs = self.db.find({"name.forename": {"$exists": True}})
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["_id"], "2")
-
- def test_can_query_with_name_range(self):
- docs = self.db.find({"name": {"$gte": 0}})
- # expect to include "Jimi", 1 and {"forename":"Eddie"}
- self.assertEqual(len(docs), 3)
- for d in docs:
- self.assertIn("name", d)
-
- def test_can_query_with_age_and_name_range(self):
- docs = self.db.find({"age": {"$gte": 0, "$lt": 40}, "name": {"$gte": 0}})
- # expect to include "Jimi" and {"forename":"Eddie"}; 1 is excluded because its age (40) falls outside the range
- self.assertEqual(len(docs), 2)
- for d in docs:
- self.assertIn("name", d)
-
-
-class MultiValueFieldJSONTests(mango.DbPerClass, MultiValueFieldTests):
- def setUp(self):
- self.db.recreate()
- self.db.save_docs(copy.deepcopy(DOCS))
- self.db.create_index(["name"])
- self.db.create_index(["age", "name"])
-
-
-# @unittest.skipUnless(mango.has_text_service(), "requires text service")
-# class MultiValueFieldTextTests(MultiValueFieldDocsNoIndexes, OperatorTests):
-# pass
-
-
-class MultiValueFieldAllDocsTests(mango.DbPerClass, MultiValueFieldTests):
- def setUp(self):
- self.db.recreate()
- self.db.save_docs(copy.deepcopy(DOCS))
diff --git a/src/mango/test/18-json-sort.py b/src/mango/test/18-json-sort.py
deleted file mode 100644
index d4e60a32c..000000000
--- a/src/mango/test/18-json-sort.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import copy
-import unittest
-
-DOCS = [
- {"_id": "1", "name": "Jimi", "age": 10, "cars": 1},
- {"_id": "2", "name": "Eddie", "age": 20, "cars": 1},
- {"_id": "3", "name": "Jane", "age": 30, "cars": 2},
- {"_id": "4", "name": "Mary", "age": 40, "cars": 2},
- {"_id": "5", "name": "Sam", "age": 50, "cars": 3},
-]
-
-
-class JSONIndexSortOptimisations(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- self.db.save_docs(copy.deepcopy(DOCS))
-
- def test_works_for_basic_case(self):
- self.db.create_index(["cars", "age"], name="cars-age")
- selector = {"cars": "2", "age": {"$gt": 10}}
- explain = self.db.find(selector, sort=["age"], explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age")
- self.assertEqual(explain["mrargs"]["direction"], "fwd")
-
- def test_works_for_all_fields_specified(self):
- self.db.create_index(["cars", "age"], name="cars-age")
- selector = {"cars": "2", "age": {"$gt": 10}}
- explain = self.db.find(selector, sort=["cars", "age"], explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age")
-
- def test_works_for_no_sort_fields_specified(self):
- self.db.create_index(["cars", "age"], name="cars-age")
- selector = {"cars": {"$gt": 10}, "age": {"$gt": 10}}
- explain = self.db.find(selector, explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age")
-
- def test_works_for_opp_dir_sort(self):
- self.db.create_index(["cars", "age"], name="cars-age")
- selector = {"cars": "2", "age": {"$gt": 10}}
- explain = self.db.find(selector, sort=[{"age": "desc"}], explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age")
- self.assertEqual(explain["mrargs"]["direction"], "rev")
-
- def test_not_work_for_non_constant_field(self):
- self.db.create_index(["cars", "age"], name="cars-age")
- selector = {"cars": {"$gt": 10}, "age": {"$gt": 10}}
- try:
- self.db.find(selector, explain=True, sort=["age"])
- except Exception as e:
- resp = e.response.json()
- self.assertEqual(resp["error"], "no_usable_index")
- else:
- raise AssertionError("Should have raised no_usable_index")
-
- def test_three_index_one(self):
- self.db.create_index(["cars", "age", "name"], name="cars-age-name")
- selector = {"cars": "2", "age": 10, "name": {"$gt": "AA"}}
- explain = self.db.find(selector, sort=["name"], explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age-name")
-
- def test_three_index_two(self):
- self.db.create_index(["cars", "age", "name"], name="cars-age-name")
- selector = {"cars": "2", "name": "Eddie", "age": {"$gt": 10}}
- explain = self.db.find(selector, sort=["age"], explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age-name")
-
- def test_three_index_fails(self):
- self.db.create_index(["cars", "age", "name"], name="cars-age-name")
- selector = {"name": "Eddie", "age": {"$gt": 1}, "cars": {"$gt": "1"}}
- try:
- self.db.find(selector, explain=True, sort=["name"])
- except Exception as e:
- resp = e.response.json()
- self.assertEqual(resp["error"], "no_usable_index")
- else:
- raise AssertionError("Should have raised no_usable_index")
-
- def test_empty_sort(self):
- self.db.create_index(["cars", "age", "name"], name="cars-age-name")
- selector = {"name": {"$gt": "Eddie"}, "age": 10, "cars": {"$gt": "1"}}
- explain = self.db.find(selector, explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age-name")
-
- def test_in_between(self):
- self.db.create_index(["cars", "age", "name"], name="cars-age-name")
- selector = {"name": "Eddie", "age": 10, "cars": {"$gt": "1"}}
- explain = self.db.find(selector, explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age-name")
-
- try:
- self.db.find(selector, sort=["cars", "name"], explain=True)
- except Exception as e:
- resp = e.response.json()
- self.assertEqual(resp["error"], "no_usable_index")
- else:
- raise AssertionError("Should have raised no_usable_index")
-
- def test_ignore_after_set_sort_value(self):
- self.db.create_index(["cars", "age", "name"], name="cars-age-name")
- selector = {"age": {"$gt": 10}, "cars": 2, "name": {"$gt": "A"}}
- explain = self.db.find(selector, sort=["age"], explain=True)
- self.assertEqual(explain["index"]["name"], "cars-age-name")
-
- def test_not_use_index_if_other_fields_in_sort(self):
- self.db.create_index(["cars", "age"], name="cars-age")
- selector = {"age": 10, "cars": {"$gt": "1"}}
- try:
- self.db.find(selector, sort=["cars", "name"], explain=True)
- except Exception as e:
- resp = e.response.json()
- self.assertEqual(resp["error"], "no_usable_index")
- else:
- raise AssertionError("Should have raised no_usable_index")
diff --git a/src/mango/test/19-find-conflicts.py b/src/mango/test/19-find-conflicts.py
deleted file mode 100644
index bf865d6ea..000000000
--- a/src/mango/test/19-find-conflicts.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import copy
-
-DOC = [{"_id": "doc", "a": 2}]
-
-CONFLICT = [{"_id": "doc", "_rev": "1-23202479633c2b380f79507a776743d5", "a": 1}]
-
-
-class ChooseCorrectIndexForDocs(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- self.db.save_docs(copy.deepcopy(DOC))
- self.db.save_docs_with_conflicts(copy.deepcopy(CONFLICT))
-
- def test_retrieve_conflicts(self):
- self.db.create_index(["_conflicts"])
- result = self.db.find({"_conflicts": {"$exists": True}}, conflicts=True)
- self.assertEqual(
- result[0]["_conflicts"][0], "1-23202479633c2b380f79507a776743d5"
- )
- self.assertEqual(result[0]["_rev"], "1-3975759ccff3842adf690a5c10caee42")
diff --git a/src/mango/test/20-no-timeout-test.py b/src/mango/test/20-no-timeout-test.py
deleted file mode 100644
index cffdfc335..000000000
--- a/src/mango/test/20-no-timeout-test.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import copy
-import unittest
-
-
-class LongRunningMangoTest(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- docs = []
- for i in range(100000):
- docs.append({"_id": str(i), "another": "field"})
- # flush in batches of 20000 so that all 100000 docs get saved
- if (i + 1) % 20000 == 0:
- self.db.save_docs(docs)
- docs = []
-
- # This test should run to completion and not time out
- def test_query_does_not_time_out(self):
- selector = {"_id": {"$gt": 0}, "another": "wrong"}
- docs = self.db.find(selector)
- self.assertEqual(len(docs), 0)
diff --git a/src/mango/test/21-empty-selector-tests.py b/src/mango/test/21-empty-selector-tests.py
deleted file mode 100644
index 8fd76fcd5..000000000
--- a/src/mango/test/21-empty-selector-tests.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import json
-import mango
-import unittest
-import user_docs
-import math
-
-
-def make_empty_selector_suite(klass):
- class EmptySelectorTestCase(klass):
- def test_empty(self):
- resp = self.db.find({}, explain=True)
- self.assertEqual(resp["index"]["type"], "special")
-
- def test_empty_array_or(self):
- resp = self.db.find({"$or": []}, explain=True)
- self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE)
- docs = self.db.find({"$or": []})
- assert len(docs) == 0
-
- def test_empty_array_or_with_age(self):
- resp = self.db.find({"age": 22, "$or": []}, explain=True)
- self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE)
- docs = self.db.find({"age": 22, "$or": []})
- assert len(docs) == 1
-
- def test_empty_array_in_with_age(self):
- resp = self.db.find({"age": 22, "company": {"$in": []}}, explain=True)
- self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE)
- docs = self.db.find({"age": 22, "company": {"$in": []}})
- assert len(docs) == 0
-
- def test_empty_array_and_with_age(self):
- resp = self.db.find({"age": 22, "$and": []}, explain=True)
- self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE)
- docs = self.db.find({"age": 22, "$and": []})
- assert len(docs) == 1
-
- def test_empty_array_all_age(self):
- resp = self.db.find({"age": 22, "company": {"$all": []}}, explain=True)
- self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE)
- docs = self.db.find({"age": 22, "company": {"$all": []}})
- assert len(docs) == 0
-
- def test_empty_array_nested_all_with_age(self):
- resp = self.db.find(
- {"age": 22, "$and": [{"company": {"$all": []}}]}, explain=True
- )
- self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE)
- docs = self.db.find({"age": 22, "$and": [{"company": {"$all": []}}]})
- assert len(docs) == 0
-
- def test_empty_arrays_complex(self):
- resp = self.db.find({"$or": [], "a": {"$in": []}}, explain=True)
- self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE)
- docs = self.db.find({"$or": [], "a": {"$in": []}})
- assert len(docs) == 0
-
- def test_empty_nin(self):
- resp = self.db.find({"favorites": {"$nin": []}}, explain=True)
- self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE)
- docs = self.db.find({"favorites": {"$nin": []}})
- assert len(docs) == len(user_docs.DOCS)
-
- return EmptySelectorTestCase
-
-
-class EmptySelectorNoIndexTests(
- make_empty_selector_suite(mango.UserDocsTestsNoIndexes)
-):
- pass
-
-
-@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class EmptySelectorTextTests(make_empty_selector_suite(mango.UserDocsTextTests)):
- pass
-
-
-class EmptySelectorUserDocTests(make_empty_selector_suite(mango.UserDocsTests)):
- pass
diff --git a/src/mango/test/README.md b/src/mango/test/README.md
deleted file mode 100644
index c2f8aada2..000000000
--- a/src/mango/test/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-Mango Tests
-===========
-
-CouchDB should be started with `./dev/run -a testuser:testpass`.
-
- To run these, do this in the Mango top-level directory:
-
- $ python3 -m venv venv
- $ . venv/bin/activate
- $ pip3 install -r requirements.txt
- $ venv/bin/nose2
-
-To run an individual test suite:
- nose2 12-use-correct-index-test
-
-To run the tests with text index support:
- MANGO_TEXT_INDEXES=1 nose2 test
-
-
-Test configuration
-==================
-
-The following environment variables can be used to configure the test fixtures:
-
- * `COUCH_HOST` - root URL (including port) of the CouchDB instance to run the tests against. Default is `"http://127.0.0.1:15984"`.
- * `COUCH_USER` - CouchDB username (with admin permissions). Default is `"testuser"`.
- * `COUCH_PASSWORD` - CouchDB password. Default is `"testpass"`.
- * `COUCH_AUTH_HEADER` - Optional Authorization header value. If specified, this is used instead of basic authentication with the username/password variables above.
- * `MANGO_TEXT_INDEXES` - Set to `"1"` to also run the tests that are only applicable to text indexes.
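For example, to point the fixtures at a locally running dev cluster and include the text-index suites, an invocation along these lines should work (a sketch using only the defaults listed above; adjust the host, credentials, and suite name to your setup):

    $ export COUCH_HOST="http://127.0.0.1:15984"
    $ export COUCH_USER="testuser"
    $ export COUCH_PASSWORD="testpass"
    $ MANGO_TEXT_INDEXES=1 venv/bin/nose2 15-execution-stats-test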
diff --git a/src/mango/test/friend_docs.py b/src/mango/test/friend_docs.py
deleted file mode 100644
index c6442267e..000000000
--- a/src/mango/test/friend_docs.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-"""
-Generated with http://www.json-generator.com/
-
-With this pattern:
-
-[
- '{{repeat(15)}}',
- {
- _id: '{{index()}}',
- name: {
- first: '{{firstName()}}',
- last: '{{surname()}}'
- },
- friends: [
- '{{repeat(3)}}',
- {
- id: '{{index()}}',
- name: {
- first: '{{firstName()}}',
- last: '{{surname()}}'
- },
- type: '{{random("personal", "work")}}'
- }
- ]
- }
-]
-"""
-
-import copy
-
-
-def setup(db, index_type="view"):
- db.recreate()
- db.save_docs(copy.deepcopy(DOCS))
- if index_type == "view":
- add_view_indexes(db)
- elif index_type == "text":
- add_text_indexes(db)
-
-
-def add_text_indexes(db):
- db.create_text_index()
-
-
-DOCS = [
- {
- "_id": "54a43171d37ae5e81bff5ae0",
- "user_id": 0,
- "name": {"first": "Ochoa", "last": "Fox"},
- "friends": [
- {
- "id": 0,
- "name": {"first": "Sherman", "last": "Davidson"},
- "type": "personal",
- },
- {
- "id": 1,
- "name": {"first": "Vargas", "last": "Mendez"},
- "type": "personal",
- },
- {"id": 2, "name": {"first": "Sheppard", "last": "Cotton"}, "type": "work"},
- ],
- },
- {
- "_id": "54a43171958485dc32917c50",
- "user_id": 1,
- "name": {"first": "Sheppard", "last": "Cotton"},
- "friends": [
- {"id": 0, "name": {"first": "Ochoa", "last": "Fox"}, "type": "work"},
- {
- "id": 1,
- "name": {"first": "Vargas", "last": "Mendez"},
- "type": "personal",
- },
- {"id": 2, "name": {"first": "Kendra", "last": "Burns"}, "type": "work"},
- ],
- },
- {
- "_id": "54a431711cf025ba74bea899",
- "user_id": 2,
- "name": {"first": "Hunter", "last": "Wells"},
- "friends": [
- {"id": 0, "name": {"first": "Estes", "last": "Fischer"}, "type": "work"},
- {
- "id": 1,
- "name": {"first": "Farrell", "last": "Maddox"},
- "type": "personal",
- },
- {"id": 2, "name": {"first": "Kendra", "last": "Burns"}, "type": "work"},
- ],
- },
- {
- "_id": "54a4317151a70a9881ac28a4",
- "user_id": 3,
- "name": {"first": "Millicent", "last": "Guy"},
- "friends": [
- {"id": 0, "name": {"first": "Luella", "last": "Mendoza"}, "type": "work"},
- {
- "id": 1,
- "name": {"first": "Melanie", "last": "Foster"},
- "type": "personal",
- },
- {"id": 2, "name": {"first": "Hopkins", "last": "Scott"}, "type": "work"},
- ],
- },
- {
- "_id": "54a43171d946b78703a0e076",
- "user_id": 4,
- "name": {"first": "Elisabeth", "last": "Brady"},
- "friends": [
- {"id": 0, "name": {"first": "Sofia", "last": "Workman"}, "type": "work"},
- {"id": 1, "name": {"first": "Alisha", "last": "Reilly"}, "type": "work"},
- {"id": 2, "name": {"first": "Ochoa", "last": "Burch"}, "type": "personal"},
- ],
- },
- {
- "_id": "54a4317118abd7f1992464ee",
- "user_id": 5,
- "name": {"first": "Pollard", "last": "French"},
- "friends": [
- {
- "id": 0,
- "name": {"first": "Hollie", "last": "Juarez"},
- "type": "personal",
- },
- {"id": 1, "name": {"first": "Nelda", "last": "Newton"}, "type": "personal"},
- {"id": 2, "name": {"first": "Yang", "last": "Pace"}, "type": "personal"},
- ],
- },
- {
- "_id": "54a43171f139e63d6579121e",
- "user_id": 6,
- "name": {"first": "Acevedo", "last": "Morales"},
- "friends": [
- {"id": 0, "name": {"first": "Payne", "last": "Berry"}, "type": "personal"},
- {
- "id": 1,
- "name": {"first": "Rene", "last": "Valenzuela"},
- "type": "personal",
- },
- {"id": 2, "name": {"first": "Dora", "last": "Gallegos"}, "type": "work"},
- ],
- },
- {
- "_id": "54a431719783cef80876dde8",
- "user_id": 7,
- "name": {"first": "Cervantes", "last": "Marquez"},
- "friends": [
- {
- "id": 0,
- "name": {"first": "Maxwell", "last": "Norman"},
- "type": "personal",
- },
- {"id": 1, "name": {"first": "Shields", "last": "Bass"}, "type": "personal"},
- {"id": 2, "name": {"first": "Luz", "last": "Jacobson"}, "type": "work"},
- ],
- },
- {
- "_id": "54a43171ecc7540d1f7aceae",
- "user_id": 8,
- "name": {"first": "West", "last": "Morrow"},
- "friends": [
- {
- "id": 0,
- "name": {"first": "Townsend", "last": "Dixon"},
- "type": "personal",
- },
- {
- "id": 1,
- "name": {"first": "Callahan", "last": "Buck"},
- "type": "personal",
- },
- {
- "id": 2,
- "name": {"first": "Rachel", "last": "Fletcher"},
- "type": "personal",
- },
- ],
- },
- {
- "_id": "54a4317113e831f4af041a0a",
- "user_id": 9,
- "name": {"first": "Cotton", "last": "House"},
- "friends": [
- {
- "id": 0,
- "name": {"first": "Mckenzie", "last": "Medina"},
- "type": "personal",
- },
- {"id": 1, "name": {"first": "Cecilia", "last": "Miles"}, "type": "work"},
- {"id": 2, "name": {"first": "Guerra", "last": "Cervantes"}, "type": "work"},
- ],
- },
- {
- "_id": "54a43171686eb1f48ebcbe01",
- "user_id": 10,
- "name": {"first": "Wright", "last": "Rivas"},
- "friends": [
- {
- "id": 0,
- "name": {"first": "Campos", "last": "Freeman"},
- "type": "personal",
- },
- {
- "id": 1,
- "name": {"first": "Christian", "last": "Ferguson"},
- "type": "personal",
- },
- {"id": 2, "name": {"first": "Doreen", "last": "Wilder"}, "type": "work"},
- ],
- },
- {
- "_id": "54a43171a4f3d5638c162f4f",
- "user_id": 11,
- "name": {"first": "Lorene", "last": "Dorsey"},
- "friends": [
- {
- "id": 0,
- "name": {"first": "Gibbs", "last": "Mccarty"},
- "type": "personal",
- },
- {"id": 1, "name": {"first": "Neal", "last": "Franklin"}, "type": "work"},
- {"id": 2, "name": {"first": "Kristy", "last": "Head"}, "type": "personal"},
- ],
- "bestfriends": ["Wolverine", "Cyclops"],
- },
- {
- "_id": "54a431719faa420a5b4fbeb0",
- "user_id": 12,
- "name": {"first": "Juanita", "last": "Cook"},
- "friends": [
- {"id": 0, "name": {"first": "Wilkins", "last": "Chang"}, "type": "work"},
- {"id": 1, "name": {"first": "Haney", "last": "Rivera"}, "type": "work"},
- {"id": 2, "name": {"first": "Lauren", "last": "Manning"}, "type": "work"},
- ],
- },
- {
- "_id": "54a43171e65d35f9ee8c53c0",
- "user_id": 13,
- "name": {"first": "Levy", "last": "Osborn"},
- "friends": [
- {"id": 0, "name": {"first": "Vinson", "last": "Vargas"}, "type": "work"},
- {"id": 1, "name": {"first": "Felicia", "last": "Beach"}, "type": "work"},
- {"id": 2, "name": {"first": "Nadine", "last": "Kemp"}, "type": "work"},
- ],
- "results": [82, 85, 88],
- },
- {
- "_id": "54a4317132f2c81561833259",
- "user_id": 14,
- "name": {"first": "Christina", "last": "Raymond"},
- "friends": [
- {"id": 0, "name": {"first": "Herrera", "last": "Walton"}, "type": "work"},
- {"id": 1, "name": {"first": "Hahn", "last": "Rutledge"}, "type": "work"},
- {"id": 2, "name": {"first": "Stacie", "last": "Harding"}, "type": "work"},
- ],
- },
- {
- "_id": "589f32af493145f890e1b051",
- "user_id": 15,
- "name": {"first": "Tanisha", "last": "Bowers"},
- "friends": [
- {"id": 0, "name": {"first": "Ochoa", "last": "Pratt"}, "type": "personal"},
- {"id": 1, "name": {"first": "Ochoa", "last": "Romero"}, "type": "personal"},
- {"id": 2, "name": {"first": "Ochoa", "last": "Bowman"}, "type": "work"},
- ],
- },
-]
diff --git a/src/mango/test/limit_docs.py b/src/mango/test/limit_docs.py
deleted file mode 100644
index 6c12790be..000000000
--- a/src/mango/test/limit_docs.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import copy
-
-
-def setup(db, index_type="view"):
- db.recreate()
- db.save_docs(copy.deepcopy(DOCS))
- if index_type == "view":
- add_view_indexes(db)
- elif index_type == "text":
- add_text_indexes(db)
-
-
-def add_text_indexes(db):
- db.create_text_index()
-
-
-DOCS = [
- {"_id": "54af50626de419f5109c962f", "user_id": 0, "age": 10},
- {"_id": "54af50622071121b25402dc3", "user_id": 1, "age": 11},
- {"_id": "54af50623809e19159a3cdd0", "user_id": 2, "age": 12},
- {"_id": "54af50629f45a0f49a441d01", "user_id": 3, "age": 13},
- {"_id": "54af50620f1755c22359a362", "user_id": 4, "age": 14},
- {"_id": "54af5062dd6f6c689ad2ca23", "user_id": 5, "age": 15},
- {"_id": "54af50623e89b432be1187b8", "user_id": 6, "age": 16},
- {"_id": "54af5062932a00270a3b5ab0", "user_id": 7, "age": 17},
- {"_id": "54af5062df773d69174e3345", "filtered_array": [1, 2, 3], "age": 18},
- {"_id": "54af50629c1153b9e21e346d", "filtered_array": [1, 2, 3], "age": 19},
- {"_id": "54af5062dabb7cc4b60e0c95", "user_id": 10, "age": 20},
- {"_id": "54af5062204996970a4439a2", "user_id": 11, "age": 21},
- {"_id": "54af50629cea39e8ea52bfac", "user_id": 12, "age": 22},
- {"_id": "54af50620597c094f75db2a1", "user_id": 13, "age": 23},
- {"_id": "54af50628d4048de0010723c", "user_id": 14, "age": 24},
- {"_id": "54af5062f339b6f44f52faf6", "user_id": 15, "age": 25},
- {"_id": "54af5062a893f17ea4402031", "user_id": 16, "age": 26},
- {"_id": "54af5062323dbc7077deb60a", "user_id": 17, "age": 27},
- {"_id": "54af506224db85bd7fcd0243", "filtered_array": [1, 2, 3], "age": 28},
- {"_id": "54af506255bb551c9cc251bf", "filtered_array": [1, 2, 3], "age": 29},
- {"_id": "54af50625a97394e07d718a1", "filtered_array": [1, 2, 3], "age": 30},
- {"_id": "54af506223f51d586b4ef529", "user_id": 21, "age": 31},
- {"_id": "54af50622740dede7d6117b7", "user_id": 22, "age": 32},
- {"_id": "54af50624efc87684a52e8fb", "user_id": 23, "age": 33},
- {"_id": "54af5062f40932760347799c", "user_id": 24, "age": 34},
- {"_id": "54af5062d9f7361951ac645d", "user_id": 25, "age": 35},
- {"_id": "54af5062f89aef302b37c3bc", "filtered_array": [1, 2, 3], "age": 36},
- {"_id": "54af5062498ec905dcb351f8", "filtered_array": [1, 2, 3], "age": 37},
- {"_id": "54af5062b1d2f2c5a85bdd7e", "user_id": 28, "age": 38},
- {"_id": "54af50625061029c0dd942b5", "filtered_array": [1, 2, 3], "age": 39},
- {"_id": "54af50628b0d08a1d23c030a", "user_id": 30, "age": 40},
- {"_id": "54af506271b6e3119eb31d46", "filtered_array": [1, 2, 3], "age": 41},
- {"_id": "54af5062b69f46424dfcf3e5", "user_id": 32, "age": 42},
- {"_id": "54af5062ed00c7dbe4d1bdcf", "user_id": 33, "age": 43},
- {"_id": "54af5062fb64e45180c9a90d", "user_id": 34, "age": 44},
- {"_id": "54af5062241c72b067127b09", "user_id": 35, "age": 45},
- {"_id": "54af50626a467d8b781a6d06", "user_id": 36, "age": 46},
- {"_id": "54af50620e992d60af03bf86", "filtered_array": [1, 2, 3], "age": 47},
- {"_id": "54af506254f992aa3c51532f", "user_id": 38, "age": 48},
- {"_id": "54af5062e99b20f301de39b9", "user_id": 39, "age": 49},
- {"_id": "54af50624fbade6b11505b5d", "user_id": 40, "age": 50},
- {"_id": "54af506278ad79b21e807ae4", "user_id": 41, "age": 51},
- {"_id": "54af5062fc7a1dcb33f31d08", "user_id": 42, "age": 52},
- {"_id": "54af5062ea2c954c650009cf", "user_id": 43, "age": 53},
- {"_id": "54af506213576c2f09858266", "user_id": 44, "age": 54},
- {"_id": "54af50624a05ac34c994b1c0", "user_id": 45, "age": 55},
- {"_id": "54af50625a624983edf2087e", "user_id": 46, "age": 56},
- {"_id": "54af50623de488c49d064355", "user_id": 47, "age": 57},
- {"_id": "54af5062628b5df08661a9d5", "user_id": 48, "age": 58},
- {"_id": "54af50620c706fc23032ae62", "user_id": 49, "age": 59},
- {"_id": "54af5062509f1e2371fe1da4", "user_id": 50, "age": 60},
- {"_id": "54af50625e96b22436791653", "user_id": 51, "age": 61},
- {"_id": "54af5062a9cb71463bb9577f", "user_id": 52, "age": 62},
- {"_id": "54af50624fea77a4221a4baf", "user_id": 53, "age": 63},
- {"_id": "54af5062c63df0a147d2417e", "user_id": 54, "age": 64},
- {"_id": "54af50623c56d78029316c9f", "user_id": 55, "age": 65},
- {"_id": "54af5062167f6e13aa0dd014", "user_id": 56, "age": 66},
- {"_id": "54af50621558abe77797d137", "filtered_array": [1, 2, 3], "age": 67},
- {"_id": "54af50624d5b36aa7cb5fa77", "user_id": 58, "age": 68},
- {"_id": "54af50620d79118184ae66bd", "user_id": 59, "age": 69},
- {"_id": "54af5062d18aafa5c4ca4935", "user_id": 60, "age": 71},
- {"_id": "54af5062fd22a409649962f4", "filtered_array": [1, 2, 3], "age": 72},
- {"_id": "54af5062e31045a1908e89f9", "user_id": 62, "age": 73},
- {"_id": "54af50624c062fcb4c59398b", "user_id": 63, "age": 74},
- {"_id": "54af506241ec83430a15957f", "user_id": 64, "age": 75},
- {"_id": "54af506224d0f888ae411101", "user_id": 65, "age": 76},
- {"_id": "54af506272a971c6cf3ab6b8", "user_id": 66, "age": 77},
- {"_id": "54af506221e25b485c95355b", "user_id": 67, "age": 78},
- {"_id": "54af5062800f7f2ca73e9623", "user_id": 68, "age": 79},
- {"_id": "54af5062bc962da30740534a", "user_id": 69, "age": 80},
- {"_id": "54af50625102d6e210fc2efd", "filtered_array": [1, 2, 3], "age": 81},
- {"_id": "54af5062e014b9d039f02c5e", "user_id": 71, "age": 82},
- {"_id": "54af5062fbd5e801dd217515", "user_id": 72, "age": 83},
- {"_id": "54af50629971992b658fcb88", "user_id": 73, "age": 84},
- {"_id": "54af5062607d53416c30bafd", "filtered_array": [1, 2, 3], "age": 85},
-]
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
deleted file mode 100644
index 03cb85f48..000000000
--- a/src/mango/test/mango.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import json
-import time
-import unittest
-import uuid
-import os
-
-import requests
-
-import friend_docs
-import user_docs
-import limit_docs
-
-
-def random_db_name():
- return "mango_test_" + uuid.uuid4().hex
-
-
-def has_text_service():
- return os.environ.get("MANGO_TEXT_INDEXES") == "1"
-
-
-def get_from_environment(key, default):
- value = os.environ.get(key)
- return value if value is not None else default
-
-
- # Sleep in n short increments of t seconds; used to poll while waiting for index changes to become visible.
-def delay(n=5, t=0.5):
- for i in range(0, n):
- time.sleep(t)
-
-
-class Database(object):
- def __init__(
- self,
- dbname,
- host="127.0.0.1",
- port="15984",
- user="testuser",
- password="testpass",
- ):
- root_url = get_from_environment("COUCH_HOST", "http://{}:{}".format(host, port))
- auth_header = get_from_environment("COUCH_AUTH_HEADER", None)
- user = get_from_environment("COUCH_USER", user)
- password = get_from_environment("COUCH_PASSWORD", password)
-
- self.root_url = root_url
- self.dbname = dbname
- self.sess = requests.session()
-
- # allow explicit auth header to be set to enable testing
- # against deployments where basic auth isn't available
- if auth_header is not None:
- self.sess.headers["Authorization"] = auth_header
- else:
- self.sess.auth = (user, password)
-
- self.sess.headers["Content-Type"] = "application/json"
-
- @property
- def url(self):
- return "{}/{}".format(self.root_url, self.dbname)
-
- def path(self, parts):
- if isinstance(parts, str):
- parts = [parts]
- return "/".join([self.url] + parts)
-
- def create(self, q=1, n=1):
- r = self.sess.get(self.url)
- if r.status_code == 404:
- r = self.sess.put(self.url, params={"q": q, "n": n})
- r.raise_for_status()
-
- def delete(self):
- r = self.sess.delete(self.url)
-
- def recreate(self):
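- # Ensure a fresh, empty database: reuse it if it exists but has never stored docs, otherwise delete and create it again.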
- r = self.sess.get(self.url)
- if r.status_code == 200:
- db_info = r.json()
- docs = db_info["doc_count"] + db_info["doc_del_count"]
- if docs == 0:
- # db exists but has never stored docs - recreating is unnecessary
- return
- self.delete()
- self.create()
- self.recreate()
-
- def save_doc(self, doc):
- self.save_docs([doc])
-
- def save_docs_with_conflicts(self, docs, **kwargs):
- body = json.dumps({"docs": docs, "new_edits": False})
- r = self.sess.post(self.path("_bulk_docs"), data=body, params=kwargs)
- r.raise_for_status()
-
- def save_docs(self, docs, **kwargs):
- body = json.dumps({"docs": docs})
- r = self.sess.post(self.path("_bulk_docs"), data=body, params=kwargs)
- r.raise_for_status()
- for doc, result in zip(docs, r.json()):
- doc["_id"] = result["id"]
- doc["_rev"] = result["rev"]
-
- def open_doc(self, docid):
- r = self.sess.get(self.path(docid))
- r.raise_for_status()
- return r.json()
-
- def delete_doc(self, docid):
- r = self.sess.get(self.path(docid))
- r.raise_for_status()
- original_rev = r.json()["_rev"]
- self.sess.delete(self.path(docid), params={"rev": original_rev})
-
- def ddoc_info(self, ddocid):
- r = self.sess.get(self.path([ddocid, "_info"]))
- r.raise_for_status()
- return r.json()
-
- def create_index(
- self,
- fields,
- idx_type="json",
- name=None,
- ddoc=None,
- partial_filter_selector=None,
- selector=None,
- ):
- body = {"index": {"fields": fields}, "type": idx_type, "w": 3}
- if name is not None:
- body["name"] = name
- if ddoc is not None:
- body["ddoc"] = ddoc
- if selector is not None:
- body["index"]["selector"] = selector
- if partial_filter_selector is not None:
- body["index"]["partial_filter_selector"] = partial_filter_selector
- body = json.dumps(body)
- r = self.sess.post(self.path("_index"), data=body)
- r.raise_for_status()
- assert r.json()["id"] is not None
- assert r.json()["name"] is not None
-
- created = r.json()["result"] == "created"
- if created:
- # wait until the database reports the index as available
- while len(self.get_index(r.json()["id"], r.json()["name"])) < 1:
- delay(t=0.1)
-
- return created
-
- def create_text_index(
- self,
- analyzer=None,
- idx_type="text",
- partial_filter_selector=None,
- selector=None,
- default_field=None,
- fields=None,
- name=None,
- ddoc=None,
- index_array_lengths=None,
- ):
- body = {"index": {}, "type": idx_type, "w": 3}
- if name is not None:
- body["name"] = name
- if analyzer is not None:
- body["index"]["default_analyzer"] = analyzer
- if default_field is not None:
- body["index"]["default_field"] = default_field
- if index_array_lengths is not None:
- body["index"]["index_array_lengths"] = index_array_lengths
- if selector is not None:
- body["index"]["selector"] = selector
- if partial_filter_selector is not None:
- body["index"]["partial_filter_selector"] = partial_filter_selector
- if fields is not None:
- body["index"]["fields"] = fields
- if ddoc is not None:
- body["ddoc"] = ddoc
- body = json.dumps(body)
- r = self.sess.post(self.path("_index"), data=body)
- r.raise_for_status()
- return r.json()["result"] == "created"
-
- def list_indexes(self, limit="", skip=""):
- if limit != "":
- limit = "limit=" + str(limit)
- if skip != "":
- skip = "skip=" + str(skip)
- r = self.sess.get(self.path("_index?" + limit + ";" + skip))
- r.raise_for_status()
- return r.json()["indexes"]
-
- def get_index(self, ddocid, name):
- if ddocid is None:
- return [i for i in self.list_indexes() if i["name"] == name]
-
- ddocid = ddocid.replace("%2F", "/")
- if not ddocid.startswith("_design/"):
- ddocid = "_design/" + ddocid
-
- if name is None:
- return [i for i in self.list_indexes() if i["ddoc"] == ddocid]
- else:
- return [
- i
- for i in self.list_indexes()
- if i["ddoc"] == ddocid and i["name"] == name
- ]
-
- def delete_index(self, ddocid, name, idx_type="json"):
- path = ["_index", ddocid, idx_type, name]
- r = self.sess.delete(self.path(path), params={"w": "3"})
- r.raise_for_status()
-
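- # wait until the database no longer lists the deleted index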
- while len(self.get_index(ddocid, name)) == 1:
- delay(t=0.1)
-
- def bulk_delete(self, docs):
- body = {"docids": docs, "w": 3}
- body = json.dumps(body)
- r = self.sess.post(self.path("_index/_bulk_delete"), data=body)
- return r.json()
-
- def find(
- self,
- selector,
- limit=25,
- skip=0,
- sort=None,
- fields=None,
- r=1,
- conflicts=False,
- use_index=None,
- explain=False,
- bookmark=None,
- return_raw=False,
- update=True,
- executionStats=False,
- ):
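- # POST the query to _find (or _explain when explain=True); return the raw JSON body when explain or return_raw is set, otherwise just the matching docs.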
- body = {
- "selector": selector,
- "use_index": use_index,
- "limit": limit,
- "skip": skip,
- "r": r,
- "conflicts": conflicts,
- }
- if sort is not None:
- body["sort"] = sort
- if fields is not None:
- body["fields"] = fields
- if bookmark is not None:
- body["bookmark"] = bookmark
- if not update:
- body["update"] = False
- if executionStats:
- body["execution_stats"] = True
- body = json.dumps(body)
- if explain:
- path = self.path("_explain")
- else:
- path = self.path("_find")
- r = self.sess.post(path, data=body)
- r.raise_for_status()
- if explain or return_raw:
- return r.json()
- else:
- return r.json()["docs"]
-
- def find_one(self, *args, **kwargs):
- results = self.find(*args, **kwargs)
- if len(results) > 1:
- raise RuntimeError("Multiple results for Database.find_one")
- if len(results):
- return results[0]
- else:
- return None
-
-
-class UsersDbTests(unittest.TestCase):
- @classmethod
- def setUpClass(klass):
- klass.db = Database("_users")
- user_docs.setup_users(klass.db)
-
- def setUp(self):
- self.db = self.__class__.db
-
-
-class DbPerClass(unittest.TestCase):
- @classmethod
- def setUpClass(klass):
- klass.db = Database(random_db_name())
- klass.db.create(q=1, n=1)
-
- def setUp(self):
- self.db = self.__class__.db
-
-
-class UserDocsTests(DbPerClass):
- INDEX_TYPE = "json"
-
- @classmethod
- def setUpClass(klass):
- super(UserDocsTests, klass).setUpClass()
- user_docs.setup(klass.db)
-
-
-class UserDocsTestsNoIndexes(DbPerClass):
- INDEX_TYPE = "special"
-
- @classmethod
- def setUpClass(klass):
- super(UserDocsTestsNoIndexes, klass).setUpClass()
- user_docs.setup(klass.db, index_type=klass.INDEX_TYPE)
-
-
-class UserDocsTextTests(DbPerClass):
- INDEX_TYPE = "text"
- DEFAULT_FIELD = None
- FIELDS = None
-
- @classmethod
- def setUpClass(klass):
- super(UserDocsTextTests, klass).setUpClass()
- if has_text_service():
- user_docs.setup(
- klass.db,
- index_type=klass.INDEX_TYPE,
- default_field=klass.DEFAULT_FIELD,
- fields=klass.FIELDS,
- )
-
-
-class FriendDocsTextTests(DbPerClass):
- @classmethod
- def setUpClass(klass):
- super(FriendDocsTextTests, klass).setUpClass()
- if has_text_service():
- friend_docs.setup(klass.db, index_type="text")
-
-
-class LimitDocsTextTests(DbPerClass):
- @classmethod
- def setUpClass(klass):
- super(LimitDocsTextTests, klass).setUpClass()
- if has_text_service():
- limit_docs.setup(klass.db, index_type="text")
diff --git a/src/mango/test/user_docs.py b/src/mango/test/user_docs.py
deleted file mode 100644
index 8f0ed2e04..000000000
--- a/src/mango/test/user_docs.py
+++ /dev/null
@@ -1,383 +0,0 @@
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-"""
-Generated with http://www.json-generator.com/
-
-With this pattern:
-
-[
- '{{repeat(20)}}',
- {
- _id: '{{guid()}}',
- user_id: "{{index()}}",
- name: {
- first: "{{firstName()}}",
- last: "{{surname()}}"
- },
- age: "{{integer(18,90)}}",
- location: {
- state: "{{state()}}",
- city: "{{city()}}",
- address: {
- street: "{{street()}}",
- number: "{{integer(10, 10000)}}"
- }
- },
- company: "{{company()}}",
- email: "{{email()}}",
- manager: "{{bool()}}",
- twitter: function(tags) {
- if(this.manager)
- return;
- return "@" + this.email.split("@")[0];
- },
- favorites: [
- "{{repeat(2,5)}}",
- "{{random('C', 'C++', 'Python', 'Ruby', 'Erlang', 'Lisp')}}"
- ]
- }
-]
-"""
-
-
-import copy
-
-
-def setup_users(db, **kwargs):
- db.recreate()
- db.save_docs(copy.deepcopy(USERS_DOCS))
-
-
-def setup(db, index_type="view", **kwargs):
- db.recreate()
- db.save_docs(copy.deepcopy(DOCS))
- if index_type == "view":
- add_view_indexes(db, kwargs)
- elif index_type == "text":
- add_text_indexes(db, kwargs)
-
-
-def add_view_indexes(db, kwargs):
- indexes = [
- (["user_id"], "user_id"),
- (["name.last", "name.first"], "name"),
- (["age"], "age"),
- (
- [
- "location.state",
- "location.city",
- "location.address.street",
- "location.address.number",
- ],
- "location",
- ),
- (["company", "manager"], "company_and_manager"),
- (["manager"], "manager"),
- (["favorites"], "favorites"),
- (["favorites.3"], "favorites_3"),
- (["twitter"], "twitter"),
- (["ordered"], "ordered"),
- ]
- for (idx, name) in indexes:
- assert db.create_index(idx, name=name, ddoc=name) is True
-
-
-def add_text_indexes(db, kwargs):
- db.create_text_index(**kwargs)
-
-
-DOCS = [
- {
- "_id": "71562648-6acb-42bc-a182-df6b1f005b09",
- "user_id": 0,
- "name": {"first": "Stephanie", "last": "Kirkland"},
- "age": 48,
- "location": {
- "state": "Nevada",
- "city": "Ronco",
- "address": {"street": "Evergreen Avenue", "number": 347},
- },
- "company": "Dreamia",
- "email": "stephaniekirkland@dreamia.com",
- "manager": False,
- "twitter": "@stephaniekirkland",
- "favorites": ["Ruby", "C", "Python"],
- "test": [{"a": 1}, {"b": 2}],
- },
- {
- "_id": "12a2800c-4fe2-45a8-8d78-c084f4e242a9",
- "user_id": 1,
- "name": {"first": "Abbott", "last": "Watson"},
- "age": 31,
- "location": {
- "state": "Connecticut",
- "city": "Gerber",
- "address": {"street": "Huntington Street", "number": 8987},
- },
- "company": "Talkola",
- "email": "abbottwatson@talkola.com",
- "manager": False,
- "twitter": "@abbottwatson",
- "favorites": ["Ruby", "Python", "C", {"Versions": {"Alpha": "Beta"}}],
- "test": [{"a": 1, "b": 2}],
- },
- {
- "_id": "48ca0455-8bd0-473f-9ae2-459e42e3edd1",
- "user_id": 2,
- "name": {"first": "Shelly", "last": "Ewing"},
- "age": 42,
- "location": {
- "state": "New Mexico",
- "city": "Thornport",
- "address": {"street": "Miller Avenue", "number": 7100},
- },
- "company": "Zialactic",
- "email": "shellyewing@zialactic.com",
- "manager": True,
- "favorites": ["Lisp", "Python", "Erlang"],
- "test_in": {"val1": 1, "val2": "val2"},
- },
- {
- "_id": "0461444c-e60a-457d-a4bb-b8d811853f21",
- "user_id": 3,
- "name": {"first": "Madelyn", "last": "Soto"},
- "age": 79,
- "location": {
- "state": "Utah",
- "city": "Albany",
- "address": {"street": "Stockholm Street", "number": 710},
- },
- "company": "Tasmania",
- "email": "madelynsoto@tasmania.com",
- "manager": True,
- "favorites": [["Lisp", "Erlang", "Python"], "Erlang", "C", "Erlang"],
- "11111": "number_field",
- "22222": {"33333": "nested_number_field"},
- },
- {
- "_id": "8e1c90c0-ac18-4832-8081-40d14325bde0",
- "user_id": 4,
- "name": {"first": "Nona", "last": "Horton"},
- "age": 61,
- "location": {
- "state": "Georgia",
- "city": "Corinne",
- "address": {"street": "Woodhull Street", "number": 6845},
- },
- "company": "Signidyne",
- "email": "nonahorton@signidyne.com",
- "manager": False,
- "twitter": "@nonahorton",
- "favorites": ["Lisp", "C", "Ruby", "Ruby"],
- "name.first": "name dot first",
- },
- {
- "_id": "a33d5457-741a-4dce-a217-3eab28b24e3e",
- "user_id": 5,
- "name": {"first": "Sheri", "last": "Perkins"},
- "age": 73,
- "location": {
- "state": "Michigan",
- "city": "Nutrioso",
- "address": {"street": "Bassett Avenue", "number": 5648},
- },
- "company": "Myopium",
- "email": "sheriperkins@myopium.com",
- "manager": True,
- "favorites": ["Lisp", "Lisp"],
- },
- {
- "_id": "b31dad3f-ae8b-4f86-8327-dfe8770beb27",
- "user_id": 6,
- "name": {"first": "Tate", "last": "Guy"},
- "age": 47,
- "location": {
- "state": "Illinois",
- "city": "Helen",
- "address": {"street": "Schenck Court", "number": 7392},
- },
- "company": "Prosely",
- "email": "tateguy@prosely.com",
- "manager": True,
- "favorites": ["C", "Lisp", "Ruby", "C"],
- },
- {
- "_id": "659d0430-b1f4-413a-a6b7-9ea1ef071325",
- "user_id": 7,
- "name": {"first": "Jewell", "last": "Stafford"},
- "age": 33,
- "location": {
- "state": "Iowa",
- "city": "Longbranch",
- "address": {"street": "Dodworth Street", "number": 3949},
- },
- "company": "Niquent",
- "email": "jewellstafford@niquent.com",
- "manager": True,
- "favorites": ["C", "C", "Ruby", "Ruby", "Erlang"],
- "exists_field": "should_exist1",
- "ordered": None,
- },
- {
- "_id": "6c0afcf1-e57e-421d-a03d-0c0717ebf843",
- "user_id": 8,
- "name": {"first": "James", "last": "Mcdaniel"},
- "age": 68,
- "location": {
- "state": "Maine",
- "city": "Craig",
- "address": {"street": "Greene Avenue", "number": 8776},
- },
- "company": "Globoil",
- "email": "jamesmcdaniel@globoil.com",
- "manager": True,
- "favorites": None,
- "exists_field": "should_exist2",
- "ordered": False,
- },
- {
- "_id": "954272af-d5ed-4039-a5eb-8ed57e9def01",
- "user_id": 9,
- "name": {"first": "Ramona", "last": "Floyd"},
- "age": 22,
- "location": {
- "state": "Missouri",
- "city": "Foxworth",
- "address": {"street": "Lott Place", "number": 1697},
- },
- "company": "Manglo",
- "email": "ramonafloyd@manglo.com",
- "manager": True,
- "twitter": None,
- "favorites": ["Lisp", "Erlang", "Python"],
- "exists_array": ["should", "exist", "array1"],
- "complex_field_value": '+-(){}[]^~&&*||"\\/?:!',
- "ordered": True,
- },
- {
- "_id": "e900001d-bc48-48a6-9b1a-ac9a1f5d1a03",
- "user_id": 10,
- "name": {"first": "Charmaine", "last": "Mills"},
- "age": 43,
- "location": {
- "state": "New Hampshire",
- "city": "Kiskimere",
- "address": {"street": "Nostrand Avenue", "number": 4503},
- },
- "company": "Lyria",
- "email": "charmainemills@lyria.com",
- "manager": True,
- "favorites": ["Erlang", "Erlang"],
- "exists_array": ["should", "exist", "array2"],
- "ordered": 9,
- },
- {
- "_id": "b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4",
- "user_id": 11,
- "name": {"first": "Mathis", "last": "Hernandez"},
- "age": 75,
- "location": {
- "state": "Hawaii",
- "city": "Dupuyer",
- "address": {"street": "Bancroft Place", "number": 2741},
- },
- "company": "Affluex",
- "email": "mathishernandez@affluex.com",
- "manager": True,
- "favorites": ["Ruby", "Lisp", "C", "C++", "C++"],
- "exists_object": {"should": "object"},
- "ordered": 10000,
- },
- {
- "_id": "5b61abc1-a3d3-4092-b9d7-ced90e675536",
- "user_id": 12,
- "name": {"first": "Patti", "last": "Rosales"},
- "age": 71,
- "location": {
- "state": "Pennsylvania",
- "city": "Juntura",
- "address": {"street": "Hunterfly Place", "number": 7683},
- },
- "company": "Oulu",
- "email": "pattirosales@oulu.com",
- "manager": True,
- "favorites": ["C", "Python", "Lisp"],
- "exists_object": {"another": "object"},
- "ordered": "a",
- },
- {
- "_id": "b1e70402-8add-4068-af8f-b4f3d0feb049",
- "user_id": 13,
- "name": {"first": "Whitley", "last": "Harvey"},
- "age": 78,
- "location": {
- "state": "Minnesota",
- "city": "Trail",
- "address": {"street": "Pleasant Place", "number": 8766},
- },
- "company": None,
- "email": "whitleyharvey@fangold.com",
- "manager": False,
- "twitter": "@whitleyharvey",
- "favorites": ["C", "Ruby", "Ruby"],
- "ordered": "A",
- },
- {
- "_id": "c78c529f-0b07-4947-90a6-d6b7ca81da62",
- "user_id": 14,
- "name": {"first": "Faith", "last": "Hess"},
- "age": 51,
- "location": {
- "state": "North Dakota",
- "city": "Axis",
- "address": {"street": "Brightwater Avenue", "number": 1106},
- },
- "foo": "bar car apple",
- "company": "Pharmex",
- "email": "faithhess@pharmex.com",
- "favorites": ["Erlang", "Python", "Lisp"],
- "ordered": "aa",
- },
-]
-
-
-USERS_DOCS = [
- {
- "_id": "org.couchdb.user:demo01",
- "name": "demo01",
- "username": "demo01",
- "password": "apple01",
- "roles": ["design"],
- "order": 1,
- "type": "user",
- },
- {
- "_id": "org.couchdb.user:demo02",
- "name": "demo02",
- "username": "demo02",
- "password": "apple02",
- "roles": ["reader"],
- "order": 2,
- "type": "user",
- },
- {
- "_id": "org.couchdb.user:demo03",
- "name": "demo03",
- "username": "demo03",
- "password": "apple03",
- "roles": ["reader", "writer"],
- "order": 3,
- "type": "user",
- },
-]
diff --git a/src/mango/unittest.cfg b/src/mango/unittest.cfg
deleted file mode 100644
index 05d7bcf5f..000000000
--- a/src/mango/unittest.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-[unittest]
-start-dir=test
-test-file-pattern=[0-9]*.py
diff --git a/src/mem3/LICENSE b/src/mem3/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/mem3/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/mem3/README.md b/src/mem3/README.md
deleted file mode 100644
index 8098f6979..000000000
--- a/src/mem3/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-## mem3
-
-Mem3 is the node membership application for clustered [CouchDB][1]. It has
-been used in CouchDB since version 2.0 and tracks two very important things
-for the cluster:
-
- 1. member nodes
- 2. node/shards mappings for each database
-
-Both the nodes and shards are tracked in node-local couch databases. Shards
-are heavily used, so an ETS cache is also maintained for low-latency lookups.
-The nodes and shards are synchronized via continuous CouchDB replication,
-which serves as 'gossip' in Dynamo parlance. The shards ETS cache is kept in
-sync based on membership and database event listeners.
-
-A very important point to make here is that CouchDB does not necessarily
-divide up each database into equal shards across the nodes of a cluster. For
-instance, in a 20-node cluster, you may need to create a small database with
-very few documents. For efficiency reasons, you may create your database with
-Q=4 and keep the default of N=3. This means you only have 12 shards total, so
-8 nodes will hold none of the data for this database. Given this feature, we
-even out shard usage across the cluster by altering the 'start' node for the
-database's shards.
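-
-A quick way to see this from a remote shell is to list the shard records for
-such a database (an illustrative sketch only; the database name is made up
-and it assumes a database like the one above already exists):
-
-    Shards = mem3:shards(<<"smalldb">>),
-    length(Shards),                                     %% 12 shard records
-    length(lists:usort([mem3:node(S) || S <- Shards])). %% at most 12 distinct nodes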
-
-Shards can be split using the `/_reshard` API endpoint. Refer to the separate
-[README](README_reshard.md) for technical details on how shard splitting
-works.
-
-### Getting Started
-
-Mem3 requires R13B03 or higher and can be built with [rebar][2], which comes
-bundled in the repository. Rebar needs to be able to find the `couch_db.hrl`
-header file; one way to accomplish this is to set ERL_LIBS to point to the
-apps subdirectory of a CouchDB checkout, e.g.
-
- ERL_LIBS="/usr/local/src/couchdb/apps" ./rebar compile
-
-### License
-[Apache 2.0][3]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/rebar/rebar
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
diff --git a/src/mem3/README_reshard.md b/src/mem3/README_reshard.md
deleted file mode 100644
index 237371485..000000000
--- a/src/mem3/README_reshard.md
+++ /dev/null
@@ -1,93 +0,0 @@
-Developer Oriented Resharding Description
-=========================================
-
-This is a technical description of the resharding logic. The discussion focuses on job creation and life-cycle, data definitions, and the state transition mechanisms.
-
-
-Job Life-Cycle
---------------
-
-Job creation happens in the `mem3_reshard_httpd` API handler module. That module uses `mem3_reshard_http_util` to do some validation, and eventually calls `mem3_reshard:start_split_job/2` on one or more nodes in the cluster depending on where the new jobs should run.
-
-`mem3_reshard:start_split_job/2` is the main Erlang API entry point. After some more validation it creates a `#job{}` record and calls the `mem3_reshard` job manager. The manager will then add the job to its jobs ets table, save it to a `_local` document in the shards db, and, most importantly, start a new resharding process. That process will be supervised by the `mem3_reshard_job_sup` supervisor.
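-
-For illustration, kicking off a split of a single shard from a remote shell might look roughly like this (the shard name is made up, and the exact argument and return shape of `start_split_job/1` are assumptions):
-
-    mem3_reshard:start_split_job(<<"shards/00000000-7fffffff/db1.1553786862">>).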
-
-Each job runs in a gen_server implemented in the `mem3_reshard_job` module. When splitting a shard, a job will go through a series of steps such as `initial_copy`, `build_indices`, `update_shard_map`, etc. Between each step it will report progress and checkpoint with the `mem3_reshard` manager. A checkpoint involves the `mem3_reshard` manager persisting that job's state to disk in a `_local` document in the `_dbs` db. The job then continues until it reaches the `completed` state or fails and ends up in the `failed` state.
-
-If a user stops shard splitting on the whole cluster, then all running jobs will stop. When shard splitting is resumed, they will try to recover from their last checkpoint.
-
-A job can also be individually stopped or resumed. If a job is individually stopped it will stay so even if the global shard splitting state is `running`. A user has to explicitly set that job to a `running` state for it to resume. If a node with running jobs is turned off, when it is rebooted running jobs will resume from their last checkpoint.
-
-
-Data Definitions
-----------------
-
-This section focuses on record definition and how data is transformed to and from various formats.
-
-Right below the `mem3_reshard:start_split_job/1` API level a job is converted to a `#job{}` record defined in the `mem3_reshard.hrl` header file. That record is then used throughout most of the resharding code. The job manager `mem3_reshard` stores it in its jobs ets table, and when a job process is spawned its single argument is also just a `#job{}` record. As a job process executes, it will periodically report state back to the `mem3_reshard` manager as an updated `#job{}` record.
-
-Some interesting fields from the `#job{}` record (a rough sketch of the record follows this list):
-
- - `id` Uniquely identifies a job in a cluster. It is derived from the source shard name, node and a version (currently = 1).
- - `type` Currently the only type supported is `split` but `merge` or `rebalance` might be added in the future.
- - `job_state` The running state of the job. Indicates if the job is `running`, `stopped`, `completed` or `failed`.
- - `split_state` Once the job is running this indicates how far along it got in the splitting process.
- - `source` Source shard file. If/when merge is implemented this will be a list.
- - `target` List of target shard files. This is expected to be a list of 2 items currently.
- - `history` A time-line of state transitions represented as a list of tuples.
- - `pid` When job is running this will be set to the pid of the process.
-
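-A rough sketch of that record, using only the fields listed above (the types, defaults, and any omitted fields are assumptions; see `mem3_reshard.hrl` for the authoritative definition):
-
-    -record(job, {
-        id :: binary() | undefined,
-        type = split :: atom(),
-        job_state :: atom(),
-        split_state :: atom(),
-        source :: binary() | undefined,
-        target = [] :: [binary()],
-        history = [] :: [tuple()],
-        pid :: pid() | undefined
-    }).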
-
-In the `mem3_reshard_store` module the `#job{}` record is translated to a json document so it can be persisted to disk. The translation functions to and from json in that module are also used by the HTTP API layer to return a job's state and other information to the user.
-
-Another important piece of data is the global resharding state. When a user disables resharding on a cluster, a call is made to the `mem3_reshard` manager on each node and each manager stores that in a `#state{}` record. This record is defined in the `mem3_reshard.hrl` header file and, just like the `#job{}` record, can be translated to/from ejson in the `mem3_reshard_store` module and stored and loaded from disk.
-
-
-State Transitions
------------------
-
-Resharding logic has 3 separate states to keep track of:
-
-1. Per-node resharding state. This state can be toggled between `running` and `stopped`. That toggle happens via the `mem3_reshard:start/0` and `mem3_reshard:stop/1` functions (see the sketch after this list). This state is kept in the `#state{}` record of the `mem3_reshard` manager gen_server. This state is also persisted to the local shard map database as a `_local` document so that it is maintained through a node restart. When the state is `running`, all jobs that are not individually `stopped`, and have not failed or completed, will be `running`. When the state is `stopped`, all the running jobs will be `stopped`.
-
-2. Job's running state, held in the `#job{}` `job_state` field. This is the general running state of a resharding job. It can be `new`, `running`, `stopped`, `completed` or `failed`. This state is most relevant for the `mem3_reshard` manager. In other words, it is the `mem3_reshard` gen_server that starts the job and monitors it to see whether it exits successfully on completion or with an error.
-
-3. Job's internal splitting state. This state tracks the steps taken during shard splitting by each job. This state is mostly relevant for the `mem3_reshard_job` module. This state is kept in `#job{}`'s `split_state` field. The progression of these states is linear, going from one state to the next. That's reflected in the code as well: when one state is done, `mem3_reshard_job:get_next_state/1` is called, which returns the next state in the list. The list is defined in the `SPLIT_STATES` macro. This simplistic transition is also one of the reasons why a gen_fsm wasn't considered for the `mem3_reshard_job` logic.
-
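-For example, pausing resharding on the local node and then resuming it might look roughly like this (a sketch only; the reason argument and the return values are assumptions):
-
-    %% toggle the per-node resharding state described in (1) above
-    mem3_reshard:stop(<<"pausing for maintenance">>),
-    mem3_reshard:start().
-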
-Another interesting aspect is how the `split_state` transitions happen in the `mem3_reshard_job` module. What follows is an examination of that.
-
-A job starts running in the `new` state or from a previously checkpointed state. In the latter case, the job goes through some recovery logic (see the `?STATE_RESTART` macro in `mem3_reshard_job`) where it tries to resume its work from where it left off. This means, for example, that if it was interrupted in the `initial_copy` state, it might have to reset the target files and copy everything again. After recovery, the state execution logic is driven by `run(#job{})`, which ends up calling the `?MODULE:State(#job{})` state-specific function for each state.
-
-In `mem3_reshard_job:switch_to_next_state/2` the job's history is updated, any current `state_info` is cleared, and the job state is switched in the `#job{}` record. Then the new state is checkpointed in the `checkpoint/1` function. Checkpointing casts a message to the `mem3_reshard` manager. After that message is sent the job process sits and waits.
-
-In the meantime the `mem3_reshard` manager checkpoints the state: it updates its ETS table with the new `#job{}` record, persists the state with the `mem3_reshard_store` module, and finally notifies the job process that checkpointing is done by calling `mem3_reshard_job:checkpoint_done/1`.
-
-The `mem3_reshard_job:checkpoint_done/1` call sends a `checkpoint_done` message to the job's process, at which point it starts executing that state.
-
-Most states in `mem3_reshard_job` try not to block the main job process and instead launch worker processes to perform long-running operations. Usually there is just one worker process, but there may be several. The job process then waits for the workers to finish and inspects their exit signals (see the `wait_for_workers/1` function). When all the workers for a particular `split_state` exit, the job is switched to the next state with `switch_to_next_state/1`, and the whole thing repeats until the `completed` state is reached, at which point the whole job exits normally.
-
-If the source is updated at a high rate and the cluster is under load, the resharding jobs may take longer to finish. The cluster would have to be running at the limit where both compaction and internal replication have difficulty catching up, since fundamentally the logic used for the initial bulk copy is similar to the compaction code and the topoff states just reuse the internal replicator code. Eventually, when the load subsides, the jobs should catch up and finish.
-
-Individual Modules Description
-------------------------------
-
-These are mostly random notes about various modules involved in resharding. Most, but not all, are in the `mem3` application.
-
-* `mem3_reshard`: Main API entry point and the job manager.
-
-* `mem3_reshard_job` : Individual job logic.
-
-* `mem3_reshard_dbdoc` : Responsible for updating shard docs in the `_dbs` database. Besides a number of utility functions, there is a gen_server spawned which is used to update shard documents in a cluster in such a way as to minimize the risk of conflicts. That is accomplished by having each shard updater call only one such updater for the whole cluster. This coordinator is picked by sorting the list of all the live mem3 nodes and picking the first one in the list (see the sketch after this list).
-
-* `mem3_reshard_httpd` : API endpoint definitions.
-
-* `mem3_reshard_api` : Cluster API endpoint. This module is responsible for sending requests to all the nodes in a cluster and gathering results.
-
-* `mem3_reshard_index` : This is a helper module used by workers in the `build_indices` state.
-
-* `mem3_reshard_job_sup` : Simple one for one supervisor which keeps track of running jobs.
-
-* `mem3_reshard_store` : State persistence module. It knows how to save/restore `#job{}` and `#state{}` records to/from `_local` docs. It is also re-used for serializing `#job{}` into ejson by the HTTP API module.
-
-* `mem3_reshard_validate` : Validate that source exists, target ranges don't have gaps in them, etc.
-
-* `couch_db_split` : This module is not in the `mem3` app but it does all the heavy lifting during the initial data copy. Given a source db, some targets, and a function to decide which doc goes to which target, it will copy all data from the source to the targets. It's best to think of this module as a form of compactor. Unlike `couch_bt_engine_compactor` this one lives above the `couch_db_engine` API, and instead of copying data to one new file it copies it to 2 or more. Unsurprisingly, because of that, it uses some lower-level `couch_db_engine` APIs directly, including linking to a couch_db_updater, force-setting db update sequences, and others.
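-
-As a rough illustration of the coordinator selection mentioned for `mem3_reshard_dbdoc` above (a sketch only; it omits the liveness filtering and is not the actual implementation):
-
-    Coordinator = hd(lists:sort(mem3:nodes())).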
diff --git a/src/mem3/include/mem3.hrl b/src/mem3/include/mem3.hrl
deleted file mode 100644
index d97b25469..000000000
--- a/src/mem3/include/mem3.hrl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-% The last element in the ring
--define(RING_END, 2 bsl 31 - 1).
-
-
-% type specification hacked to suppress dialyzer warning re: match spec
--record(shard, {
- name :: binary() | '_' | 'undefined',
- node :: node() | '_' | 'undefined',
- dbname :: binary() | 'undefined',
- range :: [non_neg_integer() | '$1' | '$2'] | '_' | 'undefined',
- ref :: reference() | '_' | 'undefined',
- opts :: list() | 'undefined'
-}).
-
-%% Do not reference outside of mem3.
--record(ordered_shard, {
- name :: binary() | '_',
- node :: node() | '_',
- dbname :: binary(),
- range :: [non_neg_integer() | '$1' | '$2'] | '_',
- ref :: reference() | 'undefined' | '_',
- order :: non_neg_integer() | 'undefined' | '_',
- opts :: list()
-}).
-
-%% types
--type join_type() :: init | join | replace | leave.
--type join_order() :: non_neg_integer().
--type options() :: list().
--type mem_node() :: {join_order(), node(), options()}.
--type mem_node_list() :: [mem_node()].
--type arg_options() :: {test, boolean()}.
--type args() :: [] | [arg_options()].
--type test() :: undefined | node().
--type epoch() :: float().
--type clock() :: {node(), epoch()}.
--type vector_clock() :: [clock()].
--type ping_node() :: node() | nil.
--type gossip_fun() :: call | cast.
-
--type part() :: #shard{}.
--type fullmap() :: [part()].
--type ref_part_map() :: {reference(), part()}.
--type tref() :: reference().
--type np() :: {node(), part()}.
--type beg_acc() :: [integer()].
diff --git a/src/mem3/priv/stats_descriptions.cfg b/src/mem3/priv/stats_descriptions.cfg
deleted file mode 100644
index 569d16ac3..000000000
--- a/src/mem3/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-{[mem3, shard_cache, eviction], [
- {type, counter},
- {desc, <<"number of shard cache evictions">>}
-]}.
-{[mem3, shard_cache, hit], [
- {type, counter},
- {desc, <<"number of shard cache hits">>}
-]}.
-{[mem3, shard_cache, miss], [
- {type, counter},
- {desc, <<"number of shard cache misses">>}
-]}.
diff --git a/src/mem3/rebar.config b/src/mem3/rebar.config
deleted file mode 100644
index 362c8785e..000000000
--- a/src/mem3/rebar.config
+++ /dev/null
@@ -1,14 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/mem3/rebar.config.script b/src/mem3/rebar.config.script
deleted file mode 100644
index 8f2deb4ae..000000000
--- a/src/mem3/rebar.config.script
+++ /dev/null
@@ -1,22 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-WithProper = code:lib_dir(proper) /= {error, bad_name}.
-
-if not WithProper -> CONFIG; true ->
- CurrOpts = case lists:keyfind(erl_opts, 1, CONFIG) of
- {erl_opts, Opts} -> Opts;
- false -> []
- end,
- NewOpts = [{d, 'WITH_PROPER'} | CurrOpts],
- lists:keystore(erl_opts, 1, CONFIG, {erl_opts, NewOpts})
-end.
diff --git a/src/mem3/src/mem3.app.src b/src/mem3/src/mem3.app.src
deleted file mode 100644
index 889ebf9a3..000000000
--- a/src/mem3/src/mem3.app.src
+++ /dev/null
@@ -1,40 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, mem3, [
- {description, "CouchDB Cluster Membership"},
- {vsn, git},
- {mod, {mem3_app, []}},
- {registered, [
- mem3_events,
- mem3_nodes,
- mem3_shards,
- mem3_sync,
- mem3_sync_nodes,
- mem3_reshard,
- mem3_sup
- ]},
- {applications, [
- kernel,
- stdlib,
- config,
- sasl,
- crypto,
- mochiweb,
- couch_epi,
- couch,
- rexi,
- couch_log,
- couch_event,
- couch_stats
- ]}
-]}.
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
deleted file mode 100644
index 5a985b7f8..000000000
--- a/src/mem3/src/mem3.erl
+++ /dev/null
@@ -1,508 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3).
-
--export([
- start/0,
- stop/0,
- restart/0,
- nodes/0,
- node_info/2,
- shards/1, shards/2,
- choose_shards/2,
- n/1, n/2,
- dbname/1,
- ushards/1, ushards/2
-]).
--export([get_shard/3, local_shards/1, shard_suffix/1, fold_shards/2]).
--export([sync_security/0, sync_security/1]).
--export([compare_nodelists/0, compare_shards/1]).
--export([quorum/1, group_by_proximity/1]).
--export([live_shards/2]).
--export([belongs/2, owner/3]).
--export([get_placement/1]).
--export([ping/1, ping/2]).
--export([db_is_current/1]).
--export([shard_creation_time/1]).
-
-%% For mem3 use only.
--export([name/1, node/1, range/1, engine/1]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(PING_TIMEOUT_IN_MS, 60000).
-
-start() ->
- application:start(mem3).
-
-stop() ->
- application:stop(mem3).
-
-restart() ->
- stop(),
- start().
-
-%% @doc Detailed report of cluster-wide membership state. Queries the state
-%% on all member nodes and builds a dictionary with unique states as the
-%% key and the nodes holding that state as the value. Also reports member
-%% nodes which fail to respond and nodes which are connected but are not
-%% cluster members. Useful for debugging.
--spec compare_nodelists() ->
- [
- {
- {cluster_nodes, [node()]}
- | bad_nodes
- | non_member_nodes,
- [node()]
- }
- ].
-compare_nodelists() ->
- Nodes = mem3:nodes(),
- AllNodes = erlang:nodes([this, visible]),
- {Replies, BadNodes} = gen_server:multi_call(Nodes, mem3_nodes, get_nodelist),
- Dict = lists:foldl(
- fun({Node, Nodelist}, D) ->
- orddict:append({cluster_nodes, Nodelist}, Node, D)
- end,
- orddict:new(),
- Replies
- ),
- [{non_member_nodes, AllNodes -- Nodes}, {bad_nodes, BadNodes} | Dict].
-
--spec compare_shards(DbName :: iodata()) -> [{bad_nodes | [#shard{}], [node()]}].
-compare_shards(DbName) when is_list(DbName) ->
- compare_shards(list_to_binary(DbName));
-compare_shards(DbName) ->
- Nodes = mem3:nodes(),
- {Replies, BadNodes} = rpc:multicall(mem3, shards, [DbName]),
- GoodNodes = [N || N <- Nodes, not lists:member(N, BadNodes)],
- Dict = lists:foldl(
- fun({Shards, Node}, D) ->
- orddict:append(Shards, Node, D)
- end,
- orddict:new(),
- lists:zip(Replies, GoodNodes)
- ),
- [{bad_nodes, BadNodes} | Dict].
-
--spec n(DbName :: iodata()) -> integer().
-n(DbName) ->
- % Use _design to avoid issues with
- % partition validation
- n(DbName, <<"_design/foo">>).
-
-n(DbName, DocId) ->
- length(mem3:shards(DbName, DocId)).
-
--spec nodes() -> [node()].
-nodes() ->
- mem3_nodes:get_nodelist().
-
-node_info(Node, Key) ->
- mem3_nodes:get_node_info(Node, Key).
-
--spec shards(DbName :: iodata()) -> [#shard{}].
-shards(DbName) ->
- shards_int(DbName, []).
-
-shards_int(DbName, Options) when is_list(DbName) ->
- shards_int(list_to_binary(DbName), Options);
-shards_int(DbName, Options) ->
- Ordered = lists:member(ordered, Options),
- ShardDbName =
- list_to_binary(config:get("mem3", "shards_db", "_dbs")),
- case DbName of
- ShardDbName when Ordered ->
- %% shard_db is treated as a single sharded db to support calls to db_info
- %% and view_all_docs
- [
- #ordered_shard{
- node = node(),
- name = ShardDbName,
- dbname = ShardDbName,
- range = [0, (2 bsl 31) - 1],
- order = undefined,
- opts = []
- }
- ];
- ShardDbName ->
- %% shard_db is treated as a single sharded db to support calls to db_info
- %% and view_all_docs
- [
- #shard{
- node = node(),
- name = ShardDbName,
- dbname = ShardDbName,
- range = [0, (2 bsl 31) - 1],
- opts = []
- }
- ];
- _ ->
- mem3_shards:for_db(DbName, Options)
- end.
-
--spec shards(DbName :: iodata(), DocId :: binary()) -> [#shard{}].
-shards(DbName, DocId) ->
- shards_int(DbName, DocId, []).
-
-shards_int(DbName, DocId, Options) when is_list(DbName) ->
- shards_int(list_to_binary(DbName), DocId, Options);
-shards_int(DbName, DocId, Options) when is_list(DocId) ->
- shards_int(DbName, list_to_binary(DocId), Options);
-shards_int(DbName, DocId, Options) ->
- mem3_shards:for_docid(DbName, DocId, Options).
-
--spec ushards(DbName :: iodata()) -> [#shard{}].
-ushards(DbName) ->
- Nodes = [node() | erlang:nodes()],
- ZoneMap = zone_map(Nodes),
- Shards = ushards(DbName, live_shards(DbName, Nodes, [ordered]), ZoneMap),
- mem3_util:downcast(Shards).
-
--spec ushards(DbName :: iodata(), DocId :: binary()) -> [#shard{}].
-ushards(DbName, DocId) ->
- Shards = shards_int(DbName, DocId, [ordered]),
- Shard = hd(Shards),
- mem3_util:downcast([Shard]).
-
-ushards(DbName, Shards0, ZoneMap) ->
- {L, S, D} = group_by_proximity(Shards0, ZoneMap),
- % Prefer shards in the local zone over shards in a different zone,
- % but sort each zone separately to ensure a consistent choice between
- % nodes in the same zone.
- Shards = choose_ushards(DbName, L ++ S) ++ choose_ushards(DbName, D),
- OverlappedShards = lists:ukeysort(#shard.range, Shards),
- mem3_util:non_overlapping_shards(OverlappedShards).
-
-get_shard(DbName, Node, Range) ->
- mem3_shards:get(DbName, Node, Range).
-
-local_shards(DbName) ->
- mem3_shards:local(DbName).
-
-shard_suffix(DbName0) when is_binary(DbName0) ->
- Shard = hd(shards(DbName0)),
- <<"shards/", _:8/binary, "-", _:8/binary, "/", DbName/binary>> =
- Shard#shard.name,
- filename:extension(binary_to_list(DbName));
-shard_suffix(Db) ->
- shard_suffix(couch_db:name(Db)).
-
-shard_creation_time(DbName0) ->
- Shard = hd(shards(DbName0)),
- case Shard#shard.name of
- <<"shards/", _:8/binary, "-", _:8/binary, "/", DbName/binary>> ->
- case filename:extension(DbName) of
- <<".", Time/binary>> ->
- Time;
- _ ->
- <<"0">>
- end;
- _ ->
- <<"0">>
- end.
-
-fold_shards(Fun, Acc) ->
- mem3_shards:fold(Fun, Acc).
-
-sync_security() ->
- mem3_sync_security:go().
-
-sync_security(Db) ->
- mem3_sync_security:go(dbname(Db)).
-
--spec choose_shards(DbName :: iodata(), Options :: list()) -> [#shard{}].
-choose_shards(DbName, Options) when is_list(DbName) ->
- choose_shards(list_to_binary(DbName), Options);
-choose_shards(DbName, Options) ->
- try
- shards(DbName)
- catch
- error:E when E == database_does_not_exist; E == badarg ->
- Nodes = allowed_nodes(),
- case get_placement(Options) of
- undefined ->
- choose_shards(DbName, Nodes, Options);
- Placement ->
- lists:flatmap(
- fun({Zone, N}) ->
- NodesInZone = nodes_in_zone(Nodes, Zone),
- Options1 = lists:keymerge(1, [{n, N}], Options),
- choose_shards(DbName, NodesInZone, Options1)
- end,
- Placement
- )
- end
- end.
-
-choose_shards(DbName, Nodes, Options) ->
- NodeCount = length(Nodes),
- Suffix = couch_util:get_value(shard_suffix, Options, ""),
- N = mem3_util:n_val(couch_util:get_value(n, Options), NodeCount),
- if
- N =:= 0 -> erlang:error(no_nodes_in_zone);
- true -> ok
- end,
- Q = mem3_util:q_val(
- couch_util:get_value(
- q,
- Options,
- config:get_integer("cluster", "q", 2)
- )
- ),
- %% rotate to a random entry in the nodelist for even distribution
- RotatedNodes = rotate_rand(Nodes),
- mem3_util:create_partition_map(DbName, N, Q, RotatedNodes, Suffix).
-
-rotate_rand(Nodes) ->
- {A, B} = lists:split(couch_rand:uniform(length(Nodes)), Nodes),
- B ++ A.
-
-get_placement(Options) ->
- case couch_util:get_value(placement, Options) of
- undefined ->
- case config:get("cluster", "placement") of
- undefined ->
- undefined;
- PlacementStr ->
- decode_placement_string(PlacementStr)
- end;
- PlacementStr ->
- decode_placement_string(PlacementStr)
- end.
-
-decode_placement_string(PlacementStr) ->
- [
- begin
- [Zone, N] = string:tokens(Rule, ":"),
- {list_to_binary(Zone), list_to_integer(N)}
- end
- || Rule <- string:tokens(PlacementStr, ",")
- ].
-
--spec dbname(#shard{} | iodata()) -> binary().
-dbname(#shard{dbname = DbName}) ->
- DbName;
-dbname(<<"shards/", _:8/binary, "-", _:8/binary, "/", DbName/binary>>) ->
- list_to_binary(filename:rootname(binary_to_list(DbName)));
-dbname(DbName) when is_list(DbName) ->
- dbname(list_to_binary(DbName));
-dbname(DbName) when is_binary(DbName) ->
- DbName;
-dbname(_) ->
- erlang:error(badarg).
-
-%% @doc Determine if DocId belongs in shard (identified by record or filename)
-belongs(#shard{} = Shard, DocId) when is_binary(DocId) ->
- [Begin, End] = range(Shard),
- belongs(Begin, End, Shard, DocId);
-belongs(<<"shards/", _/binary>> = ShardName, DocId) when is_binary(DocId) ->
- [Begin, End] = range(ShardName),
- belongs(Begin, End, ShardName, DocId);
-belongs(DbName, DocId) when is_binary(DbName), is_binary(DocId) ->
- true.
-
-belongs(Begin, End, Shard, DocId) ->
- HashKey = mem3_hash:calculate(Shard, DocId),
- Begin =< HashKey andalso HashKey =< End.
-
-range(#shard{range = Range}) ->
- Range;
-range(#ordered_shard{range = Range}) ->
- Range;
-range(<<"shards/", Start:8/binary, "-", End:8/binary, "/", _/binary>>) ->
- [
- httpd_util:hexlist_to_integer(binary_to_list(Start)),
- httpd_util:hexlist_to_integer(binary_to_list(End))
- ].
-
-allowed_nodes() ->
- lists:filter(
- fun(Node) ->
- Decom = mem3:node_info(Node, <<"decom">>),
- (Decom =/= true) andalso (Decom =/= <<"true">>)
- end,
- mem3:nodes()
- ).
-
-nodes_in_zone(Nodes, Zone) ->
- [Node || Node <- Nodes, Zone == mem3:node_info(Node, <<"zone">>)].
-
-live_shards(DbName, Nodes) ->
- live_shards(DbName, Nodes, []).
-
-live_shards(DbName, Nodes, Options) ->
- [S || S <- shards_int(DbName, Options), lists:member(mem3:node(S), Nodes)].
-
-zone_map(Nodes) ->
- [{Node, node_info(Node, <<"zone">>)} || Node <- Nodes].
-
-group_by_proximity(Shards) ->
- Nodes = [mem3:node(S) || S <- lists:ukeysort(#shard.node, Shards)],
- group_by_proximity(Shards, zone_map(Nodes)).
-
-group_by_proximity(Shards, ZoneMap) ->
- {Local, Remote} = lists:partition(
- fun(S) -> mem3:node(S) =:= node() end,
- Shards
- ),
- LocalZone = proplists:get_value(node(), ZoneMap),
- Fun = fun(S) -> proplists:get_value(mem3:node(S), ZoneMap) =:= LocalZone end,
- {SameZone, DifferentZone} = lists:partition(Fun, Remote),
- {Local, SameZone, DifferentZone}.
-
-choose_ushards(DbName, Shards) ->
- Groups0 = group_by_range(Shards),
- Groups1 = [
- mem3_util:rotate_list({DbName, R}, order_shards(G))
- || {R, G} <- Groups0
- ],
- [hd(G) || G <- Groups1].
-
-order_shards([#ordered_shard{} | _] = OrderedShards) ->
- lists:keysort(#ordered_shard.order, OrderedShards);
-order_shards(UnorderedShards) ->
- UnorderedShards.
-
-group_by_range(Shards) ->
- lists:foldl(
- fun(Shard, Dict) ->
- orddict:append(mem3:range(Shard), Shard, Dict)
- end,
- orddict:new(),
- Shards
- ).
-
-% quorum functions
-
-quorum(DbName) when is_binary(DbName) ->
- n(DbName) div 2 + 1;
-quorum(Db) ->
- quorum(couch_db:name(Db)).
-
-node(#shard{node = Node}) ->
- Node;
-node(#ordered_shard{node = Node}) ->
- Node.
-
-name(#shard{name = Name}) ->
- Name;
-name(#ordered_shard{name = Name}) ->
- Name.
-
-% Direct calculation of node membership. This is the algorithm part. It
-% doesn't read the shard map, just picks owner based on a hash.
--spec owner(binary(), binary(), [node()]) -> node().
-owner(DbName, DocId, Nodes) ->
- hd(mem3_util:rotate_list({DbName, DocId}, lists:usort(Nodes))).
-
-engine(#shard{opts = Opts}) ->
- engine(Opts);
-engine(#ordered_shard{opts = Opts}) ->
- engine(Opts);
-engine(Opts) when is_list(Opts) ->
- case couch_util:get_value(engine, Opts) of
- Engine when is_binary(Engine) ->
- [{engine, Engine}];
- _ ->
- []
- end.
-
-%% Check whether a node is up or down
-%% side effect: set up a connection to Node if there is not yet one.
-
--spec ping(Node :: atom()) -> pong | pang.
-
-ping(Node) ->
- ping(Node, ?PING_TIMEOUT_IN_MS).
-
--spec ping(Node :: atom(), Timeout :: pos_integer()) -> pong | pang.
-
-ping(Node, Timeout) when is_atom(Node) ->
- %% The implementation of the function is copied from
- %% lib/kernel/src/net_adm.erl with addition of a Timeout
- case
- catch gen:call(
- {net_kernel, Node},
- '$gen_call',
- {is_auth, node()},
- Timeout
- )
- of
- {ok, yes} ->
- pong;
- _ ->
- erlang:disconnect_node(Node),
- pang
- end.
-
-db_is_current(#shard{name = Name}) ->
- db_is_current(Name);
-db_is_current(<<"shards/", _/binary>> = Name) ->
- try
- Shards = mem3:shards(mem3:dbname(Name)),
- lists:keyfind(Name, #shard.name, Shards) =/= false
- catch
- error:database_does_not_exist ->
- false
- end;
-db_is_current(Name) when is_binary(Name) ->
- % This accounts for local (non-sharded) dbs, and is mostly
- % for unit tests that either test or use mem3_rep logic
- couch_server:exists(Name).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(ALLOWED_NODE, 'node1@127.0.0.1').
-
-allowed_nodes_test_() ->
- {"allowed_nodes test", [
- {
- setup,
- fun() ->
- Props = [
- {?ALLOWED_NODE, []},
- {'node2@127.0.0.1', [{<<"decom">>, <<"true">>}]},
- {'node3@127.0.0.1', [{<<"decom">>, true}]}
- ],
- ok = meck:expect(
- mem3_nodes,
- get_nodelist,
- fun() -> proplists:get_keys(Props) end
- ),
- ok = meck:expect(
- mem3_nodes,
- get_node_info,
- fun(Node, Key) ->
- couch_util:get_value(Key, proplists:get_value(Node, Props))
- end
- )
- end,
- fun(_) -> meck:unload() end,
- [
- ?_assertMatch([?ALLOWED_NODE], allowed_nodes())
- ]
- }
- ]}.
-
-rotate_rand_degenerate_test() ->
- ?assertEqual([1], rotate_rand([1])).
-
-rotate_rand_distribution_test() ->
- Cases = [rotate_rand([1, 2, 3]) || _ <- lists:seq(1, 100)],
- ?assertEqual(3, length(lists:usort(Cases))).
-
--endif.
diff --git a/src/mem3/src/mem3_app.erl b/src/mem3/src/mem3_app.erl
deleted file mode 100644
index 3ddfbe6fd..000000000
--- a/src/mem3/src/mem3_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, []) ->
- mem3_sup:start_link().
-
-stop([]) ->
- ok.
diff --git a/src/mem3/src/mem3_bdu.erl b/src/mem3/src/mem3_bdu.erl
deleted file mode 100644
index 84eda2397..000000000
--- a/src/mem3/src/mem3_bdu.erl
+++ /dev/null
@@ -1,111 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_bdu).
-
--export([
- before_doc_update/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--spec before_doc_update(#doc{}, Db :: any(), couch_db:update_type()) -> #doc{}.
-before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db, _UpdateType) ->
- % Skip design docs
- Doc;
-before_doc_update(#doc{deleted = true} = Doc, _Db, _UpdateType) ->
- % Skip deleted
- Doc;
-before_doc_update(#doc{} = Doc, _Db, replicated_changes) ->
- % Skip internal replicator updates
- Doc;
-before_doc_update(#doc{} = Doc, _Db, _UpdateType) ->
- Body1 = couch_util:json_encode(Doc#doc.body),
- Body2 = couch_util:json_decode(Body1, [return_maps]),
- validate(Body2),
- Doc.
-
-validate(#{} = Body) ->
- validate_key(<<"by_node">>, Body, ["by_node is mandatory"]),
- validate_key(<<"by_range">>, Body, ["by_range is mandatory"]),
- ByNode = maps:get(<<"by_node">>, Body),
- case is_map(ByNode) of
- true -> ok;
- false -> throw({forbidden, ["by_node not an object"]})
- end,
- ByRange = maps:get(<<"by_range">>, Body),
- case is_map(ByRange) of
- true -> ok;
- false -> throw({forbidden, ["by_range not an object"]})
- end,
- % "by_node": {
- % "node1@xxx.xxx.xxx.xxx": ["00000000-1fffffff",...]
- % ]}
- maps:map(
- fun(Node, Ranges) ->
- validate_by_node(Node, Ranges, ByRange)
- end,
- ByNode
- ),
- % "by_range": {
- % "00000000-1fffffff": ["node1@xxx.xxx.xxx.xxx", ...]
- % ]}
- maps:map(
- fun(Range, Nodes) ->
- validate_by_range(Range, Nodes, ByNode)
- end,
- ByRange
- ).
-
-validate_by_node(Node, Ranges, ByRange) ->
- validate_array(Ranges, ["by_node", Ranges, "value not an array"]),
- lists:foreach(
- fun(Range) ->
- validate_key(Range, ByRange, ["by_range for", Range, "missing"]),
- Nodes = maps:get(Range, ByRange),
- validate_member(Node, Nodes, ["by_range for", Range, "missing", Node])
- end,
- Ranges
- ).
-
-validate_by_range(Range, Nodes, ByNode) ->
- validate_array(Nodes, ["by_range", Nodes, "value not an array"]),
- lists:foreach(
- fun(Node) ->
- validate_key(Node, ByNode, ["by_node for", Node, "missing"]),
- Ranges = maps:get(Node, ByNode),
- validate_member(Range, Ranges, ["by_node for", Node, "missing", Range])
- end,
- Nodes
- ).
-
-validate_array(Val, _ErrMsg) when is_list(Val) ->
- ok;
-validate_array(_Val, ErrMsg) ->
- throw({forbidden, errmsg(ErrMsg)}).
-
-validate_key(Key, #{} = Map, ErrMsg) ->
- case maps:is_key(Key, Map) of
- true -> ok;
- false -> throw({forbidden, errmsg(ErrMsg)})
- end.
-
-validate_member(Val, Array, ErrMsg) when is_list(Array) ->
- case lists:member(Val, Array) of
- true -> ok;
- false -> throw({forbidden, errmsg(ErrMsg)})
- end;
-validate_member(_Val, _Array, ErrMsg) ->
- throw({forbidden, errmsg(ErrMsg)}).
-
-errmsg(ErrMsg) when is_list(ErrMsg) ->
- list_to_binary(lists:join(" ", ErrMsg)).
diff --git a/src/mem3/src/mem3_cluster.erl b/src/mem3/src/mem3_cluster.erl
deleted file mode 100644
index 974b2cbef..000000000
--- a/src/mem3/src/mem3_cluster.erl
+++ /dev/null
@@ -1,149 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Maintain cluster stability information. A cluster is considered stable if
-% there were no changes to it during a given period of time.
-%
-% To be notified of cluster stability / instability the owner module must
-% implement the mem3_cluster behavior. When cluster membership changes,
-% the cluster_unstable behavior callback will be called. After that, if there
-% are no more changes to the cluster, the cluster_stable callback will be called.
-%
-% The period is passed in as start argument but it can also be set dynamically
-% via the set_period/2 API call.
-%
-% In some cases it might be useful to have a shorter period during startup.
-% That can be configured via the StartPeriod argument. If the time since start
-% is less than a full period, then the StartPeriod is used as the period.
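-%
-% For illustration, a minimal owner module implementing this behaviour might
-% look like the sketch below (the module name and log messages are made up):
-%
-%     -module(my_cluster_watcher).
-%     -behaviour(mem3_cluster).
-%     -export([cluster_stable/1, cluster_unstable/1]).
-%
-%     cluster_stable(Context) ->
-%         couch_log:notice("cluster is stable again", []),
-%         Context.
-%
-%     cluster_unstable(Context) ->
-%         couch_log:warning("cluster membership changed", []),
-%         Context.
-%
-% The owner would typically start the monitor from its own supervisor, e.g.
-% mem3_cluster:start_link(my_cluster_watcher, InitCtx, 5, 60).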
-
--module(mem3_cluster).
-
--behaviour(gen_server).
-
--export([
- start_link/4,
- set_period/2
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--callback cluster_stable(Context :: term()) -> NewContext :: term().
--callback cluster_unstable(Context :: term()) -> NewContext :: term().
-
--record(state, {
- mod :: atom(),
- ctx :: term(),
- start_time :: erlang:timestamp(),
- last_change :: erlang:timestamp(),
- period :: integer(),
- start_period :: integer(),
- timer :: reference()
-}).
-
--spec start_link(module(), term(), integer(), integer()) ->
- {ok, pid()} | ignore | {error, term()}.
-start_link(Module, Context, StartPeriod, Period) when
- is_atom(Module), is_integer(StartPeriod), is_integer(Period)
-->
- gen_server:start_link(?MODULE, [Module, Context, StartPeriod, Period], []).
-
--spec set_period(pid(), integer()) -> ok.
-set_period(Server, Period) when is_pid(Server), is_integer(Period) ->
- gen_server:cast(Server, {set_period, Period}).
-
-% gen_server callbacks
-
-init([Module, Context, StartPeriod, Period]) ->
- net_kernel:monitor_nodes(true),
- {ok, #state{
- mod = Module,
- ctx = Context,
- start_time = os:timestamp(),
- last_change = os:timestamp(),
- period = Period,
- start_period = StartPeriod,
- timer = new_timer(StartPeriod)
- }}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(_Msg, _From, State) ->
- {reply, ignored, State}.
-
-handle_cast({set_period, Period}, State) ->
- {noreply, State#state{period = Period}}.
-
-handle_info({nodeup, _Node}, State) ->
- {noreply, cluster_changed(State)};
-handle_info({nodedown, _Node}, State) ->
- {noreply, cluster_changed(State)};
-handle_info(stability_check, #state{mod = Mod, ctx = Ctx} = State) ->
- erlang:cancel_timer(State#state.timer),
- case now_diff_sec(State#state.last_change) > interval(State) of
- true ->
- {noreply, State#state{ctx = Mod:cluster_stable(Ctx)}};
- false ->
- Timer = new_timer(interval(State)),
- {noreply, State#state{timer = Timer}}
- end.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%% Internal functions
-
--spec cluster_changed(#state{}) -> #state{}.
-cluster_changed(#state{mod = Mod, ctx = Ctx} = State) ->
- State#state{
- last_change = os:timestamp(),
- timer = new_timer(interval(State)),
- ctx = Mod:cluster_unstable(Ctx)
- }.
-
--spec new_timer(non_neg_integer()) -> reference().
-new_timer(IntervalSec) ->
- erlang:send_after(IntervalSec * 1000, self(), stability_check).
-
-% For the first Period seconds after node boot we check cluster stability every
-% StartPeriod seconds. Once the initial Period seconds have passed we continue
-% to monitor once every Period seconds
--spec interval(#state{}) -> non_neg_integer().
-interval(#state{
- period = Period,
- start_period = StartPeriod,
- start_time = T0
-}) ->
- case now_diff_sec(T0) > Period of
- true ->
- % Normal operation
- Period;
- false ->
- % During startup
- StartPeriod
- end.
-
--spec now_diff_sec(erlang:timestamp()) -> non_neg_integer().
-now_diff_sec(Time) ->
- case timer:now_diff(os:timestamp(), Time) of
- USec when USec < 0 ->
- 0;
- USec when USec >= 0 ->
- USec / 1000000
- end.
diff --git a/src/mem3/src/mem3_epi.erl b/src/mem3/src/mem3_epi.erl
deleted file mode 100644
index 7bfc74dcf..000000000
--- a/src/mem3/src/mem3_epi.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- mem3.
-
-providers() ->
- [
- {couch_db, mem3_plugin_couch_db},
- {chttpd_handlers, mem3_httpd_handlers}
- ].
-
-services() ->
- [].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/mem3/src/mem3_hash.erl b/src/mem3/src/mem3_hash.erl
deleted file mode 100644
index ccaab7223..000000000
--- a/src/mem3/src/mem3_hash.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_hash).
-
--export([
- calculate/2,
-
- get_hash_fun/1,
-
- crc32/1
-]).
-
--include_lib("mem3/include/mem3.hrl").
-
-calculate(#shard{opts = Opts}, DocId) ->
- Props = couch_util:get_value(props, Opts, []),
- MFA = get_hash_fun_int(Props),
- calculate(MFA, DocId);
-calculate(#ordered_shard{opts = Opts}, DocId) ->
- Props = couch_util:get_value(props, Opts, []),
- MFA = get_hash_fun_int(Props),
- calculate(MFA, DocId);
-calculate(DbName, DocId) when is_binary(DbName) ->
- MFA = get_hash_fun(DbName),
- calculate(MFA, DocId);
-calculate({Mod, Fun, Args}, DocId) ->
- erlang:apply(Mod, Fun, [DocId | Args]).
-
-get_hash_fun(#shard{opts = Opts}) ->
- get_hash_fun_int(Opts);
-get_hash_fun(#ordered_shard{opts = Opts}) ->
- get_hash_fun_int(Opts);
-get_hash_fun(DbName0) when is_binary(DbName0) ->
- DbName = mem3:dbname(DbName0),
- try
- [#shard{opts = Opts} | _] = mem3_shards:for_db(DbName),
- get_hash_fun_int(couch_util:get_value(props, Opts, []))
- catch
- error:database_does_not_exist ->
- {?MODULE, crc32, []}
- end.
-
-crc32(Item) when is_binary(Item) ->
- erlang:crc32(Item);
-crc32(Item) ->
- erlang:crc32(term_to_binary(Item)).
-
-get_hash_fun_int(Opts) when is_list(Opts) ->
- case lists:keyfind(hash, 1, Opts) of
- {hash, [Mod, Fun, Args]} ->
- {Mod, Fun, Args};
- _ ->
- {?MODULE, crc32, []}
- end.
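Editor's note: a hedged illustration of the hash dispatch in the removed mem3_hash module above. Shard opts may carry a {hash, [Mod, Fun, Args]} property; anything else falls back to CRC32 of the document id. The custom module, function, and seed named in the comments are hypothetical placeholders.

-module(mem3_hash_example).
-export([default_position/1]).

%% Mirrors the default {mem3_hash, crc32, []} branch above: the ring
%% position of a doc id is its CRC32 when no {hash, [Mod, Fun, Args]}
%% property is present in the shard opts.
default_position(DocId) when is_binary(DocId) ->
    erlang:crc32(DocId).

%% With a hypothetical props entry such as
%%   [{hash, [my_hash_mod, my_hash_fun, [Seed]]}]
%% the deleted calculate/2 would instead have run
%%   erlang:apply(my_hash_mod, my_hash_fun, [DocId, Seed]).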
diff --git a/src/mem3/src/mem3_httpd.erl b/src/mem3/src/mem3_httpd.erl
deleted file mode 100644
index 745fe815c..000000000
--- a/src/mem3/src/mem3_httpd.erl
+++ /dev/null
@@ -1,118 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_httpd).
-
--export([
- handle_membership_req/1,
- handle_shards_req/2,
- handle_sync_req/2
-]).
-
-%% includes
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-handle_membership_req(
- #httpd{
- method = 'GET',
- path_parts = [<<"_membership">>]
- } = Req
-) ->
- ClusterNodes =
- try
- mem3:nodes()
- catch
- _:_ -> {ok, []}
- end,
- couch_httpd:send_json(
- Req,
- {[
- {all_nodes, lists:sort([node() | nodes()])},
- {cluster_nodes, lists:sort(ClusterNodes)}
- ]}
- );
-handle_membership_req(#httpd{path_parts = [<<"_membership">>]} = Req) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-handle_shards_req(
- #httpd{
- method = 'GET',
- path_parts = [_DbName, <<"_shards">>]
- } = Req,
- Db
-) ->
- DbName = mem3:dbname(couch_db:name(Db)),
- Shards = mem3:shards(DbName),
- JsonShards = json_shards(Shards, dict:new()),
- couch_httpd:send_json(
- Req,
- {[
- {shards, JsonShards}
- ]}
- );
-handle_shards_req(
- #httpd{
- method = 'GET',
- path_parts = [_DbName, <<"_shards">>, DocId]
- } = Req,
- Db
-) ->
- DbName = mem3:dbname(couch_db:name(Db)),
- Shards = mem3:shards(DbName, DocId),
- {[{Shard, Dbs}]} = json_shards(Shards, dict:new()),
- couch_httpd:send_json(
- Req,
- {[
- {range, Shard},
- {nodes, Dbs}
- ]}
- );
-handle_shards_req(#httpd{path_parts = [_DbName, <<"_shards">>]} = Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET");
-handle_shards_req(#httpd{path_parts = [_DbName, <<"_shards">>, _DocId]} = Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-handle_sync_req(
- #httpd{
- method = 'POST',
- path_parts = [_DbName, <<"_sync_shards">>]
- } = Req,
- Db
-) ->
- DbName = mem3:dbname(couch_db:name(Db)),
- ShardList = [S#shard.name || S <- mem3:ushards(DbName)],
- [sync_shard(S) || S <- ShardList],
- chttpd:send_json(Req, 202, {[{ok, true}]});
-handle_sync_req(Req, _) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-%%
-%% internal
-%%
-
-json_shards([], AccIn) ->
- List = dict:to_list(AccIn),
- {lists:sort(List)};
-json_shards([#shard{node = Node, range = [B, E]} | Rest], AccIn) ->
- HexBeg = couch_util:to_hex(<<B:32/integer>>),
- HexEnd = couch_util:to_hex(<<E:32/integer>>),
- Range = list_to_binary(HexBeg ++ "-" ++ HexEnd),
- json_shards(Rest, dict:append(Range, Node, AccIn)).
-
-sync_shard(ShardName) ->
- Shards = mem3_shards:for_shard_range(ShardName),
- [
- rpc:call(S1#shard.node, mem3_sync, push, [S1, S2#shard.node])
- || S1 <- Shards, S2 <- Shards, S1 =/= S2
- ],
- ok.
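Editor's note: json_shards/2 above renders each shard range as a "beg-end" hex key, the shape seen in _shards responses. Below is a hedged, self-contained approximation in which io_lib formatting stands in for couch_util:to_hex/1.

-module(shard_range_key_example).
-export([range_key/2]).

%% range_key(16#00000000, 16#7fffffff) -> <<"00000000-7fffffff">>
range_key(B, E) ->
    HexBeg = io_lib:format("~8.16.0b", [B]),
    HexEnd = io_lib:format("~8.16.0b", [E]),
    iolist_to_binary([HexBeg, "-", HexEnd]).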
diff --git a/src/mem3/src/mem3_httpd_handlers.erl b/src/mem3/src/mem3_httpd_handlers.erl
deleted file mode 100644
index ca6893e98..000000000
--- a/src/mem3/src/mem3_httpd_handlers.erl
+++ /dev/null
@@ -1,25 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1]).
-
-url_handler(<<"_membership">>) -> fun mem3_httpd:handle_membership_req/1;
-url_handler(<<"_reshard">>) -> fun mem3_reshard_httpd:handle_reshard_req/1;
-url_handler(_) -> no_match.
-
-db_handler(<<"_shards">>) -> fun mem3_httpd:handle_shards_req/2;
-db_handler(<<"_sync_shards">>) -> fun mem3_httpd:handle_sync_req/2;
-db_handler(_) -> no_match.
-
-design_handler(_) -> no_match.
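Editor's note: the exported functions above form a simple dispatch table keyed on the first path segment, with no_match meaning the request falls through to default handling elsewhere. A hedged, shell-style illustration of that lookup (not code from the tree):

%% Resolving a handler for the /_membership endpoint.
case mem3_httpd_handlers:url_handler(<<"_membership">>) of
    no_match ->
        %% Callers would fall back to their default URL handling here.
        not_handled;
    Handler when is_function(Handler, 1) ->
        %% Handler is fun mem3_httpd:handle_membership_req/1.
        Handler
end.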
diff --git a/src/mem3/src/mem3_nodes.erl b/src/mem3/src/mem3_nodes.erl
deleted file mode 100644
index b46b3bb64..000000000
--- a/src/mem3/src/mem3_nodes.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_nodes).
--behaviour(gen_server).
--vsn(1).
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
--export([start_link/0, get_nodelist/0, get_node_info/2]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {changes_pid, update_seq}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_nodelist() ->
- try
- lists:sort([N || {N, _} <- ets:tab2list(?MODULE)])
- catch
- error:badarg ->
- gen_server:call(?MODULE, get_nodelist)
- end.
-
-get_node_info(Node, Key) ->
- try
- couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
- catch
- error:badarg ->
- gen_server:call(?MODULE, {get_node_info, Node, Key})
- end.
-
-init([]) ->
- ets:new(?MODULE, [named_table, {read_concurrency, true}]),
- UpdateSeq = initialize_nodelist(),
- {Pid, _} = spawn_monitor(fun() -> listen_for_changes(UpdateSeq) end),
- {ok, #state{changes_pid = Pid, update_seq = UpdateSeq}}.
-
-handle_call(get_nodelist, _From, State) ->
- {reply, lists:sort([N || {N, _} <- ets:tab2list(?MODULE)]), State};
-handle_call({get_node_info, Node, Key}, _From, State) ->
- Resp =
- try
- couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
- catch
- error:badarg ->
- error
- end,
- {reply, Resp, State};
-handle_call({add_node, Node, NodeInfo}, _From, State) ->
- gen_event:notify(mem3_events, {add_node, Node}),
- ets:insert(?MODULE, {Node, NodeInfo}),
- {reply, ok, State};
-handle_call({remove_node, Node}, _From, State) ->
- gen_event:notify(mem3_events, {remove_node, Node}),
- ets:delete(?MODULE, Node),
- {reply, ok, State};
-handle_call(_Call, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid = Pid} = State) ->
- couch_log:notice("~p changes listener died ~p", [?MODULE, Reason]),
- StartSeq = State#state.update_seq,
- Seq =
- case Reason of
- {seq, EndSeq} -> EndSeq;
- _ -> StartSeq
- end,
- erlang:send_after(5000, self(), start_listener),
- {noreply, State#state{update_seq = Seq}};
-handle_info(start_listener, #state{update_seq = Seq} = State) ->
- {NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
- {noreply, State#state{changes_pid = NewPid}};
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, #state{} = State, _Extra) ->
- {ok, State}.
-
-%% internal functions
-
-initialize_nodelist() ->
- DbName = config:get("mem3", "nodes_db", "_nodes"),
- {ok, Db} = mem3_util:ensure_exists(DbName),
- {ok, _} = couch_db:fold_docs(Db, fun first_fold/2, Db, []),
- insert_if_missing(Db, [node() | mem3_seeds:get_seeds()]),
- Seq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- Seq.
-
-first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, Acc) ->
- {ok, Acc};
-first_fold(#full_doc_info{deleted = true}, Acc) ->
- {ok, Acc};
-first_fold(#full_doc_info{id = Id} = DocInfo, Db) ->
- {ok, #doc{body = {Props}}} = couch_db:open_doc(Db, DocInfo, [ejson_body]),
- ets:insert(?MODULE, {mem3_util:to_atom(Id), Props}),
- {ok, Db}.
-
-listen_for_changes(Since) ->
- DbName = config:get("mem3", "nodes_db", "_nodes"),
- {ok, Db} = mem3_util:ensure_exists(DbName),
- Args = #changes_args{
- feed = "continuous",
- since = Since,
- heartbeat = true,
- include_docs = true
- },
- ChangesFun = couch_changes:handle_db_changes(Args, nil, Db),
- ChangesFun(fun changes_callback/2).
-
-changes_callback(start, _) ->
- {ok, nil};
-changes_callback({stop, EndSeq}, _) ->
- exit({seq, EndSeq});
-changes_callback({change, {Change}, _}, _) ->
- Node = couch_util:get_value(<<"id">>, Change),
- case Node of
- <<"_design/", _/binary>> ->
- ok;
- _ ->
- case mem3_util:is_deleted(Change) of
- false ->
- {Props} = couch_util:get_value(doc, Change),
- gen_server:call(?MODULE, {add_node, mem3_util:to_atom(Node), Props});
- true ->
- gen_server:call(?MODULE, {remove_node, mem3_util:to_atom(Node)})
- end
- end,
- {ok, couch_util:get_value(<<"seq">>, Change)};
-changes_callback(timeout, _) ->
- {ok, nil}.
-
-insert_if_missing(Db, Nodes) ->
- Docs = lists:foldl(
- fun(Node, Acc) ->
- case ets:lookup(?MODULE, Node) of
- [_] ->
- Acc;
- [] ->
- ets:insert(?MODULE, {Node, []}),
- [#doc{id = couch_util:to_binary(Node)} | Acc]
- end
- end,
- [],
- Nodes
- ),
- if
- Docs =/= [] ->
- {ok, _} = couch_db:update_docs(Db, Docs, []);
- true ->
- {ok, []}
- end.
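Editor's note: to make the caching in the removed mem3_nodes module above a bit more concrete, here is a small, self-contained sketch of the ets lookup it performed. The node name and the "zone" property are illustrative assumptions rather than anything required by the code.

-module(mem3_nodes_example).
-export([zone_of_node/0]).

%% The real module filled a named ets table from the _nodes database and
%% get_node_info/2 read a property from the cached document body.
zone_of_node() ->
    Tab = ets:new(nodes_cache, [set]),
    true = ets:insert(Tab, {'node1@127.0.0.1', [{<<"zone">>, <<"us-east-1">>}]}),
    Props = ets:lookup_element(Tab, 'node1@127.0.0.1', 2),
    proplists:get_value(<<"zone">>, Props).   %% -> <<"us-east-1">>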
diff --git a/src/mem3/src/mem3_plugin_couch_db.erl b/src/mem3/src/mem3_plugin_couch_db.erl
deleted file mode 100644
index ca6a2e570..000000000
--- a/src/mem3/src/mem3_plugin_couch_db.erl
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_plugin_couch_db).
-
--export([
- is_valid_purge_client/2
-]).
-
-is_valid_purge_client(DbName, Props) ->
- mem3_rep:verify_purge_checkpoint(DbName, Props).
diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
deleted file mode 100644
index afb3bc72b..000000000
--- a/src/mem3/src/mem3_rep.erl
+++ /dev/null
@@ -1,1094 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_rep).
-
--export([
- go/2,
- go/3,
- make_local_id/2,
- make_local_id/3,
- make_purge_id/2,
- verify_purge_checkpoint/2,
- find_source_seq/4,
- find_split_target_seq/4,
- local_id_hash/1
-]).
-
--export([
- changes_enumerator/2
-]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(acc, {
- batch_size,
- batch_count,
- seq = 0,
- revcount = 0,
- source,
- targets,
- filter,
- db,
- hashfun,
- incomplete_ranges
-}).
-
--record(tgt, {
- shard,
- seq = 0,
- infos = [],
- localid,
- purgeid,
- history = {[]},
- remaining = 0
-}).
-
-go(Source, Target) ->
- go(Source, Target, []).
-
-go(DbName, Node, Opts) when is_binary(DbName), is_atom(Node) ->
- go(#shard{name = DbName, node = node()}, #shard{name = DbName, node = Node}, Opts);
-go(#shard{} = Source, #shard{} = Target, Opts) ->
- case mem3:db_is_current(Source) of
- true ->
- go(Source, targets_map(Source, Target), Opts);
- false ->
- % Database could have been recreated
- {error, missing_source}
- end;
-go(#shard{} = Source, #{} = Targets0, Opts) when map_size(Targets0) > 0 ->
- Targets = maps:map(fun(_, T) -> #tgt{shard = T} end, Targets0),
- case couch_server:exists(Source#shard.name) of
- true ->
- sync_security(Source, Targets),
- BatchSize =
- case proplists:get_value(batch_size, Opts) of
- BS when is_integer(BS), BS > 0 -> BS;
- _ -> 100
- end,
- BatchCount =
- case proplists:get_value(batch_count, Opts) of
- all -> all;
- BC when is_integer(BC), BC > 0 -> BC;
- _ -> 1
- end,
- IncompleteRanges = config:get_boolean(
- "mem3",
- "incomplete_ranges",
- false
- ),
- Filter = proplists:get_value(filter, Opts),
- Acc = #acc{
- batch_size = BatchSize,
- batch_count = BatchCount,
- source = Source,
- targets = Targets,
- filter = Filter,
- incomplete_ranges = IncompleteRanges
- },
- go(Acc);
- false ->
- {error, missing_source}
- end.
-
-go(#acc{source = Source, batch_count = BC} = Acc) ->
- case couch_db:open(Source#shard.name, [?ADMIN_CTX]) of
- {ok, Db} ->
- Resp =
- try
- HashFun = mem3_hash:get_hash_fun(couch_db:name(Db)),
- repl(Acc#acc{db = Db, hashfun = HashFun})
- catch
- error:{error, missing_source} ->
- {error, missing_source};
- error:{not_found, no_db_file} ->
- {error, missing_target}
- after
- couch_db:close(Db)
- end,
- case Resp of
- {ok, P} when P > 0, BC == all ->
- go(Acc);
- {ok, P} when P > 0, BC > 1 ->
- go(Acc#acc{batch_count = BC - 1});
- Else ->
- Else
- end;
- {not_found, no_db_file} ->
- {error, missing_source}
- end.
-
-make_local_id(Source, Target) ->
- make_local_id(Source, Target, undefined).
-
-make_local_id(#shard{node = SourceNode}, #shard{node = TargetNode}, Filter) ->
- make_local_id(SourceNode, TargetNode, Filter);
-make_local_id(SourceThing, TargetThing, F) when is_binary(F) ->
- S = local_id_hash(SourceThing),
- T = local_id_hash(TargetThing),
- <<"_local/shard-sync-", S/binary, "-", T/binary, F/binary>>;
-make_local_id(SourceThing, TargetThing, Filter) ->
- S = local_id_hash(SourceThing),
- T = local_id_hash(TargetThing),
- F = filter_hash(Filter),
- <<"_local/shard-sync-", S/binary, "-", T/binary, F/binary>>.
-
-filter_hash(Filter) when is_function(Filter) ->
- {new_uniq, Hash} = erlang:fun_info(Filter, new_uniq),
- B = couch_util:encodeBase64Url(Hash),
- <<"-", B/binary>>;
-filter_hash(_) ->
- <<>>.
-
-local_id_hash(Thing) ->
- couch_util:encodeBase64Url(couch_hash:md5_hash(term_to_binary(Thing))).
-
-make_purge_id(SourceUUID, TargetUUID) ->
- <<"_local/purge-mem3-", SourceUUID/binary, "-", TargetUUID/binary>>.
-
-verify_purge_checkpoint(DbName, Props) ->
- try
- Type = couch_util:get_value(<<"type">>, Props),
- if
- Type =/= <<"internal_replication">> ->
- false;
- true ->
- SourceBin = couch_util:get_value(<<"source">>, Props),
- TargetBin = couch_util:get_value(<<"target">>, Props),
- Range = couch_util:get_value(<<"range">>, Props),
-
- Source = binary_to_existing_atom(SourceBin, latin1),
- Target = binary_to_existing_atom(TargetBin, latin1),
-
- try
- Nodes = lists:foldl(
- fun(Shard, Acc) ->
- case Shard#shard.range == Range of
- true -> [Shard#shard.node | Acc];
- false -> Acc
- end
- end,
- [],
- mem3:shards(DbName)
- ),
- lists:member(Source, Nodes) andalso lists:member(Target, Nodes)
- catch
- error:database_does_not_exist ->
- false
- end
- end
- catch
- _:_ ->
- false
- end.
-
-%% @doc Find and return the largest update_seq in SourceDb
-%% that the client has seen from TargetNode.
-%%
-%% When reasoning about this function it is very important to
-%% understand the direction of replication for this comparison.
-%% We're only interested in internal replications initiated
-%% by this node to the node being replaced. When doing a
-%% replacement the most important thing is that the client doesn't
-%% miss any updates. This means we can only fast-forward as far
-%% as they've seen updates on this node. We can detect that by
-%% looking for our push replication history and choosing the
-%% largest source_seq that has a target_seq =< TgtSeq.
-find_source_seq(SrcDb, TgtNode, TgtUUIDPrefix, TgtSeq) ->
- case find_repl_doc(SrcDb, TgtUUIDPrefix) of
- {ok, TgtUUID, Doc} ->
- SrcNode = atom_to_binary(node(), utf8),
- find_source_seq_int(Doc, SrcNode, TgtNode, TgtUUID, TgtSeq);
- {not_found, _} ->
- couch_log:warning(
- "~p find_source_seq repl doc not_found "
- "src_db: ~p, tgt_node: ~p, tgt_uuid_prefix: ~p, tgt_seq: ~p",
- [?MODULE, SrcDb, TgtNode, TgtUUIDPrefix, TgtSeq]
- ),
- 0
- end.
-
-find_source_seq_int(#doc{body = {Props}}, SrcNode0, TgtNode0, TgtUUID, TgtSeq) ->
- SrcNode =
- case is_atom(SrcNode0) of
- true -> atom_to_binary(SrcNode0, utf8);
- false -> SrcNode0
- end,
- TgtNode =
- case is_atom(TgtNode0) of
- true -> atom_to_binary(TgtNode0, utf8);
- false -> TgtNode0
- end,
- % This is split off purely for the ability to run unit tests
- % against this bit of code without requiring all sorts of mocks.
- {History} = couch_util:get_value(<<"history">>, Props, {[]}),
- SrcHistory = couch_util:get_value(SrcNode, History, []),
- UseableHistory = lists:filter(
- fun({Entry}) ->
- couch_util:get_value(<<"target_node">>, Entry) =:= TgtNode andalso
- couch_util:get_value(<<"target_uuid">>, Entry) =:= TgtUUID andalso
- couch_util:get_value(<<"target_seq">>, Entry) =< TgtSeq
- end,
- SrcHistory
- ),
-
- % This relies on SrcHistory being ordered descending by source
- % sequence.
- case UseableHistory of
- [{Entry} | _] ->
- couch_util:get_value(<<"source_seq">>, Entry);
- [] ->
- couch_log:warning(
- "~p find_source_seq_int nil useable history "
- "src_node: ~p, tgt_node: ~p, tgt_uuid: ~p, tgt_seq: ~p, "
- "src_history: ~p",
- [?MODULE, SrcNode, TgtNode, TgtUUID, TgtSeq, SrcHistory]
- ),
- 0
- end.
-
-find_split_target_seq(TgtDb, SrcNode0, SrcUUIDPrefix, SrcSeq) ->
- SrcNode =
- case is_atom(SrcNode0) of
- true -> atom_to_binary(SrcNode0, utf8);
- false -> SrcNode0
- end,
- case find_split_target_seq_int(TgtDb, SrcNode, SrcUUIDPrefix) of
- {ok, [{BulkCopySeq, BulkCopySeq} | _]} when SrcSeq =< BulkCopySeq ->
- % Check if source sequence is at or below the initial bulk copy
- % checkpointed sequence. That sequence or anything lower than it
- % can be directly replaced with the same value for each target. For
- % extra safety we assert that the initial source and target
- % sequences are the same value
- SrcSeq;
- {ok, Seqs = [{_, _} | _]} ->
- % Pick the target sequence for the greatest source sequence that is
- % less than `SrcSeq`.
- case lists:takewhile(fun({Seq, _}) -> Seq < SrcSeq end, Seqs) of
- [] ->
- couch_log:warning(
- "~p find_split_target_seq target seq not found "
- "tgt_db: ~p, src_uuid_prefix: ~p, src_seq: ~p",
- [?MODULE, couch_db:name(TgtDb), SrcUUIDPrefix, SrcSeq]
- ),
- 0;
- [{_, _} | _] = Seqs1 ->
- {_, TSeq} = lists:last(Seqs1),
- TSeq
- end;
- {not_found, _} ->
- couch_log:warning(
- "~p find_split_target_seq target seq not found "
- "tgt_db: ~p, src_uuid_prefix: ~p, src_seq: ~p",
- [?MODULE, couch_db:name(TgtDb), SrcUUIDPrefix, SrcSeq]
- ),
- 0
- end.
-
-repl(#acc{db = Db0} = Acc0) ->
- erlang:put(io_priority, {internal_repl, couch_db:name(Db0)}),
- Acc1 = calculate_start_seq_multi(Acc0),
- try
- Acc3 =
- case config:get_boolean("mem3", "replicate_purges", false) of
- true ->
- Acc2 = pull_purges_multi(Acc1),
- push_purges_multi(Acc2);
- false ->
- Acc1
- end,
- push_changes(Acc3)
- catch
- throw:{finished, Count} ->
- {ok, Count}
- end.
-
-pull_purges_multi(#acc{source = Source} = Acc0) ->
- #acc{batch_size = Count, seq = UpdateSeq, targets = Targets0} = Acc0,
- with_src_db(Acc0, fun(Db) ->
- Targets = maps:map(
- fun(_, #tgt{} = T) ->
- pull_purges(Db, Count, Source, T)
- end,
- reset_remaining(Targets0)
- ),
- Remaining = maps:fold(
- fun(_, #tgt{remaining = R}, Sum) ->
- Sum + R
- end,
- 0,
- Targets
- ),
- if
- Remaining == 0 ->
- Acc0#acc{targets = Targets};
- true ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- OldestPurgeSeq = couch_db:get_oldest_purge_seq(Db),
- PurgesToPush = PurgeSeq - OldestPurgeSeq,
- Changes = couch_db:count_changes_since(Db, UpdateSeq),
- Pending = Remaining + PurgesToPush + Changes,
- throw({finished, Pending})
- end
- end).
-
-pull_purges(Db, Count, SrcShard, #tgt{} = Tgt0) ->
- #tgt{shard = TgtShard} = Tgt0,
- SrcUUID = couch_db:get_uuid(Db),
- #shard{node = TgtNode, name = TgtDbName} = TgtShard,
- {LocalPurgeId, Infos, ThroughSeq, Remaining} =
- mem3_rpc:load_purge_infos(TgtNode, TgtDbName, SrcUUID, Count),
- Tgt = Tgt0#tgt{purgeid = LocalPurgeId},
- if
- Infos == [] ->
- ok;
- true ->
- {ok, _} = couch_db:purge_docs(Db, Infos, [replicated_changes]),
- Body = purge_cp_body(SrcShard, TgtShard, ThroughSeq),
- mem3_rpc:save_purge_checkpoint(TgtNode, TgtDbName, LocalPurgeId, Body)
- end,
- Tgt#tgt{remaining = max(0, Remaining)}.
-
-push_purges_multi(#acc{source = SrcShard} = Acc) ->
- #acc{batch_size = BatchSize, seq = UpdateSeq, targets = Targets0} = Acc,
- with_src_db(Acc, fun(Db) ->
- Targets = maps:map(
- fun(_, #tgt{} = T) ->
- push_purges(Db, BatchSize, SrcShard, T)
- end,
- reset_remaining(Targets0)
- ),
- Remaining = maps:fold(
- fun(_, #tgt{remaining = R}, Sum) ->
- Sum + R
- end,
- 0,
- Targets
- ),
- if
- Remaining == 0 ->
- Acc#acc{targets = Targets};
- true ->
- Changes = couch_db:count_changes_since(Db, UpdateSeq),
- throw({finished, Remaining + Changes})
- end
- end).
-
-push_purges(Db, BatchSize, SrcShard, Tgt) ->
- #tgt{shard = TgtShard, purgeid = LocalPurgeId} = Tgt,
- #shard{node = TgtNode, name = TgtDbName} = TgtShard,
- StartSeq =
- case couch_db:open_doc(Db, LocalPurgeId, []) of
- {ok, #doc{body = {Props}}} ->
- couch_util:get_value(<<"purge_seq">>, Props);
- {not_found, _} ->
- Oldest = couch_db:get_oldest_purge_seq(Db),
- erlang:max(0, Oldest - 1)
- end,
- FoldFun = fun({PSeq, UUID, Id, Revs}, {Count, Infos, _}) ->
- NewCount = Count + length(Revs),
- NewInfos = [{UUID, Id, Revs} | Infos],
- Status =
- if
- NewCount < BatchSize -> ok;
- true -> stop
- end,
- {Status, {NewCount, NewInfos, PSeq}}
- end,
- InitAcc = {0, [], StartSeq},
- {ok, {_, Infos, ThroughSeq}} =
- couch_db:fold_purge_infos(Db, StartSeq, FoldFun, InitAcc),
- if
- Infos == [] ->
- ok;
- true ->
- ok = purge_on_target(TgtNode, TgtDbName, Infos),
- Body = purge_cp_body(SrcShard, TgtShard, ThroughSeq),
- Doc = #doc{id = LocalPurgeId, body = Body},
- {ok, _} = couch_db:update_doc(Db, Doc, [])
- end,
- Tgt#tgt{remaining = max(0, couch_db:get_purge_seq(Db) - ThroughSeq)}.
-
-calculate_start_seq_multi(#acc{} = Acc) ->
- #acc{db = Db, targets = Targets0, filter = Filter} = Acc,
- FilterHash = filter_hash(Filter),
- Targets = maps:map(
- fun(_, #tgt{} = T) ->
- calculate_start_seq(Db, FilterHash, T)
- end,
- Targets0
- ),
- % There will always be at least one target
- #tgt{seq = Seq0} = hd(maps:values(Targets)),
- Seq = maps:fold(fun(_, #tgt{seq = S}, M) -> min(S, M) end, Seq0, Targets),
- Acc#acc{seq = Seq, targets = Targets}.
-
-calculate_start_seq(Db, FilterHash, #tgt{shard = TgtShard} = Tgt) ->
- UUID = couch_db:get_uuid(Db),
- #shard{node = Node, name = Name} = TgtShard,
- {NewDocId, Doc} = mem3_rpc:load_checkpoint(
- Node,
- Name,
- node(),
- UUID,
- FilterHash
- ),
- #doc{id = FoundId, body = {TProps}} = Doc,
- Tgt1 = Tgt#tgt{localid = NewDocId},
- % NewDocId and FoundId may be different the first time
- % this code runs to save our newly named internal replication
- % checkpoints. We store NewDocId to use when saving checkpoints
- % but use FoundId to reuse the same docid that the target used.
- case couch_db:open_doc(Db, FoundId, [ejson_body]) of
- {ok, #doc{body = {SProps}}} ->
- SourceSeq = couch_util:get_value(<<"seq">>, SProps, 0),
- TargetSeq = couch_util:get_value(<<"seq">>, TProps, 0),
- % We resume from the lower update seq stored in the two
- % shard copies. We also need to be sure and use the
- % corresponding history. A difference here could result
- % from either a write failure on one of the nodes or if
- % either shard was truncated by an operator.
- case SourceSeq =< TargetSeq of
- true ->
- Seq = SourceSeq,
- History = couch_util:get_value(<<"history">>, SProps, {[]});
- false ->
- Seq = TargetSeq,
- History = couch_util:get_value(<<"history">>, TProps, {[]})
- end,
- Tgt1#tgt{seq = Seq, history = History};
- {not_found, _} ->
- compare_epochs(Db, Tgt1)
- end.
-
-push_changes(#acc{} = Acc0) ->
- #acc{
- db = Db0,
- seq = Seq
- } = Acc0,
-
- % Avoid needlessly rewriting the internal replication
- % checkpoint document if nothing is replicated.
- UpdateSeq = couch_db:get_update_seq(Db0),
- if
- Seq < UpdateSeq -> ok;
- true -> throw({finished, 0})
- end,
-
- with_src_db(Acc0, fun(Db) ->
- Acc1 = Acc0#acc{db = Db},
- Fun = fun ?MODULE:changes_enumerator/2,
- {ok, Acc2} = couch_db:fold_changes(Db, Seq, Fun, Acc1),
- {ok, #acc{seq = LastSeq}} = replicate_batch_multi(Acc2),
- {ok, couch_db:count_changes_since(Db, LastSeq)}
- end).
-
-compare_epochs(Db, #tgt{shard = TgtShard} = Tgt) ->
- #shard{node = Node, name = Name} = TgtShard,
- UUID = couch_db:get_uuid(Db),
- Epochs = couch_db:get_epochs(Db),
- Seq = mem3_rpc:find_common_seq(Node, Name, UUID, Epochs),
- Tgt#tgt{seq = Seq, history = {[]}}.
-
-changes_enumerator(#doc_info{id = DocId}, #acc{db = Db} = Acc) ->
- {ok, FDI} = couch_db:get_full_doc_info(Db, DocId),
- changes_enumerator(FDI, Acc);
-changes_enumerator(#full_doc_info{} = FDI, #acc{} = Acc0) ->
- #acc{
- revcount = C,
- targets = Targets0,
- hashfun = HashFun,
- incomplete_ranges = IncompleteRanges
- } = Acc0,
- #doc_info{high_seq = Seq, revs = Revs} = couch_doc:to_doc_info(FDI),
- {Count, Targets} =
- case filter_doc(Acc0#acc.filter, FDI) of
- keep ->
- NewTargets = changes_append_fdi(
- FDI,
- Targets0,
- HashFun,
- IncompleteRanges
- ),
- {C + length(Revs), NewTargets};
- discard ->
- {C, Targets0}
- end,
- Acc1 = Acc0#acc{seq = Seq, revcount = Count, targets = Targets},
- Go =
- if
- Count < Acc1#acc.batch_size -> ok;
- true -> stop
- end,
- {Go, Acc1}.
-
-changes_append_fdi(
- #full_doc_info{id = Id} = FDI,
- Targets,
- HashFun,
- IncompleteRanges
-) ->
- case mem3_reshard_job:pickfun(Id, maps:keys(Targets), HashFun) of
- not_in_range when IncompleteRanges ->
- Targets;
- not_in_range when not IncompleteRanges ->
- ErrMsg = "~p : ~p not in any target ranges: ~p",
- TShards = [TS || #tgt{shard = TS} <- maps:values(Targets)],
- TNames = [TN || #shard{name = TN} <- TShards],
- couch_log:error(ErrMsg, [?MODULE, Id, TNames]),
- error({error, {Id, not_in_target_ranges}});
- Key ->
- maps:update_with(
- Key,
- fun(#tgt{infos = Infos} = T) ->
- T#tgt{infos = [FDI | Infos]}
- end,
- Targets
- )
- end.
-
-replicate_batch_multi(#acc{targets = Targets0, seq = Seq, db = Db} = Acc) ->
- Targets = maps:map(
- fun(_, #tgt{} = T) ->
- replicate_batch(T, Db, Seq)
- end,
- Targets0
- ),
- {ok, Acc#acc{targets = Targets, revcount = 0}}.
-
-replicate_batch(#tgt{shard = TgtShard, infos = Infos} = Target, Db, Seq) ->
- #shard{node = Node, name = Name} = TgtShard,
- case find_missing_revs(Target) of
- [] ->
- ok;
- Missing ->
- lists:map(
- fun(Chunk) ->
- Docs = open_docs(Db, Infos, Chunk),
- ok = save_on_target(Node, Name, Docs)
- end,
- chunk_revs(Missing)
- )
- end,
- update_locals(Target, Db, Seq),
- Target#tgt{infos = []}.
-
-find_missing_revs(#tgt{shard = TgtShard, infos = Infos}) ->
- #shard{node = Node, name = Name} = TgtShard,
- IdsRevs = lists:map(
- fun(FDI) ->
- #doc_info{id = Id, revs = RevInfos} = couch_doc:to_doc_info(FDI),
- {Id, [R || #rev_info{rev = R} <- RevInfos]}
- end,
- Infos
- ),
- Missing = mem3_rpc:get_missing_revs(Node, Name, IdsRevs, [
- {io_priority, {internal_repl, Name}},
- ?ADMIN_CTX
- ]),
- lists:filter(
- fun
- ({_Id, [], _Ancestors}) -> false;
- ({_Id, _Revs, _Ancestors}) -> true
- end,
- Missing
- ).
-
-chunk_revs(Revs) ->
- Limit = list_to_integer(config:get("mem3", "rev_chunk_size", "5000")),
- chunk_revs(Revs, Limit).
-
-chunk_revs(Revs, Limit) ->
- chunk_revs(Revs, {0, []}, [], Limit).
-
-chunk_revs([], {_Count, Chunk}, Chunks, _Limit) ->
- [Chunk | Chunks];
-chunk_revs([{Id, R, A} | Revs], {Count, Chunk}, Chunks, Limit) when length(R) =< Limit - Count ->
- chunk_revs(
- Revs,
- {Count + length(R), [{Id, R, A} | Chunk]},
- Chunks,
- Limit
- );
-chunk_revs([{Id, R, A} | Revs], {Count, Chunk}, Chunks, Limit) ->
- {This, Next} = lists:split(Limit - Count, R),
- chunk_revs(
- [{Id, Next, A} | Revs],
- {0, []},
- [[{Id, This, A} | Chunk] | Chunks],
- Limit
- ).
-
-open_docs(Db, Infos, Missing) ->
- lists:flatmap(
- fun({Id, Revs, _}) ->
- FDI = lists:keyfind(Id, #full_doc_info.id, Infos),
- #full_doc_info{rev_tree = RevTree} = FDI,
- {FoundRevs, _} = couch_key_tree:get_key_leafs(RevTree, Revs),
- lists:map(
- fun({#leaf{deleted = IsDel, ptr = SummaryPtr}, FoundRevPath}) ->
- couch_db:make_doc(Db, Id, IsDel, SummaryPtr, FoundRevPath)
- end,
- FoundRevs
- )
- end,
- Missing
- ).
-
-save_on_target(Node, Name, Docs) ->
- mem3_rpc:update_docs(Node, Name, Docs, [
- replicated_changes,
- full_commit,
- ?ADMIN_CTX,
- {io_priority, {internal_repl, Name}}
- ]),
- ok.
-
-purge_on_target(Node, Name, PurgeInfos) ->
- mem3_rpc:purge_docs(Node, Name, PurgeInfos, [
- replicated_changes,
- full_commit,
- ?ADMIN_CTX,
- {io_priority, {internal_repl, Name}}
- ]),
- ok.
-
-update_locals(Target, Db, Seq) ->
- #tgt{shard = TgtShard, localid = Id, history = History} = Target,
- #shard{node = Node, name = Name} = TgtShard,
- NewEntry = [
- {<<"source_node">>, atom_to_binary(node(), utf8)},
- {<<"source_uuid">>, couch_db:get_uuid(Db)},
- {<<"source_seq">>, Seq},
- {<<"timestamp">>, list_to_binary(mem3_util:iso8601_timestamp())}
- ],
- NewBody = mem3_rpc:save_checkpoint(Node, Name, Id, Seq, NewEntry, History),
- {ok, _} = couch_db:update_doc(Db, #doc{id = Id, body = NewBody}, []).
-
-purge_cp_body(#shard{} = Source, #shard{} = Target, PurgeSeq) ->
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- {[
- {<<"type">>, <<"internal_replication">>},
- {<<"updated_on">>, NowSecs},
- {<<"purge_seq">>, PurgeSeq},
- {<<"source">>, atom_to_binary(Source#shard.node, latin1)},
- {<<"target">>, atom_to_binary(Target#shard.node, latin1)},
- {<<"range">>, Source#shard.range}
- ]}.
-
-find_repl_doc(SrcDb, TgtUUIDPrefix) ->
- SrcUUID = couch_db:get_uuid(SrcDb),
- S = local_id_hash(SrcUUID),
- DocIdPrefix = <<"_local/shard-sync-", S/binary, "-">>,
- FoldFun = fun(#doc{id = DocId, body = {BodyProps}} = Doc, _) ->
- TgtUUID = couch_util:get_value(<<"target_uuid">>, BodyProps, <<>>),
- case is_prefix(DocIdPrefix, DocId) of
- true ->
- case is_prefix(TgtUUIDPrefix, TgtUUID) of
- true ->
- {stop, {TgtUUID, Doc}};
- false ->
- {ok, not_found}
- end;
- _ ->
- {stop, not_found}
- end
- end,
- Options = [{start_key, DocIdPrefix}],
- case couch_db:fold_local_docs(SrcDb, FoldFun, not_found, Options) of
- {ok, {TgtUUID, Doc}} ->
- {ok, TgtUUID, Doc};
- {ok, not_found} ->
- {not_found, missing};
- Else ->
- couch_log:error("Error finding replication doc: ~w", [Else]),
- {not_found, missing}
- end.
-
-find_split_target_seq_int(TgtDb, Node, SrcUUIDPrefix) ->
- TgtUUID = couch_db:get_uuid(TgtDb),
- FoldFun = fun(#doc{body = {Props}}, _) ->
- DocTgtUUID = couch_util:get_value(<<"target_uuid">>, Props, <<>>),
- case TgtUUID == DocTgtUUID of
- true ->
- {History} = couch_util:get_value(<<"history">>, Props, {[]}),
- HProps = couch_util:get_value(Node, History, []),
- case get_target_seqs(HProps, TgtUUID, Node, SrcUUIDPrefix, []) of
- [] ->
- % No replication found from source to target
- {ok, not_found};
- [{_, _} | _] = SeqPairs ->
- % Found shared replicated history from source to target
- % Return sorted list by the earliest source sequence
- {stop, lists:sort(SeqPairs)}
- end;
- false ->
- {ok, not_found}
- end
- end,
- Options = [{start_key, <<"_local/shard-sync-">>}],
- case couch_db:fold_local_docs(TgtDb, FoldFun, not_found, Options) of
- {ok, Seqs} when is_list(Seqs) ->
- {ok, Seqs};
- {ok, not_found} ->
- {not_found, missing};
- Else ->
- couch_log:error("Error finding replication doc: ~w", [Else]),
- {not_found, missing}
- end.
-
-% Get the target sequence for each checkpoint where the source replicated to the target.
-% The "target" is the current db where the history entry was read from and "source"
-% is another, now possibly deleted, database.
-get_target_seqs([], _TgtUUID, _Node, _SrcUUIDPrefix, Acc) ->
- lists:reverse(Acc);
-get_target_seqs([{Entry} | HProps], TgtUUID, Node, SrcUUIDPrefix, Acc) ->
- SameTgt = couch_util:get_value(<<"target_uuid">>, Entry) =:= TgtUUID,
- SameNode = couch_util:get_value(<<"target_node">>, Entry) =:= Node,
- SrcUUID = couch_util:get_value(<<"source_uuid">>, Entry),
- IsPrefix = is_prefix(SrcUUIDPrefix, SrcUUID),
- Acc1 =
- case SameTgt andalso SameNode andalso IsPrefix of
- true ->
- EntrySourceSeq = couch_util:get_value(<<"source_seq">>, Entry),
- EntryTargetSeq = couch_util:get_value(<<"target_seq">>, Entry),
- [{EntrySourceSeq, EntryTargetSeq} | Acc];
- false ->
- Acc
- end,
- get_target_seqs(HProps, TgtUUID, Node, SrcUUIDPrefix, Acc1).
-
-with_src_db(#acc{source = Source}, Fun) ->
- case couch_db:open(Source#shard.name, [?ADMIN_CTX]) of
- {ok, Db} ->
- try
- Fun(Db)
- after
- couch_db:close(Db)
- end;
- {not_found, _} ->
- error({error, missing_source})
- end.
-
-is_prefix(Prefix, Subject) ->
- binary:longest_common_prefix([Prefix, Subject]) == size(Prefix).
-
-filter_doc(Filter, FullDocInfo) when is_function(Filter) ->
- try Filter(FullDocInfo) of
- discard -> discard;
- _ -> keep
- catch
- _:_ ->
- keep
- end;
-filter_doc(_, _) ->
- keep.
-
-sync_security(#shard{} = Source, #{} = Targets) ->
- maps:map(
- fun(_, #tgt{shard = Target}) ->
- mem3_sync_security:maybe_sync(Source, Target)
- end,
- Targets
- ).
-
-targets_map(
- #shard{name = <<"shards/", _/binary>> = SrcName} = Src,
- #shard{name = <<"shards/", _/binary>>, node = TgtNode} = Tgt
-) ->
- % Parse range from name in case the passed shard is built with a name only
- SrcRange = mem3:range(SrcName),
- Shards0 = mem3:shards(mem3:dbname(SrcName)),
- Shards1 = [S || S <- Shards0, not shard_eq(S, Src)],
- Shards2 = [S || S <- Shards1, check_overlap(SrcRange, TgtNode, S)],
- case [{R, S} || #shard{range = R} = S <- Shards2] of
- [] ->
- % If target map is empty, create a target map with just
- % that one target. This is to support tooling which may be
- % moving / copying shards using mem3:go/2,3 before the
- % shards are present in the shard map
- #{mem3:range(SrcName) => Tgt};
- [_ | _] = TMapList ->
- maps:from_list(TMapList)
- end;
-targets_map(_Src, Tgt) ->
- #{[0, ?RING_END] => Tgt}.
-
-shard_eq(#shard{name = Name, node = Node}, #shard{name = Name, node = Node}) ->
- true;
-shard_eq(_, _) ->
- false.
-
-check_overlap(SrcRange, Node, #shard{node = Node, range = TgtRange}) ->
- mem3_util:range_overlap(SrcRange, TgtRange);
-check_overlap([_, _], _, #shard{}) ->
- false.
-
-reset_remaining(#{} = Targets) ->
- maps:map(
- fun(_, #tgt{} = T) ->
- T#tgt{remaining = 0}
- end,
- Targets
- ).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
--define(TDEF(A), {atom_to_list(A), fun A/0}).
-
-find_source_seq_int_test_() ->
- {
- setup,
- fun() -> meck:expect(couch_log, warning, 2, ok) end,
- fun(_) -> meck:unload() end,
- [
- ?TDEF(t_unknown_node),
- ?TDEF(t_unknown_uuid),
- ?TDEF(t_ok),
- ?TDEF(t_old_ok),
- ?TDEF(t_different_node)
- ]
- }.
-
-t_unknown_node() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo">>, <<"bing">>, <<"bar_uuid">>, 10),
- 0
- ).
-
-t_unknown_uuid() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"teapot">>, 10),
- 0
- ).
-
-t_ok() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"bar_uuid">>, 100),
- 100
- ).
-
-t_old_ok() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"bar_uuid">>, 84),
- 50
- ).
-
-t_different_node() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo2">>, <<"bar">>, <<"bar_uuid">>, 92),
- 31
- ).
-
--define(SNODE, <<"source_node">>).
--define(SUUID, <<"source_uuid">>).
--define(SSEQ, <<"source_seq">>).
--define(TNODE, <<"target_node">>).
--define(TUUID, <<"target_uuid">>).
--define(TSEQ, <<"target_seq">>).
-
-doc_() ->
- Foo_Bar = [
- {[
- {?SNODE, <<"foo">>},
- {?SUUID, <<"foo_uuid">>},
- {?SSEQ, 100},
- {?TNODE, <<"bar">>},
- {?TUUID, <<"bar_uuid">>},
- {?TSEQ, 100}
- ]},
- {[
- {?SNODE, <<"foo">>},
- {?SUUID, <<"foo_uuid">>},
- {?SSEQ, 90},
- {?TNODE, <<"bar">>},
- {?TUUID, <<"bar_uuid">>},
- {?TSEQ, 85}
- ]},
- {[
- {?SNODE, <<"foo">>},
- {?SUUID, <<"foo_uuid">>},
- {?SSEQ, 50},
- {?TNODE, <<"bar">>},
- {?TUUID, <<"bar_uuid">>},
- {?TSEQ, 51}
- ]},
- {[
- {?SNODE, <<"foo">>},
- {?SUUID, <<"foo_uuid">>},
- {?SSEQ, 40},
- {?TNODE, <<"bar">>},
- {?TUUID, <<"bar_uuid">>},
- {?TSEQ, 45}
- ]},
- {[
- {?SNODE, <<"foo">>},
- {?SUUID, <<"foo_uuid">>},
- {?SSEQ, 2},
- {?TNODE, <<"bar">>},
- {?TUUID, <<"bar_uuid">>},
- {?TSEQ, 2}
- ]}
- ],
- Foo2_Bar = [
- {[
- {?SNODE, <<"foo2">>},
- {?SUUID, <<"foo_uuid">>},
- {?SSEQ, 100},
- {?TNODE, <<"bar">>},
- {?TUUID, <<"bar_uuid">>},
- {?TSEQ, 100}
- ]},
- {[
- {?SNODE, <<"foo2">>},
- {?SUUID, <<"foo_uuid">>},
- {?SSEQ, 92},
- {?TNODE, <<"bar">>},
- {?TUUID, <<"bar_uuid">>},
- {?TSEQ, 93}
- ]},
- {[
- {?SNODE, <<"foo2">>},
- {?SUUID, <<"foo_uuid">>},
- {?SSEQ, 31},
- {?TNODE, <<"bar">>},
- {?TUUID, <<"bar_uuid">>},
- {?TSEQ, 30}
- ]}
- ],
- History =
- {[
- {<<"foo">>, Foo_Bar},
- {<<"foo2">>, Foo2_Bar}
- ]},
- #doc{
- body = {[{<<"history">>, History}]}
- }.
-
-targets_map_test_() ->
- {
- setup,
- fun() -> meck:new(mem3, [passthrough]) end,
- fun(_) -> meck:unload() end,
- [
- target_not_a_shard(),
- source_contained_in_target(),
- multiple_targets(),
- uneven_overlap(),
- target_not_in_shard_map()
- ]
- }.
-
-target_not_a_shard() ->
- ?_assertEqual(#{[0, ?RING_END] => <<"t">>}, targets_map(<<"s">>, <<"t">>)).
-
-source_contained_in_target() ->
- ?_test(begin
- R07 = [16#00000000, 16#7fffffff],
- R8f = [16#80000000, 16#ffffffff],
- R0f = [16#00000000, 16#ffffffff],
-
- Shards = [
- #shard{node = 'n1', range = R07},
- #shard{node = 'n1', range = R8f},
- #shard{node = 'n2', range = R07},
- #shard{node = 'n2', range = R8f},
- #shard{node = 'n3', range = R0f}
- ],
- meck:expect(mem3, shards, 1, Shards),
-
- SrcName1 = <<"shards/00000000-7fffffff/d.1551893552">>,
- TgtName1 = <<"shards/00000000-7fffffff/d.1551893552">>,
-
- Src1 = #shard{name = SrcName1, node = 'n1'},
- Tgt1 = #shard{name = TgtName1, node = 'n2'},
- Map1 = targets_map(Src1, Tgt1),
- ?assertEqual(1, map_size(Map1)),
- ?assertMatch(#{R07 := #shard{node = 'n2'}}, Map1),
-
- Tgt2 = #shard{name = TgtName1, node = 'n3'},
- Map2 = targets_map(Src1, Tgt2),
- ?assertEqual(1, map_size(Map2)),
- ?assertMatch(#{R0f := #shard{node = 'n3'}}, Map2)
- end).
-
-multiple_targets() ->
- ?_test(begin
- R07 = [16#00000000, 16#7fffffff],
- R8f = [16#80000000, 16#ffffffff],
- R0f = [16#00000000, 16#ffffffff],
-
- Shards = [
- #shard{node = 'n1', range = R07},
- #shard{node = 'n1', range = R8f},
- #shard{node = 'n2', range = R0f}
- ],
- meck:expect(mem3, shards, 1, Shards),
-
- SrcName = <<"shards/00000000-ffffffff/d.1551893552">>,
- TgtName = <<"shards/00000000-7fffffff/d.1551893552">>,
-
- Src = #shard{name = SrcName, node = 'n2'},
- Tgt = #shard{name = TgtName, node = 'n1'},
- Map = targets_map(Src, Tgt),
- ?assertEqual(2, map_size(Map)),
- ?assertMatch(#{R07 := #shard{node = 'n1'}}, Map),
- ?assertMatch(#{R8f := #shard{node = 'n1'}}, Map)
- end).
-
-uneven_overlap() ->
- ?_test(begin
- R04 = [16#00000000, 16#4fffffff],
- R26 = [16#20000000, 16#6fffffff],
- R58 = [16#50000000, 16#8fffffff],
- R9f = [16#90000000, 16#ffffffff],
- Shards = [
- #shard{node = 'n1', range = R04},
- #shard{node = 'n1', range = R58},
- #shard{node = 'n1', range = R9f},
- #shard{node = 'n2', range = R26}
- ],
-
- meck:expect(mem3, shards, 1, Shards),
-
- SrcName = <<"shards/20000000-6fffffff/d.1551893552">>,
- TgtName = <<"shards/20000000-6fffffff/d.1551893552">>,
-
- Src = #shard{name = SrcName, node = 'n2'},
- Tgt = #shard{name = TgtName, node = 'n1'},
- Map = targets_map(Src, Tgt),
- ?assertEqual(2, map_size(Map)),
- ?assertMatch(#{R04 := #shard{node = 'n1'}}, Map),
- ?assertMatch(#{R58 := #shard{node = 'n1'}}, Map)
- end).
-
-target_not_in_shard_map() ->
- ?_test(begin
- R0f = [16#00000000, 16#ffffffff],
- Name = <<"shards/00000000-ffffffff/d.1551893552">>,
- Shards = [
- #shard{name = Name, node = 'n1', range = R0f},
- #shard{name = Name, node = 'n2', range = R0f}
- ],
- meck:expect(mem3, shards, 1, Shards),
- Src = #shard{name = Name, node = 'n1'},
- Tgt = #shard{name = Name, node = 'n3'},
- Map = targets_map(Src, Tgt),
- ?assertEqual(1, map_size(Map)),
- ?assertMatch(#{R0f := #shard{name = Name, node = 'n3'}}, Map)
- end).
-
--endif.
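Editor's note: before the next removed module, a short, hedged approximation of the checkpoint id scheme used by make_local_id/2 and local_id_hash/1 above: each endpoint is hashed into a "_local/shard-sync-<src>-<tgt>" document id. Plain base64 stands in for couch_util:encodeBase64Url/1 here, so the exact characters differ from the real ids.

-module(shard_sync_id_example).
-export([local_id/2]).

local_id(SourceNode, TargetNode) ->
    S = hash(SourceNode),
    T = hash(TargetNode),
    <<"_local/shard-sync-", S/binary, "-", T/binary>>.

%% Approximation of local_id_hash/1: md5 of the term, base64 encoded.
hash(Thing) ->
    base64:encode(crypto:hash(md5, term_to_binary(Thing))).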
diff --git a/src/mem3/src/mem3_reshard.erl b/src/mem3/src/mem3_reshard.erl
deleted file mode 100644
index ec08c72cd..000000000
--- a/src/mem3/src/mem3_reshard.erl
+++ /dev/null
@@ -1,851 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard).
-
--behaviour(gen_server).
-
--export([
- start_link/0,
-
- start/0,
- stop/1,
-
- start_split_job/1,
- stop_job/2,
- resume_job/1,
- remove_job/1,
-
- get_state/0,
- jobs/0,
- job/1,
- is_disabled/0,
-
- report/2,
- checkpoint/2,
-
- now_sec/0,
- update_history/4,
- shard_from_name/1,
- reset_state/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--include("mem3_reshard.hrl").
-
--define(JOB_ID_VERSION, 1).
--define(JOB_STATE_VERSION, 1).
--define(DEFAULT_MAX_JOBS, 48).
--define(DEFAULT_MAX_HISTORY, 20).
--define(JOB_PREFIX, <<"reshard-job-">>).
--define(STATE_PREFIX, <<"reshard-state-">>).
-
-%% Public API
-
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
--spec start() -> ok | {error, any()}.
-start() ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, start, infinity)
- end.
-
--spec stop(binary()) -> ok | {error, any()}.
-stop(Reason) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, {stop, Reason}, infinity)
- end.
-
--spec start_split_job(#shard{} | binary()) -> {ok, binary()} | {error, term()}.
-start_split_job(#shard{} = Shard) ->
- start_split_job(Shard, 2);
-start_split_job(ShardName) when is_binary(ShardName) ->
- start_split_job(shard_from_name(ShardName), 2).
-
--spec start_split_job(#shard{}, split()) -> {ok, binary()} | {error, any()}.
-start_split_job(#shard{} = Source, Split) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> validate_and_start_job(Source, Split)
- end.
-
--spec stop_job(binary(), binary()) -> ok | {error, any()}.
-stop_job(JobId, Reason) when is_binary(JobId), is_binary(Reason) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, {stop_job, JobId, Reason}, infinity)
- end.
-
--spec resume_job(binary()) -> ok | {error, any()}.
-resume_job(JobId) when is_binary(JobId) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, {resume_job, JobId}, infinity)
- end.
-
--spec remove_job(binary()) -> ok | {error, any()}.
-remove_job(JobId) when is_binary(JobId) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, {remove_job, JobId}, infinity)
- end.
-
--spec get_state() -> {[_ | _]}.
-get_state() ->
- gen_server:call(?MODULE, get_state, infinity).
-
--spec jobs() -> [[tuple()]].
-jobs() ->
- ets:foldl(
- fun(Job, Acc) ->
- Opts = [iso8601],
- Props = mem3_reshard_store:job_to_ejson_props(Job, Opts),
- [{Props} | Acc]
- end,
- [],
- ?MODULE
- ).
-
--spec job(job_id()) -> {ok, {[_ | _]}} | {error, not_found}.
-job(JobId) ->
- case job_by_id(JobId) of
- #job{} = Job ->
- Opts = [iso8601],
- Props = mem3_reshard_store:job_to_ejson_props(Job, Opts),
- {ok, {Props}};
- not_found ->
- {error, not_found}
- end.
-
-% Return true if resharding is disabled in the application-level settings
--spec is_disabled() -> boolean().
-is_disabled() ->
- case application:get_env(mem3, reshard_disabled) of
- {ok, "true"} -> true;
- {ok, true} -> true;
- _ -> false
- end.
-
-% State reporting callbacks. Used by mem3_reshard_job module.
--spec report(pid(), #job{}) -> ok.
-report(Server, #job{} = Job) when is_pid(Server) ->
- gen_server:cast(Server, {report, Job}).
-
--spec checkpoint(pid(), #job{}) -> ok.
-checkpoint(Server, #job{} = Job) ->
- couch_log:notice("~p checkpointing ~p ~p", [?MODULE, Server, jobfmt(Job)]),
- gen_server:cast(Server, {checkpoint, Job}).
-
-% Utility functions used from other mem3_reshard modules
-
--spec now_sec() -> non_neg_integer().
-now_sec() ->
- {Mega, Sec, _Micro} = os:timestamp(),
- Mega * 1000000 + Sec.
-
--spec update_history(atom(), binary() | null, time_sec(), list()) -> list().
-update_history(State, State, Ts, History) ->
- % State is the same as detail. Make the detail null to avoid duplication
- update_history(State, null, Ts, History);
-update_history(State, Detail, Ts, History) ->
- % Reverse, so we can process the last event as the head using
- % head matches, then after appending and trimming, reverse again
- Rev = lists:reverse(History),
- UpdatedRev = update_history_rev(State, Detail, Ts, Rev),
- TrimmedRev = lists:sublist(UpdatedRev, max_history()),
- lists:reverse(TrimmedRev).
-
--spec shard_from_name(binary()) -> #shard{}.
-shard_from_name(<<"shards/", _:8/binary, "-", _:8/binary, "/", Rest/binary>> = Shard) ->
- Range = mem3:range(Shard),
- [DbName, Suffix] = binary:split(Rest, <<".">>),
- build_shard(Range, DbName, Suffix).
-
-% For debugging only
-
--spec reset_state() -> ok.
-reset_state() ->
- gen_server:call(?MODULE, reset_state, infinity).
-
-% Gen server functions
-
-init(_) ->
- % Advertise resharding API feature only if it is not disabled
- case is_disabled() of
- true -> ok;
- false -> config:enable_feature('reshard')
- end,
- couch_log:notice("~p start init()", [?MODULE]),
- EtsOpts = [named_table, {keypos, #job.id}, {read_concurrency, true}],
- ?MODULE = ets:new(?MODULE, EtsOpts),
- ManagerPid = self(),
- State = #state{
- state = running,
- state_info = [],
- update_time = now_sec(),
- node = node(),
- db_monitor = spawn_link(fun() -> db_monitor(ManagerPid) end)
- },
- State1 = mem3_reshard_store:init(State, ?JOB_PREFIX, state_id()),
- State2 = mem3_reshard_store:load_state(State1, running),
- State3 = maybe_disable(State2),
- gen_server:cast(self(), reload_jobs),
- {ok, State3}.
-
-terminate(Reason, State) ->
- couch_log:notice("~p terminate ~p ~p", [?MODULE, Reason, statefmt(State)]),
- catch unlink(State#state.db_monitor),
- catch exit(State#state.db_monitor, kill),
- lists:foreach(fun(Job) -> kill_job_int(Job) end, running_jobs()).
-
-handle_call(start, _From, #state{state = stopped} = State) ->
- State1 = State#state{
- state = running,
- update_time = now_sec(),
- state_info = info_delete(reason, State#state.state_info)
- },
- ok = mem3_reshard_store:store_state(State1),
- State2 = maybe_disable(State1),
- State3 = reload_jobs(State2),
- {reply, ok, State3};
-handle_call(start, _From, State) ->
- {reply, ok, State};
-handle_call({stop, Reason}, _From, #state{state = running} = State) ->
- State1 = State#state{
- state = stopped,
- update_time = now_sec(),
- state_info = info_update(reason, Reason, State#state.state_info)
- },
- ok = mem3_reshard_store:store_state(State1),
- lists:foreach(fun(Job) -> temporarily_stop_job(Job) end, running_jobs()),
- {reply, ok, State1};
-handle_call({stop, _}, _From, State) ->
- {reply, ok, State};
-handle_call({start_job, #job{id = Id, source = Source} = Job}, _From, State) ->
- couch_log:notice("~p start_job call ~p", [?MODULE, jobfmt(Job)]),
- Total = ets:info(?MODULE, size),
- SourceOk = mem3_reshard_validate:source(Source),
- case {job_by_id(Id), Total + 1 =< get_max_jobs(), SourceOk} of
- {not_found, true, ok} ->
- handle_start_job(Job, State);
- {#job{}, _, _} ->
- {reply, {error, job_already_exists}, State};
- {_, false, _} ->
- {reply, {error, max_jobs_exceeded}, State};
- {_, _, {error, _} = SourceError} ->
- {reply, SourceError, State}
- end;
-handle_call({resume_job, _}, _From, #state{state = stopped} = State) ->
- case couch_util:get_value(reason, State#state.state_info) of
- undefined ->
- {reply, {error, stopped}, State};
- Reason ->
- {reply, {error, {stopped, Reason}}, State}
- end;
-handle_call({resume_job, Id}, _From, State) ->
- couch_log:notice("~p resume_job call ~p", [?MODULE, Id]),
- case job_by_id(Id) of
- #job{job_state = stopped} = Job ->
- case start_job_int(Job, State) of
- ok ->
- {reply, ok, State};
- {error, Error} ->
- {reply, {error, Error}, State}
- end;
- #job{} ->
- {reply, ok, State};
- not_found ->
- {reply, {error, not_found}, State}
- end;
-handle_call({stop_job, Id, Reason}, _From, State) ->
- couch_log:notice("~p stop_job Id:~p Reason:~p", [?MODULE, Id, Reason]),
- case job_by_id(Id) of
- #job{job_state = JSt} = Job when
- JSt =:= running orelse JSt =:= new orelse
- JSt =:= stopped
- ->
- ok = stop_job_int(Job, stopped, Reason, State),
- {reply, ok, State};
- #job{} ->
- {reply, ok, State};
- not_found ->
- {reply, {error, not_found}, State}
- end;
-handle_call({remove_job, Id}, _From, State) ->
- {reply, remove_job_int(Id, State), State};
-handle_call(get_state, _From, #state{state = GlobalState} = State) ->
- StateProps = mem3_reshard_store:state_to_ejson_props(State),
- Stats0 = #{running => 0, completed => 0, failed => 0, stopped => 0},
- StateStats = ets:foldl(
- fun(#job{job_state = JS}, Acc) ->
- % When jobs are disabled globally their state is not checkpointed as
- % "stopped", but it stays as "running". But when returning the state we
- % don't want to mislead and indicate that there are "N running jobs"
- % when the global state is "stopped".
- JS1 =
- case GlobalState =:= stopped andalso JS =:= running of
- true -> stopped;
- false -> JS
- end,
- Acc#{JS1 => maps:get(JS1, Acc, 0) + 1}
- end,
- Stats0,
- ?MODULE
- ),
- Total = ets:info(?MODULE, size),
- StateStats1 = maps:to_list(StateStats) ++ [{total, Total}],
- Result = {lists:sort(StateProps ++ StateStats1)},
- {reply, Result, State};
-handle_call(reset_state, _From, State) ->
- {reply, ok, reset_state(State)};
-handle_call(Call, From, State) ->
- couch_log:error("~p unknown call ~p from: ~p", [?MODULE, Call, From]),
- {noreply, State}.
-
-handle_cast({db_deleted, DbName}, State) ->
- % Remove only completed jobs. Other running states would `fail` but
- % their job results would stick around so users can inspect them.
- JobIds = jobs_by_db_and_state(DbName, completed),
- [remove_job_int(JobId, State) || JobId <- JobIds],
- {noreply, State};
-handle_cast({report, Job}, State) ->
- report_int(Job),
- {noreply, State};
-handle_cast({checkpoint, Job}, State) ->
- {noreply, checkpoint_int(Job, State)};
-handle_cast(reload_jobs, State) ->
- couch_log:notice("~p starting reloading jobs", [?MODULE]),
- State1 = reload_jobs(State),
- couch_log:notice("~p finished reloading jobs", [?MODULE]),
- {noreply, State1};
-handle_cast(Cast, State) ->
- couch_log:error("~p unexpected cast ~p", [?MODULE, Cast]),
- {noreply, State}.
-
-handle_info({'DOWN', _Ref, process, Pid, Info}, State) ->
- case job_by_pid(Pid) of
- {ok, Job} ->
- couch_log:notice("~p job ~s exit ~p", [?MODULE, Job#job.id, Info]),
- ok = handle_job_exit(Job, Info, State);
- {error, not_found} ->
- couch_log:error("~p job not found: ~p ~p", [?MODULE, Pid, Info])
- end,
- {noreply, State};
-handle_info(Info, State) ->
- couch_log:error("~p unexpected info ~p", [?MODULE, Info]),
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%% Private API
-
-validate_and_start_job(#shard{} = Source, Split) ->
- case mem3_reshard_validate:start_args(Source, Split) of
- ok ->
- Target = target_shards(Source, Split),
- case mem3_reshard_validate:targets(Source, Target) of
- ok ->
- TStamp = now_sec(),
- Job = #job{
- type = split,
- job_state = new,
- split_state = new,
- start_time = TStamp,
- update_time = TStamp,
- node = node(),
- source = Source,
- target = Target
- },
- Job1 = Job#job{id = job_id(Job)},
- Job2 = update_job_history(Job1),
- gen_server:call(?MODULE, {start_job, Job2}, infinity);
- {error, Error} ->
- {error, Error}
- end;
- {error, Error} ->
- {error, Error}
- end.
-
-handle_start_job(#job{} = Job, #state{state = running} = State) ->
- case start_job_int(Job, State) of
- ok ->
- {reply, {ok, Job#job.id}, State};
- {error, Error} ->
- {reply, {error, Error}, State}
- end;
-handle_start_job(#job{} = Job, #state{state = stopped} = State) ->
- ok = mem3_reshard_store:store_job(State, Job),
- % Since resharding is stopped on this node, the job is temporarily marked
- % as stopped in the ets table so as not to return a "running" result which
- % would look odd.
- temporarily_stop_job(Job),
- {reply, {ok, Job#job.id}, State}.
-
-% Insert job in the ets table as a temporarily stopped job. This would happen
-% when a job is reloaded or added when node-wide resharding is stopped.
--spec temporarily_stop_job(#job{}) -> #job{}.
-temporarily_stop_job(Job) ->
- Job1 = kill_job_int(Job),
- OldInfo = Job1#job.state_info,
- Reason = <<"Shard splitting disabled">>,
- Job2 = Job1#job{
- job_state = stopped,
- update_time = now_sec(),
- start_time = 0,
- state_info = info_update(reason, Reason, OldInfo),
- pid = undefined,
- ref = undefined
- },
- Job3 = update_job_history(Job2),
- true = ets:insert(?MODULE, Job3),
- Job3.
-
--spec reload_jobs(#state{}) -> #state{}.
-reload_jobs(State) ->
- Jobs = mem3_reshard_store:get_jobs(State),
- lists:foldl(fun reload_job/2, State, Jobs).
-
-% This is the case when the main application is stopped but a job that was
-% checkpointed in the running state is reloaded. Set its state to stopped to
-% avoid the API results looking odd.
--spec reload_job(#job{}, #state{}) -> #state{}.
-reload_job(#job{job_state = JS} = Job, #state{state = stopped} = State) when
- JS =:= running orelse JS =:= new
-->
- temporarily_stop_job(Job),
- State;
-% This is the case when a job process should be spawned
-reload_job(#job{job_state = JS} = Job, #state{state = running} = State) when
- JS =:= running orelse JS =:= new
-->
- case start_job_int(Job, State) of
- ok ->
- State;
- {error, Error} ->
- Msg = "~p could not resume ~s error: ~p",
- couch_log:error(Msg, [?MODULE, jobfmt(Job), Error]),
- State
- end;
-% If the job is disabled individually (stopped by the user), or is completed
-% or failed, then simply load it into the ets table
-reload_job(#job{job_state = JS} = Job, #state{} = State) when
- JS =:= failed orelse JS =:= completed orelse JS =:= stopped
-->
- true = ets:insert(?MODULE, Job),
- State.
-
--spec get_max_jobs() -> integer().
-get_max_jobs() ->
- config:get_integer("reshard", "max_jobs", ?DEFAULT_MAX_JOBS).
-
--spec start_job_int(#job{}, #state{}) -> ok | {error, term()}.
-start_job_int(Job, State) ->
- case spawn_job(Job) of
- {ok, #job{} = Job1} ->
- Job2 = update_job_history(Job1),
- ok = mem3_reshard_store:store_job(State, Job2),
- true = ets:insert(?MODULE, Job2),
- ok;
- {error, Error} ->
- {error, Error}
- end.
-
--spec spawn_job(#job{}) -> {ok, #job{}} | {error, term()}.
-spawn_job(#job{} = Job0) ->
- Job = Job0#job{
- job_state = running,
- start_time = 0,
- update_time = now_sec(),
- state_info = info_delete(reason, Job0#job.state_info),
- manager = self(),
- workers = [],
- retries = 0
- },
- case mem3_reshard_job_sup:start_child(Job) of
- {ok, Pid} ->
- Ref = monitor(process, Pid),
- {ok, Job#job{pid = Pid, ref = Ref}};
- {error, Reason} ->
- {error, Reason}
- end.
-
--spec stop_job_int(#job{}, job_state(), term(), #state{}) -> ok.
-stop_job_int(#job{} = Job, JobState, Reason, State) ->
- couch_log:info("~p stop_job_int ~p newstate: ~p reason:~p", [
- ?MODULE,
- jobfmt(Job),
- JobState,
- Reason
- ]),
- Job1 = kill_job_int(Job),
- Job2 = Job1#job{
- job_state = JobState,
- update_time = now_sec(),
- state_info = [{reason, Reason}]
- },
- ok = mem3_reshard_store:store_job(State, Job2),
- true = ets:insert(?MODULE, Job2),
- couch_log:info("~p stop_job_int stopped ~p", [?MODULE, jobfmt(Job2)]),
- ok.
-
--spec kill_job_int(#job{}) -> #job{}.
-kill_job_int(#job{pid = undefined} = Job) ->
- Job;
-kill_job_int(#job{pid = Pid, ref = Ref} = Job) ->
- couch_log:info("~p kill_job_int ~p", [?MODULE, jobfmt(Job)]),
- demonitor(Ref, [flush]),
- case erlang:is_process_alive(Pid) of
- true ->
- ok = mem3_reshard_job_sup:terminate_child(Pid);
- false ->
- ok
- end,
- Job1 = Job#job{pid = undefined, ref = undefined},
- true = ets:insert(?MODULE, Job1),
- Job1.
-
--spec handle_job_exit(#job{}, term(), #state{}) -> ok.
-handle_job_exit(#job{split_state = completed} = Job, normal, State) ->
- couch_log:notice("~p completed job ~s exited", [?MODULE, Job#job.id]),
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = completed,
- update_time = now_sec(),
- state_info = []
- },
- Job2 = update_job_history(Job1),
- ok = mem3_reshard_store:store_job(State, Job2),
- true = ets:insert(?MODULE, Job2),
- ok;
-handle_job_exit(#job{job_state = running} = Job, normal, _State) ->
- couch_log:notice("~p running job ~s stopped", [?MODULE, Job#job.id]),
- OldInfo = Job#job.state_info,
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = stopped,
- update_time = now_sec(),
- state_info = info_update(reason, <<"Job stopped">>, OldInfo)
- },
- true = ets:insert(?MODULE, update_job_history(Job1)),
- ok;
-handle_job_exit(#job{job_state = running} = Job, shutdown, _State) ->
- couch_log:notice("~p job ~s shutdown", [?MODULE, Job#job.id]),
- OldInfo = Job#job.state_info,
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = stopped,
- update_time = now_sec(),
- state_info = info_update(reason, <<"Job shutdown">>, OldInfo)
- },
- true = ets:insert(?MODULE, update_job_history(Job1)),
- ok;
-handle_job_exit(#job{job_state = running} = Job, {shutdown, Msg}, _State) ->
- couch_log:notice("~p job ~s shutdown ~p", [?MODULE, Job#job.id, Msg]),
- OldInfo = Job#job.state_info,
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = stopped,
- update_time = now_sec(),
- state_info = info_update(reason, <<"Job shutdown">>, OldInfo)
- },
- true = ets:insert(?MODULE, update_job_history(Job1)),
- ok;
-handle_job_exit(#job{} = Job, Error, State) ->
- couch_log:notice("~p job ~s failed ~p", [?MODULE, Job#job.id, Error]),
- OldInfo = Job#job.state_info,
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = failed,
- update_time = now_sec(),
- state_info = info_update(reason, Error, OldInfo)
- },
- Job2 = update_job_history(Job1),
- ok = mem3_reshard_store:store_job(State, Job2),
- true = ets:insert(?MODULE, Job2),
- ok.
-
--spec job_by_id(job_id()) -> #job{} | not_found.
-job_by_id(Id) ->
- case ets:lookup(?MODULE, Id) of
- [] ->
- not_found;
- [#job{} = Job] ->
- Job
- end.
-
--spec job_by_pid(pid()) -> {ok, #job{}} | {error, not_found}.
-job_by_pid(Pid) when is_pid(Pid) ->
- case ets:match_object(?MODULE, #job{pid = Pid, _ = '_'}) of
- [] ->
- {error, not_found};
- [#job{} = Job] ->
- {ok, Job}
- end.
-
--spec state_id() -> binary().
-state_id() ->
- Ver = iolist_to_binary(io_lib:format("~3..0B", [?JOB_STATE_VERSION])),
- <<?STATE_PREFIX/binary, Ver/binary>>.
-
--spec job_id(#job{}) -> binary().
-job_id(#job{source = #shard{name = SourceName}}) ->
- HashInput = [SourceName, atom_to_binary(node(), utf8)],
- IdHashList = couch_util:to_hex(crypto:hash(sha256, HashInput)),
- IdHash = iolist_to_binary(IdHashList),
- Prefix = iolist_to_binary(io_lib:format("~3..0B", [?JOB_ID_VERSION])),
- <<Prefix/binary, "-", IdHash/binary>>.
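-% Illustrative example (not from the original code, assuming
-% ?JOB_ID_VERSION = 1): the id is the zero-padded version, a dash, and the
-% hex-encoded sha256 of the source shard name and the node name, e.g.
-% <<"001-", IdHash/binary>> where IdHash is 64 lowercase hex characters.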
-
--spec target_shards(#shard{}, split()) -> [#shard{}].
-target_shards(#shard{name = Name, range = [B, E], dbname = DbName}, Split) when
- is_integer(Split), Split >= 2, (E - B + 1) >= Split
-->
- Ranges = target_ranges([B, E], Split),
- <<"shards/", _:8/binary, "-", _:8/binary, "/", DbAndSuffix/binary>> = Name,
- [DbName, Suffix] = binary:split(DbAndSuffix, <<".">>),
- [build_shard(R, DbName, Suffix) || R <- Ranges].
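-% Illustrative example (not from the original code): a source shard named
-% <<"shards/00000000-ffffffff/db1.1553786862">> split in two would yield
-% target shards for the ranges [0, 16#7fffffff] and [16#80000000,
-% 16#ffffffff], named with the same "db1" dbname and ".1553786862" suffix
-% on the local node.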
-
--spec target_ranges([range_pos()], split()) -> [[range_pos()]].
-target_ranges([Begin, End], Split) when
- (End - Begin + 1) >= Split,
- Split >= 2
-->
- % + 1 since intervals are inclusive
- Len = End - Begin + 1,
- NewLen = Len div Split,
- Rem = Len rem Split,
- Ranges = [[I, I + NewLen - 1] || I <- lists:seq(Begin, End - Rem, NewLen)],
-    % Adjust the last end to always match the original end so that we always
-    % cover the whole range. When the remainder is non-zero this makes the
-    % last range larger. The algorithm could later be improved to
-    % re-distribute the remainder equally amongst the chunks.
- {BeforeLast, [[BeginLast, _]]} = lists:split(Split - 1, Ranges),
- BeforeLast ++ [[BeginLast, End]].
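-% Worked example (not from the original code): target_ranges([16#00000000,
-% 16#ffffffff], 4) gives Len = 16#100000000, NewLen = 16#40000000, Rem = 0
-% and returns [[16#00000000, 16#3fffffff], [16#40000000, 16#7fffffff],
-% [16#80000000, 16#bfffffff], [16#c0000000, 16#ffffffff]]; a non-zero
-% remainder would only widen the last range.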
-
--spec build_shard([non_neg_integer()], binary(), binary()) -> #shard{}.
-build_shard(Range, DbName, Suffix) ->
- Shard = #shard{dbname = DbName, range = Range, node = node()},
- mem3_util:name_shard(Shard, <<".", Suffix/binary>>).
-
--spec running_jobs() -> [#job{}].
-running_jobs() ->
- Pat = #job{job_state = running, _ = '_'},
- ets:match_object(?MODULE, Pat).
-
--spec info_update(atom(), any(), [tuple()]) -> [tuple()].
-info_update(Key, Val, StateInfo) ->
- lists:keystore(Key, 1, StateInfo, {Key, Val}).
-
--spec info_delete(atom(), [tuple()]) -> [tuple()].
-info_delete(Key, StateInfo) ->
- lists:keydelete(Key, 1, StateInfo).
-
--spec checkpoint_int(#job{}, #state{}) -> #state{}.
-checkpoint_int(#job{} = Job, State) ->
- couch_log:debug("~p checkpoint ~s", [?MODULE, jobfmt(Job)]),
- case report_int(Job) of
- ok ->
- ok = mem3_reshard_store:store_job(State, Job),
- ok = mem3_reshard_job:checkpoint_done(Job),
- State;
- not_found ->
- couch_log:error("~p checkpoint couldn't find ~p", [?MODULE, Job]),
- State
- end.
-
--spec report_int(#job{}) -> ok | not_found.
-report_int(Job) ->
- case ets:lookup(?MODULE, Job#job.id) of
- [#job{ref = Ref, pid = CurPid}] ->
- case Job#job.pid =:= CurPid of
- true ->
- couch_log:debug("~p reported ~s", [?MODULE, jobfmt(Job)]),
-                    % Carry over the reference from ets as the #job{} coming
-                    % from the job process won't have its own monitor ref.
- true = ets:insert(?MODULE, Job#job{ref = Ref}),
- ok;
- false ->
- LogMsg = "~p ignoring old job report ~p curr pid:~p",
- couch_log:warning(LogMsg, [?MODULE, jobfmt(Job), CurPid]),
- not_found
- end;
- _ ->
- couch_log:error("~p reporting : couldn't find ~p", [?MODULE, Job]),
- not_found
- end.
-
--spec remove_job_int(job_id(), #state{}) -> ok | {error, not_found}.
-remove_job_int(Id, State) ->
- couch_log:notice("~p call remove_job Id:~p", [?MODULE, Id]),
- case job_by_id(Id) of
- #job{} = Job ->
- kill_job_int(Job),
- ok = mem3_reshard_store:delete_job(State, Id),
- ets:delete(?MODULE, Job#job.id),
- ok;
- not_found ->
- {error, not_found}
- end.
-
-% This function is for testing and debugging only
--spec reset_state(#state{}) -> #state{}.
-reset_state(#state{} = State) ->
- couch_log:warning("~p resetting state", [?MODULE]),
- ok = mem3_reshard_store:delete_state(State),
- couch_log:warning("~p killing all running jobs", [?MODULE]),
- [kill_job_int(Job) || Job <- running_jobs()],
- ets:delete_all_objects(?MODULE),
- couch_log:warning("~p resetting all job states", [?MODULE]),
- Jobs = mem3_reshard_store:get_jobs(State),
- lists:foldl(
- fun(#job{id = Id}, StateAcc) ->
- couch_log:warning("~p resetting job state ~p", [?MODULE, Id]),
- ok = mem3_reshard_store:delete_job(StateAcc, Id),
- StateAcc
- end,
- State,
- Jobs
- ),
- couch_log:warning("~p resetting state done", [?MODULE]),
- State#state{
- state = running,
- state_info = [],
- update_time = now_sec()
- }.
-
--spec update_job_history(#job{}) -> #job{}.
-update_job_history(#job{job_state = St, update_time = Ts} = Job) ->
- Hist = Job#job.history,
- Reason =
- case couch_util:get_value(reason, Job#job.state_info) of
- undefined -> null;
- Val -> couch_util:to_binary(Val)
- end,
- Job#job{history = update_history(St, Reason, Ts, Hist)}.
-
-update_history(State, null, Ts, [{_, State, Detail} | Rest]) ->
-    % New detail is null and the state matches the last entry; keep the old
-    % detail and just update the timestamp
-    [{Ts, State, Detail} | Rest];
-update_history(State, Detail, Ts, [{_, State, Detail} | Rest]) ->
-    % State and detail are the same as the last entry, just update the timestamp
-    [{Ts, State, Detail} | Rest];
-update_history(State, Detail, Ts, History) ->
-    History1 = [{Ts, State, Detail} | History],
-    lists:sublist(History1, max_history()).
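-% Illustrative examples (not from the original code), with history kept
-% newest-first: update_history(running, null, 20, [{10, running, null}])
-% returns [{20, running, null}] (same state, only the timestamp moves),
-% while update_history(stopped, <<"Job stopped">>, 30, [{20, running, null}])
-% prepends a new entry, giving [{30, stopped, <<"Job stopped">>},
-% {20, running, null}] (truncated to at most max_history() entries).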
-
--spec max_history() -> non_neg_integer().
-max_history() ->
- config:get_integer("reshard", "max_history", ?DEFAULT_MAX_HISTORY).
-
--spec maybe_disable(#state{}) -> #state{}.
-maybe_disable(#state{} = State) ->
- case is_disabled() of
- true ->
- Reason = <<"Resharding disabled by application level config">>,
- SInfo = State#state.state_info,
- State#state{
- state = stopped,
- state_info = info_update(reason, Reason, SInfo)
- };
- false ->
- State
- end.
-
--spec jobs_by_db_and_state(binary(), split_state() | '_') -> [job_id()].
-jobs_by_db_and_state(Db, State) ->
- DbName = mem3:dbname(Db),
- Pat = #job{
- id = '$1',
- source = #shard{dbname = DbName, _ = '_'},
- job_state = State,
- _ = '_'
- },
- [JobId || [JobId] <- ets:match(?MODULE, Pat)].
-
--spec db_exists(binary()) -> boolean().
-db_exists(Name) ->
- try
- mem3:shards(mem3:dbname(Name)),
- true
- catch
- error:database_does_not_exist ->
- false
- end.
-
--spec db_monitor(pid()) -> no_return().
-db_monitor(Server) ->
- couch_log:notice("~p db monitor ~p starting", [?MODULE, self()]),
- EvtRef = erlang:monitor(process, couch_event_server),
- couch_event:register_all(self()),
- db_monitor_loop(Server, EvtRef).
-
--spec db_monitor_loop(pid(), reference()) -> no_return().
-db_monitor_loop(Server, EvtRef) ->
- receive
- {'$couch_event', DbName, deleted} ->
- case db_exists(DbName) of
- true ->
- % Could be source shard being deleted during splitting
- ok;
- false ->
- case length(jobs_by_db_and_state(DbName, '_')) > 0 of
- true ->
- % Notify only if there are jobs with that db
- gen_server:cast(Server, {db_deleted, DbName});
- false ->
- ok
- end
- end,
- db_monitor_loop(Server, EvtRef);
- {'$couch_event', _, _} ->
- db_monitor_loop(Server, EvtRef);
- {'DOWN', EvtRef, _, _, Info} ->
- couch_log:error("~p db monitor listener died ~p", [?MODULE, Info]),
- exit({db_monitor_died, Info});
- Msg ->
- couch_log:error("~p db monitor unexpected msg ~p", [?MODULE, Msg]),
- db_monitor_loop(Server, EvtRef)
- end.
-
--spec statefmt(#state{} | term()) -> string().
-statefmt(#state{state = StateName}) ->
- Total = ets:info(?MODULE, size),
- Active = mem3_reshard_job_sup:count_children(),
- Msg = "#state{~s total:~B active:~B}",
- Fmt = io_lib:format(Msg, [StateName, Total, Active]),
- lists:flatten(Fmt);
-statefmt(State) ->
- Fmt = io_lib:format("<Unknown split state:~p>", [State]),
- lists:flatten(Fmt).
-
--spec jobfmt(#job{}) -> string().
-jobfmt(#job{} = Job) ->
- mem3_reshard_job:jobfmt(Job).
diff --git a/src/mem3/src/mem3_reshard.hrl b/src/mem3/src/mem3_reshard.hrl
deleted file mode 100644
index ad76aeadf..000000000
--- a/src/mem3/src/mem3_reshard.hrl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include_lib("mem3/include/mem3.hrl").
-
-
--type range_pos() :: non_neg_integer().
--type split() :: pos_integer().
--type job_id() :: binary() | undefined.
--type job_type() :: split.
--type time_sec() :: non_neg_integer().
-
--type shard_split_main_state() ::
- running |
- stopped.
-
--type job_state() ::
- new |
- running |
- stopped |
- failed |
- completed.
-
--type split_state() ::
- new |
- initial_copy |
- topoff1 |
- build_indices |
- topoff2 |
- copy_local_docs |
- update_shardmap |
- wait_source_close |
- topoff3 |
- source_delete |
- completed.
-
-
--record(job, {
- id :: job_id() | '$1' | '_',
- type :: job_type(),
- job_state :: job_state(),
- split_state :: split_state(),
- state_info = [] :: [{atom(), any()}],
- source :: #shard{},
- target :: [#shard{}],
-    history = [] :: [{time_sec(), atom(), term()}],
- start_time = 0 :: non_neg_integer(),
- update_time = 0 :: non_neg_integer(),
- node :: node(),
- pid :: undefined | pid() | '$1' | '_',
- ref :: undefined | reference() | '_',
- manager :: undefined | pid(),
- workers = [] :: [pid()],
- retries = 0 :: non_neg_integer()
-}).
-
--record(state, {
- state :: shard_split_main_state(),
- state_info :: [],
- update_time :: non_neg_integer(),
- job_prefix :: binary(),
- state_id :: binary(),
- node :: node(),
- db_monitor :: pid()
-}).
diff --git a/src/mem3/src/mem3_reshard_api.erl b/src/mem3/src/mem3_reshard_api.erl
deleted file mode 100644
index a4d395461..000000000
--- a/src/mem3/src/mem3_reshard_api.erl
+++ /dev/null
@@ -1,229 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_api).
-
--export([
- create_jobs/5,
- get_jobs/0,
- get_job/1,
- get_summary/0,
- resume_job/1,
- stop_job/2,
- start_shard_splitting/0,
- stop_shard_splitting/1,
- get_shard_splitting_state/0
-]).
-
-create_jobs(Node, Shard, Db, Range, split) ->
- lists:map(
- fun(S) ->
- N = mem3:node(S),
- Name = mem3:name(S),
- case rpc:call(N, mem3_reshard, start_split_job, [Name]) of
- {badrpc, Error} ->
- {error, Error, N, Name};
- {ok, JobId} ->
- {ok, JobId, N, Name};
- {error, Error} ->
- {error, Error, N, Name}
- end
- end,
- pick_shards(Node, Shard, Db, Range)
- ).
-
-get_jobs() ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, jobs, []),
- lists:flatten(Replies).
-
-get_job(JobId) ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, job, [JobId]),
- case [JobInfo || {ok, JobInfo} <- Replies] of
- [JobInfo | _] ->
- {ok, JobInfo};
- [] ->
- {error, not_found}
- end.
-
-get_summary() ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, get_state, []),
- Stats0 = #{
- running => 0,
- total => 0,
- completed => 0,
- failed => 0,
- stopped => 0
- },
- StatsF = lists:foldl(
- fun({Res}, Stats) ->
- maps:map(
- fun(Stat, OldVal) ->
- OldVal + couch_util:get_value(Stat, Res, 0)
- end,
- Stats
- )
- end,
- Stats0,
- Replies
- ),
- {State, Reason} = state_and_reason(Replies),
- StateReasonProps = [{state, State}, {state_reason, Reason}],
- {StateReasonProps ++ lists:sort(maps:to_list(StatsF))}.
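-% Illustrative result shape (not from the original code): a single EJSON
-% object such as {[{state, running}, {state_reason, null}, {completed, 2},
-% {failed, 0}, {running, 1}, {stopped, 0}, {total, 3}]}, i.e. the
-% cluster-wide state and reason followed by the summed per-state job counts.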
-
-resume_job(JobId) ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(
- Nodes,
- mem3_reshard,
- resume_job,
- [JobId]
- ),
- WithoutNotFound = [R || R <- Replies, R =/= {error, not_found}],
- case lists:usort(WithoutNotFound) of
- [ok] ->
- ok;
- [{error, Error} | _] ->
- {error, {[{error, couch_util:to_binary(Error)}]}};
- [] ->
- {error, not_found}
- end.
-
-stop_job(JobId, Reason) ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(
- Nodes,
- mem3_reshard,
- stop_job,
- [JobId, Reason]
- ),
- WithoutNotFound = [R || R <- Replies, R =/= {error, not_found}],
- case lists:usort(WithoutNotFound) of
- [ok] ->
- ok;
- [{error, Error} | _] ->
- {error, {[{error, couch_util:to_binary(Error)}]}};
- [] ->
- {error, not_found}
- end.
-
-start_shard_splitting() ->
- {Replies, _Bad} = rpc:multicall(mem3_reshard, start, []),
- case lists:usort(lists:flatten(Replies)) of
- [ok] ->
- {ok, {[{ok, true}]}};
- [Error | _] ->
- {error, {[{error, couch_util:to_binary(Error)}]}}
- end.
-
-stop_shard_splitting(Reason) ->
- {Replies, _Bad} = rpc:multicall(mem3_reshard, stop, [Reason]),
- case lists:usort(lists:flatten(Replies)) of
- [ok] ->
- {ok, {[{ok, true}]}};
- [Error | _] ->
- {error, {[{error, couch_util:to_binary(Error)}]}}
- end.
-
-get_shard_splitting_state() ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, get_state, []),
- state_and_reason(Replies).
-
-state_and_reason(StateReplies) ->
- AccF = lists:foldl(
- fun({ResProps}, Acc) ->
- Reason = get_reason(ResProps),
- case couch_util:get_value(state, ResProps) of
- <<"running">> -> orddict:append(running, Reason, Acc);
- <<"stopped">> -> orddict:append(stopped, Reason, Acc);
- undefined -> Acc
- end
- end,
- orddict:from_list([{running, []}, {stopped, []}]),
- StateReplies
- ),
- Running = orddict:fetch(running, AccF),
- case length(Running) > 0 of
- true ->
- Reason = pick_reason(Running),
- {running, Reason};
- false ->
- Reason = pick_reason(orddict:fetch(stopped, AccF)),
- {stopped, Reason}
- end.
-
-pick_reason(Reasons) ->
- Reasons1 = lists:usort(Reasons),
- Reasons2 = [R || R <- Reasons1, R =/= undefined],
- case Reasons2 of
- [] -> null;
- [R1 | _] -> R1
- end.
-
-get_reason(StateProps) when is_list(StateProps) ->
- case couch_util:get_value(state_info, StateProps) of
- [] -> undefined;
- undefined -> undefined;
- {SInfoProps} -> couch_util:get_value(reason, SInfoProps)
- end.
-
-pick_shards(undefined, undefined, Db, undefined) when is_binary(Db) ->
- check_node_required(),
- check_range_required(),
- mem3:shards(Db);
-pick_shards(Node, undefined, Db, undefined) when
- is_atom(Node),
- is_binary(Db)
-->
- check_range_required(),
- [S || S <- mem3:shards(Db), mem3:node(S) == Node];
-pick_shards(undefined, undefined, Db, [_B, _E] = Range) when is_binary(Db) ->
- check_node_required(),
- [S || S <- mem3:shards(Db), mem3:range(S) == Range];
-pick_shards(Node, undefined, Db, [_B, _E] = Range) when
- is_atom(Node),
- is_binary(Db)
-->
- [S || S <- mem3:shards(Db), mem3:node(S) == Node, mem3:range(S) == Range];
-pick_shards(undefined, Shard, undefined, undefined) when is_binary(Shard) ->
- check_node_required(),
- Db = mem3:dbname(Shard),
- [S || S <- mem3:shards(Db), mem3:name(S) == Shard];
-pick_shards(Node, Shard, undefined, undefined) when
- is_atom(Node),
- is_binary(Shard)
-->
- Db = mem3:dbname(Shard),
- [S || S <- mem3:shards(Db), mem3:name(S) == Shard, mem3:node(S) == Node];
-pick_shards(_, undefined, undefined, _) ->
- throw({bad_request, <<"Must specify at least `db` or `shard`">>});
-pick_shards(_, Shard, Db, _) when is_binary(Shard), is_binary(Db) ->
- throw({bad_request, <<"`db` and `shard` are mutually exclusive">>}).
-
-check_node_required() ->
- case config:get_boolean("reshard", "require_node_param", false) of
- true ->
-            throw({bad_request, <<"`node` parameter is required">>});
- false ->
- ok
- end.
-
-check_range_required() ->
- case config:get_boolean("reshard", "require_range_param", false) of
- true ->
-            throw({bad_request, <<"`range` parameter is required">>});
- false ->
- ok
- end.
diff --git a/src/mem3/src/mem3_reshard_dbdoc.erl b/src/mem3/src/mem3_reshard_dbdoc.erl
deleted file mode 100644
index 7fb69598e..000000000
--- a/src/mem3/src/mem3_reshard_dbdoc.erl
+++ /dev/null
@@ -1,255 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_dbdoc).
-
--behaviour(gen_server).
-
--export([
- update_shard_map/1,
-
- start_link/0,
-
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mem3_reshard.hrl").
-
--spec update_shard_map(#job{}) -> no_return | ok.
-update_shard_map(#job{source = Source, target = Target} = Job) ->
- Node = hd(mem3_util:live_nodes()),
- JobStr = mem3_reshard_job:jobfmt(Job),
- LogMsg1 = "~p : ~p calling update_shard_map node:~p",
- couch_log:notice(LogMsg1, [?MODULE, JobStr, Node]),
- ServerRef = {?MODULE, Node},
- CallArg = {update_shard_map, Source, Target},
- TimeoutMSec = shard_update_timeout_msec(),
- try
- case gen_server:call(ServerRef, CallArg, TimeoutMSec) of
- {ok, _} -> ok;
- {error, CallError} -> throw({error, CallError})
- end
- catch
- _:Err ->
- exit(Err)
- end,
- LogMsg2 = "~p : ~p update_shard_map on node:~p returned",
- couch_log:notice(LogMsg2, [?MODULE, JobStr, Node]),
- UntilSec = mem3_reshard:now_sec() + (TimeoutMSec div 1000),
- case wait_source_removed(Source, 5, UntilSec) of
- true -> ok;
- false -> exit(shard_update_did_not_propagate)
- end.
-
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init(_) ->
- couch_log:notice("~p start init()", [?MODULE]),
- {ok, nil}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call({update_shard_map, Source, Target}, _From, State) ->
- Res =
- try
- update_shard_map(Source, Target)
- catch
- throw:{error, Error} ->
- {error, Error}
- end,
- {reply, Res, State};
-handle_call(Call, From, State) ->
- couch_log:error("~p unknown call ~p from: ~p", [?MODULE, Call, From]),
- {noreply, State}.
-
-handle_cast(Cast, State) ->
- couch_log:error("~p unexpected cast ~p", [?MODULE, Cast]),
- {noreply, State}.
-
-handle_info(Info, State) ->
- couch_log:error("~p unexpected info ~p", [?MODULE, Info]),
- {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-% Private
-
-update_shard_map(Source, Target) ->
- ok = validate_coordinator(),
- ok = replicate_from_all_nodes(shard_update_timeout_msec()),
- DocId = mem3:dbname(Source#shard.name),
- OldDoc =
- case mem3_util:open_db_doc(DocId) of
- {ok, #doc{deleted = true}} ->
- throw({error, missing_source});
- {ok, #doc{} = Doc} ->
- Doc;
- {not_found, deleted} ->
- throw({error, missing_source});
- OpenErr ->
- throw({error, {shard_doc_open_error, OpenErr}})
- end,
- #doc{body = OldBody} = OldDoc,
- NewBody = update_shard_props(OldBody, Source, Target),
- {ok, _} = write_shard_doc(OldDoc, NewBody),
- ok = replicate_to_all_nodes(shard_update_timeout_msec()),
- {ok, NewBody}.
-
-validate_coordinator() ->
- case hd(mem3_util:live_nodes()) =:= node() of
- true -> ok;
- false -> throw({error, coordinator_changed})
- end.
-
-replicate_from_all_nodes(TimeoutMSec) ->
- case mem3_util:replicate_dbs_from_all_nodes(TimeoutMSec) of
- ok -> ok;
- Error -> throw({error, Error})
- end.
-
-replicate_to_all_nodes(TimeoutMSec) ->
- case mem3_util:replicate_dbs_to_all_nodes(TimeoutMSec) of
- ok -> ok;
- Error -> throw({error, Error})
- end.
-
-write_shard_doc(#doc{id = Id} = Doc, Body) ->
- UpdatedDoc = Doc#doc{body = Body},
- couch_util:with_db(mem3_sync:shards_db(), fun(Db) ->
- try
- {ok, _} = couch_db:update_doc(Db, UpdatedDoc, [])
- catch
- conflict ->
- throw({error, {conflict, Id, Doc#doc.body, UpdatedDoc}})
- end
- end).
-
-update_shard_props({Props0}, #shard{} = Source, [#shard{} | _] = Targets) ->
- {ByNode0} = couch_util:get_value(<<"by_node">>, Props0, {[]}),
- ByNodeKV = {<<"by_node">>, {update_by_node(ByNode0, Source, Targets)}},
- Props1 = lists:keyreplace(<<"by_node">>, 1, Props0, ByNodeKV),
-
- {ByRange0} = couch_util:get_value(<<"by_range">>, Props1, {[]}),
- ByRangeKV = {<<"by_range">>, {update_by_range(ByRange0, Source, Targets)}},
- Props2 = lists:keyreplace(<<"by_range">>, 1, Props1, ByRangeKV),
-
- Changelog = couch_util:get_value(<<"changelog">>, Props2, []),
- {Node, Range} = {node_key(Source), range_key(Source)},
- TRanges = [range_key(T) || T <- Targets],
- ChangelogEntry = [[<<"split">>, Range, TRanges, Node]],
- ChangelogKV = {<<"changelog">>, Changelog ++ ChangelogEntry},
- Props3 = lists:keyreplace(<<"changelog">>, 1, Props2, ChangelogKV),
-
- {Props3}.
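-% Illustrative example (not from the original code): splitting the range
-% 00000000-ffffffff on node1@127.0.0.1 into two targets appends a changelog
-% entry of the form [<<"split">>, <<"00000000-ffffffff">>,
-% [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>], <<"node1@127.0.0.1">>]
-% and moves that node's by_node/by_range entries from the source range to
-% the two target ranges.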
-
-update_by_node(ByNode, #shard{} = Source, [#shard{} | _] = Targets) ->
- {NodeKey, SKey} = {node_key(Source), range_key(Source)},
- {_, Ranges} = lists:keyfind(NodeKey, 1, ByNode),
- Ranges1 = Ranges -- [SKey],
- Ranges2 = Ranges1 ++ [range_key(T) || T <- Targets],
- lists:keyreplace(NodeKey, 1, ByNode, {NodeKey, lists:sort(Ranges2)}).
-
-update_by_range(ByRange, Source, Targets) ->
- ByRange1 = remove_node_from_source(ByRange, Source),
- lists:foldl(fun add_node_to_target_foldl/2, ByRange1, Targets).
-
-remove_node_from_source(ByRange, Source) ->
- {NodeKey, SKey} = {node_key(Source), range_key(Source)},
- {_, SourceNodes} = lists:keyfind(SKey, 1, ByRange),
- % Double check that source had node to begin with
- case lists:member(NodeKey, SourceNodes) of
- true ->
- ok;
- false ->
- throw({source_shard_missing_node, NodeKey, SourceNodes})
- end,
- SourceNodes1 = SourceNodes -- [NodeKey],
- case SourceNodes1 of
- [] ->
- % If last node deleted, remove entry
- lists:keydelete(SKey, 1, ByRange);
- _ ->
- lists:keyreplace(SKey, 1, ByRange, {SKey, SourceNodes1})
- end.
-
-add_node_to_target_foldl(#shard{} = Target, ByRange) ->
- {NodeKey, TKey} = {node_key(Target), range_key(Target)},
- case lists:keyfind(TKey, 1, ByRange) of
- {_, Nodes} ->
- % Double check that target does not have node already
- case lists:member(NodeKey, Nodes) of
- false ->
- ok;
- true ->
- throw({target_shard_already_has_node, NodeKey, Nodes})
- end,
- Nodes1 = lists:sort([NodeKey | Nodes]),
- lists:keyreplace(TKey, 1, ByRange, {TKey, Nodes1});
- false ->
- % fabric_db_create:make_document/3 says they should be sorted
- lists:sort([{TKey, [NodeKey]} | ByRange])
- end.
-
-node_key(#shard{node = Node}) ->
- couch_util:to_binary(Node).
-
-range_key(#shard{range = [B, E]}) ->
- BHex = couch_util:to_hex(<<B:32/integer>>),
- EHex = couch_util:to_hex(<<E:32/integer>>),
- list_to_binary([BHex, "-", EHex]).
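-% Example (not from the original code): range_key(#shard{range = [0,
-% 16#7fffffff]}) returns <<"00000000-7fffffff">>, and node_key of a shard on
-% node1@127.0.0.1 returns <<"node1@127.0.0.1">>.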
-
-shard_update_timeout_msec() ->
- config:get_integer("reshard", "shard_upate_timeout_msec", 300000).
-
-wait_source_removed(#shard{name = Name} = Source, SleepSec, UntilSec) ->
- case check_source_removed(Source) of
- true ->
- true;
- false ->
- case mem3_reshard:now_sec() < UntilSec of
- true ->
- LogMsg = "~p : Waiting for shard ~p removal confirmation",
- couch_log:notice(LogMsg, [?MODULE, Name]),
- timer:sleep(SleepSec * 1000),
- wait_source_removed(Source, SleepSec, UntilSec);
- false ->
- false
- end
- end.
-
-check_source_removed(#shard{name = Name}) ->
- DbName = mem3:dbname(Name),
- Live = mem3_util:live_nodes(),
- ShardNodes = [N || #shard{node = N} <- mem3:shards(DbName)],
- Nodes = lists:usort([N || N <- ShardNodes, lists:member(N, Live)]),
- {Responses, _} = rpc:multicall(Nodes, mem3, shards, [DbName]),
- Shards = lists:usort(lists:flatten(Responses)),
-    SourcePresent = [
-        S
-     || S = #shard{name = SName, node = SNode} <- Shards,
-        SName =:= Name,
-        SNode =:= node()
-    ],
- case SourcePresent of
- [] -> true;
- [_ | _] -> false
- end.
diff --git a/src/mem3/src/mem3_reshard_httpd.erl b/src/mem3/src/mem3_reshard_httpd.erl
deleted file mode 100644
index 5abe8025c..000000000
--- a/src/mem3/src/mem3_reshard_httpd.erl
+++ /dev/null
@@ -1,319 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_httpd).
-
--export([
- handle_reshard_req/1
-]).
-
--import(couch_httpd, [
- send_json/2,
- send_json/3,
- send_method_not_allowed/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(JOBS, <<"jobs">>).
--define(STATE, <<"state">>).
--define(S_RUNNING, <<"running">>).
--define(S_STOPPED, <<"stopped">>).
-
-% GET /_reshard
-handle_reshard_req(#httpd{method = 'GET', path_parts = [_]} = Req) ->
- reject_if_disabled(),
- State = mem3_reshard_api:get_summary(),
- send_json(Req, State);
-handle_reshard_req(#httpd{path_parts = [_]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD");
-% GET /_reshard/state
-handle_reshard_req(
- #httpd{
- method = 'GET',
- path_parts = [_, ?STATE]
- } = Req
-) ->
- reject_if_disabled(),
- {State, Reason} = mem3_reshard_api:get_shard_splitting_state(),
- send_json(Req, {[{state, State}, {reason, Reason}]});
-% PUT /_reshard/state
-handle_reshard_req(
- #httpd{
- method = 'PUT',
- path_parts = [_, ?STATE]
- } = Req
-) ->
- reject_if_disabled(),
- couch_httpd:validate_ctype(Req, "application/json"),
- {Props} = couch_httpd:json_body_obj(Req),
- State = couch_util:get_value(<<"state">>, Props),
- Reason = couch_util:get_value(<<"reason">>, Props),
- case {State, Reason} of
- {undefined, _} ->
- throw({bad_request, <<"Expected a `state` field">>});
- {?S_RUNNING, _} ->
- case mem3_reshard_api:start_shard_splitting() of
- {ok, JsonResult} ->
- send_json(Req, 200, JsonResult);
- {error, JsonError} ->
- send_json(Req, 500, JsonError)
- end;
- {?S_STOPPED, Reason} ->
- Reason1 =
- case Reason =:= undefined of
- false -> Reason;
- true -> <<"Cluster-wide resharding stopped by the user">>
- end,
- case mem3_reshard_api:stop_shard_splitting(Reason1) of
- {ok, JsonResult} ->
- send_json(Req, 200, JsonResult);
- {error, JsonError} ->
- send_json(Req, 500, JsonError)
- end;
- {_, _} ->
- throw({bad_request, <<"State field not `running` or `stopped`">>})
- end;
-handle_reshard_req(#httpd{path_parts = [_, ?STATE]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD,PUT");
-handle_reshard_req(#httpd{path_parts = [_, ?STATE | _]} = _Req) ->
- throw(not_found);
-% GET /_reshard/jobs
-handle_reshard_req(#httpd{method = 'GET', path_parts = [_, ?JOBS]} = Req) ->
- reject_if_disabled(),
- Jobs = mem3_reshard_api:get_jobs(),
- Total = length(Jobs),
- send_json(Req, {[{total_rows, Total}, {offset, 0}, {jobs, Jobs}]});
-% POST /_reshard/jobs {"node": "...", "shard": "..."}
-handle_reshard_req(
- #httpd{
- method = 'POST',
- path_parts = [_, ?JOBS]
- } = Req
-) ->
- reject_if_disabled(),
- couch_httpd:validate_ctype(Req, "application/json"),
- {Props} = couch_httpd:json_body_obj(Req),
- Node = validate_node(couch_util:get_value(<<"node">>, Props)),
- Shard = validate_shard(couch_util:get_value(<<"shard">>, Props)),
- Db = validate_db(couch_util:get_value(<<"db">>, Props)),
- Range = validate_range(couch_util:get_value(<<"range">>, Props)),
- Type = validate_type(couch_util:get_value(<<"type">>, Props)),
- Res = mem3_reshard_api:create_jobs(Node, Shard, Db, Range, Type),
- case Res of
- [] -> throw(not_found);
- _ -> ok
- end,
- Oks = length([R || {ok, _, _, _} = R <- Res]),
- Code =
- case {Oks, length(Res)} of
- {Oks, Oks} -> 201;
- {Oks, _} when Oks > 0 -> 202;
- {0, _} -> 500
- end,
- EJson = lists:map(
- fun
- ({ok, Id, N, S}) ->
- {[{ok, true}, {id, Id}, {node, N}, {shard, S}]};
- ({error, E, N, S}) ->
- {[{error, couch_util:to_binary(E)}, {node, N}, {shard, S}]}
- end,
- Res
- ),
- send_json(Req, Code, EJson);
-handle_reshard_req(#httpd{path_parts = [_, ?JOBS]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD,POST");
-handle_reshard_req(#httpd{path_parts = [_, _]}) ->
- throw(not_found);
-% GET /_reshard/jobs/$jobid
-handle_reshard_req(
- #httpd{
- method = 'GET',
- path_parts = [_, ?JOBS, JobId]
- } = Req
-) ->
- reject_if_disabled(),
- case mem3_reshard_api:get_job(JobId) of
- {ok, JobInfo} ->
- send_json(Req, JobInfo);
- {error, not_found} ->
- throw(not_found)
- end;
-% DELETE /_reshard/jobs/$jobid
-handle_reshard_req(
- #httpd{
- method = 'DELETE',
- path_parts = [_, ?JOBS, JobId]
- } = Req
-) ->
- reject_if_disabled(),
- case mem3_reshard_api:get_job(JobId) of
- {ok, {Props}} ->
- NodeBin = couch_util:get_value(node, Props),
- Node = binary_to_atom(NodeBin, utf8),
- case rpc:call(Node, mem3_reshard, remove_job, [JobId]) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- {error, not_found} ->
- throw(not_found)
- end;
- {error, not_found} ->
- throw(not_found)
- end;
-handle_reshard_req(#httpd{path_parts = [_, ?JOBS, _]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD,DELETE");
-% GET /_reshard/jobs/$jobid/state
-handle_reshard_req(
- #httpd{
- method = 'GET',
- path_parts = [_, ?JOBS, JobId, ?STATE]
- } = Req
-) ->
- reject_if_disabled(),
- case mem3_reshard_api:get_job(JobId) of
- {ok, {Props}} ->
- JobState = couch_util:get_value(job_state, Props),
- {SIProps} = couch_util:get_value(state_info, Props),
- Reason =
- case couch_util:get_value(reason, SIProps) of
- undefined -> null;
- Val -> couch_util:to_binary(Val)
- end,
- send_json(Req, 200, {[{state, JobState}, {reason, Reason}]});
- {error, not_found} ->
- throw(not_found)
- end;
-% PUT /_reshard/jobs/$jobid/state
-handle_reshard_req(
- #httpd{
- method = 'PUT',
- path_parts = [_, ?JOBS, JobId, ?STATE]
- } = Req
-) ->
- reject_if_disabled(),
- couch_httpd:validate_ctype(Req, "application/json"),
- {Props} = couch_httpd:json_body_obj(Req),
- State = couch_util:get_value(<<"state">>, Props),
- Reason = couch_util:get_value(<<"reason">>, Props),
- case {State, Reason} of
- {undefined, _} ->
- throw({bad_request, <<"Expected a `state` field">>});
- {?S_RUNNING, _} ->
- case mem3_reshard_api:resume_job(JobId) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- {error, not_found} ->
- throw(not_found);
- {error, JsonError} ->
- send_json(Req, 500, JsonError)
- end;
- {?S_STOPPED, Reason} ->
- Reason1 =
- case Reason =:= undefined of
- false -> Reason;
- true -> <<"Stopped by user">>
- end,
- case mem3_reshard_api:stop_job(JobId, Reason1) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- {error, not_found} ->
- throw(not_found);
- {error, JsonError} ->
- send_json(Req, 500, JsonError)
- end;
- {_, _} ->
- throw({bad_request, <<"State field not `running` or `stopped`">>})
- end;
-handle_reshard_req(#httpd{path_parts = [_, ?JOBS, _, ?STATE]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD,PUT").
-
-reject_if_disabled() ->
- case mem3_reshard:is_disabled() of
- true -> throw(not_implemented);
- false -> ok
- end.
-
-validate_type(<<"split">>) ->
- split;
-validate_type(_Type) ->
-    throw({bad_request, <<"Job `type` must be `split`">>}).
-
-validate_node(undefined) ->
- undefined;
-validate_node(Node0) when is_binary(Node0) ->
- Nodes = mem3_util:live_nodes(),
- try binary_to_existing_atom(Node0, utf8) of
- N1 ->
- case lists:member(N1, Nodes) of
- true -> N1;
- false -> throw({bad_request, <<"Not connected to `node`">>})
- end
- catch
- error:badarg ->
- throw({bad_request, <<"`node` is not a valid node name">>})
- end;
-validate_node(_Node) ->
- throw({bad_request, <<"Invalid `node`">>}).
-
-validate_shard(undefined) ->
- undefined;
-validate_shard(Shard) when is_binary(Shard) ->
- case Shard of
- <<"shards/", _:8/binary, "-", _:8/binary, "/", _/binary>> ->
- Shard;
- _ ->
- throw({bad_request, <<"`shard` is invalid">>})
- end;
-validate_shard(_Shard) ->
- throw({bad_request, <<"Invalid `shard`">>}).
-
-validate_db(undefined) ->
- undefined;
-validate_db(DbName) when is_binary(DbName) ->
- try mem3:shards(DbName) of
- [_ | _] -> DbName;
-        _ -> throw({bad_request, <<"No shards in `db`">>})
- catch
- _:_ ->
- throw({bad_request, <<"Invalid `db`">>})
- end;
-validate_db(_DbName) ->
- throw({bad_request, <<"Invalid `db`">>}).
-
-validate_range(undefined) ->
- undefined;
-validate_range(<<BBin:8/binary, "-", EBin:8/binary>>) ->
- {B, E} =
- try
- {
- httpd_util:hexlist_to_integer(binary_to_list(BBin)),
- httpd_util:hexlist_to_integer(binary_to_list(EBin))
- }
- catch
- _:_ ->
- invalid_range()
- end,
- if
- B < 0 -> invalid_range();
- E < 0 -> invalid_range();
- B > (2 bsl 31) - 1 -> invalid_range();
- E > (2 bsl 31) - 1 -> invalid_range();
- B >= E -> invalid_range();
- true -> ok
- end,
- % Use a list format here to make it look the same as #shard's range
- [B, E];
-validate_range(_Range) ->
- invalid_range().
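-% Example (not from the original code): validate_range(<<"00000000-7fffffff">>)
-% returns [0, 2147483647], while a reversed range such as
-% <<"7fffffff-00000000">> (or any malformed value) throws a bad_request.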
-
-invalid_range() ->
- throw({bad_request, <<"Invalid `range`">>}).
diff --git a/src/mem3/src/mem3_reshard_index.erl b/src/mem3/src/mem3_reshard_index.erl
deleted file mode 100644
index fa0a101b5..000000000
--- a/src/mem3/src/mem3_reshard_index.erl
+++ /dev/null
@@ -1,184 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_index).
-
--export([
- design_docs/1,
- target_indices/2,
- spawn_builders/1,
- build_index/2
-]).
-
--define(MRVIEW, mrview).
--define(DREYFUS, dreyfus).
--define(HASTINGS, hastings).
-
--include_lib("mem3/include/mem3.hrl").
-
-%% Public API
-
-design_docs(DbName) ->
- try
- case fabric_design_docs(mem3:dbname(DbName)) of
- {error, {maintenance_mode, _, _Node}} ->
- {ok, []};
- {ok, DDocs} ->
- JsonDocs = [couch_doc:from_json_obj(DDoc) || DDoc <- DDocs],
- {ok, JsonDocs};
- Else ->
- Else
- end
- catch
- error:database_does_not_exist ->
- {ok, []}
- end.
-
-target_indices(Docs, Targets) ->
- Indices = [[indices(N, D) || D <- Docs] || #shard{name = N} <- Targets],
- lists:flatten(Indices).
-
-spawn_builders(Indices) ->
- Retries = max_retries(),
- [spawn_link(?MODULE, build_index, [Idx, Retries]) || Idx <- Indices].
-
-%% Private API
-
-fabric_design_docs(DbName) ->
- case couch_util:with_proc(fabric, design_docs, [DbName], infinity) of
- {ok, Resp} -> Resp;
- {error, Error} -> Error
- end.
-
-indices(DbName, Doc) ->
- mrview_indices(DbName, Doc) ++
- [dreyfus_indices(DbName, Doc) || has_app(dreyfus)] ++
- [hastings_indices(DbName, Doc) || has_app(hastings)].
-
-mrview_indices(DbName, Doc) ->
- try
- {ok, MRSt} = couch_mrview_util:ddoc_to_mrst(DbName, Doc),
- Views = couch_mrview_index:get(views, MRSt),
- case Views =/= [] of
- true ->
- [{?MRVIEW, DbName, MRSt}];
- false ->
- []
- end
- catch
- Tag:Err ->
- Msg = "~p couldn't get mrview index ~p ~p ~p:~p",
- couch_log:error(Msg, [?MODULE, DbName, Doc, Tag, Err]),
- []
- end.
-
-dreyfus_indices(DbName, Doc) ->
- try
- Indices = dreyfus_index:design_doc_to_indexes(Doc),
- [{?DREYFUS, DbName, Index} || Index <- Indices]
- catch
- Tag:Err ->
- Msg = "~p couldn't get dreyfus indices ~p ~p ~p:~p",
- couch_log:error(Msg, [?MODULE, DbName, Doc, Tag, Err]),
- []
- end.
-
-hastings_indices(DbName, Doc) ->
- try
- Indices = hastings_index:design_doc_to_indexes(Doc),
- [{?HASTINGS, DbName, Index} || Index <- Indices]
- catch
- Tag:Err ->
-            Msg = "~p couldn't get hastings indices ~p ~p ~p:~p",
- couch_log:error(Msg, [?MODULE, DbName, Doc, Tag, Err]),
- []
- end.
-
-build_index({?MRVIEW, _DbName, MRSt} = Ctx, Try) ->
- await_retry(
- couch_index_server:get_index(couch_mrview_index, MRSt),
- fun couch_index:get_state/2,
- Ctx,
- Try
- );
-build_index({?DREYFUS, DbName, DIndex} = Ctx, Try) ->
- await_retry(
- dreyfus_index_manager:get_index(DbName, DIndex),
- fun dreyfus_index:await/2,
- Ctx,
- Try
- );
-build_index({?HASTINGS, DbName, HIndex} = Ctx, Try) ->
- await_retry(
- hastings_index_manager:get_index(DbName, HIndex),
- fun hastings_index:await/2,
- Ctx,
- Try
- ).
-
-await_retry({ok, Pid}, AwaitIndex, {_, DbName, _} = Ctx, Try) ->
- try AwaitIndex(Pid, get_update_seq(DbName)) of
- {ok, _} -> ok;
- {ok, _, _} -> ok;
- AwaitError -> maybe_retry(Ctx, AwaitError, Try)
- catch
- _:CatchError ->
- maybe_retry(Ctx, CatchError, Try)
- end;
-await_retry(OpenError, _AwaitIndex, Ctx, Try) ->
- maybe_retry(Ctx, OpenError, Try).
-
-maybe_retry(Ctx, killed = Error, Try) ->
- retry(Ctx, Error, Try);
-maybe_retry(Ctx, {killed, _} = Error, Try) ->
- retry(Ctx, Error, Try);
-maybe_retry(Ctx, shutdown = Error, Try) ->
- retry(Ctx, Error, Try);
-maybe_retry(Ctx, Error, 0) ->
- fail(Ctx, Error);
-maybe_retry(Ctx, Error, Try) when is_integer(Try), Try > 0 ->
- retry(Ctx, Error, Try - 1).
-
-retry(Ctx, Error, Try) ->
- IndexInfo = index_info(Ctx),
- LogMsg = "~p : error ~p when building ~p, retrying (~p)",
- couch_log:warning(LogMsg, [?MODULE, Error, IndexInfo, Try]),
- timer:sleep(retry_interval_sec() * 1000),
- build_index(Ctx, Try).
-
-fail(Ctx, Error) ->
- IndexInfo = index_info(Ctx),
- LogMsg = "~p : error ~p when building ~p, max tries exceeded, failing",
- couch_log:error(LogMsg, [?MODULE, Error, IndexInfo]),
- exit({error_building_index, IndexInfo}).
-
-index_info({?MRVIEW, DbName, MRSt}) ->
- GroupName = couch_mrview_index:get(idx_name, MRSt),
- {DbName, GroupName};
-index_info({?DREYFUS, DbName, Index}) ->
- {DbName, Index};
-index_info({?HASTINGS, DbName, Index}) ->
- {DbName, Index}.
-
-has_app(App) ->
- code:lib_dir(App) /= {error, bad_name}.
-
-get_update_seq(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:get_update_seq(Db)
- end).
-
-max_retries() ->
- config:get_integer("reshard", "index_max_retries", 5).
-
-retry_interval_sec() ->
- config:get_integer("reshard", "index_retry_interval_sec", 10).
diff --git a/src/mem3/src/mem3_reshard_job.erl b/src/mem3/src/mem3_reshard_job.erl
deleted file mode 100644
index a9fb48134..000000000
--- a/src/mem3/src/mem3_reshard_job.erl
+++ /dev/null
@@ -1,669 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_job).
-
--export([
- start_link/1,
-
- checkpoint_done/1,
- jobfmt/1,
- pickfun/3
-]).
-
--export([
- init/1,
-
- initial_copy/1,
- initial_copy_impl/1,
-
- topoff/1,
- topoff_impl/1,
-
- build_indices/1,
-
- copy_local_docs/1,
- copy_local_docs_impl/1,
-
- update_shardmap/1,
-
- wait_source_close/1,
- wait_source_close_impl/1,
-
- source_delete/1,
- source_delete_impl/1,
-
- completed/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mem3_reshard.hrl").
-
-% Batch size for internal replication topoffs
--define(INTERNAL_REP_BATCH_SIZE, 2000).
-
-% The list of possible split states. The order of this list is important as
-% a job will progress linearly through it. However, when starting a job we
-% may have to resume from an earlier state as listed below in STATE_RESTART.
--define(SPLIT_STATES, [
- new,
- initial_copy,
- topoff1,
- build_indices,
- topoff2,
- copy_local_docs,
- update_shardmap,
- wait_source_close,
- topoff3,
- source_delete,
- completed
-]).
-
-% When a job starts it may be resuming from a partially
-% completed state. These state pairs list the state
-% we have to restart from for each possible state.
--define(STATE_RESTART, #{
- new => initial_copy,
- initial_copy => initial_copy,
- topoff1 => topoff1,
- build_indices => topoff1,
- topoff2 => topoff1,
- copy_local_docs => topoff1,
- update_shardmap => update_shardmap,
- wait_source_close => wait_source_close,
- topoff3 => wait_source_close,
- source_delete => wait_source_close,
- completed => completed
-}).
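-% Illustrative example (not from the original code): a job checkpointed in
-% copy_local_docs restarts from topoff1, re-running the copy-related steps,
-% while a job checkpointed in update_shardmap resumes from update_shardmap
-% itself.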
-
-% If we have a worker failing during any of these
-% states we need to clean up the targets
--define(CLEAN_TARGET_STATES, [
- initial_copy,
- topoff1,
- build_indices,
- topoff2,
- copy_local_docs
-]).
-
-start_link(#job{} = Job) ->
- proc_lib:start_link(?MODULE, init, [Job]).
-
-% This is called by the main process after it has checkpointed the progress
-% of the job. After the new state is checkpointed, we signal the job to start
-% executing that state.
-checkpoint_done(#job{pid = Pid} = Job) ->
- couch_log:notice(" ~p : checkpoint done for ~p", [?MODULE, jobfmt(Job)]),
- Pid ! checkpoint_done,
- ok.
-
-% Formatting function, used for logging mostly
-jobfmt(#job{} = Job) ->
- #job{
- id = Id,
- source = #shard{name = Source},
- target = Target,
- split_state = State,
- job_state = JobState,
- pid = Pid
- } = Job,
- TargetCount = length(Target),
- Msg = "#job{~s ~s /~B job_state:~s split_state:~s pid:~p}",
- Fmt = io_lib:format(Msg, [Id, Source, TargetCount, JobState, State, Pid]),
- lists:flatten(Fmt).
-
-% This is the function which picks between the various targets. It is used
-% here as well as in the mem3_rep internal replicator and the couch_db_split
-% bulk copy logic. Given a document id, a list of ranges, and a hash function,
-% it picks one of the ranges or returns the not_in_range atom.
-pickfun(DocId, [[B, E] | _] = Ranges, {_M, _F, _A} = HashFun) when
- is_integer(B), is_integer(E), B =< E
-->
- HashKey = mem3_hash:calculate(HashFun, DocId),
- Pred = fun([Begin, End]) ->
- Begin =< HashKey andalso HashKey =< End
- end,
- case lists:filter(Pred, Ranges) of
- [] -> not_in_range;
- [Key] -> Key
- end.
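-% Illustrative example (not from the original code): with Ranges =
-% [[16#00000000, 16#7fffffff], [16#80000000, 16#ffffffff]] and a HashFun for
-% which the document id hashes to 16#5a000000, pickfun/3 returns
-% [16#00000000, 16#7fffffff]; a hash value outside every range would return
-% not_in_range.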
-
-init(#job{} = Job0) ->
- process_flag(trap_exit, true),
- Job1 = set_start_state(Job0#job{
- pid = self(),
- start_time = mem3_reshard:now_sec(),
- workers = [],
- retries = 0
- }),
- Job2 = update_split_history(Job1),
- proc_lib:init_ack({ok, self()}),
- couch_log:notice("~p starting job ~s", [?MODULE, jobfmt(Job2)]),
- ok = checkpoint(Job2),
- run(Job2).
-
-run(#job{split_state = CurrState} = Job) ->
- StateFun =
- case CurrState of
- topoff1 -> topoff;
- topoff2 -> topoff;
- topoff3 -> topoff;
- _ -> CurrState
- end,
- NewJob =
- try
- Job1 = ?MODULE:StateFun(Job),
- Job2 = wait_for_workers(Job1),
- Job3 = switch_to_next_state(Job2),
- ok = checkpoint(Job3),
- Job3
- catch
- throw:{retry, RetryJob} ->
- RetryJob
- end,
- run(NewJob).
-
-set_start_state(#job{split_state = State} = Job) ->
- case maps:get(State, ?STATE_RESTART, undefined) of
- undefined ->
- Fmt1 = "~p recover : unknown state ~s",
- couch_log:error(Fmt1, [?MODULE, jobfmt(Job)]),
- erlang:error({invalid_split_job_recover_state, Job});
- StartState ->
- Job#job{split_state = StartState}
- end.
-
-get_next_state(#job{split_state = State}) ->
- get_next_state(State, ?SPLIT_STATES).
-
-get_next_state(completed, _) ->
- completed;
-get_next_state(CurrState, [CurrState, NextState | _]) ->
- NextState;
-get_next_state(CurrState, [_ | Rest]) ->
- get_next_state(CurrState, Rest).
-
-switch_to_next_state(#job{} = Job0) ->
- Info0 = Job0#job.state_info,
- Info1 = info_delete(error, Info0),
- Info2 = info_delete(reason, Info1),
- Job1 = Job0#job{
- split_state = get_next_state(Job0),
- update_time = mem3_reshard:now_sec(),
- retries = 0,
- state_info = Info2,
- workers = []
- },
- Job2 = update_split_history(Job1),
- check_state(Job2).
-
-checkpoint(Job) ->
-    % Ask the main process to checkpoint. When it has finished it will notify
-    % us by calling checkpoint_done/1. The reason not to call the main process
-    % via a gen_server:call is that the main process could be in the middle of
-    % terminating the job; the call would then deadlock (after it sends us a
-    % shutdown message) and would end up using the whole supervisor
-    % termination timeout before finally killing the job.
- ok = mem3_reshard:checkpoint(Job#job.manager, Job),
- Parent = parent(),
- receive
- {'EXIT', Parent, Reason} ->
- handle_exit(Job, Reason);
- checkpoint_done ->
- ok;
- Other ->
- handle_unknown_msg(Job, "checkpoint", Other)
- end.
-
-wait_for_workers(#job{workers = []} = Job) ->
- Job;
-wait_for_workers(#job{workers = Workers} = Job) ->
- Parent = parent(),
- receive
- {'EXIT', Parent, Reason} ->
- handle_exit(Job, Reason);
- {'EXIT', Pid, Reason} ->
- case lists:member(Pid, Workers) of
- true ->
- NewJob = handle_worker_exit(Job, Pid, Reason),
- wait_for_workers(NewJob);
- false ->
- handle_unknown_msg(Job, "wait_for_workers", {Pid, Reason})
- end;
- Other ->
- handle_unknown_msg(Job, "wait_for_workers", Other)
- end.
-
-handle_worker_exit(#job{workers = Workers} = Job, Pid, normal) ->
- Job#job{workers = Workers -- [Pid]};
-handle_worker_exit(#job{} = Job, _Pid, {error, missing_source}) ->
- Msg1 = "~p stopping worker due to source missing ~p",
- couch_log:error(Msg1, [?MODULE, jobfmt(Job)]),
- kill_workers(Job),
- case lists:member(Job#job.split_state, ?CLEAN_TARGET_STATES) of
- true ->
- Msg2 = "~p cleaning target after db was deleted ~p",
- couch_log:error(Msg2, [?MODULE, jobfmt(Job)]),
- reset_target(Job),
- exit({error, missing_source});
- false ->
- exit({error, missing_source})
- end;
-handle_worker_exit(#job{} = Job, _Pid, {error, missing_target}) ->
- Msg = "~p stopping worker due to target db missing ~p",
- couch_log:error(Msg, [?MODULE, jobfmt(Job)]),
- kill_workers(Job),
- exit({error, missing_target});
-handle_worker_exit(#job{} = Job0, _Pid, Reason) ->
- couch_log:error("~p worker error ~p ~p", [?MODULE, jobfmt(Job0), Reason]),
- kill_workers(Job0),
- Job1 = Job0#job{workers = []},
- case Job1#job.retries =< max_retries() of
- true ->
- retry_state(Job1, Reason);
- false ->
- exit(Reason)
- end.
-
-% Clean up and exit when we receive an 'EXIT' message from our parent. If the
-% shard map is being updated, try to wait some time for it to finish.
-handle_exit(
- #job{split_state = update_shardmap, workers = [WPid]} = Job,
- Reason
-) ->
- Timeout = update_shard_map_timeout_sec(),
- Msg1 = "~p job exit ~s ~p while shard map is updating, waiting ~p sec",
- couch_log:warning(Msg1, [?MODULE, jobfmt(Job), Reason, Timeout]),
- receive
- {'EXIT', WPid, normal} ->
- Msg2 = "~p ~s shard map finished updating successfully, exiting",
- couch_log:notice(Msg2, [?MODULE, jobfmt(Job)]),
- exit(Reason);
- {'EXIT', WPid, Error} ->
- Msg3 = "~p ~s shard map update failed with error ~p",
- couch_log:error(Msg3, [?MODULE, jobfmt(Job), Error]),
- exit(Reason)
- after Timeout * 1000 ->
- Msg4 = "~p ~s shard map update timeout exceeded ~p sec",
- couch_log:error(Msg4, [?MODULE, jobfmt(Job), Timeout]),
- kill_workers(Job),
- exit(Reason)
- end;
-handle_exit(#job{} = Job, Reason) ->
- kill_workers(Job),
- exit(Reason).
-
-retry_state(#job{retries = Retries, state_info = Info} = Job0, Error) ->
- Job1 = Job0#job{
- retries = Retries + 1,
- state_info = info_update(error, Error, Info)
- },
- couch_log:notice("~p retrying ~p ~p", [?MODULE, jobfmt(Job1), Retries]),
- Job2 = report(Job1),
- Timeout = retry_interval_sec(),
- Parent = parent(),
- receive
- {'EXIT', Parent, Reason} ->
- handle_exit(Job2, Reason);
- Other ->
- handle_unknown_msg(Job2, "retry_state", Other)
- after Timeout * 1000 ->
- ok
- end,
- throw({retry, Job2}).
-
-report(#job{manager = ManagerPid} = Job) ->
- Job1 = Job#job{update_time = mem3_reshard:now_sec()},
- ok = mem3_reshard:report(ManagerPid, Job1),
- Job1.
-
-kill_workers(#job{workers = Workers}) ->
- lists:foreach(
- fun(Worker) ->
- unlink(Worker),
- exit(Worker, kill)
- end,
- Workers
- ),
- flush_worker_messages().
-
-flush_worker_messages() ->
- Parent = parent(),
- receive
- {'EXIT', Pid, _} when Pid =/= Parent ->
- flush_worker_messages()
- after 0 ->
- ok
- end.
-
-parent() ->
- case get('$ancestors') of
- [Pid | _] when is_pid(Pid) -> Pid;
- [Name | _] when is_atom(Name) -> whereis(Name);
- _ -> undefined
- end.
-
-handle_unknown_msg(Job, When, RMsg) ->
- LogMsg = "~p ~s received an unknown message ~p when in ~s",
- couch_log:error(LogMsg, [?MODULE, jobfmt(Job), RMsg, When]),
- erlang:error({invalid_split_job_message, Job#job.id, When, RMsg}).
-
-initial_copy(#job{} = Job) ->
- Pid = spawn_link(?MODULE, initial_copy_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-initial_copy_impl(#job{source = Source, target = Targets0} = Job) ->
- #shard{name = SourceName} = Source,
- Targets = [{R, N} || #shard{range = R, name = N} <- Targets0],
- TMap = maps:from_list(Targets),
- LogMsg1 = "~p initial_copy started ~s",
- LogArgs1 = [?MODULE, shardsstr(Source, Targets0)],
- couch_log:notice(LogMsg1, LogArgs1),
- reset_target(Job),
- case couch_db_split:split(SourceName, TMap, fun pickfun/3) of
- {ok, Seq} ->
- LogMsg2 = "~p initial_copy of ~s finished @ seq:~p",
- LogArgs2 = [?MODULE, shardsstr(Source, Targets0), Seq],
- couch_log:notice(LogMsg2, LogArgs2),
- create_artificial_mem3_rep_checkpoints(Job, Seq);
- {error, Error} ->
-            LogMsg3 = "~p initial_copy of ~s failed ~p",
- LogArgs3 = [?MODULE, shardsstr(Source, Targets0), Error],
- couch_log:notice(LogMsg3, LogArgs3),
- exit({error, Error})
- end.
-
-topoff(#job{} = Job) ->
- Pid = spawn_link(?MODULE, topoff_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-topoff_impl(#job{source = #shard{} = Source, target = Targets}) ->
- couch_log:notice("~p topoff ~p", [?MODULE, shardsstr(Source, Targets)]),
- check_source_exists(Source, topoff),
- check_targets_exist(Targets, topoff),
- TMap = maps:from_list([{R, T} || #shard{range = R} = T <- Targets]),
- Opts = [{batch_size, ?INTERNAL_REP_BATCH_SIZE}, {batch_count, all}],
- case mem3_rep:go(Source, TMap, Opts) of
- {ok, Count} ->
- Args = [?MODULE, shardsstr(Source, Targets), Count],
- couch_log:notice("~p topoff done ~s, count: ~p", Args),
- ok;
- {error, Error} ->
- Args = [?MODULE, shardsstr(Source, Targets), Error],
- couch_log:error("~p topoff failed ~s, error: ~p", Args),
- exit({error, Error})
- end.
-
-build_indices(#job{} = Job) ->
- #job{
- source = #shard{name = SourceName} = Source,
- target = Targets
- } = Job,
- check_source_exists(Source, build_indices),
- {ok, DDocs} = mem3_reshard_index:design_docs(SourceName),
- Indices = mem3_reshard_index:target_indices(DDocs, Targets),
- case mem3_reshard_index:spawn_builders(Indices) of
- [] ->
- % Skip the log spam if this is a no-op
- Job#job{workers = []};
- [_ | _] = Pids ->
- report(Job#job{workers = Pids})
- end.
-
-copy_local_docs(#job{split_state = copy_local_docs} = Job) ->
- Pid = spawn_link(?MODULE, copy_local_docs_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-copy_local_docs_impl(#job{source = Source, target = Targets0}) ->
- #shard{name = SourceName} = Source,
- Targets = [{R, N} || #shard{range = R, name = N} <- Targets0],
- TMap = maps:from_list(Targets),
-    LogArg1 = [?MODULE, shardsstr(Source, Targets0)],
- couch_log:notice("~p copy local docs start ~s", LogArg1),
- case couch_db_split:copy_local_docs(SourceName, TMap, fun pickfun/3) of
- ok ->
- couch_log:notice("~p copy local docs finished for ~s", LogArg1),
- ok;
- {error, Error} ->
-            LogArg2 = [?MODULE, shardsstr(Source, Targets0), Error],
- couch_log:error("~p copy local docs failed for ~s ~p", LogArg2),
- exit({error, Error})
- end.
-
-update_shardmap(#job{} = Job) ->
- Pid = spawn_link(mem3_reshard_dbdoc, update_shard_map, [Job]),
- report(Job#job{workers = [Pid]}).
-
-wait_source_close(#job{source = #shard{name = Name}} = Job) ->
- couch_event:notify(Name, deleted),
- Pid = spawn_link(?MODULE, wait_source_close_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-wait_source_close_impl(#job{source = #shard{name = Name}, target = Targets}) ->
- Timeout = config:get_integer("reshard", "source_close_timeout_sec", 600),
- check_targets_exist(Targets, wait_source_close),
- case couch_db:open_int(Name, [?ADMIN_CTX]) of
- {ok, Db} ->
- Now = mem3_reshard:now_sec(),
- case wait_source_close(Db, 1, Now + Timeout) of
- true ->
- ok;
- false ->
- exit({error, source_db_close_timeout, Name, Timeout})
- end;
- {not_found, _} ->
- couch_log:warning("~p source already deleted ~p", [?MODULE, Name]),
- ok
- end.
-
-wait_source_close(Db, SleepSec, UntilSec) ->
- case couch_db:monitored_by(Db) -- [self()] of
- [] ->
- true;
- [_ | _] ->
- Now = mem3_reshard:now_sec(),
- case Now < UntilSec of
- true ->
- LogMsg = "~p : Waiting for source shard ~p to be closed",
- couch_log:notice(LogMsg, [?MODULE, couch_db:name(Db)]),
- timer:sleep(SleepSec * 1000),
- wait_source_close(Db, SleepSec, UntilSec);
- false ->
- false
- end
- end.
-
-source_delete(#job{} = Job) ->
- Pid = spawn_link(?MODULE, source_delete_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-source_delete_impl(#job{source = #shard{name = Name}, target = Targets}) ->
- check_targets_exist(Targets, source_delete),
- case config:get_boolean("mem3_reshard", "delete_source", true) of
- true ->
- case couch_server:delete(Name, [?ADMIN_CTX]) of
- ok ->
- couch_log:notice(
- "~p : deleted source shard ~p",
- [?MODULE, Name]
- );
- not_found ->
- couch_log:warning(
- "~p : source was already deleted ~p",
- [?MODULE, Name]
- )
- end;
- false ->
-            % Emit the deleted event even when not actually deleting the
-            % files. This is the second one emitted; the first was sent
-            % before wait_source_close. They should be idempotent. This one
-            % just matches the event couch_server would have emitted had the
-            % config not been set.
- couch_event:notify(Name, deleted),
- LogMsg = "~p : according to configuration not deleting source ~p",
- couch_log:warning(LogMsg, [?MODULE, Name])
- end,
- TNames = [TName || #shard{name = TName} <- Targets],
- lists:foreach(fun(TName) -> couch_event:notify(TName, updated) end, TNames).
-
-completed(#job{} = Job) ->
- couch_log:notice("~p : ~p completed, exit normal", [?MODULE, jobfmt(Job)]),
- exit(normal).
-
-% This is for belt and suspenders really. Call periodically to validate the
-% state is one of the expected states.
--spec check_state(#job{}) -> #job{} | no_return().
-check_state(#job{split_state = State} = Job) ->
- case lists:member(State, ?SPLIT_STATES) of
- true ->
- Job;
- false ->
- erlang:error({invalid_shard_split_state, State, Job})
- end.
-
-create_artificial_mem3_rep_checkpoints(#job{} = Job, Seq) ->
- #job{source = Source = #shard{name = SourceName}, target = Targets} = Job,
- check_source_exists(Source, initial_copy),
- TNames = [TN || #shard{name = TN} <- Targets],
- Timestamp = list_to_binary(mem3_util:iso8601_timestamp()),
- couch_util:with_db(SourceName, fun(SDb) ->
- [
- couch_util:with_db(TName, fun(TDb) ->
- Doc = mem3_rep_checkpoint_doc(SDb, TDb, Timestamp, Seq),
- {ok, _} = couch_db:update_doc(SDb, Doc, []),
- {ok, _} = couch_db:update_doc(TDb, Doc, []),
- ok
- end)
- || TName <- TNames
- ]
- end),
- ok.
-
-mem3_rep_checkpoint_doc(SourceDb, TargetDb, Timestamp, Seq) ->
- Node = atom_to_binary(node(), utf8),
- SourceUUID = couch_db:get_uuid(SourceDb),
- TargetUUID = couch_db:get_uuid(TargetDb),
- History =
- {[
- {<<"source_node">>, Node},
- {<<"source_uuid">>, SourceUUID},
- {<<"source_seq">>, Seq},
- {<<"timestamp">>, Timestamp},
- {<<"target_node">>, Node},
- {<<"target_uuid">>, TargetUUID},
- {<<"target_seq">>, Seq}
- ]},
- Body =
- {[
- {<<"seq">>, Seq},
- {<<"target_uuid">>, TargetUUID},
- {<<"history">>, {[{Node, [History]}]}}
- ]},
- Id = mem3_rep:make_local_id(SourceUUID, TargetUUID),
- #doc{id = Id, body = Body}.
-
-check_source_exists(#shard{name = Name}, StateName) ->
- case couch_server:exists(Name) of
- true ->
- ok;
- false ->
- ErrMsg = "~p source ~p is unexpectedly missing in ~p",
- couch_log:error(ErrMsg, [?MODULE, Name, StateName]),
- exit({error, missing_source})
- end.
-
-check_targets_exist(Targets, StateName) ->
- lists:foreach(
- fun(#shard{name = Name}) ->
- case couch_server:exists(Name) of
- true ->
- ok;
- false ->
- ErrMsg = "~p target ~p is unexpectedly missing in ~p",
- couch_log:error(ErrMsg, [?MODULE, Name, StateName]),
- exit({error, missing_target})
- end
- end,
- Targets
- ).
-
--spec max_retries() -> integer().
-max_retries() ->
- config:get_integer("reshard", "max_retries", 5).
-
--spec retry_interval_sec() -> integer().
-retry_interval_sec() ->
- config:get_integer("reshard", "retry_interval_sec", 10).
-
--spec update_shard_map_timeout_sec() -> integer().
-update_shard_map_timeout_sec() ->
- config:get_integer("reshard", "update_shardmap_timeout_sec", 60).
-
--spec info_update(atom(), any(), [tuple()]) -> [tuple()].
-info_update(Key, Val, StateInfo) ->
- lists:keystore(Key, 1, StateInfo, {Key, Val}).
-
--spec info_delete(atom(), [tuple()]) -> [tuple()].
-info_delete(Key, StateInfo) ->
- lists:keydelete(Key, 1, StateInfo).
-
--spec shardsstr(#shard{}, #shard{} | [#shard{}]) -> string().
-shardsstr(#shard{name = SourceName}, #shard{name = TargetName}) ->
- lists:flatten(io_lib:format("~s -> ~s", [SourceName, TargetName]));
-shardsstr(#shard{name = SourceName}, Targets) ->
- TNames = [TN || #shard{name = TN} <- Targets],
- TargetsStr = string:join([binary_to_list(T) || T <- TNames], ","),
- lists:flatten(io_lib:format("~s -> ~s", [SourceName, TargetsStr])).
-
--spec reset_target(#job{}) -> #job{}.
-reset_target(#job{source = Source, target = Targets} = Job) ->
- ShardNames =
- try
- [N || #shard{name = N} <- mem3:local_shards(mem3:dbname(Source))]
- catch
- error:database_does_not_exist ->
- []
- end,
- lists:map(
- fun(#shard{name = Name}) ->
- case {couch_server:exists(Name), lists:member(Name, ShardNames)} of
- {_, true} ->
-                    % Should never get here, but if we do, crash and don't continue
- LogMsg = "~p : ~p target unexpectedly found in shard map ~p",
- couch_log:error(LogMsg, [?MODULE, jobfmt(Job), Name]),
- erlang:error({target_present_in_shard_map, Name});
- {true, false} ->
- LogMsg = "~p : ~p resetting ~p target",
- couch_log:warning(LogMsg, [?MODULE, jobfmt(Job), Name]),
- couch_db_split:cleanup_target(Source#shard.name, Name);
- {false, false} ->
- ok
- end
- end,
- Targets
- ),
- Job.
-
--spec update_split_history(#job{}) -> #job{}.
-update_split_history(#job{split_state = St, update_time = Ts} = Job) ->
- Hist = Job#job.history,
- JobSt =
- case St of
- completed -> completed;
- failed -> failed;
- new -> new;
- stopped -> stopped;
- _ -> running
- end,
- Job#job{history = mem3_reshard:update_history(JobSt, St, Ts, Hist)}.
diff --git a/src/mem3/src/mem3_reshard_job_sup.erl b/src/mem3/src/mem3_reshard_job_sup.erl
deleted file mode 100644
index e98cb5a08..000000000
--- a/src/mem3/src/mem3_reshard_job_sup.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_job_sup).
-
--behaviour(supervisor).
-
--export([
- start_link/0,
- start_child/1,
- terminate_child/1,
- count_children/0,
- init/1
-]).
-
--include("mem3_reshard.hrl").
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-start_child(Job) ->
- supervisor:start_child(?MODULE, [Job]).
-
-terminate_child(Pid) ->
- supervisor:terminate_child(?MODULE, Pid).
-
-count_children() ->
- Props = supervisor:count_children(?MODULE),
- proplists:get_value(active, Props).
-
-init(_Args) ->
- Children = [
- {mem3_reshard_job, {mem3_reshard_job, start_link, []}, temporary, 60000, worker, [
- mem3_reshard_job
- ]}
- ],
- {ok, {{simple_one_for_one, 10, 3}, Children}}.
diff --git a/src/mem3/src/mem3_reshard_store.erl b/src/mem3/src/mem3_reshard_store.erl
deleted file mode 100644
index 140cc5bd7..000000000
--- a/src/mem3/src/mem3_reshard_store.erl
+++ /dev/null
@@ -1,269 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_store).
-
--export([
- init/3,
-
- store_job/2,
- load_job/2,
- delete_job/2,
- get_jobs/1,
-
- store_state/1,
- load_state/2,
- % for debugging
- delete_state/1,
-
- job_to_ejson_props/2,
- state_to_ejson_props/1
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include("mem3_reshard.hrl").
-
--spec init(#state{}, binary(), binary()) -> #state{}.
-init(#state{} = State, JobPrefix, StateDocId) ->
- State#state{
- job_prefix = <<?LOCAL_DOC_PREFIX, JobPrefix/binary>>,
- state_id = <<?LOCAL_DOC_PREFIX, StateDocId/binary>>
- }.
-
--spec store_job(#state{}, #job{}) -> ok.
-store_job(#state{job_prefix = Prefix}, #job{id = Id} = Job) ->
- with_shards_db(fun(Db) ->
- DocId = <<Prefix/binary, Id/binary>>,
- ok = update_doc(Db, DocId, job_to_ejson_props(Job))
- end).
-
--spec load_job(#state{}, binary()) -> {ok, #job{}} | not_found.
-load_job(#state{job_prefix = Prefix}, Id) ->
- with_shards_db(fun(Db) ->
- case load_doc(Db, <<Prefix/binary, Id/binary>>) of
- {ok, DocBody} ->
- {ok, job_from_ejson(DocBody)};
- not_found ->
- not_found
- end
- end).
-
--spec delete_job(#state{}, binary()) -> ok.
-delete_job(#state{job_prefix = Prefix}, Id) ->
- with_shards_db(fun(Db) ->
- DocId = <<Prefix/binary, Id/binary>>,
- ok = delete_doc(Db, DocId)
- end).
-
--spec get_jobs(#state{}) -> [#job{}].
-get_jobs(#state{job_prefix = Prefix}) ->
- with_shards_db(fun(Db) ->
- PrefixLen = byte_size(Prefix),
- FoldFun = fun(#doc{id = Id, body = Body}, Acc) ->
- case Id of
- <<Prefix:PrefixLen/binary, _/binary>> ->
- {ok, [job_from_ejson(Body) | Acc]};
- _ ->
- {stop, Acc}
- end
- end,
- Opts = [{start_key, Prefix}],
- {ok, Jobs} = couch_db:fold_local_docs(Db, FoldFun, [], Opts),
- lists:reverse(Jobs)
- end).
-
--spec store_state(#state{}) -> ok.
-store_state(#state{state_id = DocId} = State) ->
- with_shards_db(fun(Db) ->
- ok = update_doc(Db, DocId, state_to_ejson_props(State))
- end).
-
--spec load_state(#state{}, atom()) -> #state{}.
-load_state(#state{state_id = DocId} = State, Default) ->
- with_shards_db(fun(Db) ->
- case load_doc(Db, DocId) of
- {ok, DocBody} ->
- state_from_ejson(State, DocBody);
- not_found ->
- State#state{state = Default}
- end
- end).
-
--spec delete_state(#state{}) -> ok.
-delete_state(#state{state_id = DocId}) ->
- with_shards_db(fun(Db) ->
- ok = delete_doc(Db, DocId)
- end).
-
-job_to_ejson_props(#job{source = Source, target = Targets} = Job, Opts) ->
- Iso8601 = proplists:get_value(iso8601, Opts),
- History = history_to_ejson(Job#job.history, Iso8601),
- StartTime =
- case Iso8601 of
- true -> iso8601(Job#job.start_time);
- _ -> Job#job.start_time
- end,
- UpdateTime =
- case Iso8601 of
- true -> iso8601(Job#job.update_time);
- _ -> Job#job.update_time
- end,
- [
- {id, Job#job.id},
- {type, Job#job.type},
- {source, Source#shard.name},
- {target, [T#shard.name || T <- Targets]},
- {job_state, Job#job.job_state},
- {split_state, Job#job.split_state},
- {state_info, state_info_to_ejson(Job#job.state_info)},
- {node, atom_to_binary(Job#job.node, utf8)},
- {start_time, StartTime},
- {update_time, UpdateTime},
- {history, History}
- ].
-
-state_to_ejson_props(#state{} = State) ->
- [
- {state, atom_to_binary(State#state.state, utf8)},
- {state_info, state_info_to_ejson(State#state.state_info)},
- {update_time, State#state.update_time},
- {node, atom_to_binary(State#state.node, utf8)}
- ].
-
-% Private API
-
-with_shards_db(Fun) ->
- DbName = config:get("mem3", "shards_db", "_dbs"),
- case mem3_util:ensure_exists(DbName) of
- {ok, Db} ->
- try
- Fun(Db)
- after
- catch couch_db:close(Db)
- end;
- Else ->
- throw(Else)
- end.
-
-delete_doc(Db, DocId) ->
- case couch_db:open_doc(Db, DocId, []) of
- {ok, #doc{revs = {_, Revs}}} ->
- {ok, _} = couch_db:delete_doc(Db, DocId, Revs),
- ok;
- {not_found, _} ->
- ok
- end.
-
-update_doc(Db, DocId, Body) ->
- DocProps = [{<<"_id">>, DocId}] ++ Body,
- Body1 = ?JSON_DECODE(?JSON_ENCODE({DocProps})),
- BaseDoc = couch_doc:from_json_obj(Body1),
- Doc =
- case couch_db:open_doc(Db, DocId, []) of
- {ok, #doc{revs = Revs}} ->
- BaseDoc#doc{revs = Revs};
- {not_found, _} ->
- BaseDoc
- end,
- case store_state() of
- true ->
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- couch_log:debug("~p updated doc ~p ~p", [?MODULE, DocId, Body]),
- ok;
- false ->
- couch_log:debug("~p not storing state in ~p", [?MODULE, DocId]),
- ok
- end.
-
-load_doc(Db, DocId) ->
- case couch_db:open_doc(Db, DocId, [ejson_body]) of
- {ok, #doc{body = Body}} ->
- couch_log:debug("~p loaded doc ~p ~p", [?MODULE, DocId, Body]),
- {ok, Body};
- {not_found, _} ->
- not_found
- end.
-
-job_to_ejson_props(#job{} = Job) ->
- job_to_ejson_props(Job, []).
-
-job_from_ejson({Props}) ->
- Id = couch_util:get_value(<<"id">>, Props),
- Type = couch_util:get_value(<<"type">>, Props),
- Source = couch_util:get_value(<<"source">>, Props),
- Target = couch_util:get_value(<<"target">>, Props),
- JobState = couch_util:get_value(<<"job_state">>, Props),
- SplitState = couch_util:get_value(<<"split_state">>, Props),
- StateInfo = couch_util:get_value(<<"state_info">>, Props),
- TStarted = couch_util:get_value(<<"start_time">>, Props),
- TUpdated = couch_util:get_value(<<"update_time">>, Props),
- History = couch_util:get_value(<<"history">>, Props),
- #job{
- id = Id,
- type = binary_to_atom(Type, utf8),
- job_state = binary_to_atom(JobState, utf8),
- split_state = binary_to_atom(SplitState, utf8),
- state_info = state_info_from_ejson(StateInfo),
- node = node(),
- start_time = TStarted,
- update_time = TUpdated,
- source = mem3_reshard:shard_from_name(Source),
- target = [mem3_reshard:shard_from_name(T) || T <- Target],
- history = history_from_ejson(History)
- }.
-
-state_from_ejson(#state{} = State, {Props}) ->
- StateVal = couch_util:get_value(<<"state">>, Props),
- StateInfo = couch_util:get_value(<<"state_info">>, Props),
- TUpdated = couch_util:get_value(<<"update_time">>, Props),
- State#state{
- state = binary_to_atom(StateVal, utf8),
- state_info = state_info_from_ejson(StateInfo),
- node = node(),
- update_time = TUpdated
- }.
-
-state_info_from_ejson({Props}) ->
- Props1 = [
- {binary_to_atom(K, utf8), couch_util:to_binary(V)}
- || {K, V} <- Props
- ],
- lists:sort(Props1).
-
-history_to_ejson(Hist, true) when is_list(Hist) ->
- [{[{timestamp, iso8601(T)}, {type, S}, {detail, D}]} || {T, S, D} <- Hist];
-history_to_ejson(Hist, _) when is_list(Hist) ->
- [{[{timestamp, T}, {type, S}, {detail, D}]} || {T, S, D} <- Hist].
-
-history_from_ejson(HistoryEJson) when is_list(HistoryEJson) ->
- lists:map(
- fun({EventProps}) ->
- Timestamp = couch_util:get_value(<<"timestamp">>, EventProps),
- State = couch_util:get_value(<<"type">>, EventProps),
- Detail = couch_util:get_value(<<"detail">>, EventProps),
- {Timestamp, binary_to_atom(State, utf8), Detail}
- end,
- HistoryEJson
- ).
-
-state_info_to_ejson(Props) ->
- {lists:sort([{K, couch_util:to_binary(V)} || {K, V} <- Props])}.
-
-store_state() ->
- config:get_boolean("reshard", "store_state", true).
-
-iso8601(UnixSec) ->
- Mega = UnixSec div 1000000,
- Sec = UnixSec rem 1000000,
- {{Y, M, D}, {H, Min, S}} = calendar:now_to_universal_time({Mega, Sec, 0}),
- Format = "~B-~2..0B-~2..0BT~2..0B:~2..0B:~2..0BZ",
- iolist_to_binary(io_lib:format(Format, [Y, M, D, H, Min, S])).
diff --git a/src/mem3/src/mem3_reshard_sup.erl b/src/mem3/src/mem3_reshard_sup.erl
deleted file mode 100644
index 5a28359fb..000000000
--- a/src/mem3/src/mem3_reshard_sup.erl
+++ /dev/null
@@ -1,36 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_sup).
-
--behaviour(supervisor).
-
--export([
- start_link/0,
- init/1
-]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
- Children = [
- {mem3_reshard_dbdoc, {mem3_reshard_dbdoc, start_link, []}, permanent, infinity, worker, [
- mem3_reshard_dbdoc
- ]},
- {mem3_reshard_job_sup, {mem3_reshard_job_sup, start_link, []}, permanent, infinity,
- supervisor, [mem3_reshard_job_sup]},
- {mem3_reshard, {mem3_reshard, start_link, []}, permanent, brutal_kill, worker, [
- mem3_reshard
- ]}
- ],
- {ok, {{one_for_all, 5, 5}, Children}}.
diff --git a/src/mem3/src/mem3_reshard_validate.erl b/src/mem3/src/mem3_reshard_validate.erl
deleted file mode 100644
index fca1617ce..000000000
--- a/src/mem3/src/mem3_reshard_validate.erl
+++ /dev/null
@@ -1,119 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_validate).
-
--export([
- start_args/2,
- source/1,
- targets/2
-]).
-
--include_lib("mem3/include/mem3.hrl").
-
--spec start_args(#shard{}, any()) -> ok | {error, term()}.
-start_args(Source, Split) ->
- first_error([
- check_split(Split),
- check_range(Source, Split),
- check_node(Source),
- source(Source),
- check_shard_map(Source)
- ]).
-
--spec source(#shard{}) -> ok | {error, term()}.
-source(#shard{name = Name}) ->
- case couch_server:exists(Name) of
- true ->
- ok;
- false ->
- {error, {source_shard_not_found, Name}}
- end.
-
--spec check_shard_map(#shard{}) -> ok | {error, term()}.
-check_shard_map(#shard{name = Name}) ->
- DbName = mem3:dbname(Name),
- AllShards = mem3:shards(DbName),
- case mem3_util:calculate_max_n(AllShards) of
- N when is_integer(N), N >= 1 ->
- ok;
- N when is_integer(N), N < 1 ->
- {error, {not_enough_shard_copies, DbName}}
- end.
-
--spec targets(#shard{}, [#shard{}]) -> ok | {error, term()}.
-targets(#shard{} = Source, Targets) ->
- first_error([
- target_ranges(Source, Targets)
- ]).
-
--spec check_split(any()) -> ok | {error, term()}.
-check_split(Split) when is_integer(Split), Split > 1 ->
- ok;
-check_split(Split) ->
- {error, {invalid_split_parameter, Split}}.
-
--spec check_range(#shard{}, any()) -> ok | {error, term()}.
-check_range(#shard{range = Range = [B, E]}, Split) ->
- case (E + 1 - B) >= Split of
- true ->
- ok;
- false ->
- {error, {shard_range_cannot_be_split, Range, Split}}
- end.
-
--spec check_node(#shard{}) -> ok | {error, term()}.
-check_node(#shard{node = undefined}) ->
- ok;
-check_node(#shard{node = Node}) when Node =:= node() ->
- ok;
-check_node(#shard{node = Node}) ->
- {error, {source_shard_node_is_not_current_node, Node}}.
-
--spec target_ranges(#shard{}, [#shard{}]) -> ok | {error, any()}.
-target_ranges(#shard{range = [Begin, End]}, Targets) ->
- Ranges = [R || #shard{range = R} <- Targets],
- SortFun = fun([B1, _], [B2, _]) -> B1 =< B2 end,
- [First | RestRanges] = lists:sort(SortFun, Ranges),
- try
- TotalRange = lists:foldl(
- fun([B2, E2], [B1, E1]) ->
- case B2 =:= E1 + 1 of
- true ->
- ok;
- false ->
- throw({range_error, {B2, E1}})
- end,
- [B1, E2]
- end,
- First,
- RestRanges
- ),
- case [Begin, End] =:= TotalRange of
- true ->
- ok;
- false ->
- throw({range_error, {[Begin, End], TotalRange}})
- end
- catch
- throw:{range_error, Error} ->
- {error, {shard_range_error, Error}}
- end.
-
--spec first_error([ok | {error, term()}]) -> ok | {error, term()}.
-first_error(Results) ->
- case [Res || Res <- Results, Res =/= ok] of
- [] ->
- ok;
- [FirstError | _] ->
- FirstError
- end.
diff --git a/src/mem3/src/mem3_rpc.erl b/src/mem3/src/mem3_rpc.erl
deleted file mode 100644
index 468bdee21..000000000
--- a/src/mem3/src/mem3_rpc.erl
+++ /dev/null
@@ -1,797 +0,0 @@
-% Copyright 2013 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_rpc).
-
--export([
- find_common_seq/4,
- get_missing_revs/4,
- update_docs/4,
- pull_replication/1,
- load_checkpoint/4,
- load_checkpoint/5,
- save_checkpoint/6,
-
- load_purge_infos/4,
- save_purge_checkpoint/4,
- purge_docs/4,
-
- replicate/4
-]).
-
-% Private RPC callbacks
--export([
- find_common_seq_rpc/3,
- load_checkpoint_rpc/3,
- pull_replication_rpc/1,
- load_checkpoint_rpc/4,
- save_checkpoint_rpc/5,
-
- load_purge_infos_rpc/3,
- save_purge_checkpoint_rpc/3,
-
- replicate_rpc/2
-]).
-
--include("mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(BATCH_SIZE, 1000).
--define(REXI_CALL_TIMEOUT_MSEC, 600000).
-
-% "Pull" is a bit of a misnomer here, as what we're actually doing is
-% issuing an RPC request and telling the remote node to push updates to
-% us. This lets us reuse all of the battle-tested machinery of mem3_rpc.
-pull_replication(Seed) ->
- rexi_call(Seed, {mem3_rpc, pull_replication_rpc, [node()]}).
-
-get_missing_revs(Node, DbName, IdsRevs, Options) ->
- rexi_call(Node, {fabric_rpc, get_missing_revs, [DbName, IdsRevs, Options]}).
-
-update_docs(Node, DbName, Docs, Options) ->
- rexi_call(Node, {fabric_rpc, update_docs, [DbName, Docs, Options]}).
-
-load_checkpoint(Node, DbName, SourceNode, SourceUUID, <<>>) ->
-    % Upgrade clause for mixed clusters whose older nodes don't have
-    % load_checkpoint_rpc/4 yet. FilterHash is currently not used and so
-    % defaults to <<>> everywhere.
- load_checkpoint(Node, DbName, SourceNode, SourceUUID);
-load_checkpoint(Node, DbName, SourceNode, SourceUUID, FilterHash) ->
- Args = [DbName, SourceNode, SourceUUID, FilterHash],
- rexi_call(Node, {mem3_rpc, load_checkpoint_rpc, Args}).
-
-load_checkpoint(Node, DbName, SourceNode, SourceUUID) ->
- Args = [DbName, SourceNode, SourceUUID],
- rexi_call(Node, {mem3_rpc, load_checkpoint_rpc, Args}).
-
-save_checkpoint(Node, DbName, DocId, Seq, Entry, History) ->
- Args = [DbName, DocId, Seq, Entry, History],
- rexi_call(Node, {mem3_rpc, save_checkpoint_rpc, Args}).
-
-find_common_seq(Node, DbName, SourceUUID, SourceEpochs) ->
- Args = [DbName, SourceUUID, SourceEpochs],
- rexi_call(Node, {mem3_rpc, find_common_seq_rpc, Args}).
-
-load_purge_infos(Node, DbName, SourceUUID, Count) ->
- Args = [DbName, SourceUUID, Count],
- rexi_call(Node, {mem3_rpc, load_purge_infos_rpc, Args}).
-
-save_purge_checkpoint(Node, DbName, PurgeDocId, Body) ->
- Args = [DbName, PurgeDocId, Body],
- rexi_call(Node, {mem3_rpc, save_purge_checkpoint_rpc, Args}).
-
-purge_docs(Node, DbName, PurgeInfos, Options) ->
- rexi_call(Node, {fabric_rpc, purge_docs, [DbName, PurgeInfos, Options]}).
-
-replicate(Source, Target, DbName, Timeout) when
- is_atom(Source), is_atom(Target), is_binary(DbName)
-->
- Args = [DbName, Target],
- rexi_call(Source, {mem3_rpc, replicate_rpc, Args}, Timeout).
-
-load_checkpoint_rpc(DbName, SourceNode, SourceUUID) ->
- load_checkpoint_rpc(DbName, SourceNode, SourceUUID, <<>>).
-
-load_checkpoint_rpc(DbName, SourceNode, SourceUUID, FilterHash) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- TargetUUID = couch_db:get_uuid(Db),
- NewId = mem3_rep:make_local_id(SourceUUID, TargetUUID, FilterHash),
- case couch_db:open_doc(Db, NewId, []) of
- {ok, Doc} ->
- rexi:reply({ok, {NewId, Doc}});
- {not_found, _} ->
- OldId = mem3_rep:make_local_id(SourceNode, node()),
- case couch_db:open_doc(Db, OldId, []) of
- {ok, Doc} ->
- rexi:reply({ok, {NewId, Doc}});
- {not_found, _} ->
- rexi:reply({ok, {NewId, #doc{id = NewId}}})
- end
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-save_checkpoint_rpc(DbName, Id, SourceSeq, NewEntry0, History0) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- NewEntry = {
- [
- {<<"target_node">>, atom_to_binary(node(), utf8)},
- {<<"target_uuid">>, couch_db:get_uuid(Db)},
- {<<"target_seq">>, couch_db:get_update_seq(Db)}
- ] ++ NewEntry0
- },
- Body =
- {[
- {<<"seq">>, SourceSeq},
- {<<"target_uuid">>, couch_db:get_uuid(Db)},
- {<<"history">>, add_checkpoint(NewEntry, History0)}
- ]},
- Doc = #doc{id = Id, body = Body},
- rexi:reply(
- try couch_db:update_doc(Db, Doc, []) of
- {ok, _} ->
- {ok, Body};
- Else ->
- {error, Else}
- catch
- Exception ->
- Exception;
- error:Reason ->
- {error, Reason}
- end
- );
- Error ->
- rexi:reply(Error)
- end.
-
-find_common_seq_rpc(DbName, SourceUUID, SourceEpochs) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- case couch_db:get_uuid(Db) of
- SourceUUID ->
- TargetEpochs = couch_db:get_epochs(Db),
- Seq = compare_epochs(SourceEpochs, TargetEpochs),
- rexi:reply({ok, Seq});
- _Else ->
- rexi:reply({ok, 0})
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-pull_replication_rpc(Target) ->
- Dbs = mem3_sync:local_dbs(),
- Opts = [{batch_size, 1000}, {batch_count, 50}],
- Repl = fun(Db) -> {Db, mem3_rep:go(Db, Target, Opts)} end,
- rexi:reply({ok, lists:map(Repl, Dbs)}).
-
-load_purge_infos_rpc(DbName, SrcUUID, BatchSize) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- TgtUUID = couch_db:get_uuid(Db),
- PurgeDocId = mem3_rep:make_purge_id(SrcUUID, TgtUUID),
- StartSeq =
- case couch_db:open_doc(Db, PurgeDocId, []) of
- {ok, #doc{body = {Props}}} ->
- couch_util:get_value(<<"purge_seq">>, Props);
- {not_found, _} ->
- Oldest = couch_db:get_oldest_purge_seq(Db),
- erlang:max(0, Oldest - 1)
- end,
- FoldFun = fun({PSeq, UUID, Id, Revs}, {Count, Infos, _}) ->
- NewCount = Count + length(Revs),
- NewInfos = [{UUID, Id, Revs} | Infos],
- Status =
- if
- NewCount < BatchSize -> ok;
- true -> stop
- end,
- {Status, {NewCount, NewInfos, PSeq}}
- end,
- InitAcc = {0, [], StartSeq},
- {ok, {_, PurgeInfos, ThroughSeq}} =
- couch_db:fold_purge_infos(Db, StartSeq, FoldFun, InitAcc),
- PurgeSeq = couch_db:get_purge_seq(Db),
- Remaining = PurgeSeq - ThroughSeq,
- rexi:reply({ok, {PurgeDocId, PurgeInfos, ThroughSeq, Remaining}});
- Else ->
- rexi:reply(Else)
- end.
-
-save_purge_checkpoint_rpc(DbName, PurgeDocId, Body) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- Doc = #doc{id = PurgeDocId, body = Body},
- Resp =
- try couch_db:update_doc(Db, Doc, []) of
- Resp0 -> Resp0
- catch
- T:R ->
- {T, R}
- end,
- rexi:reply(Resp);
- Error ->
- rexi:reply(Error)
- end.
-
-replicate_rpc(DbName, Target) ->
- rexi:reply(
- try
- Opts = [{batch_size, ?BATCH_SIZE}, {batch_count, all}],
- {ok, mem3_rep:go(DbName, Target, Opts)}
- catch
- Tag:Error ->
- {Tag, Error}
- end
- ).
-
-%% @doc Return the sequence where two files with the same UUID diverged.
-compare_epochs(SourceEpochs, TargetEpochs) ->
- compare_rev_epochs(
- lists:reverse(SourceEpochs),
- lists:reverse(TargetEpochs)
- ).
-
-compare_rev_epochs([{Node, Seq} | SourceRest], [{Node, Seq} | TargetRest]) ->
- % Common history, fast-forward
-    compare_rev_epochs(SourceRest, TargetRest);
-compare_rev_epochs([], [{_, TargetSeq} | _]) ->
- % Source has not moved, start from seq just before the target took over
- TargetSeq - 1;
-compare_rev_epochs([{_, SourceSeq} | _], []) ->
- % Target has not moved, start from seq where source diverged
- SourceSeq;
-compare_rev_epochs([{_, SourceSeq} | _], [{_, TargetSeq} | _]) ->
- % The source was moved to a new location independently, take the minimum
- erlang:min(SourceSeq, TargetSeq) - 1.
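A self-contained sketch of the divergence calculation above, assuming epochs are {Node, Seq} pairs listed newest-first (hence the lists:reverse calls); the module name, node names, and sequence numbers are made up for illustration.

-module(epoch_compare_example).
-export([demo/0]).

%% Same shape as compare_epochs/compare_rev_epochs above: walk the shared
%% history oldest-first, then return the last seq both copies agree on.
compare_epochs(SourceEpochs, TargetEpochs) ->
    compare_rev_epochs(
        lists:reverse(SourceEpochs),
        lists:reverse(TargetEpochs)
    ).

compare_rev_epochs([{Node, Seq} | SRest], [{Node, Seq} | TRest]) ->
    compare_rev_epochs(SRest, TRest);
compare_rev_epochs([], [{_, TargetSeq} | _]) ->
    TargetSeq - 1;
compare_rev_epochs([{_, SourceSeq} | _], []) ->
    SourceSeq;
compare_rev_epochs([{_, SourceSeq} | _], [{_, TargetSeq} | _]) ->
    erlang:min(SourceSeq, TargetSeq) - 1.

demo() ->
    % Shared history through {n1, 10}; the target then took over at seq 25,
    % so replication should restart from 24.
    24 = compare_epochs([{n1, 10}], [{n2, 25}, {n1, 10}]),
    ok.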
-
-%% @doc This adds a new update sequence checkpoint to the replication
-%% history. Checkpoints are keyed by the source node so that we
-%% aren't mixing history between source shard moves.
-add_checkpoint({Props}, {History}) ->
- % Extract the source and target seqs for reference
- SourceSeq = couch_util:get_value(<<"source_seq">>, Props),
- TargetSeq = couch_util:get_value(<<"target_seq">>, Props),
-
- % Get the history relevant to the source node.
- SourceNode = couch_util:get_value(<<"source_node">>, Props),
- SourceHistory = couch_util:get_value(SourceNode, History, []),
-
- % If either the source or target shard has been truncated
- % we need to filter out any history that was stored for
- % any larger update seq than we're currently recording.
- FilteredHistory = filter_history(SourceSeq, TargetSeq, SourceHistory),
-
- % Re-bucket our history based on the most recent source
- % sequence. This is where we drop old checkpoints to
- % maintain the exponential distribution.
- {_, RebucketedHistory} = rebucket(FilteredHistory, SourceSeq, 0),
- NewSourceHistory = [{Props} | RebucketedHistory],
-
- % Finally update the source node history and we're done.
- NodeRemoved = lists:keydelete(SourceNode, 1, History),
- {[{SourceNode, NewSourceHistory} | NodeRemoved]}.
-
-filter_history(SourceSeqThresh, TargetSeqThresh, History) ->
- SourceFilter = fun({Entry}) ->
- SourceSeq = couch_util:get_value(<<"source_seq">>, Entry),
- SourceSeq < SourceSeqThresh
- end,
- TargetFilter = fun({Entry}) ->
- TargetSeq = couch_util:get_value(<<"target_seq">>, Entry),
- TargetSeq < TargetSeqThresh
- end,
- SourceFiltered = lists:filter(SourceFilter, History),
- lists:filter(TargetFilter, SourceFiltered).
-
-%% @doc This function adjusts our history to maintain a
-%% history of checkpoints that follow an exponentially
-%% increasing age from the most recent checkpoint.
-%%
-%% The terms newest and oldest used in these comments
-%% refer to the (NewSeq - CurSeq) difference where smaller
-%% values are considered newer.
-%%
-%% It works by assigning each entry to a bucket and keeping
-%% the newest and oldest entry in each bucket. Keeping
-%% both the newest and oldest means that we won't end up
-%% with empty buckets as checkpoints are promoted to new
-%% buckets.
-%%
-%% The return value of this function is a two-tuple of the
-%% form `{BucketId, History}` where BucketId is the id of
-%% the bucket for the first entry in History. This is used
-%% when recursing to detect the oldest value in a given
-%% bucket.
-%%
-%% This function expects the provided history to be sorted
-%% in descending order of source_seq values.
-rebucket([], _NewSeq, Bucket) ->
- {Bucket + 1, []};
-rebucket([{Entry} | RestHistory], NewSeq, Bucket) ->
- CurSeq = couch_util:get_value(<<"source_seq">>, Entry),
- case find_bucket(NewSeq, CurSeq, Bucket) of
- Bucket ->
- % This entry is in an existing bucket which means
-            % we will only keep it if it's the oldest value
- % in the bucket. To detect this we rebucket the
- % rest of the list and only include Entry if the
- % rest of the list is in a bigger bucket.
- case rebucket(RestHistory, NewSeq, Bucket) of
- {Bucket, NewHistory} ->
- % There's another entry in this bucket so we drop the
- % current entry.
- {Bucket, NewHistory};
- {NextBucket, NewHistory} when NextBucket > Bucket ->
- % The rest of the history was rebucketed into a larger
- % bucket so this is the oldest entry in the current
- % bucket.
- {Bucket, [{Entry} | NewHistory]}
- end;
- NextBucket when NextBucket > Bucket ->
- % This entry is the newest in NextBucket so we add it
- % to our history and continue rebucketing.
- {_, NewHistory} = rebucket(RestHistory, NewSeq, NextBucket),
- {NextBucket, [{Entry} | NewHistory]}
- end.
-
-%% @doc Find the bucket id for the given sequence pair.
-find_bucket(NewSeq, CurSeq, Bucket) ->
- % The +1 constant in this comparison is a bit subtle. The
- % reason for it is to make sure that the first entry in
- % the history is guaranteed to have a BucketId of 1. This
- % also relies on never having a duplicated update
- % sequence so adding 1 here guarantees a difference >= 2.
- if
- (NewSeq - CurSeq + 1) > (2 bsl Bucket) ->
- find_bucket(NewSeq, CurSeq, Bucket + 1);
- true ->
- Bucket
- end.
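A worked example of the bucketing rule above for NewSeq = 16, lining up with the bucket annotations in the tests further down; the module name is made up for illustration.

-module(bucket_example).
-export([demo/0]).

%% Same rule as find_bucket/3: a checkpoint whose age (NewSeq - CurSeq + 1)
%% exceeds 2 bsl Bucket moves to the next bucket, so each bucket spans
%% roughly twice the age range of the previous one.
find_bucket(NewSeq, CurSeq, Bucket) ->
    case (NewSeq - CurSeq + 1) > (2 bsl Bucket) of
        true -> find_bucket(NewSeq, CurSeq, Bucket + 1);
        false -> Bucket
    end.

demo() ->
    NewSeq = 16,
    % Returns [{15,0}, {14,1}, {13,1}, {12,2}, {9,2}, {8,3}, {1,3}]
    [{CurSeq, find_bucket(NewSeq, CurSeq, 0)} || CurSeq <- [15, 14, 13, 12, 9, 8, 1]].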
-
-rexi_call(Node, MFA) ->
- rexi_call(Node, MFA, ?REXI_CALL_TIMEOUT_MSEC).
-
-rexi_call(Node, MFA, Timeout) ->
- Mon = rexi_monitor:start([rexi_utils:server_pid(Node)]),
- Ref = rexi:cast(Node, self(), MFA, [sync]),
- try
- receive
- {Ref, {ok, Reply}} ->
- Reply;
- {Ref, Error} ->
- erlang:error(Error);
- {rexi_DOWN, Mon, _, Reason} ->
- erlang:error({rexi_DOWN, {Node, Reason}})
- after Timeout ->
- erlang:error(timeout)
- end
- after
- rexi_monitor:stop(Mon)
- end.
-
-get_or_create_db(DbName, Options) ->
- mem3_util:get_or_create_db_int(DbName, Options).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
--define(SNODE, <<"src@localhost">>).
--define(TNODE, <<"tgt@localhost">>).
--define(SNODE_KV, {<<"source_node">>, ?SNODE}).
--define(TNODE_KV, {<<"target_node">>, ?TNODE}).
--define(SSEQ, <<"source_seq">>).
--define(TSEQ, <<"target_seq">>).
--define(ENTRY(S, T), {[?SNODE_KV, {?SSEQ, S}, ?TNODE_KV, {?TSEQ, T}]}).
-
-filter_history_data() ->
- [
- ?ENTRY(13, 15),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ].
-
-filter_history_remove_none_test() ->
- ?assertEqual(filter_history(20, 20, filter_history_data()), [
- ?ENTRY(13, 15),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]).
-
-filter_history_remove_all_test() ->
- ?assertEqual(filter_history(1, 1, filter_history_data()), []).
-
-filter_history_remove_equal_test() ->
- ?assertEqual(filter_history(10, 10, filter_history_data()), [
- ?ENTRY(2, 3)
- ]),
- ?assertEqual(filter_history(11, 9, filter_history_data()), [
- ?ENTRY(2, 3)
- ]).
-
-filter_history_remove_for_source_and_target_test() ->
- ?assertEqual(filter_history(11, 20, filter_history_data()), [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]),
- ?assertEqual(filter_history(14, 14, filter_history_data()), [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]).
-
-filter_history_remove_for_both_test() ->
- ?assertEqual(filter_history(11, 11, filter_history_data()), [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]).
-
-filter_history_remove_for_both_again_test() ->
- ?assertEqual(filter_history(3, 4, filter_history_data()), [
- ?ENTRY(2, 3)
- ]).
-
-add_first_checkpoint_test() ->
- History = {[]},
- ?assertEqual(
- add_checkpoint(?ENTRY(2, 3), History),
- {[
- {?SNODE, [
- ?ENTRY(2, 3)
- ]}
- ]}
- ).
-
-add_first_checkpoint_to_empty_test() ->
- History = {[{?SNODE, []}]},
- ?assertEqual(
- add_checkpoint(?ENTRY(2, 3), History),
- {[
- {?SNODE, [
- ?ENTRY(2, 3)
- ]}
- ]}
- ).
-
-add_second_checkpoint_test() ->
- History = {[{?SNODE, [?ENTRY(2, 3)]}]},
- ?assertEqual(
- add_checkpoint(?ENTRY(10, 9), History),
- {[
- {?SNODE, [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}
- ).
-
-add_third_checkpoint_test() ->
- History =
- {[
- {?SNODE, [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]},
- ?assertEqual(
- add_checkpoint(?ENTRY(11, 10), History),
- {[
- {?SNODE, [
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}
- ).
-
-add_fourth_checkpoint_test() ->
- History =
- {[
- {?SNODE, [
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]},
- ?assertEqual(
- add_checkpoint(?ENTRY(12, 13), History),
- {[
- {?SNODE, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}
- ).
-
-add_checkpoint_with_replacement_test() ->
- History =
- {[
- {?SNODE, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]},
- % Picking a source_seq of 16 to force 10, 11, and 12
- % into the same bucket to show we drop the 11 entry.
- ?assertEqual(
- add_checkpoint(?ENTRY(16, 16), History),
- {[
- {?SNODE, [
- ?ENTRY(16, 16),
- ?ENTRY(12, 13),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}
- ).
-
-add_checkpoint_drops_redundant_checkpoints_test() ->
- % I've added comments showing the bucket ID based
- % on the ?ENTRY passed to add_checkpoint
- History =
- {[
- {?SNODE, [
- % Bucket 0
- ?ENTRY(15, 15),
- % Bucket 1
- ?ENTRY(14, 14),
- % Bucket 1
- ?ENTRY(13, 13),
- % Bucket 2
- ?ENTRY(12, 12),
- % Bucket 2
- ?ENTRY(11, 11),
- % Bucket 2
- ?ENTRY(10, 10),
- % Bucket 2
- ?ENTRY(9, 9),
- % Bucket 3
- ?ENTRY(8, 8),
- % Bucket 3
- ?ENTRY(7, 7),
- % Bucket 3
- ?ENTRY(6, 6),
- % Bucket 3
- ?ENTRY(5, 5),
- % Bucket 3
- ?ENTRY(4, 4),
- % Bucket 3
- ?ENTRY(3, 3),
- % Bucket 3
- ?ENTRY(2, 2),
- % Bucket 3
- ?ENTRY(1, 1)
- ]}
- ]},
- ?assertEqual(
- add_checkpoint(?ENTRY(16, 16), History),
- {[
- {?SNODE, [
- % Bucket 0
- ?ENTRY(16, 16),
- % Bucket 0
- ?ENTRY(15, 15),
- % Bucket 1
- ?ENTRY(14, 14),
- % Bucket 1
- ?ENTRY(13, 13),
- % Bucket 2
- ?ENTRY(12, 12),
- % Bucket 2
- ?ENTRY(9, 9),
- % Bucket 3
- ?ENTRY(8, 8),
- % Bucket 3
- ?ENTRY(1, 1)
- ]}
- ]}
- ).
-
-add_checkpoint_show_not_always_a_drop_test() ->
- % Depending on the edge conditions of buckets we
- % may not always drop values when adding new
- % checkpoints. In this case 12 stays because there's
- % no longer a value for 10 or 11.
- %
- % I've added comments showing the bucket ID based
- % on the ?ENTRY passed to add_checkpoint
- History =
- {[
- {?SNODE, [
- % Bucket 0
- ?ENTRY(16, 16),
- % Bucket 1
- ?ENTRY(15, 15),
- % Bucket 1
- ?ENTRY(14, 14),
- % Bucket 2
- ?ENTRY(13, 13),
- % Bucket 2
- ?ENTRY(12, 12),
- % Bucket 3
- ?ENTRY(9, 9),
- % Bucket 3
- ?ENTRY(8, 8),
- % Bucket 4
- ?ENTRY(1, 1)
- ]}
- ]},
- ?assertEqual(
- add_checkpoint(?ENTRY(17, 17), History),
- {[
- {?SNODE, [
- % Bucket 0
- ?ENTRY(17, 17),
- % Bucket 0
- ?ENTRY(16, 16),
- % Bucket 1
- ?ENTRY(15, 15),
- % Bucket 1
- ?ENTRY(14, 14),
- % Bucket 2
- ?ENTRY(13, 13),
- % Bucket 2
- ?ENTRY(12, 12),
- % Bucket 3
- ?ENTRY(9, 9),
- % Bucket 3
- ?ENTRY(8, 8),
- % Bucket 4
- ?ENTRY(1, 1)
- ]}
- ]}
- ).
-
-add_checkpoint_big_jump_show_lots_drop_test() ->
- % I've added comments showing the bucket ID based
- % on the ?ENTRY passed to add_checkpoint
- History =
- {[
- {?SNODE, [
- % Bucket 4
- ?ENTRY(16, 16),
- % Bucket 4
- ?ENTRY(15, 15),
- % Bucket 4
- ?ENTRY(14, 14),
- % Bucket 4
- ?ENTRY(13, 13),
- % Bucket 4
- ?ENTRY(12, 12),
- % Bucket 4
- ?ENTRY(9, 9),
- % Bucket 4
- ?ENTRY(8, 8),
- % Bucket 4
- ?ENTRY(1, 1)
- ]}
- ]},
- ?assertEqual(
- add_checkpoint(?ENTRY(32, 32), History),
- {[
- {?SNODE, [
- % Bucket 0
- ?ENTRY(32, 32),
- % Bucket 4
- ?ENTRY(16, 16),
- % Bucket 4
- ?ENTRY(1, 1)
- ]}
- ]}
- ).
-
-add_checkpoint_show_filter_history_test() ->
- History =
- {[
- {?SNODE, [
- ?ENTRY(16, 16),
- ?ENTRY(15, 15),
- ?ENTRY(14, 14),
- ?ENTRY(13, 13),
- ?ENTRY(12, 12),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
- ]}
- ]},
- % Drop for both
- ?assertEqual(
- add_checkpoint(?ENTRY(10, 10), History),
- {[
- {?SNODE, [
- ?ENTRY(10, 10),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
- ]}
- ]}
- ),
-    % Drop for source
- ?assertEqual(
- add_checkpoint(?ENTRY(10, 200), History),
- {[
- {?SNODE, [
- ?ENTRY(10, 200),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
- ]}
- ]}
- ),
- % Drop for target. Obviously a source_seq of 200
-    % will end up dropping the 8 entry.
- ?assertEqual(
- add_checkpoint(?ENTRY(200, 10), History),
- {[
- {?SNODE, [
- ?ENTRY(200, 10),
- ?ENTRY(9, 9),
- ?ENTRY(1, 1)
- ]}
- ]}
- ).
-
-add_checkpoint_from_other_node_test() ->
- History =
- {[
- {<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]},
- % No filtering
- ?assertEqual(
- add_checkpoint(?ENTRY(1, 1), History),
- {[
- {?SNODE, [
- ?ENTRY(1, 1)
- ]},
- {<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}
- ),
- % No dropping
- ?assertEqual(
- add_checkpoint(?ENTRY(200, 200), History),
- {[
- {?SNODE, [
- ?ENTRY(200, 200)
- ]},
- {<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}
- ).
-
--endif.
diff --git a/src/mem3/src/mem3_seeds.erl b/src/mem3/src/mem3_seeds.erl
deleted file mode 100644
index 6d74398e7..000000000
--- a/src/mem3/src/mem3_seeds.erl
+++ /dev/null
@@ -1,176 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_seeds).
--behaviour(gen_server).
-
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- terminate/2
-]).
-
--export([
- start_link/0,
- get_seeds/0,
- get_status/0
-]).
-
--record(st, {
- ready = false,
- seeds = [],
- jobref = nil,
- % nested proplist keyed on node name
- status = []
-}).
-
--define(REPLICATION_INTERVAL, 60000).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_seeds() ->
- case config:get("cluster", "seedlist") of
- undefined ->
- [];
- List ->
- Nodes = string:tokens(List, ","),
- Seeds = [list_to_atom(Node) || Node <- Nodes] -- [node()],
- mem3_util:rotate_list(node(), Seeds)
- end.
-
-get_status() ->
- gen_server:call(?MODULE, get_status).
-
-init([]) ->
- Seeds = get_seeds(),
- InitStatus = [{Seed, {[]}} || Seed <- Seeds],
- State = #st{
- seeds = Seeds,
- ready =
- case Seeds of
- [] -> true;
- _ -> false
- end,
- jobref = start_replication(Seeds),
- status = InitStatus
- },
- {ok, State}.
-
-handle_call(get_status, _From, St) ->
- Status =
- {[
- {status,
- case St#st.ready of
- true -> ok;
- false -> seeding
- end},
- {seeds, {St#st.status}}
- ]},
- {reply, {ok, Status}, St}.
-
-handle_cast(_Msg, St) ->
- {noreply, St}.
-
-handle_info(start_replication, #st{jobref = nil} = St) ->
- JobRef = start_replication(St#st.seeds),
- {noreply, St#st{jobref = JobRef}};
-handle_info({'DOWN', Ref, _, Pid, Output}, #st{jobref = {Pid, Ref}} = St) ->
- {noreply, update_state(St, Output)};
-handle_info(_Msg, St) ->
- {noreply, St}.
-
-terminate(_Reason, _St) ->
- ok.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-% internal functions
-
-start_replication([]) ->
- nil;
-start_replication([Seed | _]) ->
- spawn_monitor(fun() ->
- Reply = mem3_rpc:pull_replication(Seed),
- exit({ok, Reply})
- end).
-
-update_state(State, {ok, Data}) ->
- #st{seeds = [Current | Tail], status = Status} = State,
- Report =
- {[
- {timestamp, list_to_binary(mem3_util:iso8601_timestamp())},
- {last_replication_status, ok},
- format_data(Data)
- ]},
- NewStatus = lists:ukeymerge(1, [{Current, Report}], Status),
- Ready = is_ready(State#st.ready, Data),
- case Ready of
- true ->
- Seeds = Tail ++ [Current],
- Job = nil;
- false ->
- % Try to progress this same seed again
- Seeds = [Current | Tail],
- Job = start_replication([Current | Tail])
- end,
- State#st{
- seeds = Seeds,
- jobref = Job,
- ready = Ready,
- status = NewStatus
- };
-update_state(State, {_Error, _Stack}) ->
- #st{seeds = [Current | Tail], status = Status} = State,
- Report =
- {[
- {timestamp, list_to_binary(mem3_util:iso8601_timestamp())},
- {last_replication_status, error}
- ]},
- NewStatus = lists:ukeymerge(1, [{Current, Report}], Status),
- Seeds = Tail ++ [Current],
- if
- not State#st.ready ->
- erlang:send_after(1000, self(), start_replication);
- true ->
- ok
- end,
- State#st{
- seeds = Seeds,
- jobref = nil,
- status = NewStatus
- }.
-
-is_ready(true, _) ->
- true;
-is_ready(false, Data) ->
- lists:all(fun({_DbName, Pending}) -> Pending =:= {ok, 0} end, Data).
-
-format_data(Data) ->
- Formatted = lists:map(
- fun({DbName, Status}) ->
- case Status of
- {ok, Pending} when is_number(Pending) ->
- {DbName, Pending};
- {error, Tag} ->
- {DbName, list_to_binary(io_lib:format("~p", [Tag]))};
- _Else ->
- {DbName, unknown_error}
- end
- end,
- Data
- ),
- {pending_updates, {Formatted}}.
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
deleted file mode 100644
index 8bbc92411..000000000
--- a/src/mem3/src/mem3_shards.erl
+++ /dev/null
@@ -1,789 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_shards).
--behaviour(gen_server).
--vsn(3).
--behaviour(config_listener).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
--export([handle_config_change/5, handle_config_terminate/3]).
-
--export([start_link/0]).
--export([opts_for_db/1]).
--export([for_db/1, for_db/2, for_docid/2, for_docid/3, get/3, local/1, fold/2]).
--export([for_shard_range/1]).
--export([set_max_size/1]).
--export([get_changes_pid/0]).
-
--record(st, {
- max_size = 25000,
- cur_size = 0,
- changes_pid,
- update_seq,
- write_timeout
-}).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DBS, mem3_dbs).
--define(SHARDS, mem3_shards).
--define(ATIMES, mem3_atimes).
--define(OPENERS, mem3_openers).
--define(RELISTEN_DELAY, 5000).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-opts_for_db(DbName0) ->
- DbName = mem3:dbname(DbName0),
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- case couch_db:open_doc(Db, DbName, [ejson_body]) of
- {ok, #doc{body = {Props}}} ->
- mem3_util:get_shard_opts(Props);
- {not_found, _} ->
- erlang:error(database_does_not_exist, ?b2l(DbName))
- end.
-
-for_db(DbName) ->
- for_db(DbName, []).
-
-for_db(DbName, Options) ->
- Shards =
- try ets:lookup(?SHARDS, DbName) of
- [] ->
- load_shards_from_disk(DbName);
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch
- error:badarg ->
- load_shards_from_disk(DbName)
- end,
- case lists:member(ordered, Options) of
- true -> Shards;
- false -> mem3_util:downcast(Shards)
- end.
-
-for_docid(DbName, DocId) ->
- for_docid(DbName, DocId, []).
-
-for_docid(DbName, DocId, Options) ->
- HashKey = mem3_hash:calculate(DbName, DocId),
- ShardHead = #shard{
- dbname = DbName,
- range = ['$1', '$2'],
- _ = '_'
- },
- OrderedShardHead = #ordered_shard{
- dbname = DbName,
- range = ['$1', '$2'],
- _ = '_'
- },
- Conditions = [{'=<', '$1', HashKey}, {'=<', HashKey, '$2'}],
- ShardSpec = {ShardHead, Conditions, ['$_']},
- OrderedShardSpec = {OrderedShardHead, Conditions, ['$_']},
- Shards =
- try ets:select(?SHARDS, [ShardSpec, OrderedShardSpec]) of
- [] ->
- load_shards_from_disk(DbName, DocId);
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch
- error:badarg ->
- load_shards_from_disk(DbName, DocId)
- end,
- case lists:member(ordered, Options) of
- true -> Shards;
- false -> mem3_util:downcast(Shards)
- end.
-
-for_shard_range(ShardName) ->
- DbName = mem3:dbname(ShardName),
- [B, E] = mem3:range(ShardName),
- ShardHead = #shard{
- dbname = DbName,
- range = ['$1', '$2'],
- _ = '_'
- },
- OrderedShardHead = #ordered_shard{
- dbname = DbName,
- range = ['$1', '$2'],
- _ = '_'
- },
-    % see mem3_util:range_overlap/2 for an explanation of how it works
- Conditions = [{'=<', '$1', E}, {'=<', B, '$2'}],
- ShardSpec = {ShardHead, Conditions, ['$_']},
- OrderedShardSpec = {OrderedShardHead, Conditions, ['$_']},
- Shards =
- try ets:select(?SHARDS, [ShardSpec, OrderedShardSpec]) of
- [] ->
- filter_shards_by_range([B, E], load_shards_from_disk(DbName));
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch
- error:badarg ->
- filter_shards_by_range([B, E], load_shards_from_disk(DbName))
- end,
- mem3_util:downcast(Shards).
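A short sketch of the interval test the two match-spec conditions above appear to encode (mirroring the mem3_util:range_overlap/2 reference); the module name is made up and this is an illustrative assumption, not part of the deleted module.

-module(range_overlap_example).
-export([overlap/2]).

%% Closed ranges [B1, E1] and [B2, E2] overlap exactly when each one starts
%% no later than the other ends, which is what the two '=<' conditions say
%% with '$1'/'$2' bound to the cached shard's range.
overlap([B1, E1], [B2, E2]) ->
    B1 =< E2 andalso B2 =< E1.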
-
-get(DbName, Node, Range) ->
- Res = lists:foldl(
- fun(#shard{node = N, range = R} = S, Acc) ->
- case {N, R} of
- {Node, Range} -> [S | Acc];
- _ -> Acc
- end
- end,
- [],
- for_db(DbName)
- ),
- case Res of
- [] -> {error, not_found};
- [Shard] -> {ok, Shard};
- [_ | _] -> {error, duplicates}
- end.
-
-local(DbName) when is_list(DbName) ->
- local(list_to_binary(DbName));
-local(DbName) ->
- Pred = fun
- (#shard{node = Node}) when Node == node() -> true;
- (_) -> false
- end,
- lists:filter(Pred, for_db(DbName)).
-
-fold(Fun, Acc) ->
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- FAcc = {Db, Fun, Acc},
- try
- {ok, LastAcc} = couch_db:fold_docs(Db, fun fold_fun/2, FAcc),
- {_Db, _UFun, UAcc} = LastAcc,
- UAcc
- after
- couch_db:close(Db)
- end.
-
-set_max_size(Size) when is_integer(Size), Size > 0 ->
- gen_server:call(?MODULE, {set_max_size, Size}).
-
-get_changes_pid() ->
- gen_server:call(?MODULE, get_changes_pid).
-
-handle_config_change("mem3", "shard_cache_size", SizeList, _, _) ->
- Size = list_to_integer(SizeList),
- {ok, gen_server:call(?MODULE, {set_max_size, Size}, infinity)};
-handle_config_change("mem3", "shards_db", _DbName, _, _) ->
- {ok, gen_server:call(?MODULE, shard_db_changed, infinity)};
-handle_config_change("mem3", "shard_write_timeout", Timeout, _, _) ->
-    Timeout1 =
-        try
-            list_to_integer(Timeout)
-        catch
-            _:_ ->
-                1000
-        end,
-    {ok, gen_server:call(?MODULE, {set_write_timeout, Timeout1})};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-init([]) ->
- couch_util:set_mqd_off_heap(?MODULE),
- ets:new(?SHARDS, [
- bag,
- public,
- named_table,
- {keypos, #shard.dbname},
- {read_concurrency, true}
- ]),
- ets:new(?DBS, [set, protected, named_table]),
- ets:new(?ATIMES, [ordered_set, protected, named_table]),
- ets:new(?OPENERS, [bag, public, named_table]),
- ok = config:listen_for_changes(?MODULE, nil),
- SizeList = config:get("mem3", "shard_cache_size", "25000"),
- WriteTimeout = config:get_integer("mem3", "shard_write_timeout", 1000),
- UpdateSeq = get_update_seq(),
- {ok, #st{
- max_size = list_to_integer(SizeList),
- cur_size = 0,
- changes_pid = start_changes_listener(UpdateSeq),
- update_seq = UpdateSeq,
- write_timeout = WriteTimeout
- }}.
-
-handle_call({set_max_size, Size}, _From, St) ->
- {reply, ok, cache_free(St#st{max_size = Size})};
-handle_call(shard_db_changed, _From, St) ->
- exit(St#st.changes_pid, shard_db_changed),
- {reply, ok, St};
-handle_call({set_write_timeout, Timeout}, _From, St) ->
- {reply, ok, St#st{write_timeout = Timeout}};
-handle_call(get_changes_pid, _From, St) ->
- {reply, {ok, St#st.changes_pid}, St};
-handle_call(_Call, _From, St) ->
- {noreply, St}.
-
-handle_cast({cache_hit, DbName}, St) ->
- couch_stats:increment_counter([mem3, shard_cache, hit]),
- cache_hit(DbName),
- {noreply, St};
-handle_cast({cache_insert, DbName, Writer, UpdateSeq}, St) ->
- % This comparison correctly uses the `<` operator
- % and not `=<`. The easiest way to understand why is
- % to think of when a _dbs db doesn't change. If it used
- % `=<` it would be impossible to insert anything into
- % the cache.
- NewSt =
- case UpdateSeq < St#st.update_seq of
- true ->
- Writer ! cancel,
- St;
- false ->
- cache_free(cache_insert(St, DbName, Writer, St#st.write_timeout))
- end,
- {noreply, NewSt};
-handle_cast({cache_remove, DbName}, St) ->
- couch_stats:increment_counter([mem3, shard_cache, eviction]),
- {noreply, cache_remove(St, DbName)};
-handle_cast({cache_insert_change, DbName, Writer, UpdateSeq}, St) ->
- Msg = {cache_insert, DbName, Writer, UpdateSeq},
- {noreply, NewSt} = handle_cast(Msg, St),
- {noreply, NewSt#st{update_seq = UpdateSeq}};
-handle_cast({cache_remove_change, DbName, UpdateSeq}, St) ->
- {noreply, NewSt} = handle_cast({cache_remove, DbName}, St),
- {noreply, NewSt#st{update_seq = UpdateSeq}};
-handle_cast(_Msg, St) ->
- {noreply, St}.
-
-handle_info({'DOWN', _, _, Pid, Reason}, #st{changes_pid = Pid} = St) ->
- {NewSt, Seq} =
- case Reason of
- {seq, EndSeq} ->
- {St, EndSeq};
- shard_db_changed ->
- {cache_clear(St), get_update_seq()};
- _ ->
- couch_log:notice("~p changes listener died ~p", [?MODULE, Reason]),
- {St, get_update_seq()}
- end,
- erlang:send_after(5000, self(), {start_listener, Seq}),
- {noreply, NewSt#st{changes_pid = undefined}};
-handle_info({start_listener, Seq}, St) ->
- {noreply, St#st{
- changes_pid = start_changes_listener(Seq)
- }};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(_Msg, St) ->
- {noreply, St}.
-
-terminate(_Reason, #st{changes_pid = Pid}) ->
- exit(Pid, kill),
- ok.
-
-code_change(_OldVsn, #st{} = St, _Extra) ->
- {ok, St}.
-
-%% internal functions
-
-start_changes_listener(SinceSeq) ->
- Self = self(),
- {Pid, _} = erlang:spawn_monitor(fun() ->
- erlang:spawn_link(fun() ->
- Ref = erlang:monitor(process, Self),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- end,
- exit(shutdown)
- end),
- listen_for_changes(SinceSeq)
- end),
- Pid.
-
-fold_fun(#full_doc_info{} = FDI, Acc) ->
- DI = couch_doc:to_doc_info(FDI),
- fold_fun(DI, Acc);
-fold_fun(#doc_info{} = DI, {Db, UFun, UAcc}) ->
- case couch_db:open_doc(Db, DI, [ejson_body, conflicts]) of
- {ok, Doc} ->
- {Props} = Doc#doc.body,
- Shards = mem3_util:build_shards(Doc#doc.id, Props),
- NewUAcc = lists:foldl(UFun, UAcc, Shards),
- {ok, {Db, UFun, NewUAcc}};
- _ ->
- {ok, {Db, UFun, UAcc}}
- end.
-
-get_update_seq() ->
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- Seq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- Seq.
-
-listen_for_changes(Since) ->
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- Args = #changes_args{
- feed = "continuous",
- since = Since,
- heartbeat = true,
- include_docs = true
- },
- ChangesFun = couch_changes:handle_db_changes(Args, Since, Db),
- ChangesFun(fun changes_callback/2).
-
-changes_callback(start, Acc) ->
- {ok, Acc};
-changes_callback({stop, EndSeq}, _) ->
- exit({seq, EndSeq});
-changes_callback({change, {Change}, _}, _) ->
- DbName = couch_util:get_value(<<"id">>, Change),
- Seq = couch_util:get_value(<<"seq">>, Change),
- case DbName of
- <<"_design/", _/binary>> ->
- ok;
- _Else ->
- case mem3_util:is_deleted(Change) of
- true ->
- gen_server:cast(?MODULE, {cache_remove_change, DbName, Seq});
- false ->
- case couch_util:get_value(doc, Change) of
- {error, Reason} ->
- couch_log:error(
- "missing partition table for ~s: ~p",
- [DbName, Reason]
- );
- {Doc} ->
- Shards = mem3_util:build_ordered_shards(DbName, Doc),
- IdleTimeout = config:get_integer(
- "mem3", "writer_idle_timeout", 30000
- ),
- Writer = spawn_shard_writer(DbName, Shards, IdleTimeout),
- ets:insert(?OPENERS, {DbName, Writer}),
- Msg = {cache_insert_change, DbName, Writer, Seq},
- gen_server:cast(?MODULE, Msg),
- [
- create_if_missing(mem3:name(S))
- || S <-
- Shards,
- mem3:node(S) =:= node()
- ]
- end
- end
- end,
- {ok, Seq};
-changes_callback(timeout, _) ->
- ok.
-
-load_shards_from_disk(DbName) when is_binary(DbName) ->
- couch_stats:increment_counter([mem3, shard_cache, miss]),
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- try
- load_shards_from_db(Db, DbName)
- after
- couch_db:close(Db)
- end.
-
-load_shards_from_db(ShardDb, DbName) ->
- case couch_db:open_doc(ShardDb, DbName, [ejson_body]) of
- {ok, #doc{body = {Props}}} ->
- Seq = couch_db:get_update_seq(ShardDb),
- Shards = mem3_util:build_ordered_shards(DbName, Props),
- IdleTimeout = config:get_integer("mem3", "writer_idle_timeout", 30000),
- case maybe_spawn_shard_writer(DbName, Shards, IdleTimeout) of
- Writer when is_pid(Writer) ->
- case ets:insert_new(?OPENERS, {DbName, Writer}) of
- true ->
- Msg = {cache_insert, DbName, Writer, Seq},
- gen_server:cast(?MODULE, Msg);
- false ->
- Writer ! cancel
- end;
- ignore ->
- ok
- end,
- Shards;
- {not_found, _} ->
- erlang:error(database_does_not_exist, ?b2l(DbName))
- end.
-
-load_shards_from_disk(DbName, DocId) ->
- Shards = load_shards_from_disk(DbName),
- HashKey = mem3_hash:calculate(hd(Shards), DocId),
- [S || S <- Shards, in_range(S, HashKey)].
-
-in_range(Shard, HashKey) ->
- [B, E] = mem3:range(Shard),
- B =< HashKey andalso HashKey =< E.
-
-create_if_missing(ShardName) ->
- case couch_server:exists(ShardName) of
- true ->
- ok;
- false ->
- Options = opts_for_db(ShardName),
- case couch_server:create(ShardName, [?ADMIN_CTX] ++ Options) of
- {ok, Db} ->
- couch_db:close(Db);
- Error ->
- couch_log:error(
- "~p tried to create ~s, got ~p",
- [?MODULE, ShardName, Error]
- )
- end
- end.
-
-cache_insert(#st{cur_size = Cur} = St, DbName, Writer, Timeout) ->
- NewATime = couch_util:unique_monotonic_integer(),
- true = ets:delete(?SHARDS, DbName),
- flush_write(DbName, Writer, Timeout),
- case ets:lookup(?DBS, DbName) of
- [{DbName, ATime}] ->
- true = ets:delete(?ATIMES, ATime),
- true = ets:insert(?ATIMES, {NewATime, DbName}),
- true = ets:insert(?DBS, {DbName, NewATime}),
- St;
- [] ->
- true = ets:insert(?ATIMES, {NewATime, DbName}),
- true = ets:insert(?DBS, {DbName, NewATime}),
- St#st{cur_size = Cur + 1}
- end.
-
-cache_remove(#st{cur_size = Cur} = St, DbName) ->
- true = ets:delete(?SHARDS, DbName),
- case ets:lookup(?DBS, DbName) of
- [{DbName, ATime}] ->
- true = ets:delete(?DBS, DbName),
- true = ets:delete(?ATIMES, ATime),
- St#st{cur_size = Cur - 1};
- [] ->
- St
- end.
-
-cache_hit(DbName) ->
- case ets:lookup(?DBS, DbName) of
- [{DbName, ATime}] ->
- NewATime = couch_util:unique_monotonic_integer(),
- true = ets:delete(?ATIMES, ATime),
- true = ets:insert(?ATIMES, {NewATime, DbName}),
- true = ets:insert(?DBS, {DbName, NewATime});
- [] ->
- ok
- end.
-
-cache_free(#st{max_size = Max, cur_size = Cur} = St) when Max =< Cur ->
- ATime = ets:first(?ATIMES),
- [{ATime, DbName}] = ets:lookup(?ATIMES, ATime),
- true = ets:delete(?ATIMES, ATime),
- true = ets:delete(?DBS, DbName),
- true = ets:delete(?SHARDS, DbName),
- cache_free(St#st{cur_size = Cur - 1});
-cache_free(St) ->
- St.
-
-cache_clear(St) ->
- true = ets:delete_all_objects(?DBS),
- true = ets:delete_all_objects(?SHARDS),
- true = ets:delete_all_objects(?ATIMES),
- St#st{cur_size = 0}.
-
-maybe_spawn_shard_writer(DbName, Shards, IdleTimeout) ->
- case ets:member(?OPENERS, DbName) of
- true ->
- ignore;
- false ->
- spawn_shard_writer(DbName, Shards, IdleTimeout)
- end.
-
-spawn_shard_writer(DbName, Shards, IdleTimeout) ->
- erlang:spawn(fun() -> shard_writer(DbName, Shards, IdleTimeout) end).
-
-shard_writer(DbName, Shards, IdleTimeout) ->
- try
- receive
- write ->
- true = ets:insert(?SHARDS, Shards);
- cancel ->
- ok
- after IdleTimeout ->
- ok
- end
- after
- true = ets:delete_object(?OPENERS, {DbName, self()})
- end.
-
-flush_write(DbName, Writer, WriteTimeout) ->
- Ref = erlang:monitor(process, Writer),
- Writer ! write,
- receive
- {'DOWN', Ref, _, _, normal} ->
- ok;
- {'DOWN', Ref, _, _, Error} ->
- erlang:exit({mem3_shards_bad_write, Error})
- after WriteTimeout ->
- erlang:exit({mem3_shards_write_timeout, DbName})
- end.
-
-filter_shards_by_range(Range, Shards) ->
- lists:filter(
- fun
- (#ordered_shard{range = R}) -> mem3_util:range_overlap(Range, R);
- (#shard{range = R}) -> mem3_util:range_overlap(Range, R)
- end,
- Shards
- ).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(DB, <<"eunit_db_name">>).
--define(INFINITY, 99999999).
-
-mem3_shards_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_maybe_spawn_shard_writer_already_exists(),
- t_maybe_spawn_shard_writer_new(),
- t_flush_writer_exists_normal(),
- t_flush_writer_times_out(),
- t_flush_writer_crashes(),
- t_writer_deletes_itself_when_done(),
- t_writer_does_not_delete_other_writers_for_same_shard(),
- t_spawn_writer_in_load_shards_from_db(),
- t_cache_insert_takes_new_update(),
- t_cache_insert_ignores_stale_update_and_kills_worker()
- ]
- }
- }.
-
-setup_all() ->
- ets:new(?SHARDS, [bag, public, named_table, {keypos, #shard.dbname}]),
- ets:new(?OPENERS, [bag, public, named_table]),
- ets:new(?DBS, [set, public, named_table]),
- ets:new(?ATIMES, [ordered_set, public, named_table]),
- meck:expect(config, get, ["mem3", "shards_db", '_'], "_dbs"),
- ok.
-
-teardown_all(_) ->
- meck:unload(),
- ets:delete(?ATIMES),
- ets:delete(?DBS),
- ets:delete(?OPENERS),
- ets:delete(?SHARDS).
-
-setup() ->
- ets:delete_all_objects(?ATIMES),
- ets:delete_all_objects(?DBS),
- ets:delete_all_objects(?OPENERS),
- ets:delete_all_objects(?SHARDS).
-
-teardown(_) ->
- ok.
-
-t_maybe_spawn_shard_writer_already_exists() ->
- ?_test(begin
- ets:insert(?OPENERS, {?DB, self()}),
- Shards = mock_shards(),
- WRes = maybe_spawn_shard_writer(?DB, Shards, ?INFINITY),
- ?assertEqual(ignore, WRes)
- end).
-
-t_maybe_spawn_shard_writer_new() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = maybe_spawn_shard_writer(?DB, Shards, 1000),
- WRef = erlang:monitor(process, WPid),
- ?assert(is_pid(WPid)),
- ?assert(is_process_alive(WPid)),
- WPid ! write,
- ?assertEqual(normal, wait_writer_result(WRef)),
- ?assertEqual(Shards, ets:tab2list(?SHARDS))
- end).
-
-t_flush_writer_exists_normal() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- ?assertEqual(ok, flush_write(?DB, WPid, ?INFINITY)),
- ?assertEqual(Shards, ets:tab2list(?SHARDS))
- end).
-
-t_flush_writer_times_out() ->
- ?_test(begin
- WPid = spawn(fun() ->
- receive
- will_never_receive_this -> ok
- end
- end),
- Error = {mem3_shards_write_timeout, ?DB},
- ?assertExit(Error, flush_write(?DB, WPid, 100)),
- exit(WPid, kill)
- end).
-
-t_flush_writer_crashes() ->
- ?_test(begin
- WPid = spawn(fun() ->
- receive
- write -> exit('kapow!')
- end
- end),
- Error = {mem3_shards_bad_write, 'kapow!'},
- ?assertExit(Error, flush_write(?DB, WPid, 1000))
- end).
-
-t_writer_deletes_itself_when_done() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- WRef = erlang:monitor(process, WPid),
- ets:insert(?OPENERS, {?DB, WPid}),
- WPid ! write,
- ?assertEqual(normal, wait_writer_result(WRef)),
- ?assertEqual(Shards, ets:tab2list(?SHARDS)),
- ?assertEqual([], ets:tab2list(?OPENERS))
- end).
-
-t_writer_does_not_delete_other_writers_for_same_shard() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- WRef = erlang:monitor(process, WPid),
- ets:insert(?OPENERS, {?DB, WPid}),
- % should not be deleted
- ets:insert(?OPENERS, {?DB, self()}),
- WPid ! write,
- ?assertEqual(normal, wait_writer_result(WRef)),
- ?assertEqual(Shards, ets:tab2list(?SHARDS)),
- ?assertEqual(1, ets:info(?OPENERS, size)),
- ?assertEqual([{?DB, self()}], ets:tab2list(?OPENERS))
- end).
-
-t_spawn_writer_in_load_shards_from_db() ->
- ?_test(begin
- meck:expect(couch_db, open_doc, 3, {ok, #doc{body = {[]}}}),
- meck:expect(couch_db, get_update_seq, 1, 1),
- meck:expect(mem3_util, build_ordered_shards, 2, mock_shards()),
- % register to get cache_insert cast
- erlang:register(?MODULE, self()),
- load_shards_from_db(test_util:fake_db([{name, <<"testdb">>}]), ?DB),
- meck:validate(couch_db),
- meck:validate(mem3_util),
- Cast =
- receive
- {'$gen_cast', Msg} -> Msg
- after 1000 ->
- timeout
- end,
- ?assertMatch({cache_insert, ?DB, Pid, 1} when is_pid(Pid), Cast),
- {cache_insert, _, WPid, _} = Cast,
- exit(WPid, kill),
- ?assertEqual([{?DB, WPid}], ets:tab2list(?OPENERS)),
- meck:unload(couch_db),
- meck:unload(mem3_util)
- end).
-
-t_cache_insert_takes_new_update() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- Msg = {cache_insert, ?DB, WPid, 2},
- {noreply, NewState} = handle_cast(Msg, mock_state(1)),
- ?assertMatch(#st{cur_size = 1}, NewState),
- ?assertEqual(Shards, ets:tab2list(?SHARDS)),
- ?assertEqual([], ets:tab2list(?OPENERS))
- end).
-
-t_cache_insert_ignores_stale_update_and_kills_worker() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- WRef = erlang:monitor(process, WPid),
- Msg = {cache_insert, ?DB, WPid, 1},
- {noreply, NewState} = handle_cast(Msg, mock_state(2)),
- ?assertEqual(normal, wait_writer_result(WRef)),
- ?assertMatch(#st{cur_size = 0}, NewState),
- ?assertEqual([], ets:tab2list(?SHARDS)),
- ?assertEqual([], ets:tab2list(?OPENERS))
- end).
-
-mock_state(UpdateSeq) ->
- #st{
- update_seq = UpdateSeq,
- changes_pid = self(),
- write_timeout = 1000
- }.
-
-mock_shards() ->
- [
- #ordered_shard{
- name = <<"testshardname">>,
- node = node(),
- dbname = ?DB,
- range = [0, 1],
- order = 1
- }
- ].
-
-wait_writer_result(WRef) ->
- receive
- {'DOWN', WRef, _, _, Result} ->
- Result
- after 1000 ->
- timeout
- end.
-
-spawn_link_mock_writer(Db, Shards, Timeout) ->
- erlang:spawn_link(fun() -> shard_writer(Db, Shards, Timeout) end).
-
-mem3_shards_changes_test_() ->
- {
- "Test mem3_shards changes listener",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- [
- fun should_kill_changes_listener_on_shutdown/0
- ]
- }
- }.
-
-should_kill_changes_listener_on_shutdown() ->
- {ok, Pid} = ?MODULE:start_link(),
- {ok, ChangesPid} = get_changes_pid(),
- ?assert(is_process_alive(ChangesPid)),
- true = erlang:unlink(Pid),
- true = test_util:stop_sync_throw(
- ChangesPid, fun() -> exit(Pid, shutdown) end, wait_timeout
- ),
- ?assertNot(is_process_alive(ChangesPid)),
- exit(Pid, shutdown).
-
--endif.
diff --git a/src/mem3/src/mem3_sup.erl b/src/mem3/src/mem3_sup.erl
deleted file mode 100644
index a2dc5ba8d..000000000
--- a/src/mem3/src/mem3_sup.erl
+++ /dev/null
@@ -1,41 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
- Children = [
- child(mem3_events),
- child(mem3_nodes),
- child(mem3_seeds),
- % Order important?
- child(mem3_sync_nodes),
- child(mem3_sync),
- child(mem3_shards),
- child(mem3_sync_event_listener),
- child(mem3_reshard_sup)
- ],
- {ok, {{one_for_one, 10, 1}, couch_epi:register_service(mem3_epi, Children)}}.
-
-child(mem3_events) ->
- MFA = {gen_event, start_link, [{local, mem3_events}]},
- {mem3_events, MFA, permanent, 1000, worker, dynamic};
-child(mem3_reshard_sup = Child) ->
- MFA = {Child, start_link, []},
- {Child, MFA, permanent, infinity, supervisor, [Child]};
-child(Child) ->
- {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.
diff --git a/src/mem3/src/mem3_sync.erl b/src/mem3/src/mem3_sync.erl
deleted file mode 100644
index 3d1c18420..000000000
--- a/src/mem3/src/mem3_sync.erl
+++ /dev/null
@@ -1,367 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync).
--behaviour(gen_server).
--vsn(1).
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
--export([
- start_link/0,
- get_active/0,
- get_queue/0,
- push/1, push/2,
- remove_node/1,
- remove_shard/1,
- initial_sync/1,
- get_backlog/0,
- nodes_db/0,
- shards_db/0,
- users_db/0,
- find_next_node/0
-]).
--export([
- local_dbs/0
-]).
-
--import(queue, [in/2, out/1, to_list/1, join/2, from_list/1, is_empty/1]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {
- active = [],
- count = 0,
- limit,
- dict = dict:new(),
- waiting = queue:new()
-}).
-
--record(job, {name, node, count = nil, pid = nil}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_active() ->
- gen_server:call(?MODULE, get_active).
-
-get_queue() ->
- gen_server:call(?MODULE, get_queue).
-
-get_backlog() ->
- gen_server:call(?MODULE, get_backlog).
-
-push(#shard{name = Name}, Target) ->
- push(Name, Target);
-push(Name, #shard{node = Node}) ->
- push(Name, Node);
-push(Name, Node) ->
- push(#job{name = Name, node = Node}).
-
-push(#job{node = Node} = Job) when Node =/= node() ->
- gen_server:cast(?MODULE, {push, Job});
-push(_) ->
- ok.
-
-remove_node(Node) ->
- gen_server:cast(?MODULE, {remove_node, Node}).
-
-remove_shard(Shard) ->
- gen_server:cast(?MODULE, {remove_shard, Shard}).
-
-init([]) ->
- process_flag(trap_exit, true),
- Concurrency = config:get("mem3", "sync_concurrency", "10"),
- gen_event:add_handler(mem3_events, mem3_sync_event, []),
- initial_sync(),
- {ok, #state{limit = list_to_integer(Concurrency)}}.
-
-handle_call({push, Job}, From, State) ->
- handle_cast({push, Job#job{pid = From}}, State);
-handle_call(get_active, _From, State) ->
- {reply, State#state.active, State};
-handle_call(get_queue, _From, State) ->
- {reply, to_list(State#state.waiting), State};
-handle_call(get_backlog, _From, #state{active = A, waiting = WQ} = State) ->
- CA = lists:sum([C || #job{count = C} <- A, is_integer(C)]),
- CW = lists:sum([C || #job{count = C} <- to_list(WQ), is_integer(C)]),
- {reply, CA + CW, State}.
-
-handle_cast({push, DbName, Node}, State) ->
- handle_cast({push, #job{name = DbName, node = Node}}, State);
-handle_cast({push, Job}, #state{count = Count, limit = Limit} = State) when
- Count >= Limit
-->
- {noreply, add_to_queue(State, Job)};
-handle_cast({push, Job}, State) ->
- #state{active = L, count = C} = State,
- #job{name = DbName, node = Node} = Job,
- case is_running(DbName, Node, L) of
- true ->
- {noreply, add_to_queue(State, Job)};
- false ->
- Pid = start_push_replication(Job),
- {noreply, State#state{active = [Job#job{pid = Pid} | L], count = C + 1}}
- end;
-handle_cast({remove_node, Node}, #state{waiting = W0} = State) ->
- {Alive, Dead} = lists:partition(fun(#job{node = N}) -> N =/= Node end, to_list(W0)),
- Dict = remove_entries(State#state.dict, Dead),
- [
- exit(Pid, die_now)
- || #job{node = N, pid = Pid} <- State#state.active,
- N =:= Node
- ],
- {noreply, State#state{dict = Dict, waiting = from_list(Alive)}};
-handle_cast({remove_shard, Shard}, #state{waiting = W0} = State) ->
- {Alive, Dead} = lists:partition(
- fun(#job{name = S}) ->
- S =/= Shard
- end,
- to_list(W0)
- ),
- Dict = remove_entries(State#state.dict, Dead),
- [
- exit(Pid, die_now)
- || #job{name = S, pid = Pid} <- State#state.active,
- S =:= Shard
- ],
- {noreply, State#state{dict = Dict, waiting = from_list(Alive)}}.
-
-handle_info({'EXIT', Active, normal}, State) ->
- handle_replication_exit(State, Active);
-handle_info({'EXIT', Active, die_now}, State) ->
- % we forced this one ourselves, do not retry
- handle_replication_exit(State, Active);
-handle_info({'EXIT', Active, {{not_found, no_db_file}, _Stack}}, State) ->
- % target doesn't exist, do not retry
- handle_replication_exit(State, Active);
-handle_info({'EXIT', Active, Reason}, State) ->
- NewState =
- case lists:keyfind(Active, #job.pid, State#state.active) of
- #job{name = OldDbName, node = OldNode} = Job ->
- couch_log:warning("~s ~s ~s ~w", [?MODULE, OldDbName, OldNode, Reason]),
- case Reason of
- {pending_changes, Count} ->
- maybe_resubmit(State, Job#job{pid = nil, count = Count});
- _ ->
- case mem3:db_is_current(Job#job.name) of
- true ->
- timer:apply_after(5000, ?MODULE, push, [Job#job{pid = nil}]);
- false ->
- % no need to retry (db deleted or recreated)
- ok
- end,
- State
- end;
- false ->
- State
- end,
- handle_replication_exit(NewState, Active);
-handle_info(Msg, State) ->
- couch_log:notice("unexpected msg at replication manager ~p", [Msg]),
- {noreply, State}.
-
-terminate(_Reason, State) ->
- [exit(Pid, shutdown) || #job{pid = Pid} <- State#state.active],
- ok.
-
-code_change(_, #state{waiting = WaitingList} = State, _) when is_list(WaitingList) ->
- {ok, State#state{waiting = from_list(WaitingList)}};
-code_change(_, State, _) ->
- {ok, State}.
-
-maybe_resubmit(State, #job{name = DbName, node = Node} = Job) ->
- case lists:member(DbName, local_dbs()) of
- true ->
- case find_next_node() of
- Node ->
- add_to_queue(State, Job);
- _ ->
- % don't resubmit b/c we have a new replication target
- State
- end;
- false ->
- add_to_queue(State, Job)
- end.
-
-handle_replication_exit(State, Pid) ->
- #state{active = Active, limit = Limit, dict = D, waiting = Waiting} = State,
- Active1 = lists:keydelete(Pid, #job.pid, Active),
- case is_empty(Waiting) of
- true ->
- {noreply, State#state{active = Active1, count = length(Active1)}};
- _ ->
- Count = length(Active1),
- NewState =
- if
- Count < Limit ->
- case next_replication(Active1, Waiting, queue:new()) of
- % all waiting replications are also active
- nil ->
- State#state{active = Active1, count = Count};
- {#job{name = DbName, node = Node} = Job, StillWaiting} ->
- NewPid = start_push_replication(Job),
- State#state{
- active = [Job#job{pid = NewPid} | Active1],
- count = Count + 1,
- dict = dict:erase({DbName, Node}, D),
- waiting = StillWaiting
- }
- end;
- true ->
- State#state{active = Active1, count = Count}
- end,
- {noreply, NewState}
- end.
-
-start_push_replication(#job{name = Name, node = Node, pid = From}) ->
- if
- From =/= nil -> gen_server:reply(From, ok);
- true -> ok
- end,
- spawn_link(fun() ->
- case mem3_rep:go(Name, maybe_redirect(Node)) of
- {ok, Pending} when Pending > 0 ->
- exit({pending_changes, Pending});
- _ ->
- ok
- end
- end).
-
-add_to_queue(State, #job{name = DbName, node = Node, pid = From} = Job) ->
- #state{dict = D, waiting = WQ} = State,
- case dict:is_key({DbName, Node}, D) of
- true ->
- if
- From =/= nil -> gen_server:reply(From, ok);
- true -> ok
- end,
- State;
- false ->
- couch_log:debug("adding ~s -> ~p to mem3_sync queue", [DbName, Node]),
- State#state{
- dict = dict:store({DbName, Node}, ok, D),
- waiting = in(Job, WQ)
- }
- end.
-
-sync_nodes_and_dbs() ->
- Node = find_next_node(),
- [push(Db, Node) || Db <- local_dbs()].
-
-initial_sync() ->
- [net_kernel:connect_node(Node) || Node <- mem3:nodes()],
- mem3_sync_nodes:add(nodes()).
-
-initial_sync(Live) ->
- sync_nodes_and_dbs(),
- Acc = {node(), Live, []},
- {_, _, Shards} = mem3_shards:fold(fun initial_sync_fold/2, Acc),
- submit_replication_tasks(node(), Live, Shards).
-
-initial_sync_fold(#shard{dbname = Db} = Shard, {LocalNode, Live, AccShards}) ->
- case AccShards of
- [#shard{dbname = AccDb} | _] when Db =/= AccDb ->
- submit_replication_tasks(LocalNode, Live, AccShards),
- {LocalNode, Live, [Shard]};
- _ ->
- {LocalNode, Live, [Shard | AccShards]}
- end.
-
-submit_replication_tasks(LocalNode, Live, Shards) ->
- SplitFun = fun(#shard{node = Node}) -> Node =:= LocalNode end,
- {Local, Remote} = lists:partition(SplitFun, Shards),
- lists:foreach(
- fun(#shard{name = ShardName}) ->
- [
- sync_push(ShardName, N)
- || #shard{node = N, name = Name} <- Remote,
- Name =:= ShardName,
- lists:member(N, Live)
- ]
- end,
- Local
- ).
-
-sync_push(ShardName, N) ->
- gen_server:call(mem3_sync, {push, #job{name = ShardName, node = N}}, infinity).
-
-find_next_node() ->
- LiveNodes = [node() | nodes()],
- AllNodes0 = lists:sort(mem3:nodes()),
- AllNodes1 = [X || X <- AllNodes0, lists:member(X, LiveNodes)],
- AllNodes = AllNodes1 ++ [hd(AllNodes1)],
- [_Self, Next | _] = lists:dropwhile(fun(N) -> N =/= node() end, AllNodes),
- Next.
-
-%% @doc Finds the next {DbName,Node} pair in the list of waiting replications
-%% which does not correspond to an already running replication
--spec next_replication([#job{}], queue:queue(_), queue:queue(_)) ->
- {#job{}, queue:queue(_)} | nil.
-next_replication(Active, Waiting, WaitingAndRunning) ->
- case is_empty(Waiting) of
- true ->
- nil;
- false ->
- {{value, #job{name = S, node = N} = Job}, RemQ} = out(Waiting),
- case is_running(S, N, Active) of
- true ->
- next_replication(Active, RemQ, in(Job, WaitingAndRunning));
- false ->
- {Job, join(RemQ, WaitingAndRunning)}
- end
- end.
-
-is_running(DbName, Node, ActiveList) ->
- [] =/= [true || #job{name = S, node = N} <- ActiveList, S =:= DbName, N =:= Node].
-
-remove_entries(Dict, Entries) ->
- lists:foldl(
- fun(#job{name = S, node = N}, D) ->
- dict:erase({S, N}, D)
- end,
- Dict,
- Entries
- ).
-
-local_dbs() ->
- UsersDb = users_db(),
- % users db might not have been created so don't include it unless it exists
- case couch_server:exists(UsersDb) of
- true -> [nodes_db(), shards_db(), UsersDb];
- false -> [nodes_db(), shards_db()]
- end.
-
-nodes_db() ->
- ?l2b(config:get("mem3", "nodes_db", "_nodes")).
-
-shards_db() ->
- ?l2b(config:get("mem3", "shards_db", "_dbs")).
-
-users_db() ->
- ?l2b(config:get("couch_httpd_auth", "authentication_db", "_users")).
-
-maybe_redirect(Node) ->
- case config:get("mem3.redirects", atom_to_list(Node)) of
- undefined ->
- Node;
- Redirect ->
- couch_log:debug("Redirecting push from ~p to ~p", [Node, Redirect]),
- list_to_existing_atom(Redirect)
- end.
diff --git a/src/mem3/src/mem3_sync_event.erl b/src/mem3/src/mem3_sync_event.erl
deleted file mode 100644
index ec6debb45..000000000
--- a/src/mem3/src/mem3_sync_event.erl
+++ /dev/null
@@ -1,89 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_event).
--behaviour(gen_event).
--vsn(1).
-
--export([
- init/1,
- handle_event/2,
- handle_call/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
-init(_) ->
- net_kernel:monitor_nodes(true),
- {ok, nil}.
-
-handle_event({add_node, Node}, State) when Node =/= node() ->
- net_kernel:connect_node(Node),
- mem3_sync_nodes:add([Node]),
- {ok, State};
-handle_event({remove_node, Node}, State) ->
- mem3_sync:remove_node(Node),
- {ok, State};
-handle_event(_Event, State) ->
- {ok, State}.
-
-handle_call(_Request, State) ->
- {ok, ok, State}.
-
-handle_info({nodeup, Node}, State) ->
- Nodes0 = lists:usort([node() | drain_nodeups([Node])]),
- Nodes = lists:filter(fun(N) -> lists:member(N, mem3:nodes()) end, Nodes0),
- wait_for_rexi(Nodes, 5),
- {ok, State};
-handle_info({nodedown, Node}, State) ->
- mem3_sync:remove_node(Node),
- {ok, State};
-handle_info(_Info, State) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-drain_nodeups(Acc) ->
- receive
- {nodeup, Node} ->
- drain_nodeups([Node | Acc])
- after 0 ->
- Acc
- end.
-
-wait_for_rexi([], _Retries) ->
- ok;
-wait_for_rexi(Waiting, Retries) ->
- % Hack around rpc:multicall/4 so that we can
- % be sure which nodes gave which response
- Msg = {call, rexi_server_mon, status, [], group_leader()},
- {Resp, _Bad} = gen_server:multi_call(Waiting, rex, Msg, 1000),
- Up = [N || {N, R} <- Resp, R == ok],
- NotUp = Waiting -- Up,
- case length(Up) > 0 of
- true ->
- mem3_sync_nodes:add(Up);
- false ->
- ok
- end,
- case length(NotUp) > 0 andalso Retries > 0 of
- true ->
- timer:sleep(1000),
- wait_for_rexi(NotUp, Retries - 1);
- false ->
- ok
- end.
diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl
deleted file mode 100644
index a01921f85..000000000
--- a/src/mem3/src/mem3_sync_event_listener.erl
+++ /dev/null
@@ -1,356 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_event_listener).
--behavior(couch_event_listener).
--vsn(1).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_event/3,
- handle_cast/2,
- handle_info/2
-]).
-
--include_lib("mem3/include/mem3.hrl").
-
--ifdef(TEST).
--define(RELISTEN_DELAY, 500).
--else.
--define(RELISTEN_DELAY, 5000).
--endif.
-
--record(state, {
- nodes,
- shards,
- users,
- delay,
- frequency,
- last_push,
- buckets
-}).
-
-%% Calling mem3_sync:push/2 on every update has a measurable performance cost,
-%% so we'd like to coalesce multiple update messages from couch_event into a
-%% single push call. Doing this while ensuring both correctness (i.e., no lost
-%% updates) and an even load profile is somewhat subtle. This implementation
-%% groups updated shards in a list of "buckets" (see bucket_shard/2) and
-%% guarantees that each shard is in no more than one bucket at a time - i.e.,
-%% any update messages received before the shard's current bucket has been
-%% pushed will be ignored - thereby reducing the frequency with which a single
-%% shard will be pushed. mem3_sync:push/2 is called on all shards in the
-%% *oldest* bucket roughly every mem3.sync_frequency milliseconds (see
-%% maybe_push_shards/1) to even out the load on mem3_sync.
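-%%
-%% For illustration, with the default mem3.sync_delay = 5000 and
-%% mem3.sync_frequency = 500 there are 5000 div 500 + 1 = 11 buckets. An
-%% updated shard enters the newest bucket and is pushed only once that bucket
-%% has aged into the oldest position, roughly sync_delay milliseconds later;
-%% any further updates for the shard received in the meantime are coalesced
-%% into that single push.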
-
-start_link() ->
- couch_event_listener:start_link(?MODULE, [], [all_dbs]).
-
-init(_) ->
- ok = subscribe_for_config(),
- Delay = config:get_integer("mem3", "sync_delay", 5000),
- Frequency = config:get_integer("mem3", "sync_frequency", 500),
- Buckets = lists:duplicate(Delay div Frequency + 1, sets:new()),
- St = #state{
- nodes = mem3_sync:nodes_db(),
- shards = mem3_sync:shards_db(),
- users = mem3_sync:users_db(),
- delay = Delay,
- frequency = Frequency,
- buckets = Buckets
- },
- {ok, St}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_event(NodesDb, updated, #state{nodes = NodesDb} = St) ->
- Nodes = mem3:nodes(),
- Live = nodes(),
- [mem3_sync:push(NodesDb, N) || N <- Nodes, lists:member(N, Live)],
- maybe_push_shards(St);
-handle_event(ShardsDb, updated, #state{shards = ShardsDb} = St) ->
- mem3_sync:push(ShardsDb, mem3_sync:find_next_node()),
- maybe_push_shards(St);
-handle_event(UsersDb, updated, #state{users = UsersDb} = St) ->
- mem3_sync:push(UsersDb, mem3_sync:find_next_node()),
- maybe_push_shards(St);
-handle_event(<<"shards/", _/binary>> = ShardName, updated, St) ->
- Buckets = bucket_shard(ShardName, St#state.buckets),
- maybe_push_shards(St#state{buckets = Buckets});
-handle_event(<<"shards/", _:18/binary, _/binary>> = ShardName, deleted, St) ->
- mem3_sync:remove_shard(ShardName),
- maybe_push_shards(St);
-handle_event(_DbName, _Event, St) ->
- maybe_push_shards(St).
-
-handle_cast({set_frequency, Frequency}, St) ->
- #state{delay = Delay, buckets = Buckets0} = St,
- Buckets1 = rebucket_shards(Delay, Frequency, Buckets0),
- maybe_push_shards(St#state{frequency = Frequency, buckets = Buckets1});
-handle_cast({set_delay, Delay}, St) ->
- #state{frequency = Frequency, buckets = Buckets0} = St,
- Buckets1 = rebucket_shards(Delay, Frequency, Buckets0),
- maybe_push_shards(St#state{delay = Delay, buckets = Buckets1});
-handle_cast(Msg, St) ->
- couch_log:notice("unexpected cast to mem3_sync_event_listener: ~p", [Msg]),
- maybe_push_shards(St).
-
-handle_info(timeout, St) ->
- maybe_push_shards(St);
-handle_info({config_change, "mem3", "sync_delay", Value, _}, St) ->
- set_config(set_delay, Value, "ignoring bad value for mem3.sync_delay"),
- maybe_push_shards(St);
-handle_info({config_change, "mem3", "sync_frequency", Value, _}, St) ->
- set_config(set_frequency, Value, "ignoring bad value for mem3.sync_frequency"),
- maybe_push_shards(St);
-handle_info({gen_event_EXIT, _Handler, _Reason}, St) ->
- erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
- maybe_push_shards(St);
-handle_info(restart_config_listener, St) ->
- ok = subscribe_for_config(),
- maybe_push_shards(St);
-handle_info({get_state, Ref, Caller}, St) ->
- Caller ! {Ref, St},
- {ok, St};
-handle_info(Msg, St) ->
- couch_log:notice("unexpected info to mem3_sync_event_listener: ~p", [Msg]),
- maybe_push_shards(St).
-
-set_config(Cmd, Value, Error) ->
- try list_to_integer(Value) of
- IntegerValue ->
- couch_event_listener:cast(self(), {Cmd, IntegerValue})
- catch
- error:badarg ->
- couch_log:warning("~s: ~p", [Error, Value])
- end.
-
-bucket_shard(ShardName, [B | Bs] = Buckets0) ->
- case waiting(ShardName, Buckets0) of
- true -> Buckets0;
- false -> [sets:add_element(ShardName, B) | Bs]
- end.
-
-waiting(_, []) ->
- false;
-waiting(ShardName, [B | Bs]) ->
- case sets:is_element(ShardName, B) of
- true -> true;
- false -> waiting(ShardName, Bs)
- end.
-
-rebucket_shards(Delay, Frequency, Buckets0) ->
- case (Delay div Frequency + 1) - length(Buckets0) of
- 0 ->
- Buckets0;
- N when N < 0 ->
- %% Reduce the number of buckets by merging the last N + 1 together
- {ToMerge, [B | Buckets1]} = lists:split(abs(N), Buckets0),
- [sets:union([B | ToMerge]) | Buckets1];
- M ->
- %% Extend the number of buckets by M
- lists:duplicate(M, sets:new()) ++ Buckets0
- end.
-
-%% To ensure that mem3_sync:push/2 is indeed called with roughly the frequency
-%% specified by #state.frequency, every message callback must return via a call
-%% to maybe_push_shards/1 rather than directly. All timing coordination - i.e.,
-%% calling mem3_sync:push/2 or setting a proper timeout to ensure that pending
-%% messages aren't dropped in case no further messages arrive - is handled here.
-maybe_push_shards(#state{last_push = undefined} = St) ->
- {ok, St#state{last_push = os:timestamp()}, St#state.frequency};
-maybe_push_shards(St) ->
- #state{frequency = Frequency, last_push = LastPush, buckets = Buckets0} = St,
- Now = os:timestamp(),
- Delta = timer:now_diff(Now, LastPush) div 1000,
- case Delta > Frequency of
- true ->
- {Buckets1, [ToPush]} = lists:split(length(Buckets0) - 1, Buckets0),
- Buckets2 = [sets:new() | Buckets1],
- %% There's no sets:map/2!
- sets:fold(
- fun(ShardName, _) -> push_shard(ShardName) end,
- undefined,
- ToPush
- ),
- {ok, St#state{last_push = Now, buckets = Buckets2}, Frequency};
- false ->
- {ok, St, Frequency - Delta}
- end.
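-
-%% For example, with frequency = 500: a callback arriving 300 ms after the
-%% last push returns {ok, St, 200} so the pending oldest bucket still gets
-%% flushed on schedule, while a callback arriving 600 ms after the last push
-%% rotates the buckets, pushes the oldest one and returns a fresh 500 ms
-%% timeout.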
-
-push_shard(ShardName) ->
- try mem3_shards:for_shard_range(ShardName) of
- Shards ->
- Live = nodes(),
- lists:foreach(
- fun(#shard{node = N}) ->
- case lists:member(N, Live) of
- true -> mem3_sync:push(ShardName, N);
- false -> ok
- end
- end,
- Shards
- )
- catch
- error:database_does_not_exist ->
- ok
- end.
-
-subscribe_for_config() ->
- config:subscribe_for_changes([
- {"mem3", "sync_delay"},
- {"mem3", "sync_frequency"}
- ]).
-
--ifdef(TEST).
--include_lib("couch/include/couch_eunit.hrl").
-
-setup_all() ->
- % couch_log used by config app
- ok = meck:expect(couch_log, notice, 2, ok),
- ok = meck:expect(couch_log, warning, 2, ok),
-
- application:start(config),
-
- ok = meck:new(couch_event, [passthrough]),
- ok = meck:expect(couch_event, register_all, ['_'], ok),
-
- ok = meck:new(config_notifier, [passthrough]),
- ok = meck:expect(config_notifier, handle_event, [
- {[{'_', '_', '_', "error", '_'}, '_'], meck:raise(throw, raised_error)},
- {['_', '_'], meck:passthrough()}
- ]).
-
-teardown_all(_) ->
- meck:unload(),
- application:stop(config).
-
-setup() ->
- {ok, Pid} = ?MODULE:start_link(),
- erlang:unlink(Pid),
- wait_config_subscribed(Pid),
- Pid.
-
-teardown(Pid) ->
- exit(Pid, shutdown).
-
-subscribe_for_config_test_() ->
- {
- "Subscribe for configuration changes",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_set_sync_delay/1,
- fun should_set_sync_frequency/1,
- fun should_restart_listener/1,
- fun should_terminate/1
- ]
- }
- }
- }.
-
-should_set_sync_delay(Pid) ->
- ?_test(begin
- config:set("mem3", "sync_delay", "123", false),
- wait_state(Pid, #state.delay, 123),
- ?assertMatch(#state{delay = 123}, get_state(Pid)),
- ok
- end).
-
-should_set_sync_frequency(Pid) ->
- ?_test(begin
- config:set("mem3", "sync_frequency", "456", false),
- wait_state(Pid, #state.frequency, 456),
- ?assertMatch(#state{frequency = 456}, get_state(Pid)),
- ok
- end).
-
-should_restart_listener(_Pid) ->
- ?_test(begin
- meck:reset(config_notifier),
- config:set("mem3", "sync_frequency", "error", false),
-
- meck:wait(config_notifier, subscribe, '_', 1000),
- ok
- end).
-
-should_terminate(Pid) ->
- ?_test(begin
- ?assert(is_process_alive(Pid)),
-
- EventMgr = whereis(config_event),
- EventMgrWasAlive = (catch is_process_alive(EventMgr)),
-
- Ref = erlang:monitor(process, Pid),
-
- RestartFun = fun() -> exit(EventMgr, kill) end,
- {_, _} = test_util:with_process_restart(config_event, RestartFun),
-
- ?assertNot(is_process_alive(EventMgr)),
-
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- after 1000 ->
- ?debugFmt("~n XKCD should_terminate EventMgrWasAlive:~p MsgQueue:~p PInfo:~p ~n", [
- EventMgrWasAlive, process_info(self(), messages), process_info(Pid)
- ]),
- ?assert(false)
- end,
-
- ?assert(is_process_alive(whereis(config_event))),
- ok
- end).
-
-get_state(Pid) ->
- Ref = make_ref(),
- Pid ! {get_state, Ref, self()},
- receive
- {Ref, State} -> State
- after 500 ->
- timeout
- end.
-
-wait_state(Pid, Field, Val) when is_pid(Pid), is_integer(Field) ->
- WaitFun = fun() ->
- case get_state(Pid) of
- #state{} = S when element(Field, S) == Val ->
- true;
- _ ->
- wait
- end
- end,
- test_util:wait(WaitFun).
-
-wait_config_subscribed(Pid) ->
- WaitFun = fun() ->
- Handlers = gen_event:which_handlers(config_event),
- Pids = [Id || {config_notifier, Id} <- Handlers],
- case lists:member(Pid, Pids) of
- true -> true;
- false -> wait
- end
- end,
- test_util:wait(WaitFun).
-
--endif.
diff --git a/src/mem3/src/mem3_sync_nodes.erl b/src/mem3/src/mem3_sync_nodes.erl
deleted file mode 100644
index 43ca8b756..000000000
--- a/src/mem3/src/mem3_sync_nodes.erl
+++ /dev/null
@@ -1,98 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_nodes).
--behaviour(gen_server).
--vsn(1).
-
--export([start_link/0]).
--export([add/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--export([monitor_sync/1]).
-
--record(st, {
- tid
-}).
-
--record(job, {
- nodes,
- pid,
- retry
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-add(Nodes) ->
- gen_server:cast(?MODULE, {add, Nodes}).
-
-init([]) ->
- {ok, #st{
- tid = ets:new(?MODULE, [set, protected, {keypos, #job.nodes}])
- }}.
-
-terminate(_Reason, St) ->
- [exit(Pid, kill) || #job{pid = Pid} <- ets:tab2list(St#st.tid)],
- ok.
-
-handle_call(Msg, _From, St) ->
- {stop, {invalid_call, Msg}, invalid_call, St}.
-
-handle_cast({add, Nodes}, #st{tid = Tid} = St) ->
- case ets:lookup(Tid, Nodes) of
- [] ->
- Pid = start_sync(Nodes),
- ets:insert(Tid, #job{nodes = Nodes, pid = Pid, retry = false});
- [#job{retry = false} = Job] ->
- ets:insert(Tid, Job#job{retry = true});
- _ ->
- ok
- end,
- {noreply, St};
-handle_cast(Msg, St) ->
- {stop, {invalid_cast, Msg}, St}.
-
-handle_info({'DOWN', _, _, _, {sync_done, Nodes}}, #st{tid = Tid} = St) ->
- case ets:lookup(Tid, Nodes) of
- [#job{retry = true} = Job] ->
- Pid = start_sync(Nodes),
- ets:insert(Tid, Job#job{pid = Pid, retry = false});
- _ ->
- ets:delete(Tid, Nodes)
- end,
- {noreply, St};
-handle_info({'DOWN', _, _, _, {sync_error, Nodes}}, #st{tid = Tid} = St) ->
- Pid = start_sync(Nodes),
- ets:insert(Tid, #job{nodes = Nodes, pid = Pid, retry = false}),
- {noreply, St};
-handle_info(Msg, St) ->
- {stop, {invalid_info, Msg}, St}.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-start_sync(Nodes) ->
- {Pid, _} = spawn_monitor(?MODULE, monitor_sync, [Nodes]),
- Pid.
-
-monitor_sync(Nodes) ->
- process_flag(trap_exit, true),
- Pid = spawn_link(mem3_sync, initial_sync, [Nodes]),
- receive
- {'EXIT', Pid, normal} ->
- exit({sync_done, Nodes});
- _ ->
- exit({sync_error, Nodes})
- end.
diff --git a/src/mem3/src/mem3_sync_security.erl b/src/mem3/src/mem3_sync_security.erl
deleted file mode 100644
index fc1726901..000000000
--- a/src/mem3/src/mem3_sync_security.erl
+++ /dev/null
@@ -1,125 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_security).
-
--export([maybe_sync/2, maybe_sync_int/2]).
--export([go/0, go/1]).
-
--include_lib("mem3/include/mem3.hrl").
-
-maybe_sync(#shard{} = Src, #shard{} = Dst) ->
- case is_local(Src#shard.name) of
- false ->
- erlang:spawn(?MODULE, maybe_sync_int, [Src, Dst]);
- true ->
- ok
- end.
-
-maybe_sync_int(#shard{name = Name} = Src, Dst) ->
- DbName = mem3:dbname(Name),
- case fabric:get_all_security(DbName, [{shards, [Src, Dst]}]) of
- {ok, WorkerObjs} ->
- Objs = [Obj || {_Worker, Obj} <- WorkerObjs],
- case length(lists:usort(Objs)) of
- 1 -> ok;
- 2 -> go(DbName)
- end;
- {error, no_majority} ->
- go(DbName);
- Else ->
- Args = [DbName, Else],
- couch_log:error("Error checking security objects for ~s :: ~p", Args)
- end.
-
-go() ->
- {ok, Dbs} = fabric:all_dbs(),
- lists:foreach(fun handle_existing_db/1, Dbs).
-
-go(DbName) when is_binary(DbName) ->
- handle_existing_db(DbName).
-
-handle_existing_db(DbName) ->
- try handle_db(DbName) of
- _ -> ok
- catch
- error:database_does_not_exist ->
- couch_log:error(
- "Db was deleted while getting security"
- " object. DbName: ~p",
- [DbName]
- ),
- ok
- end.
-
-handle_db(DbName) ->
- ShardCount = length(mem3:shards(DbName)),
- case get_all_security(DbName) of
- {ok, SecObjs} ->
- case is_ok(SecObjs, ShardCount) of
- ok ->
- ok;
- {fixable, SecObj} ->
- couch_log:info("Sync security object for ~p: ~p", [DbName, SecObj]),
- case fabric:set_security(DbName, SecObj) of
- ok ->
- ok;
- Error ->
- couch_log:error(
- "Error setting security object in ~p: ~p",
- [DbName, Error]
- )
- end;
- broken ->
- couch_log:error("Bad security object in ~p: ~p", [DbName, SecObjs])
- end;
- Error ->
- couch_log:error("Error getting security objects for ~p: ~p", [
- DbName, Error
- ])
- end.
-
-get_all_security(DbName) ->
- case fabric:get_all_security(DbName) of
- {ok, SecObjs} ->
- SecObjsDict = lists:foldl(
- fun({_, SO}, Acc) ->
- dict:update_counter(SO, 1, Acc)
- end,
- dict:new(),
- SecObjs
- ),
- {ok, dict:to_list(SecObjsDict)};
- Error ->
- Error
- end.
-
-is_ok([_], _) ->
- % One security object is the happy case
- ok;
-is_ok([_, _] = SecObjs0, ShardCount) ->
- % Figure out if we have a simple majority of security objects
- % and if so, use that as the correct value. Otherwise we abort
- % and rely on human intervention.
- {Count, SecObj} = lists:max([{C, O} || {O, C} <- SecObjs0]),
- case Count >= ((ShardCount div 2) + 1) of
- true -> {fixable, SecObj};
- false -> broken
- end;
-is_ok(_, _) ->
- % Anything else requires human intervention
- broken.
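-
-% For example: with ShardCount = 3, two distinct security objects split 2/1
-% yield {fixable, Obj} for the majority object (2 >= 3 div 2 + 1), while a
-% 2/2 split across ShardCount = 4 is broken (2 < 4 div 2 + 1), as is any
-% case with three or more distinct objects.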
-
-is_local(<<"shards/", _/binary>>) ->
- false;
-is_local(_) ->
- true.
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
deleted file mode 100644
index 8547fc071..000000000
--- a/src/mem3/src/mem3_util.erl
+++ /dev/null
@@ -1,755 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_util).
-
--export([
- name_shard/2,
- create_partition_map/5,
- build_shards/2,
- n_val/2,
- q_val/1,
- to_atom/1,
- to_integer/1,
- write_db_doc/1,
- delete_db_doc/1,
- shard_info/1,
- ensure_exists/1,
- open_db_doc/1,
- update_db_doc/1
-]).
--export([get_or_create_db/2, get_or_create_db_int/2]).
--export([is_deleted/1, rotate_list/2]).
--export([get_shard_opts/1, get_engine_opt/1, get_props_opt/1]).
--export([get_shard_props/1, find_dirty_shards/0]).
--export([
- iso8601_timestamp/0,
- live_nodes/0,
- replicate_dbs_to_all_nodes/1,
- replicate_dbs_from_all_nodes/1,
- range_overlap/2,
- get_ring/1,
- get_ring/2,
- get_ring/3,
- get_ring/4,
- non_overlapping_shards/1,
- non_overlapping_shards/3,
- calculate_max_n/1
-]).
-
-%% do not use outside mem3.
--export([build_ordered_shards/2, downcast/1]).
-
--export([create_partition_map/4, name_shard/1]).
--deprecated({create_partition_map, 4, eventually}).
--deprecated({name_shard, 1, eventually}).
-
-% CRC32 space
--define(RINGTOP, 2 bsl 31).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-name_shard(Shard) ->
- name_shard(Shard, "").
-
-name_shard(#shard{dbname = DbName, range = Range} = Shard, Suffix) ->
- Name = make_name(DbName, Range, Suffix),
- Shard#shard{name = ?l2b(Name)};
-name_shard(#ordered_shard{dbname = DbName, range = Range} = Shard, Suffix) ->
- Name = make_name(DbName, Range, Suffix),
- Shard#ordered_shard{name = ?l2b(Name)}.
-
-make_name(DbName, [B, E], Suffix) ->
- [
- "shards/",
- couch_util:to_hex(<<B:32/integer>>),
- "-",
- couch_util:to_hex(<<E:32/integer>>),
- "/",
- DbName,
- Suffix
- ].
-
-create_partition_map(DbName, N, Q, Nodes) ->
- create_partition_map(DbName, N, Q, Nodes, "").
-
-create_partition_map(DbName, N, Q, Nodes, Suffix) when Q > 0 ->
- UniqueShards = make_key_ranges((?RINGTOP) div Q, 0, []),
- Shards0 = lists:flatten([lists:duplicate(N, S) || S <- UniqueShards]),
- Shards1 = attach_nodes(Shards0, [], Nodes, []),
- [name_shard(S#shard{dbname = DbName}, Suffix) || S <- Shards1].
-
-make_key_ranges(I, CurrentPos, Acc) when I > 0, CurrentPos >= ?RINGTOP ->
- Acc;
-make_key_ranges(Increment, Start, Acc) when Increment > 0 ->
- case Start + 2 * Increment of
- X when X > ?RINGTOP ->
- End = ?RINGTOP - 1;
- _ ->
- End = Start + Increment - 1
- end,
- make_key_ranges(Increment, End + 1, [#shard{range = [Start, End]} | Acc]).
-
-attach_nodes([], Acc, _, _) ->
- lists:reverse(Acc);
-attach_nodes(Shards, Acc, [], UsedNodes) ->
- attach_nodes(Shards, Acc, lists:reverse(UsedNodes), []);
-attach_nodes([S | Rest], Acc, [Node | Nodes], UsedNodes) ->
- attach_nodes(Rest, [S#shard{node = Node} | Acc], Nodes, [Node | UsedNodes]).
-
-open_db_doc(DocId) ->
- {ok, Db} = couch_db:open(mem3_sync:shards_db(), [?ADMIN_CTX]),
- try
- couch_db:open_doc(Db, DocId, [ejson_body])
- after
- couch_db:close(Db)
- end.
-
-write_db_doc(Doc) ->
- write_db_doc(mem3_sync:shards_db(), Doc, true).
-
-write_db_doc(DbName, #doc{id = Id, body = Body} = Doc, ShouldMutate) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- try couch_db:open_doc(Db, Id, [ejson_body]) of
- {ok, #doc{body = Body}} ->
- % the doc is already in the desired state, we're done here
- ok;
- {not_found, _} when ShouldMutate ->
- try couch_db:update_doc(Db, Doc, []) of
- {ok, _} ->
- ok
- catch
- conflict ->
- % check to see if this was a replication race or a different edit
- write_db_doc(DbName, Doc, false)
- end;
- _ ->
- % the doc already exists in a different state
- conflict
- after
- couch_db:close(Db)
- end.
-
-update_db_doc(Doc) ->
- update_db_doc(mem3_sync:shards_db(), Doc, true).
-
-update_db_doc(DbName, #doc{id = Id, body = Body} = Doc, ShouldMutate) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- try couch_db:open_doc(Db, Id, [ejson_body]) of
- {ok, #doc{body = Body}} ->
- % the doc is already in the desired state, we're done here
- ok;
- {ok, #doc{body = Body1}} ->
- % the doc has a new body to be written
- {ok, _} = couch_db:update_doc(Db, Doc#doc{body = Body1}, []),
- ok;
- {not_found, _} when ShouldMutate ->
- try couch_db:update_doc(Db, Doc, []) of
- {ok, _} ->
- ok
- catch
- conflict ->
- % check to see if this was a replication race or a different edit
- update_db_doc(DbName, Doc, false)
- end;
- _ ->
- % the doc already exists in a different state
- conflict
- after
- couch_db:close(Db)
- end.
-
-delete_db_doc(DocId) ->
- gen_server:cast(mem3_shards, {cache_remove, DocId}),
- delete_db_doc(mem3_sync:shards_db(), DocId, true).
-
-delete_db_doc(DbName, DocId, ShouldMutate) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- {ok, Revs} = couch_db:open_doc_revs(Db, DocId, all, []),
- try [Doc#doc{deleted = true} || {ok, #doc{deleted = false} = Doc} <- Revs] of
- [] ->
- not_found;
- Docs when ShouldMutate ->
- try couch_db:update_docs(Db, Docs, []) of
- {ok, _} ->
- ok
- catch
- conflict ->
- % check to see if this was a replication race or if leafs survived
- delete_db_doc(DbName, DocId, false)
- end;
- _ ->
- % we have live leafs that we aren't allowed to delete. let's bail
- conflict
- after
- couch_db:close(Db)
- end.
-
-%% Always returns original #shard records.
--spec build_shards(binary(), list()) -> [#shard{}].
-build_shards(DbName, DocProps) ->
- build_shards_by_node(DbName, DocProps).
-
-%% Will return #ordered_shard records if by_node and by_range
-%% are symmetrical, #shard records otherwise.
--spec build_ordered_shards(binary(), list()) ->
- [#shard{}] | [#ordered_shard{}].
-build_ordered_shards(DbName, DocProps) ->
- ByNode = build_shards_by_node(DbName, DocProps),
- ByRange = build_shards_by_range(DbName, DocProps),
- Symmetrical = lists:sort(ByNode) =:= lists:sort(downcast(ByRange)),
- case Symmetrical of
- true -> ByRange;
- false -> ByNode
- end.
-
-build_shards_by_node(DbName, DocProps) ->
- {ByNode} = couch_util:get_value(<<"by_node">>, DocProps, {[]}),
- Suffix = couch_util:get_value(<<"shard_suffix">>, DocProps, ""),
- lists:flatmap(
- fun({Node, Ranges}) ->
- lists:map(
- fun(Range) ->
- [B, E] = string:tokens(?b2l(Range), "-"),
- Beg = httpd_util:hexlist_to_integer(B),
- End = httpd_util:hexlist_to_integer(E),
- name_shard(
- #shard{
- dbname = DbName,
- node = to_atom(Node),
- range = [Beg, End],
- opts = get_shard_opts(DocProps)
- },
- Suffix
- )
- end,
- Ranges
- )
- end,
- ByNode
- ).
-
-build_shards_by_range(DbName, DocProps) ->
- {ByRange} = couch_util:get_value(<<"by_range">>, DocProps, {[]}),
- Suffix = couch_util:get_value(<<"shard_suffix">>, DocProps, ""),
- lists:flatmap(
- fun({Range, Nodes}) ->
- lists:map(
- fun({Node, Order}) ->
- [B, E] = string:tokens(?b2l(Range), "-"),
- Beg = httpd_util:hexlist_to_integer(B),
- End = httpd_util:hexlist_to_integer(E),
- name_shard(
- #ordered_shard{
- dbname = DbName,
- node = to_atom(Node),
- range = [Beg, End],
- order = Order,
- opts = get_shard_opts(DocProps)
- },
- Suffix
- )
- end,
- lists:zip(Nodes, lists:seq(1, length(Nodes)))
- )
- end,
- ByRange
- ).
-
-to_atom(Node) when is_binary(Node) ->
- list_to_atom(binary_to_list(Node));
-to_atom(Node) when is_atom(Node) ->
- Node.
-
-to_integer(N) when is_integer(N) ->
- N;
-to_integer(N) when is_binary(N) ->
- list_to_integer(binary_to_list(N));
-to_integer(N) when is_list(N) ->
- list_to_integer(N).
-
-get_shard_opts(DocProps) ->
- get_engine_opt(DocProps) ++ get_props_opt(DocProps).
-
-get_engine_opt(DocProps) ->
- case couch_util:get_value(<<"engine">>, DocProps) of
- Engine when is_binary(Engine) ->
- [{engine, Engine}];
- _ ->
- []
- end.
-
-get_props_opt(DocProps) ->
- case couch_util:get_value(<<"props">>, DocProps) of
- {Props} when is_list(Props) ->
- [{props, db_props_from_json(Props)}];
- _ ->
- []
- end.
-
-db_props_from_json([]) ->
- [];
-db_props_from_json([{<<"partitioned">>, Value} | Rest]) ->
- [{partitioned, Value} | db_props_from_json(Rest)];
-db_props_from_json([{<<"hash">>, [MBin, FBin, A]} | Rest]) ->
- M = binary_to_existing_atom(MBin, utf8),
- F = binary_to_existing_atom(FBin, utf8),
- [{hash, [M, F, A]} | db_props_from_json(Rest)];
-db_props_from_json([{K, V} | Rest]) ->
- [{K, V} | db_props_from_json(Rest)].
-
-n_val(undefined, NodeCount) ->
- n_val(config:get_integer("cluster", "n", 3), NodeCount);
-n_val(N, NodeCount) when is_list(N) ->
- n_val(list_to_integer(N), NodeCount);
-n_val(N, NodeCount) when is_integer(NodeCount), N > NodeCount ->
- couch_log:error("Request to create N=~p DB but only ~p node(s)", [N, NodeCount]),
- NodeCount;
-n_val(N, _) when N < 1 ->
- 1;
-n_val(N, _) ->
- N.
-
-q_val(Q) when is_list(Q) ->
- q_val(list_to_integer(Q));
-q_val(Q) when Q > 0 ->
- Q;
-q_val(_) ->
- throw({error, invalid_q_value}).
-
-shard_info(DbName) ->
- [
- {n, mem3:n(DbName)},
- {q, length(mem3:shards(DbName)) div mem3:n(DbName)}
- ].
-
-ensure_exists(DbName) when is_list(DbName) ->
- ensure_exists(list_to_binary(DbName));
-ensure_exists(DbName) ->
- Options = [nologifmissing, sys_db, {create_if_missing, true}, ?ADMIN_CTX],
- case couch_db:open(DbName, Options) of
- {ok, Db} ->
- {ok, Db};
- file_exists ->
- couch_db:open(DbName, [sys_db, ?ADMIN_CTX])
- end.
-
-is_deleted(Change) ->
- case couch_util:get_value(<<"deleted">>, Change) of
- undefined ->
- % keep backwards compatibility for a while
- couch_util:get_value(deleted, Change, false);
- Else ->
- Else
- end.
-
-rotate_list(_Key, []) ->
- [];
-rotate_list(Key, List) when not is_binary(Key) ->
- rotate_list(term_to_binary(Key), List);
-rotate_list(Key, List) ->
- {H, T} = lists:split(erlang:crc32(Key) rem length(List), List),
- T ++ H.
-
-downcast(#shard{} = S) ->
- S;
-downcast(#ordered_shard{} = S) ->
- #shard{
- name = S#ordered_shard.name,
- node = S#ordered_shard.node,
- dbname = S#ordered_shard.dbname,
- range = S#ordered_shard.range,
- ref = S#ordered_shard.ref,
- opts = S#ordered_shard.opts
- };
-downcast(Shards) when is_list(Shards) ->
- [downcast(Shard) || Shard <- Shards].
-
-iso8601_timestamp() ->
- {_, _, Micro} = Now = os:timestamp(),
- {{Year, Month, Date}, {Hour, Minute, Second}} = calendar:now_to_datetime(Now),
- Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
- io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
-
-live_nodes() ->
- LiveNodes = [node() | nodes()],
- Mem3Nodes = lists:sort(mem3:nodes()),
- [N || N <- Mem3Nodes, lists:member(N, LiveNodes)].
-
-% Replicate "dbs" db to all nodes. Basically push the changes to all the live
-% mem3:nodes(). Returns only after all current changes have been replicated,
-% which could be a while.
-%
-replicate_dbs_to_all_nodes(Timeout) ->
- DbName = mem3_sync:shards_db(),
- Targets = mem3_util:live_nodes() -- [node()],
- Res = [start_replication(node(), T, DbName, Timeout) || T <- Targets],
- collect_replication_results(Res, Timeout).
-
-% Replicate "dbs" db from all nodes to this node. Basically make an rpc call
-% to all the nodes and have them push their changes to this node. Then monitor
-% them until they are all done.
-%
-replicate_dbs_from_all_nodes(Timeout) ->
- DbName = mem3_sync:shards_db(),
- Sources = mem3_util:live_nodes() -- [node()],
- Res = [start_replication(S, node(), DbName, Timeout) || S <- Sources],
- collect_replication_results(Res, Timeout).
-
-% Spawn and monitor a single replication of a database to a target node.
-% Returns the {Pid, Ref} pair from spawn_monitor/1. This function could be
-% called locally or remotely from mem3_rpc, for instance when replicating
-% other nodes' data to this node.
-%
-start_replication(Source, Target, DbName, Timeout) ->
- spawn_monitor(fun() ->
- case mem3_rpc:replicate(Source, Target, DbName, Timeout) of
- {ok, 0} ->
- exit(ok);
- Other ->
- exit(Other)
- end
- end).
-
-collect_replication_results(Replications, Timeout) ->
- Res = [collect_replication_result(R, Timeout) || R <- Replications],
- case [R || R <- Res, R =/= ok] of
- [] ->
- ok;
- Errors ->
- {error, Errors}
- end.
-
-collect_replication_result({Pid, Ref}, Timeout) when is_pid(Pid) ->
- receive
- {'DOWN', Ref, _, _, Res} ->
- Res
- after Timeout ->
- demonitor(Pid, [flush]),
- exit(Pid, kill),
- {error, {timeout, Timeout, node(Pid)}}
- end;
-collect_replication_result(Error, _) ->
- {error, Error}.
-
-% Consider these cases:
-%
-% A-------B
-%
-% overlap:
-% X--------Y
-% X-Y
-% X-------Y
-% X-------------------Y
-%
-% no overlap:
-% X-Y because A !=< Y
-% X-Y because X !=< B
-%
-range_overlap([A, B], [X, Y]) when
- is_integer(A),
- is_integer(B),
- is_integer(X),
- is_integer(Y),
- A =< B,
- X =< Y
-->
- A =< Y andalso X =< B.
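-
-% For example: range_overlap([0, 10], [5, 20]) is true, while
-% range_overlap([0, 10], [11, 20]) is false because the second range starts
-% after the first one ends.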
-
-non_overlapping_shards(Shards) ->
- {Start, End} = lists:foldl(
- fun(Shard, {Min, Max}) ->
- [B, E] = mem3:range(Shard),
- {min(B, Min), max(E, Max)}
- end,
- {0, ?RING_END},
- Shards
- ),
- non_overlapping_shards(Shards, Start, End).
-
-non_overlapping_shards([], _, _) ->
- [];
-non_overlapping_shards(Shards, Start, End) ->
- Ranges = lists:map(
- fun(Shard) ->
- [B, E] = mem3:range(Shard),
- {B, E}
- end,
- Shards
- ),
- Ring = get_ring(Ranges, fun sort_ranges_fun/2, Start, End),
- lists:filter(
- fun(Shard) ->
- [B, E] = mem3:range(Shard),
- lists:member({B, E}, Ring)
- end,
- Shards
- ).
-
-% Given a list of shards, return the maximum number of copies
-% across all the ranges. If the ring is incomplete it will return 0.
-% If it is an n = 1 database, it should return 1, etc.
-calculate_max_n(Shards) ->
- Ranges = lists:map(
- fun(Shard) ->
- [B, E] = mem3:range(Shard),
- {B, E}
- end,
- Shards
- ),
- calculate_max_n(Ranges, get_ring(Ranges), 0).
-
-calculate_max_n(_Ranges, [], N) ->
- N;
-calculate_max_n(Ranges, Ring, N) ->
- NewRanges = Ranges -- Ring,
- calculate_max_n(NewRanges, get_ring(NewRanges), N + 1).
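-
-% Worked example (illustrative): with two full copies of the ring the ranges
-% can be peeled off twice, so
-%
-%   calculate_max_n([shard(0, ?RING_END), shard(0, ?RING_END)]) -> 2
-%
-% whereas a single shard(1, ?RING_END) leaves a gap at 0, so the ring never
-% completes and the result is 0 (see calculate_max_n_test_/0 below).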
-
-get_ring(Ranges) ->
- get_ring(Ranges, fun sort_ranges_fun/2, 0, ?RING_END).
-
-get_ring(Ranges, SortFun) when is_function(SortFun, 2) ->
- get_ring(Ranges, SortFun, 0, ?RING_END).
-
-get_ring(Ranges, Start, End) when
- is_integer(Start),
- is_integer(End),
- Start >= 0,
- End >= 0,
- Start =< End
-->
- get_ring(Ranges, fun sort_ranges_fun/2, Start, End).
-
-% Build a ring out of a list of possibly overlapping ranges. If a ring cannot
-% be built then [] is returned. Start and End supply a custom range such that
-% only intervals in that range will be considered. SortFun is a custom sorting
-% function to sort intervals before the ring is built. The custom sort function
-% can be used to prioritize how the ring is built, for example, whether to use
-% the shortest ranges first (and thus have more total shards), the longest
-% ranges first, or any other scheme.
-%
-get_ring([], _SortFun, _Start, _End) ->
- [];
-get_ring(Ranges, SortFun, Start, End) when
- is_function(SortFun, 2),
- is_integer(Start),
- is_integer(End),
- Start >= 0,
- End >= 0,
- Start =< End
-->
- Sorted = lists:usort(SortFun, Ranges),
- case get_subring_int(Start, End, Sorted) of
- fail -> [];
- Ring -> Ring
- end.
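-
-% Worked example (illustrative, using an explicit 0..10 range instead of the
-% default 0..?RING_END):
-%
-%   get_ring([{0, 5}, {6, 10}], fun sort_ranges_fun/2, 0, 10) -> [{0, 5}, {6, 10}]
-%   get_ring([{0, 5}, {7, 10}], fun sort_ranges_fun/2, 0, 10) -> []
-%
-% The second call fails to build a ring because of the gap at 6.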
-
-get_subring_int(_, _, []) ->
- fail;
-get_subring_int(Start, EndMax, [{Start, End} = Range | Tail]) ->
- case End =:= EndMax of
- true ->
- [Range];
- false ->
- case get_subring_int(End + 1, EndMax, Tail) of
- fail ->
- get_subring_int(Start, EndMax, Tail);
- Acc ->
- [Range | Acc]
- end
- end;
-get_subring_int(Start1, _, [{Start2, _} | _]) when Start2 > Start1 ->
- % Found a gap, this attempt is done
- fail;
-get_subring_int(Start1, EndMax, [{Start2, _} | Rest]) when Start2 < Start1 ->
- % We've overlapped the head, skip the shard
- get_subring_int(Start1, EndMax, Rest).
-
-% Sort ranges by their starting point; for ranges with the same starting
-% point, the longest range comes first.
-sort_ranges_fun({B, E1}, {B, E2}) ->
- E2 =< E1;
-sort_ranges_fun({B1, _}, {B2, _}) ->
- B1 =< B2.
-
-add_db_config_options(DbName, Options) ->
- DbOpts =
- case mem3:dbname(DbName) of
- DbName -> [];
- MDbName -> mem3_shards:opts_for_db(MDbName)
- end,
- merge_opts(DbOpts, Options).
-
-get_or_create_db(DbName, Options) ->
- case couch_db:open(DbName, Options) of
- {ok, _} = OkDb ->
- OkDb;
- {not_found, no_db_file} ->
- try
- Options1 = [{create_if_missing, true} | Options],
- Options2 = add_db_config_options(DbName, Options1),
- couch_db:open(DbName, Options2)
- catch
- error:database_does_not_exist ->
- throw({error, missing_target})
- end;
- Else ->
- Else
- end.
-
-get_or_create_db_int(DbName, Options) ->
- case couch_db:open_int(DbName, Options) of
- {ok, _} = OkDb ->
- OkDb;
- {not_found, no_db_file} ->
- try
- Options1 = [{create_if_missing, true} | Options],
- Options2 = add_db_config_options(DbName, Options1),
- couch_db:open_int(DbName, Options2)
- catch
- error:database_does_not_exist ->
- throw({error, missing_target})
- end;
- Else ->
- Else
- end.
-
-%% Merge two proplists. Entries in New must be {Key, Val} tuples; bare atom
-%% options are only valid in Old.
-merge_opts(New, Old) ->
- lists:foldl(
- fun({Key, Val}, Acc) ->
- lists:keystore(Key, 1, Acc, {Key, Val})
- end,
- Old,
- New
- ).
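-
-% Illustrative example of the merge semantics (option values are made up):
-%
-%   merge_opts([{q, 8}, {n, 3}], [{q, 4}, sys_db]) -> [{q, 8}, sys_db, {n, 3}]
-%
-% {Key, Val} entries from New replace matching tuples in Old or are appended,
-% while bare atoms such as sys_db survive only if they were already in Old.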
-
-get_shard_props(ShardName) ->
- case couch_db:open_int(ShardName, []) of
- {ok, Db} ->
- Props =
- case couch_db_engine:get_props(Db) of
- undefined -> [];
- Else -> Else
- end,
- %% We don't normally store the default engine name
- EngineProps =
- case couch_db_engine:get_engine(Db) of
- couch_bt_engine ->
- [];
- EngineName ->
- [{engine, EngineName}]
- end,
- [{props, Props} | EngineProps];
- {not_found, _} ->
- not_found;
- Else ->
- Else
- end.
-
-find_dirty_shards() ->
- mem3_shards:fold(
- fun(#shard{node = Node, name = Name, opts = Opts} = Shard, Acc) ->
- case Opts of
- [] ->
- Acc;
- [{props, []}] ->
- Acc;
- _ ->
- Props = rpc:call(Node, ?MODULE, get_shard_props, [Name]),
- case Props =:= Opts of
- true ->
- Acc;
- false ->
- [{Shard, Props} | Acc]
- end
- end
- end,
- []
- ).
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-range_overlap_test_() ->
- [
- ?_assertEqual(Res, range_overlap(R1, R2))
- || {R1, R2, Res} <- [
- {[2, 6], [1, 3], true},
- {[2, 6], [3, 4], true},
- {[2, 6], [4, 8], true},
- {[2, 6], [1, 9], true},
- {[2, 6], [1, 2], true},
- {[2, 6], [6, 7], true},
- {[2, 6], [0, 1], false},
- {[2, 6], [7, 9], false}
- ]
- ].
-
-non_overlapping_shards_test() ->
- [
- ?_assertEqual(Res, non_overlapping_shards(Shards))
- || {Shards, Res} <- [
- {
- [shard(0, ?RING_END)],
- [shard(0, ?RING_END)]
- },
- {
- [shard(0, 1)],
- [shard(0, 1)]
- },
- {
- [shard(0, 1), shard(0, 1)],
- [shard(0, 1)]
- },
- {
- [shard(0, 1), shard(3, 4)],
- []
- },
- {
- [shard(0, 1), shard(1, 2), shard(2, 3)],
- [shard(0, 1), shard(2, 3)]
- },
- {
- [shard(1, 2), shard(0, 1)],
- [shard(0, 1), shard(1, 2)]
- },
- {
- [shard(0, 1), shard(0, 2), shard(2, 5), shard(3, 5)],
- [shard(0, 2), shard(2, 5)]
- },
- {
- [shard(0, 2), shard(4, 5), shard(1, 3)],
- []
- }
- ]
- ].
-
-calculate_max_n_test_() ->
- [
- ?_assertEqual(Res, calculate_max_n(Shards))
- || {Res, Shards} <- [
- {0, []},
- {0, [shard(1, ?RING_END)]},
- {1, [shard(0, ?RING_END)]},
- {1, [shard(0, ?RING_END), shard(1, ?RING_END)]},
- {2, [shard(0, ?RING_END), shard(0, ?RING_END)]},
- {2, [shard(0, 1), shard(2, ?RING_END), shard(0, ?RING_END)]},
- {0, [shard(0, 3), shard(5, ?RING_END), shard(1, ?RING_END)]}
- ]
- ].
-
-shard(Begin, End) ->
- #shard{range = [Begin, End]}.
-
--endif.
diff --git a/src/mem3/test/eunit/mem3_bdu_test.erl b/src/mem3/test/eunit/mem3_bdu_test.erl
deleted file mode 100644
index 849295691..000000000
--- a/src/mem3/test/eunit/mem3_bdu_test.erl
+++ /dev/null
@@ -1,259 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_bdu_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
-
--define(USER, "mem3_bdu_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(JSON, {"Content-Type", "application/json"}).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Db = ?tempdb(),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/"]),
- ShardsDb = "_node/_local/" ++ config:get("mem3", "shards_db", "_dbs"),
- {Url, Db, ShardsDb}.
-
-teardown({Url, Db, _}) ->
- sync_delete_db(Url, Db),
- ok = config:delete("admins", ?USER, _Persist = false).
-
-start_couch() ->
- test_util:start_couch([mem3, chttpd]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-mem3_bdu_shard_doc_test_() ->
- {
- "mem3 bdu shard doc tests",
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- ?TDEF_FE(t_can_insert_shard_map_doc),
- ?TDEF_FE(t_missing_by_node_section),
- ?TDEF_FE(t_by_node_not_a_map),
- ?TDEF_FE(t_missing_by_range_section),
- ?TDEF_FE(t_by_range_not_a_map),
- ?TDEF_FE(t_missing_range_in_by_range),
- ?TDEF_FE(t_missing_node_in_by_range_node_list),
- ?TDEF_FE(t_missing_node_in_by_node),
- ?TDEF_FE(t_missing_range_in_by_node_range_list),
- ?TDEF_FE(t_by_node_val_not_array),
- ?TDEF_FE(t_by_range_val_not_array),
- ?TDEF_FE(t_design_docs_are_not_validated),
- ?TDEF_FE(t_replicated_changes_not_validated)
- ]
- }
- }
- }.
-
-t_can_insert_shard_map_doc({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => [Range]},
- <<"by_range">> => #{Range => [Node]},
- <<"suffix">> => suffix()
- },
- {Code, Res} = req(post, Top ++ ShardsDb, ShardMap),
- ?assertEqual(201, Code),
- ?assertMatch(#{<<"ok">> := true}, Res).
-
-t_missing_by_node_section({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_range">> => #{Range => [Node]}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_by_node_not_a_map({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => 42,
- <<"by_range">> => #{Range => [Node]}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_missing_by_range_section({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => [Range]}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_by_range_not_a_map({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => [Range]},
- <<"by_range">> => 42
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_missing_range_in_by_range({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => [Range]},
- <<"by_range">> => #{<<"xyz">> => [Node]}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_missing_node_in_by_range_node_list({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => [Range]},
- <<"by_range">> => #{Range => [<<"xyz">>]}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_missing_node_in_by_node({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{<<"xyz">> => [Range]},
- <<"by_range">> => #{Range => [Node]}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_missing_range_in_by_node_range_list({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => [<<"xyz">>]},
- <<"by_range">> => #{Range => [Node]}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_by_node_val_not_array({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => 42},
- <<"by_range">> => #{Range => [Node]}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_by_range_val_not_array({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => [Range]},
- <<"by_range">> => #{Range => 42}
- },
- ?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
-t_design_docs_are_not_validated({Top, _, ShardsDb}) ->
- Suffix = integer_to_list(erlang:system_time() + rand:uniform(1000)),
- DDocId = list_to_binary("_design/ddoc_bdu_test-" ++ Suffix),
- DDoc = #{<<"_id">> => DDocId},
- {Code, Res} = req(post, Top ++ ShardsDb, DDoc),
- ?assertEqual(201, Code),
- #{<<"rev">> := Rev} = Res,
- Deleted = #{
- <<"id">> => DDocId,
- <<"_rev">> => Rev,
- <<"_deleted">> => true
- },
- ?assertMatch({200, _}, req(post, Top ++ ShardsDb, Deleted)).
-
-t_replicated_changes_not_validated({Top, Db, ShardsDb}) ->
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
- ShardMap = #{
- <<"_id">> => Db,
- <<"by_node">> => #{Node => [Range]},
- % missing <<"by_range">>, we can tolerate it
- % and not crash the backend
- <<"suffix">> => suffix(),
- <<"_rev">> => <<"1-abc">>,
- <<"_revisions">> => #{
- <<"ids">> => [<<"abc">>],
- <<"start">> => 1
- }
- },
- Docs = #{
- <<"docs">> => [ShardMap],
- <<"new_edits">> => false
- },
- {Code, Res} = req(post, Top ++ ShardsDb ++ "/_bulk_docs", Docs),
- ?assertEqual(201, Code),
- ?assertEqual([], Res),
- Deleted = #{
- <<"id">> => Db,
- <<"_rev">> => <<"1-abc">>,
- <<"_deleted">> => true
- },
- ?assertMatch({200, _}, req(post, Top ++ ShardsDb, Deleted)).
-
-delete_db(Top, Db) when is_binary(Db) ->
- Url = Top ++ binary_to_list(Db),
- case test_request:get(Url, [?AUTH]) of
- {ok, 404, _, _} ->
- not_found;
- {ok, 200, _, _} ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]),
- ok
- end.
-
-sync_delete_db(Top, Db) when is_binary(Db) ->
- delete_db(Top, Db),
- try
- Shards = mem3:local_shards(Db),
- ShardNames = [mem3:name(S) || S <- Shards],
- [couch_server:delete(N, [?ADMIN_CTX]) || N <- ShardNames],
- ok
- catch
- error:database_does_not_exist ->
- ok
- end.
-
-req(Method, Url, #{} = Body) ->
- req(Method, Url, jiffy:encode(Body));
-req(Method, Url, Body) ->
- Headers = [?JSON, ?AUTH],
- {ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body),
- {Code, jiffy:decode(Res, [return_maps])}.
-
-suffix() ->
- integer_to_list(erlang:system_time(second)).
diff --git a/src/mem3/test/eunit/mem3_cluster_test.erl b/src/mem3/test/eunit/mem3_cluster_test.erl
deleted file mode 100644
index d1a0fcd38..000000000
--- a/src/mem3/test/eunit/mem3_cluster_test.erl
+++ /dev/null
@@ -1,129 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_cluster_test).
-
--behavior(mem3_cluster).
-
--include_lib("eunit/include/eunit.hrl").
-
--export([
- cluster_unstable/1,
- cluster_stable/1
-]).
-
-% Mem3 cluster callbacks
-
-cluster_unstable(Server) ->
- Server ! cluster_unstable,
- Server.
-
-cluster_stable(Server) ->
- Server ! cluster_stable,
- Server.
-
-mem3_cluster_test_test_() ->
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_cluster_stable_during_startup_period(),
- t_cluster_unstable_delivered_on_nodeup(),
- t_cluster_unstable_delivered_on_nodedown(),
- t_wait_period_is_reset_after_last_change()
- ]
- }.
-
-t_cluster_stable_during_startup_period() ->
- ?_test(begin
- {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
- register(?MODULE, Pid),
- receive
- cluster_stable ->
- ?assert(true)
- after 1500 ->
- throw(timeout)
- end,
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-t_cluster_unstable_delivered_on_nodeup() ->
- ?_test(begin
- {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
- register(?MODULE, Pid),
- Pid ! {nodeup, node()},
- receive
- cluster_unstable ->
- ?assert(true)
- after 1000 ->
- throw(timeout)
- end,
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-t_cluster_unstable_delivered_on_nodedown() ->
- ?_test(begin
- {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
- register(?MODULE, Pid),
- Pid ! {nodedown, node()},
- receive
- cluster_unstable ->
- ?assert(true)
- after 1000 ->
- throw(timeout)
- end,
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-t_wait_period_is_reset_after_last_change() ->
- ?_test(begin
- {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 1),
- register(?MODULE, Pid),
- timer:sleep(800),
- % after 800 msec send a nodeup
- Pid ! {nodeup, node()},
- receive
- cluster_stable ->
- ?assert(false)
- after 400 ->
- % stability check should have been reset
- ?assert(true)
- end,
- timer:sleep(1000),
- receive
- cluster_stable ->
- ?assert(true)
- after 0 ->
- % cluster_stable arrives after enough quiet time
- ?assert(false)
- end,
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-% Test helper functions
-
-setup() ->
- ok.
-
-teardown(_) ->
- case whereis(?MODULE) of
- undefined ->
- ok;
- Pid when is_pid(Pid) ->
- unlink(Pid),
- exit(Pid, kill)
- end.
diff --git a/src/mem3/test/eunit/mem3_hash_test.erl b/src/mem3/test/eunit/mem3_hash_test.erl
deleted file mode 100644
index beeb0ac63..000000000
--- a/src/mem3/test/eunit/mem3_hash_test.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_hash_test).
-
--include_lib("eunit/include/eunit.hrl").
-
-hash_test() ->
- ?assertEqual(1624516141, mem3_hash:crc32(0)),
- ?assertEqual(3816901808, mem3_hash:crc32("0")),
- ?assertEqual(3523407757, mem3_hash:crc32(<<0>>)),
- ?assertEqual(4108050209, mem3_hash:crc32(<<"0">>)),
- ?assertEqual(3094724072, mem3_hash:crc32(zero)),
- ok.
diff --git a/src/mem3/test/eunit/mem3_rep_test.erl b/src/mem3/test/eunit/mem3_rep_test.erl
deleted file mode 100644
index 31a6d9b77..000000000
--- a/src/mem3/test/eunit/mem3_rep_test.erl
+++ /dev/null
@@ -1,318 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_rep_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(ID, <<"_id">>).
-% seconds
--define(TIMEOUT, 60).
-
-setup() ->
- {AllSrc, AllTgt} = {?tempdb(), ?tempdb()},
- {PartSrc, PartTgt} = {?tempdb(), ?tempdb()},
- create_db(AllSrc, [{q, 1}, {n, 1}]),
- create_db(AllTgt, [{q, 2}, {n, 1}]),
- PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
- create_db(PartSrc, [{q, 1}, {n, 1}, {props, PartProps}]),
- create_db(PartTgt, [{q, 2}, {n, 1}, {props, PartProps}]),
- #{allsrc => AllSrc, alltgt => AllTgt, partsrc => PartSrc, parttgt => PartTgt}.
-
-teardown(#{} = Dbs) ->
- maps:map(fun(_, Db) -> delete_db(Db) end, Dbs).
-
-start_couch() ->
- test_util:start_couch([mem3, fabric]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-mem3_reshard_db_test_() ->
- {
- "mem3 rep db tests",
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun replicate_basics/1,
- fun replicate_small_batches/1,
- fun replicate_low_batch_count/1,
- fun replicate_with_partitions/1
- ]
- }
- }
- }.
-
-replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
-
-replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 2}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
-
-replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
-
- Opts1 = [{batch_size, 2}, {batch_count, 1}],
- ?assertMatch({ok, 8}, mem3_rep:go(Src, TMap, Opts1)),
-
- Opts2 = [{batch_size, 1}, {batch_count, 2}],
- ?assertMatch({ok, 6}, mem3_rep:go(Src, TMap, Opts2)),
-
- Opts3 = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts3)),
-
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
-
-replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocSpec = #{
- pdocs => #{
- <<"PX">> => 15,
- <<"PY">> => 19
- }
- },
- add_test_docs(PartSrc, DocSpec),
- SDocs = get_all_docs(PartSrc),
- PXSrc = get_partition_info(PartSrc, <<"PX">>),
- PYSrc = get_partition_info(PartSrc, <<"PY">>),
-
- [Src] = lists:sort(mem3:local_shards(PartSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(PartTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(PXSrc, get_partition_info(PartTgt, <<"PX">>)),
- ?assertEqual(PYSrc, get_partition_info(PartTgt, <<"PY">>)),
- ?assertEqual(SDocs, get_all_docs(PartTgt))
- end)}.
-
-get_partition_info(DbName, Partition) ->
- with_proc(fun() ->
- {ok, PInfo} = fabric:get_partition_info(DbName, Partition),
- maps:with(
- [
- <<"doc_count">>, <<"doc_del_count">>, <<"partition">>
- ],
- to_map(PInfo)
- )
- end).
-
-get_all_docs(DbName) ->
- get_all_docs(DbName, #mrargs{}).
-
-get_all_docs(DbName, #mrargs{} = QArgs0) ->
- GL = erlang:group_leader(),
- with_proc(
- fun() ->
- Cb = fun
- ({row, Props}, Acc) ->
- Doc = to_map(couch_util:get_value(doc, Props)),
- #{?ID := Id} = Doc,
- {ok, Acc#{Id => Doc}};
- ({meta, _}, Acc) ->
- {ok, Acc};
- (complete, Acc) ->
- {ok, Acc}
- end,
- QArgs = QArgs0#mrargs{include_docs = true},
- {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
- Docs
- end,
- GL
- ).
-
-to_map([_ | _] = Props) ->
- to_map({Props});
-to_map({[_ | _]} = EJson) ->
- jiffy:decode(jiffy:encode(EJson), [return_maps]).
-
-create_db(DbName, Opts) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
-delete_db(DbName) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
-with_proc(Fun) ->
- with_proc(Fun, undefined, 30000).
-
-with_proc(Fun, GroupLeader) ->
- with_proc(Fun, GroupLeader, 30000).
-
-with_proc(Fun, GroupLeader, Timeout) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- case GroupLeader of
- undefined -> ok;
- _ -> erlang:group_leader(GroupLeader, self())
- end,
- exit({with_proc_res, Fun()})
- end),
- receive
- {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
- Res;
- {'DOWN', Ref, process, Pid, Error} ->
- error(Error)
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- error({with_proc_timeout, Fun, Timeout})
- end.
-
-add_test_docs(DbName, #{} = DocSpec) ->
- Docs =
- docs(maps:get(docs, DocSpec, [])) ++
- pdocs(maps:get(pdocs, DocSpec, #{})),
- Res = update_docs(DbName, Docs),
- Docs1 = lists:map(
- fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end,
- lists:zip(Docs, Res)
- ),
- case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
- [] -> ok;
- [_ | _] = Deleted -> update_docs(DbName, Deleted)
- end,
- ok.
-
-update_docs(DbName, Docs) ->
- with_proc(fun() ->
- case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
- {accepted, Res} -> Res;
- {ok, Res} -> Res
- end
- end).
-
-delete_docs([S, E], Docs) when E >= S ->
- ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(
- fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end,
- Docs
- );
-delete_docs(_, _) ->
- [].
-
-pdocs(#{} = PMap) ->
- maps:fold(
- fun(Part, DocSpec, DocsAcc) ->
- docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
- end,
- [],
- PMap
- ).
-
-docs(DocSpec) ->
- docs(DocSpec, <<"">>).
-
-docs(N, Prefix) when is_integer(N), N > 0 ->
- docs([0, N - 1], Prefix);
-docs([S, E], Prefix) when E >= S ->
- [doc(Prefix, I) || I <- lists:seq(S, E)];
-docs(_, _) ->
- [].
-
-doc(Pref, Id) ->
- Body = bodyprops(),
- doc(Pref, Id, Body, 42).
-
-doc(Pref, Id, BodyProps, AttSize) ->
- #doc{
- id = doc_id(Pref, Id),
- body = {BodyProps},
- atts = atts(AttSize)
- }.
-
-doc_id(Pref, Id) ->
- IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
- <<Pref/binary, IdBin/binary>>.
-
-bodyprops() ->
- [
- {<<"g">>,
- {[
- {<<"type">>, <<"Polygon">>},
- {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
- ]}}
- ].
-
-atts(0) ->
- [];
-atts(Size) when is_integer(Size), Size >= 1 ->
- Data = <<<<"x">> || _ <- lists:seq(1, Size)>>,
- [
- couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])
- ].
diff --git a/src/mem3/test/eunit/mem3_reshard_api_test.erl b/src/mem3/test/eunit/mem3_reshard_api_test.erl
deleted file mode 100644
index 6e4107a5c..000000000
--- a/src/mem3/test/eunit/mem3_reshard_api_test.erl
+++ /dev/null
@@ -1,984 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_api_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/src/mem3_reshard.hrl").
-
--define(USER, "mem3_reshard_api_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(JSON, {"Content-Type", "application/json"}).
--define(RESHARD, "_reshard/").
--define(JOBS, "_reshard/jobs/").
--define(STATE, "_reshard/state").
--define(ID, <<"id">>).
--define(OK, <<"ok">>).
-% seconds
--define(TIMEOUT, 60).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/"]),
- {Db1, Db2, Db3} = {?tempdb(), ?tempdb(), ?tempdb()},
- create_db(Url, Db1, "?q=1&n=1"),
- create_db(Url, Db2, "?q=1&n=1"),
- create_db(Url, Db3, "?q=2&n=1"),
- {Url, {Db1, Db2, Db3}}.
-
-teardown({Url, {Db1, Db2, Db3}}) ->
- mem3_reshard:reset_state(),
- application:unset_env(mem3, reshard_disabled),
- delete_db(Url, Db1),
- delete_db(Url, Db2),
- delete_db(Url, Db3),
- ok = config:delete("reshard", "max_jobs", _Persist = false),
- ok = config:delete("reshard", "require_node_param", _Persist = false),
- ok = config:delete("reshard", "require_range_param", _Persist = false),
- ok = config:delete("admins", ?USER, _Persist = false),
- meck:unload().
-
-start_couch() ->
- test_util:start_couch([mem3, chttpd]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-mem3_reshard_api_test_() ->
- {
- "mem3 shard split api tests",
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun basics/1,
- fun create_job_basic/1,
- fun create_two_jobs/1,
- fun create_multiple_jobs_from_one_post/1,
- fun start_stop_cluster_basic/1,
- fun test_disabled/1,
- fun start_stop_cluster_with_a_job/1,
- fun individual_job_start_stop/1,
- fun individual_job_stop_when_cluster_stopped/1,
- fun create_job_with_invalid_arguments/1,
- fun create_job_with_db/1,
- fun create_job_with_shard_name/1,
- fun completed_job_handling/1,
- fun handle_db_deletion_in_initial_copy/1,
- fun handle_db_deletion_in_topoff1/1,
- fun handle_db_deletion_in_copy_local_docs/1,
- fun handle_db_deletion_in_build_indices/1,
- fun handle_db_deletion_in_update_shard_map/1,
- fun handle_db_deletion_in_wait_source_close/1,
- fun recover_in_initial_copy/1,
- fun recover_in_topoff1/1,
- fun recover_in_copy_local_docs/1,
- fun recover_in_build_indices/1,
- fun recover_in_update_shard_map/1,
- fun recover_in_wait_source_close/1,
- fun recover_in_topoff3/1,
- fun recover_in_source_delete/1,
- fun check_max_jobs/1,
- fun check_node_and_range_required_params/1,
- fun cleanup_completed_jobs/1
- ]
- }
- }
- }.
-
-basics({Top, _}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- % GET /_reshard
- ?assertMatch(
- {200, #{
- <<"state">> := <<"running">>,
- <<"state_reason">> := null,
- <<"completed">> := 0,
- <<"failed">> := 0,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 0
- }},
- req(get, Top ++ ?RESHARD)
- ),
-
- % GET _reshard/state
- ?assertMatch(
- {200, #{<<"state">> := <<"running">>}},
- req(get, Top ++ ?STATE)
- ),
-
- % GET _reshard/jobs
- ?assertMatch(
- {200, #{
- <<"jobs">> := [],
- <<"offset">> := 0,
- <<"total_rows">> := 0
- }},
- req(get, Top ++ ?JOBS)
- ),
-
- % Some invalid paths and methods
- ?assertMatch({404, _}, req(get, Top ++ ?RESHARD ++ "/invalidpath")),
- ?assertMatch({405, _}, req(put, Top ++ ?RESHARD, #{dont => thinkso})),
- ?assertMatch({405, _}, req(post, Top ++ ?RESHARD, #{nope => nope}))
- end)}.
-
-create_job_basic({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- % POST /_reshard/jobs
- {C1, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
- ?assertEqual(201, C1),
- ?assertMatch(
- [#{?OK := true, ?ID := J, <<"shard">> := S}] when
- is_binary(J) andalso is_binary(S),
- R1
- ),
- [#{?ID := Id, <<"shard">> := Shard}] = R1,
-
- % GET /_reshard/jobs
- ?assertMatch(
- {200, #{
- <<"jobs">> := [#{?ID := Id, <<"type">> := <<"split">>}],
- <<"offset">> := 0,
- <<"total_rows">> := 1
- }},
- req(get, Top ++ ?JOBS)
- ),
-
- % GET /_reshard/job/$jobid
- {C2, R2} = req(get, Top ++ ?JOBS ++ ?b2l(Id)),
- ?assertEqual(200, C2),
- ThisNode = atom_to_binary(node(), utf8),
- ?assertMatch(#{?ID := Id}, R2),
- ?assertMatch(#{<<"type">> := <<"split">>}, R2),
- ?assertMatch(#{<<"source">> := Shard}, R2),
- ?assertMatch(#{<<"history">> := History} when length(History) > 1, R2),
- ?assertMatch(#{<<"node">> := ThisNode}, R2),
- ?assertMatch(#{<<"split_state">> := SSt} when is_binary(SSt), R2),
- ?assertMatch(#{<<"job_state">> := JSt} when is_binary(JSt), R2),
- ?assertMatch(#{<<"state_info">> := #{}}, R2),
- ?assertMatch(#{<<"target">> := Target} when length(Target) == 2, R2),
-
- % GET /_reshard/job/$jobid/state
- ?assertMatch(
- {200, #{<<"state">> := S, <<"reason">> := R}} when
- is_binary(S) andalso (is_binary(R) orelse R =:= null),
- req(get, Top ++ ?JOBS ++ ?b2l(Id) ++ "/state")
- ),
-
- % GET /_reshard
- ?assertMatch(
- {200, #{<<"state">> := <<"running">>, <<"total">> := 1}},
- req(get, Top ++ ?RESHARD)
- ),
-
- % DELETE /_reshard/jobs/$jobid
- ?assertMatch(
- {200, #{?OK := true}},
- req(delete, Top ++ ?JOBS ++ ?b2l(Id))
- ),
-
- % GET _reshard/jobs
- ?assertMatch(
- {200, #{<<"jobs">> := [], <<"total_rows">> := 0}},
- req(get, Top ++ ?JOBS)
- ),
-
- % GET /_reshard/job/$jobid should be a 404
- ?assertMatch({404, #{}}, req(get, Top ++ ?JOBS ++ ?b2l(Id))),
-
- % DELETE /_reshard/jobs/$jobid should be a 404 as well
- ?assertMatch({404, #{}}, req(delete, Top ++ ?JOBS ++ ?b2l(Id)))
- end)}.
-
-create_two_jobs({Top, {Db1, Db2, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- ?assertMatch(
- {201, [#{?OK := true}]},
- req(post, Jobs, #{type => split, db => Db1})
- ),
- ?assertMatch(
- {201, [#{?OK := true}]},
- req(post, Jobs, #{type => split, db => Db2})
- ),
-
- ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD)),
-
- ?assertMatch(
- {200, #{
- <<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}],
- <<"offset">> := 0,
- <<"total_rows">> := 2
- }} when Id1 =/= Id2,
- req(get, Jobs)
- ),
-
- {200, #{<<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}]}} = req(get, Jobs),
-
- {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id1)),
- ?assertMatch({200, #{<<"total">> := 1}}, req(get, Top ++ ?RESHARD)),
- {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id2)),
- ?assertMatch({200, #{<<"total">> := 0}}, req(get, Top ++ ?RESHARD))
- end)}.
-
-create_multiple_jobs_from_one_post({Top, {_, _, Db3}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Jobs = Top ++ ?JOBS,
- {C1, R1} = req(post, Jobs, #{type => split, db => Db3}),
- ?assertMatch({201, [#{?OK := true}, #{?OK := true}]}, {C1, R1}),
- ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD))
- end)}.
-
-start_stop_cluster_basic({Top, _}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Url = Top ++ ?STATE,
-
- ?assertMatch(
- {200, #{
- <<"state">> := <<"running">>,
- <<"reason">> := null
- }},
- req(get, Url)
- ),
-
- ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
- ?assertMatch(
- {200, #{
- <<"state">> := <<"stopped">>,
- <<"reason">> := R
- }} when is_binary(R),
- req(get, Url)
- ),
-
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
-
- % Make sure the reason shows in the state GET request
- Reason = <<"somereason">>,
- ?assertMatch(
- {200, _},
- req(put, Url, #{
- state => stopped,
- reason => Reason
- })
- ),
- ?assertMatch(
- {200, #{
- <<"state">> := <<"stopped">>,
- <<"reason">> := Reason
- }},
- req(get, Url)
- ),
-
- % Top level summary also shows the reason
- ?assertMatch(
- {200, #{
- <<"state">> := <<"stopped">>,
- <<"state_reason">> := Reason
- }},
- req(get, Top ++ ?RESHARD)
- ),
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, Url))
- end)}.
-
-test_disabled({Top, _}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- application:set_env(mem3, reshard_disabled, true),
- ?assertMatch({501, _}, req(get, Top ++ ?RESHARD)),
- ?assertMatch({501, _}, req(put, Top ++ ?STATE, #{state => running})),
-
- application:unset_env(mem3, reshard_disabled),
- ?assertMatch({200, _}, req(get, Top ++ ?RESHARD)),
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running}))
- end)}.
-
-start_stop_cluster_with_a_job({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Url = Top ++ ?STATE,
-
- ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, Url)),
-
- % Can add jobs with the global state stopped; they just won't be running
- {201, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
- ?assertMatch([#{?OK := true}], R1),
- [#{?ID := Id1}] = R1,
- {200, J1} = req(get, Top ++ ?JOBS ++ ?b2l(Id1)),
- ?assertMatch(#{?ID := Id1, <<"job_state">> := <<"stopped">>}, J1),
- % Check summary stats
- ?assertMatch(
- {200, #{
- <<"state">> := <<"stopped">>,
- <<"running">> := 0,
- <<"stopped">> := 1,
- <<"total">> := 1
- }},
- req(get, Top ++ ?RESHARD)
- ),
-
- % Can delete the job when stopped
- {200, #{?OK := true}} = req(delete, Top ++ ?JOBS ++ ?b2l(Id1)),
- ?assertMatch(
- {200, #{
- <<"state">> := <<"stopped">>,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 0
- }},
- req(get, Top ++ ?RESHARD)
- ),
-
- % Add same job again
- {201, [#{?ID := Id2}]} = req(post, Top ++ ?JOBS, #{
- type => split,
- db => Db1
- }),
- ?assertMatch(
- {200, #{?ID := Id2, <<"job_state">> := <<"stopped">>}},
- req(get, Top ++ ?JOBS ++ ?b2l(Id2))
- ),
-
- % Job should start after resharding is started on the cluster
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
- ?assertMatch(
- {200, #{?ID := Id2, <<"job_state">> := JSt}} when
- JSt =/= <<"stopped">>,
- req(get, Top ++ ?JOBS ++ ?b2l(Id2))
- )
- end)}.
-
-individual_job_start_stop({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- intercept_state(topoff1),
-
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
-
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- StUrl = JobUrl ++ "/state",
-
- % Wait for the job to start running and intercept it in topoff1 state
- receive
- {JobPid, topoff1} -> ok
- end,
- % Tell the intercept to never finish checkpointing so job is left hanging
- % forever in running state
- JobPid ! cancel,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- {200, _} = req(put, StUrl, #{state => stopped}),
- wait_state(StUrl, <<"stopped">>),
-
- % Stop/start resharding globally and job should still stay stopped
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % Start the job again
- ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
- % Wait for the job to start running and intercept it in topoff1 state
- receive
- {JobPid2, topoff1} -> ok
- end,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
- % Let it continue running and it should complete eventually
- JobPid2 ! continue,
- wait_state(StUrl, <<"completed">>)
- end)}.
-
-individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- intercept_state(topoff1),
-
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
-
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- StUrl = JobUrl ++ "/state",
-
- % Wait for the job to start running and intercept it in topoff1
- receive
- {JobPid, topoff1} -> ok
- end,
- % Tell the intercept to never finish checkpointing so job is left
- % hanging forever in running state
- JobPid ! cancel,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- % Stop resharding globally
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- wait_state(StUrl, <<"stopped">>),
-
- % Stop the job specifically
- {200, _} = req(put, StUrl, #{state => stopped}),
- % Job stays stopped
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % Set cluster to running again
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
-
- % The job should stay stopped
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % It should be possible to resume job and it should complete
- ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
-
- % Wait for the job to start running and intercept it in topoff1 state
- receive
- {JobPid2, topoff1} -> ok
- end,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- % Let it continue running and it should complete eventually
- JobPid2 ! continue,
- wait_state(StUrl, <<"completed">>)
- end)}.
-
-create_job_with_invalid_arguments({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- % Nothing in the body
- ?assertMatch({400, _}, req(post, Jobs, #{})),
-
- % Missing type
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1})),
-
- % Have type but no db and no shard
- ?assertMatch({400, _}, req(post, Jobs, #{type => split})),
-
- % Have type and db but db is invalid
- ?assertMatch(
- {400, _},
- req(post, Jobs, #{
- db => <<"baddb">>,
- type => split
- })
- ),
-
- % Have type and shard but shard is not an existing database
- ?assertMatch(
- {404, _},
- req(post, Jobs, #{
- type => split,
- shard => <<"shards/80000000-ffffffff/baddb.1549492084">>
- })
- ),
-
- % Bad range values, too large, different types, inverted
- ?assertMatch(
- {400, _},
- req(post, Jobs, #{
- db => Db1,
- range => 42,
- type => split
- })
- ),
- ?assertMatch(
- {400, _},
- req(post, Jobs, #{
- db => Db1,
- range => <<"x">>,
- type => split
- })
- ),
- ?assertMatch(
- {400, _},
- req(post, Jobs, #{
- db => Db1,
- range => <<"ffffffff-80000000">>,
- type => split
- })
- ),
- ?assertMatch(
- {400, _},
- req(post, Jobs, #{
- db => Db1,
- range => <<"00000000-fffffffff">>,
- type => split
- })
- ),
-
- % Can't have both db and shard
- ?assertMatch(
- {400, _},
- req(post, Jobs, #{
- type => split,
- db => Db1,
- shard => <<"blah">>
- })
- )
- end)}.
-
-create_job_with_db({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Jobs = Top ++ ?JOBS,
- Body1 = #{type => split, db => Db1},
-
- % Node with db
- N = atom_to_binary(node(), utf8),
- {C1, R1} = req(post, Jobs, Body1#{node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- wait_to_complete_then_cleanup(Top, R1),
-
- % Range and db
- {C2, R2} = req(post, Jobs, Body1#{range => <<"00000000-7fffffff">>}),
- ?assertMatch({201, [#{?OK := true}]}, {C2, R2}),
- wait_to_complete_then_cleanup(Top, R2),
-
- % Node, range and db
- Range = <<"80000000-ffffffff">>,
- {C3, R3} = req(post, Jobs, Body1#{range => Range, node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
- wait_to_complete_then_cleanup(Top, R3),
-
- ?assertMatch(
- [
- [16#00000000, 16#3fffffff],
- [16#40000000, 16#7fffffff],
- [16#80000000, 16#bfffffff],
- [16#c0000000, 16#ffffffff]
- ],
- [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))]
- )
- end)}.
-
-create_job_with_shard_name({Top, {_, _, Db3}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Jobs = Top ++ ?JOBS,
- [S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))],
-
- % Shard only
- {C1, R1} = req(post, Jobs, #{type => split, shard => S1}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- wait_to_complete_then_cleanup(Top, R1),
-
- % Shard with a node
- N = atom_to_binary(node(), utf8),
- {C2, R2} = req(post, Jobs, #{type => split, shard => S2, node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C2, R2}),
- wait_to_complete_then_cleanup(Top, R2),
-
- ?assertMatch(
- [
- [16#00000000, 16#3fffffff],
- [16#40000000, 16#7fffffff],
- [16#80000000, 16#bfffffff],
- [16#c0000000, 16#ffffffff]
- ],
- [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))]
- )
- end)}.
-
-completed_job_handling({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- % Run job to completion
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- [#{?ID := Id}] = R1,
- wait_to_complete(Top, R1),
-
- % Check top level stats
- ?assertMatch(
- {200, #{
- <<"state">> := <<"running">>,
- <<"state_reason">> := null,
- <<"completed">> := 1,
- <<"failed">> := 0,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 1
- }},
- req(get, Top ++ ?RESHARD)
- ),
-
- % Job state itself
- JobUrl = Jobs ++ ?b2l(Id),
- ?assertMatch(
- {200, #{
- <<"split_state">> := <<"completed">>,
- <<"job_state">> := <<"completed">>
- }},
- req(get, JobUrl)
- ),
-
- % Job's state endpoint
- StUrl = Jobs ++ ?b2l(Id) ++ "/state",
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Try to stop it and it should stay completed
- {200, _} = req(put, StUrl, #{state => stopped}),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Try to resume it and it should stay completed
- {200, _} = req(put, StUrl, #{state => running}),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Stop resharding globally and job should still stay completed
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Start resharding and job stays completed
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- ?assertMatch({200, #{?OK := true}}, req(delete, JobUrl))
- end)}.
-
-handle_db_deletion_in_topoff1({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = delete_source_in_state(Top, Db1, topoff1),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-handle_db_deletion_in_initial_copy({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = delete_source_in_state(Top, Db1, initial_copy),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-handle_db_deletion_in_copy_local_docs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = delete_source_in_state(Top, Db1, copy_local_docs),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-handle_db_deletion_in_build_indices({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = delete_source_in_state(Top, Db1, build_indices),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-handle_db_deletion_in_update_shard_map({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = delete_source_in_state(Top, Db1, update_shardmap),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-handle_db_deletion_in_wait_source_close({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = delete_source_in_state(Top, Db1, wait_source_close),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-recover_in_topoff1({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = recover_in_state(Top, Db1, topoff1),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-recover_in_initial_copy({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = recover_in_state(Top, Db1, initial_copy),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-recover_in_copy_local_docs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = recover_in_state(Top, Db1, copy_local_docs),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-recover_in_build_indices({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = recover_in_state(Top, Db1, build_indices),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-recover_in_update_shard_map({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = recover_in_state(Top, Db1, update_shardmap),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-recover_in_wait_source_close({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = recover_in_state(Top, Db1, wait_source_close),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-recover_in_topoff3({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = recover_in_state(Top, Db1, topoff3),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-recover_in_source_delete({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- JobId = recover_in_state(Top, Db1, source_delete),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-check_max_jobs({Top, {Db1, Db2, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- config:set("reshard", "max_jobs", "0", _Persist = false),
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]}, {C1, R1}),
-
- config:set("reshard", "max_jobs", "1", _Persist = false),
- {201, R2} = req(post, Jobs, #{type => split, db => Db1}),
- wait_to_complete(Top, R2),
-
- % Stop resharding so jobs are not started anymore and ensure max_jobs
- % is enforced even if jobs are stopped
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
-
- {C3, R3} = req(post, Jobs, #{type => split, db => Db2}),
- ?assertMatch(
- {500, [#{<<"error">> := <<"max_jobs_exceeded">>}]},
- {C3, R3}
- ),
-
- % Allow the job to be created by raising max_jobs
- config:set("reshard", "max_jobs", "2", _Persist = false),
-
- {C4, R4} = req(post, Jobs, #{type => split, db => Db2}),
- ?assertEqual(201, C4),
-
- % Lower max_jobs after the job is created but while it is not running
- config:set("reshard", "max_jobs", "1", _Persist = false),
-
- % Start resharding again
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
-
- % Jobs that have been created already are not removed if max jobs is lowered
- % so make sure the job completes
- wait_to_complete(Top, R4)
- end)}.
-
-check_node_and_range_required_params({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
-
- config:set("reshard", "require_node_param", "true", _Persist = false),
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- NodeRequiredErr = <<"`node` prameter is required">>,
- ?assertEqual(
- {400, #{
- <<"error">> => <<"bad_request">>,
- <<"reason">> => NodeRequiredErr
- }},
- {C1, R1}
- ),
-
- config:set("reshard", "require_range_param", "true", _Persist = false),
- {C2, R2} = req(post, Jobs, #{type => split, db => Db1, node => Node}),
- RangeRequiredErr = <<"`range` prameter is required">>,
- ?assertEqual(
- {400, #{
- <<"error">> => <<"bad_request">>,
- <<"reason">> => RangeRequiredErr
- }},
- {C2, R2}
- ),
-
- Body = #{type => split, db => Db1, range => Range, node => Node},
- {C3, R3} = req(post, Jobs, Body),
- ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
- wait_to_complete_then_cleanup(Top, R3)
- end)}.
-
-cleanup_completed_jobs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- wait_state(JobUrl ++ "/state", <<"completed">>),
- delete_db(Top, Db1),
- wait_for_http_code(JobUrl, 404)
- end)}.
-
-% Test helper functions
-
-wait_to_complete_then_cleanup(Top, Jobs) ->
- JobsUrl = Top ++ ?JOBS,
- lists:foreach(
- fun(#{?ID := Id}) ->
- wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>),
- {200, _} = req(delete, JobsUrl ++ ?b2l(Id))
- end,
- Jobs
- ).
-
-wait_to_complete(Top, Jobs) ->
- JobsUrl = Top ++ ?JOBS,
- lists:foreach(
- fun(#{?ID := Id}) ->
- wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>)
- end,
- Jobs
- ).
-
-intercept_state(State) ->
- TestPid = self(),
- meck:new(mem3_reshard_job, [passthrough]),
- meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
- case Job#job.split_state of
- State ->
- TestPid ! {self(), State},
- receive
- continue -> meck:passthrough([Job]);
- cancel -> ok
- end;
- _ ->
- meck:passthrough([Job])
- end
- end).
-
-cancel_intercept() ->
- meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
- meck:passthrough([Job])
- end).
-
-wait_state(Url, State) ->
- test_util:wait(
- fun() ->
- case req(get, Url) of
- {200, #{<<"state">> := State}} ->
- ok;
- {200, #{}} ->
- timer:sleep(100),
- wait
- end
- end,
- 30000
- ).
-
-wait_for_http_code(Url, Code) when is_integer(Code) ->
- test_util:wait(
- fun() ->
- case req(get, Url) of
- {Code, _} ->
- ok;
- {_, _} ->
- timer:sleep(100),
- wait
- end
- end,
- 30000
- ).
-
-delete_source_in_state(Top, Db, State) when is_atom(State), is_binary(Db) ->
- intercept_state(State),
- Body = #{type => split, db => Db},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- receive
- {JobPid, State} -> ok
- end,
- sync_delete_db(Top, Db),
- JobPid ! continue,
- Id.
-
-recover_in_state(Top, Db, State) when is_atom(State) ->
- intercept_state(State),
- Body = #{type => split, db => Db},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- receive
- {JobPid, State} -> ok
- end,
- % Job is now stuck in the running state since we prevented it from
- % executing the given state
- JobPid ! cancel,
- % Now restart resharding
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- cancel_intercept(),
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
- Id.
-
-create_db(Top, Db, QArgs) when is_binary(Db) ->
- Url = Top ++ binary_to_list(Db) ++ QArgs,
- {ok, Status, _, _} = test_request:put(Url, [?JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-delete_db(Top, Db) when is_binary(Db) ->
- Url = Top ++ binary_to_list(Db),
- case test_request:get(Url, [?AUTH]) of
- {ok, 404, _, _} ->
- not_found;
- {ok, 200, _, _} ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]),
- ok
- end.
-
-sync_delete_db(Top, Db) when is_binary(Db) ->
- delete_db(Top, Db),
- try
- Shards = mem3:local_shards(Db),
- ShardNames = [mem3:name(S) || S <- Shards],
- [couch_server:delete(N, [?ADMIN_CTX]) || N <- ShardNames],
- ok
- catch
- error:database_does_not_exist ->
- ok
- end.
-
-req(Method, Url) ->
- Headers = [?AUTH],
- {ok, Code, _, Res} = test_request:request(Method, Url, Headers),
- {Code, jiffy:decode(Res, [return_maps])}.
-
-req(Method, Url, #{} = Body) ->
- req(Method, Url, jiffy:encode(Body));
-req(Method, Url, Body) ->
- Headers = [?JSON, ?AUTH],
- {ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body),
- {Code, jiffy:decode(Res, [return_maps])}.
diff --git a/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl b/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
deleted file mode 100644
index b9cafd75c..000000000
--- a/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
+++ /dev/null
@@ -1,398 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_changes_feed_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/src/mem3_reshard.hrl").
-
-% seconds
--define(TIMEOUT, 60).
-
--define(assertChanges(Expected, Received), begin
- ((fun() ->
- ExpectedIDs = lists:sort([I || #{id := I} <- Expected]),
- ReceivedIDs = lists:sort([I || #{id := I} <- Received]),
- ?assertEqual(ExpectedIDs, ReceivedIDs)
- end)())
-end).
-
-setup() ->
- Db1 = ?tempdb(),
- create_db(Db1, [{q, 1}, {n, 1}]),
- #{db1 => Db1}.
-
-teardown(#{} = Dbs) ->
- mem3_reshard:reset_state(),
- maps:map(fun(_, Db) -> delete_db(Db) end, Dbs).
-
-start_couch() ->
- test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-mem3_reshard_changes_feed_test_() ->
- {
- "mem3 shard split changes feed tests",
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun normal_feed_should_work_after_split/1,
- fun continuous_feed_should_work_during_split/1
- ]
- }
- }
- }.
-
-normal_feed_should_work_after_split(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocSpec = #{
- docs => [1, 10],
- delete => [5, 6]
- },
- add_test_docs(Db, DocSpec),
-
- % gather pre-split changes
- BaseArgs = #changes_args{feed = "normal", dir = fwd, since = 0},
- {ok, OldChanges, OldEndSeq} = get_changes_feed(Db, BaseArgs),
-
- % Split the shard
- split_and_wait(Db),
-
- % verify changes list consistent for all the old seqs
- lists:foldl(
- fun(#{seq := Seq} = C, ExpectedChanges) ->
- Args = BaseArgs#changes_args{since = Seq},
- {ok, Changes, _EndSeq} = get_changes_feed(Db, Args),
- ?assertChanges(ExpectedChanges, Changes),
- [C | ExpectedChanges]
- end,
- [],
- OldChanges
- ),
-
- % confirm that the old LastSeq is respected
- Args1 = BaseArgs#changes_args{since = OldEndSeq},
- {ok, Changes1, EndSeq1} = get_changes_feed(Db, Args1),
- ?assertChanges([], Changes1),
-
- % confirm that the new LastSeq is also respected
- Args2 = BaseArgs#changes_args{since = EndSeq1},
- {ok, Changes2, EndSeq2} = get_changes_feed(Db, Args2),
- ?assertChanges([], Changes2),
- ?assertEqual(EndSeq2, EndSeq1),
-
- % confirm we didn't lose any changes and have a consistent last seq
- {ok, Changes3, EndSeq3} = get_changes_feed(Db, BaseArgs),
- ?assertChanges(OldChanges, Changes3),
-
- % add some docs
- add_test_docs(Db, #{docs => [11, 15]}),
- Args4 = BaseArgs#changes_args{since = EndSeq3},
- {ok, Changes4, EndSeq4} = get_changes_feed(Db, Args4),
- AddedChanges = [#{id => ID} || #doc{id = ID} <- docs([11, 15])],
- ?assertChanges(AddedChanges, Changes4),
-
- % confirm that include_docs and deleted work
- Args5 = BaseArgs#changes_args{include_docs = true},
- {ok, Changes5, EndSeq5} = get_changes_feed(Db, Args5),
- ?assertEqual(EndSeq4, EndSeq5),
- [SampleChange] = [C || #{id := ID} = C <- Changes5, ID == <<"00005">>],
- ?assertMatch(#{deleted := true}, SampleChange),
- ?assertMatch(#{doc := {Body}} when is_list(Body), SampleChange),
-
- % update and delete some pre and post split docs
- AllDocs = [couch_doc:from_json_obj(Doc) || #{doc := Doc} <- Changes5],
- UpdateDocs = lists:filtermap(
- fun
- (#doc{id = <<"00002">>}) -> true;
- (#doc{id = <<"00012">>}) -> true;
- (#doc{id = <<"00004">>} = Doc) -> {true, Doc#doc{deleted = true}};
- (#doc{id = <<"00014">>} = Doc) -> {true, Doc#doc{deleted = true}};
- (_) -> false
- end,
- AllDocs
- ),
- update_docs(Db, UpdateDocs),
-
- Args6 = BaseArgs#changes_args{since = EndSeq5},
- {ok, Changes6, EndSeq6} = get_changes_feed(Db, Args6),
- UpdatedChanges = [#{id => ID} || #doc{id = ID} <- UpdateDocs],
- ?assertChanges(UpdatedChanges, Changes6),
- [#{seq := Seq6} | _] = Changes6,
- ?assertEqual(EndSeq6, Seq6),
-
- Args7 = Args6#changes_args{dir = rev, limit = 4},
- {ok, Changes7, EndSeq7} = get_changes_feed(Db, Args7),
- ?assertEqual(4, length(Changes7)),
- [#{seq := Seq7} | _] = Changes7,
- ?assertEqual(EndSeq7, Seq7)
- end)}.
-
-continuous_feed_should_work_during_split(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- {UpdaterPid, UpdaterRef} = spawn_monitor(fun() ->
- Updater = fun U({State, I}) ->
- receive
- {get_state, {Pid, Ref}} ->
- Pid ! {state, Ref, {State, I}},
- U({State, I});
- add ->
- DocSpec = #{docs => [I, I]},
- add_test_docs(Db, DocSpec),
- U({State, I + 1});
- split ->
- spawn_monitor(fun() -> split_and_wait(Db) end),
- U({"in_process", I});
- stop ->
- receive
- {'DOWN', _, process, _, _} -> ok
- end,
- ok
- end
- end,
- Updater({"before", 1})
- end),
-
- Callback = fun
- (start, Acc) ->
- {ok, Acc};
- (waiting_for_updates, Acc) ->
- Ref = make_ref(),
- UpdaterPid ! {get_state, {self(), Ref}},
- receive
- {state, Ref, {State, _}} -> ok
- end,
- case {State, length(Acc)} of
- {"before", N} when N < 5 ->
- UpdaterPid ! add,
- {ok, Acc};
- {"before", _} ->
- UpdaterPid ! split,
- {ok, Acc};
- {"in_process", N} when N < 10 ->
- UpdaterPid ! add,
- {ok, Acc};
- {"in_process", _} ->
- {ok, Acc}
- end;
- (timeout, Acc) ->
- {ok, Acc};
- ({change, {Change}}, Acc) ->
- CM = maps:from_list(Change),
- {ok, [CM | Acc]};
- ({stop, EndSeq, _Pending}, Acc) ->
- % Note that the updater is still running
- {stop, EndSeq, Acc}
- end,
-
- BaseArgs = #changes_args{
- feed = "continuous",
- heartbeat = 100,
- timeout = 1000
- },
- StopResult = get_changes_feed(Db, BaseArgs, Callback),
-
- % Changes feed stopped when source shard was deleted
- ?assertMatch({stop, _, _}, StopResult),
- {stop, StopEndSeq, StopChanges} = StopResult,
-
- % Add 5 extra docs to the db right after changes feed was stopped
- [UpdaterPid ! add || _ <- lists:seq(1, 5)],
-
- % Get the number of documents that the updater had added
- Ref = make_ref(),
- UpdaterPid ! {get_state, {self(), Ref}},
- DocCount =
- receive
- {state, Ref, {_, I}} -> I - 1
- end,
-
- UpdaterPid ! stop,
- receive
- {'DOWN', UpdaterRef, process, UpdaterPid, normal} ->
- ok;
- {'DOWN', UpdaterRef, process, UpdaterPid, Error} ->
- erlang:error(
- {test_context_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, Error},
- {reason, "Updater died"}
- ]}
- )
- end,
-
- AfterArgs = #changes_args{feed = "normal", since = StopEndSeq},
- {ok, AfterChanges, _} = get_changes_feed(Db, AfterArgs),
- DocIDs = [Id || #{id := Id} <- StopChanges ++ AfterChanges],
- ExpectedDocIDs = [doc_id(<<>>, N) || N <- lists:seq(1, DocCount)],
- ?assertEqual(ExpectedDocIDs, lists:usort(DocIDs))
- end)}.
-
-split_and_wait(Db) ->
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
- ResultShards = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(ResultShards)).
-
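-% Poll the resharding job until its job_state matches State (up to 30 seconds).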
-wait_state(JobId, State) ->
- test_util:wait(
- fun() ->
- case mem3_reshard:job(JobId) of
- {ok, {Props}} ->
- case couch_util:get_value(job_state, Props) of
- State ->
- ok;
- _ ->
- timer:sleep(100),
- wait
- end;
- {error, not_found} ->
- timer:sleep(100),
- wait
- end
- end,
- 30000
- ).
-
-get_changes_feed(Db, Args) ->
- get_changes_feed(Db, Args, fun changes_callback/2).
-
-get_changes_feed(Db, Args, Callback) ->
- with_proc(fun() ->
- fabric:changes(Db, Callback, [], Args)
- end).
-
-changes_callback(start, Acc) ->
- {ok, Acc};
-changes_callback({change, {Change}}, Acc) ->
- CM = maps:from_list(Change),
- {ok, [CM | Acc]};
-changes_callback({stop, EndSeq, _Pending}, Acc) ->
- {ok, Acc, EndSeq}.
-
-%% common helpers from here
-
-create_db(DbName, Opts) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
-delete_db(DbName) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
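-% Run Fun in a spawned, monitored process and return its result; the process is
-% killed (and an error raised) if it does not finish within Timeout.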
-with_proc(Fun) ->
- with_proc(Fun, undefined, 30000).
-
-with_proc(Fun, GroupLeader) ->
- with_proc(Fun, GroupLeader, 30000).
-
-with_proc(Fun, GroupLeader, Timeout) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- case GroupLeader of
- undefined -> ok;
- _ -> erlang:group_leader(GroupLeader, self())
- end,
- exit({with_proc_res, Fun()})
- end),
- receive
- {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
- Res;
- {'DOWN', Ref, process, Pid, Error} ->
- error(Error)
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- error({with_proc_timeout, Fun, Timeout})
- end.
-
-add_test_docs(DbName, #{} = DocSpec) ->
- Docs = docs(maps:get(docs, DocSpec, [])),
- Res = update_docs(DbName, Docs),
- Docs1 = lists:map(
- fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end,
- lists:zip(Docs, Res)
- ),
- case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
- [] -> ok;
- [_ | _] = Deleted -> update_docs(DbName, Deleted)
- end,
- ok.
-
-update_docs(DbName, Docs) ->
- with_proc(fun() ->
- case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
- {accepted, Res} -> Res;
- {ok, Res} -> Res
- end
- end).
-
-delete_docs([S, E], Docs) when E >= S ->
- ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(
- fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end,
- Docs
- );
-delete_docs(_, _) ->
- [].
-
-docs([S, E]) when E >= S ->
- [doc(<<"">>, I) || I <- lists:seq(S, E)];
-docs(_) ->
- [].
-
-doc(Pref, Id) ->
- Body = [{<<"a">>, <<"b">>}],
- doc(Pref, Id, Body, 42).
-
-doc(Pref, Id, BodyProps, AttSize) ->
- #doc{
- id = doc_id(Pref, Id),
- body = {BodyProps},
- atts = atts(AttSize)
- }.
-
-doc_id(Pref, Id) ->
- IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
- <<Pref/binary, IdBin/binary>>.
-
-atts(0) ->
- [];
-atts(Size) when is_integer(Size), Size >= 1 ->
- Data = <<<<"x">> || _ <- lists:seq(1, Size)>>,
- [
- couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])
- ].
diff --git a/src/mem3/test/eunit/mem3_reshard_test.erl b/src/mem3/test/eunit/mem3_reshard_test.erl
deleted file mode 100644
index be539b47a..000000000
--- a/src/mem3/test/eunit/mem3_reshard_test.erl
+++ /dev/null
@@ -1,990 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/src/mem3_reshard.hrl").
-% for all_docs function
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(ID, <<"_id">>).
--define(TIMEOUT, 60).
-
-setup() ->
- HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- case HaveDreyfus of
- false -> ok;
- true -> mock_dreyfus_indices()
- end,
-
- HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
- case HaveHastings of
- false -> ok;
- true -> mock_hastings_indices()
- end,
- {Db1, Db2} = {?tempdb(), ?tempdb()},
- create_db(Db1, [{q, 1}, {n, 1}]),
- PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
- create_db(Db2, [{q, 1}, {n, 1}, {props, PartProps}]),
- config:set("reshard", "retry_interval_sec", "0", _Persist1 = false),
- config:set("reshard", "index_retry_interval_sec", "0", _Persist2 = false),
- #{db1 => Db1, db2 => Db2}.
-
-teardown(#{} = Dbs) ->
- mem3_reshard:reset_state(),
- maps:map(fun(_, Db) -> delete_db(Db) end, Dbs),
- config:delete("reshard", "index_retry_interval_sec", _Persist1 = false),
- config:delete("reshard", "retry_interval_sec", _Persist2 = false),
- meck:unload().
-
-start_couch() ->
- test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-mem3_reshard_db_test_() ->
- {
- "mem3 shard split db tests",
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun split_one_shard/1,
- fun split_shard_with_lots_of_purges/1,
- fun update_docs_before_topoff1/1,
- fun indices_are_built/1,
- fun indices_can_be_built_with_errors/1,
- fun split_partitioned_db/1,
- fun split_twice/1,
- fun couch_events_are_emitted/1,
- fun retries_work/1,
- fun target_reset_in_initial_copy/1,
- fun split_an_incomplete_shard_map/1,
- fun target_shards_are_locked/1
- ]
- }
- }
- }.
-
-% This is a basic test to check that shard splitting preserves documents, and
-% db meta props like revs limits and security.
-split_one_shard(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1},
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
-
- % Split the one shard
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update seq prefix number is a sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- % Finally compare that the documents are still there after the split
- ?assertEqual(Docs0, Docs1),
-
- % Don't forget about the local docs, but don't include internal checkpoints
- % as some of those are munged and transformed during the split
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
-
-% Test to check that a shard with a high number of purges can be split
-split_shard_with_lots_of_purges(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- % Set a low purge infos limit; we are planning to overrun it
- set_purge_infos_limit(Db, 10),
-
- % Add docs 1..20 and purge them
- add_test_docs(Db, #{docs => [1, 20]}),
- IdRevs = maps:fold(
- fun(Id, #{<<"_rev">> := Rev}, Acc) ->
- [{Id, [Rev]} | Acc]
- end,
- [],
- get_all_docs(Db)
- ),
- ?assertMatch({ok, _}, purge_docs(Db, IdRevs)),
-
- % Compact to trim the purge sequence
- ok = compact(Db),
-
- % Add some extra docs; these won't be purged
- add_test_docs(Db, #{docs => [21, 30]}),
- Docs0 = get_all_docs(Db),
-
- % Save db info before splitting
- DbInfo0 = get_db_info(Db),
-
- % Split the one shard
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(10, get_purge_infos_limit(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Finally compare that the documents are still there after the split
- ?assertEqual(Docs0, Docs1)
- end)}.
-
-% This test checks that documents added while the shard is being split are not
-% lost. The topoff1 state happens before indices are built.
-update_docs_before_topoff1(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- add_test_docs(Db, #{docs => 10}),
-
- intercept_state(topoff1),
-
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
-
- receive
- {JobPid, topoff1} -> ok
- end,
- add_test_docs(Db, #{docs => [10, 19], local => 1}),
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
- DbInfo0 = get_db_info(Db),
- JobPid ! continue,
-
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update sequence after initial copy with 10 docs would be 10 on each
- % target shard (to match the source) and the total update sequence
- % would have been 20. But then 10 more docs were added (3 might have
- % ended up on one target and 7 on another) so the final update sequence
- % would then be 20 + 10 = 30.
- ?assertMatch(#{<<"update_seq">> := 30}, update_seq_to_num(DbInfo1)),
-
- ?assertEqual(Docs0, Docs1),
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
-
-% This tests that indices are built during shard splitting.
-indices_are_built(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
-
- add_test_docs(Db, #{docs => 10, mrview => 2, search => 2, geo => 2}),
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- MRViewGroupInfo = get_group_info(Db, <<"_design/mrview00000">>),
- ?assertMatch(#{<<"update_seq">> := 32}, MRViewGroupInfo),
-
- HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- case HaveDreyfus of
- false ->
- ok;
- true ->
- % 4 because there are 2 indices and 2 target shards
- ?assertEqual(4, meck:num_calls(dreyfus_index, await, 2))
- end,
-
- HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
- case HaveHastings of
- false ->
- ok;
- true ->
- % 4 because there are 2 indices and 2 target shards
- ?assertEqual(4, meck:num_calls(hastings_index, await, 2))
- end
- end)}.
-
-% This tests that indices are built despite intermittent errors.
-indices_can_be_built_with_errors(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- add_test_docs(Db, #{docs => 10, mrview => 2, search => 2, geo => 2}),
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- meck:expect(
- couch_index_server,
- get_index,
- 2,
- meck:seq([
- meck:raise(error, foo_reason),
- meck:raise(exit, killed),
- meck:passthrough()
- ])
- ),
- meck:expect(
- couch_index,
- get_state,
- 2,
- meck:seq([
- meck:raise(error, bar_reason),
- meck:raise(exit, killed),
- meck:val({not_ok, other}),
- meck:passthrough()
- ])
- ),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
- % Normally we would expect 4 calls (2 shards x 2 mrviews), but there were 2
- % failures in get_index/2 and 3 in get_state/2 for a total of 4 + 5 = 9
- ?assertEqual(9, meck:num_calls(couch_index_server, get_index, 2)),
- % Normally there would be 4 calls (2 shards x 2 mrviews), but there were
- % 3 extra failures in get_state/2 for a total of 4 + 3 = 7
- ?assertEqual(7, meck:num_calls(couch_index, get_state, 2)),
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- MRViewGroupInfo = get_group_info(Db, <<"_design/mrview00000">>),
- ?assertMatch(#{<<"update_seq">> := 32}, MRViewGroupInfo)
- end)}.
-
-mock_dreyfus_indices() ->
- meck:expect(dreyfus_index, design_doc_to_indexes, fun(Doc) ->
- #doc{body = {BodyProps}} = Doc,
- case couch_util:get_value(<<"indexes">>, BodyProps) of
- undefined ->
- [];
- {[_]} ->
- [{dreyfus, <<"db">>, dreyfus_index1}]
- end
- end),
- meck:expect(dreyfus_index_manager, get_index, fun(_, _) -> {ok, pid} end),
- meck:expect(dreyfus_index, await, fun(_, _) -> {ok, indexpid, someseq} end).
-
-mock_hastings_indices() ->
- meck:expect(hastings_index, design_doc_to_indexes, fun(Doc) ->
- #doc{body = {BodyProps}} = Doc,
- case couch_util:get_value(<<"st_indexes">>, BodyProps) of
- undefined ->
- [];
- {[_]} ->
- [{hastings, <<"db">>, hastings_index1}]
- end
- end),
- meck:expect(hastings_index_manager, get_index, fun(_, _) -> {ok, pid} end),
- meck:expect(hastings_index, await, fun(_, _) -> {ok, someseq} end).
-
-% Split partitioned database
-split_partitioned_db(#{db2 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocSpec = #{
- pdocs => #{
- <<"PX">> => 5,
- <<"PY">> => 5
- },
- mrview => 1,
- local => 1
- },
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
- PX0 = get_partition_info(Db, <<"PX">>),
- PY0 = get_partition_info(Db, <<"PY">>),
-
- % Split the one shard
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update seq prefix number is a sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- % Finally compare that documents are still there after the split
- ?assertEqual(Docs0, Docs1),
-
- ?assertEqual(PX0, get_partition_info(Db, <<"PX">>)),
- ?assertEqual(PY0, get_partition_info(Db, <<"PY">>)),
-
- % Don't forget about the local docs, but don't include internal checkpoints
- % as some of those are munged and transformed during the split
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
-
-% Make sure a shard can be split again after it was split once. This guards
-% against too many docs ending up in some range, such that on the next split
-% they'd fail to fit into any of the new target ranges.
-split_twice(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100},
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
-
- % Split the one shard
- [#shard{name = Shard1}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId1} = mem3_reshard:start_split_job(Shard1),
- wait_state(JobId1, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update seq prefix number is a sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- ?assertEqual(Docs0, Docs1),
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)),
-
- % Split the first range again
- [#shard{name = Shard2}, _] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId2} = mem3_reshard:start_split_job(Shard2),
- wait_state(JobId2, completed),
-
- Shards2 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(3, length(Shards2)),
- [R3, R4, R5] = [R || #shard{range = R} <- Shards2],
- ?assertEqual([16#00000000, 16#3fffffff], R3),
- ?assertEqual([16#40000000, 16#7fffffff], R4),
- ?assertEqual([16#80000000, 16#ffffffff], R5),
-
- % Check metadata bits after the second split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo2 = get_db_info(Db),
- Docs2 = get_all_docs(Db),
- Local2 = get_local_docs(Db),
-
- ?assertEqual(without_seqs(DbInfo1), without_seqs(DbInfo2)),
- % Update seq prefix number is a sum of all shard update sequences
- % But only 1 shard out of 2 was split
- #{<<"update_seq">> := UpdateSeq2} = update_seq_to_num(DbInfo2),
- ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2),
- ?assertEqual(Docs1, Docs2),
- ?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2))
- end)}.
-
-couch_events_are_emitted(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- couch_event:register_all(self()),
-
- % Split the one shard
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- Flush = fun F(Events) ->
- receive
- {'$couch_event', DbName, Event} when
- Event =:= deleted orelse
- Event =:= updated
- ->
- case binary:match(DbName, Db) of
- nomatch -> F(Events);
- {_, _} -> F([Event | Events])
- end
- after 0 ->
- lists:reverse(Events)
- end
- end,
- Events = Flush([]),
- StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events),
- ?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted),
- couch_event:unregister(self())
- end)}.
-
-retries_work(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- meck:expect(couch_db_split, split, fun(_, _, _) ->
- error(kapow)
- end),
-
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
-
- wait_state(JobId, failed),
- ?assertEqual(7, meck:num_calls(couch_db_split, split, 3))
- end)}.
-
-target_reset_in_initial_copy(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
- Job = #job{
- source = Src,
- target = [#shard{name = <<"t1">>}, #shard{name = <<"t2">>}],
- job_state = running,
- split_state = initial_copy
- },
- meck:expect(couch_db_split, cleanup_target, 2, ok),
- meck:expect(couch_server, exists, fun
- (<<"t1">>) -> true;
- (<<"t2">>) -> true;
- (DbName) -> meck:passthrough([DbName])
- end),
- JobPid = spawn(fun() -> mem3_reshard_job:initial_copy_impl(Job) end),
- meck:wait(2, couch_db_split, cleanup_target, ['_', '_'], 5000),
- exit(JobPid, kill),
- ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2))
- end)}.
-
-split_an_incomplete_shard_map(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- meck:expect(mem3_util, calculate_max_n, 1, 0),
- ?assertMatch(
- {error, {not_enough_shard_copies, _}},
- mem3_reshard:start_split_job(Shard)
- )
- end)}.
-
-% Opening a target db in the initial copy phase will throw an error
-target_shards_are_locked(#{db1 := Db}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- add_test_docs(Db, #{docs => 10}),
-
- % Make the job stop right when it is about to copy the docs
- TestPid = self(),
- meck:new(couch_db, [passthrough]),
- meck:expect(couch_db, start_link, fun(Engine, TName, FilePath, Opts) ->
- TestPid ! {start_link, self(), TName},
- receive
- continue ->
- meck:passthrough([Engine, TName, FilePath, Opts])
- end
- end),
-
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- {Target0, JobPid} =
- receive
- {start_link, Pid, TName} -> {TName, Pid}
- end,
- ?assertEqual(
- {error, {locked, <<"shard splitting">>}},
- couch_db:open_int(Target0, [])
- ),
-
- % Send two continues for two targets
- JobPid ! continue,
- JobPid ! continue,
-
- wait_state(JobId, completed)
- end)}.
-
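-% Pause the resharding job (via meck) when it reaches the given split state:
-% notify the test process and wait for it to send `continue' or `cancel'.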
-intercept_state(State) ->
- TestPid = self(),
- meck:new(mem3_reshard_job, [passthrough]),
- meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
- case Job#job.split_state of
- State ->
- TestPid ! {self(), State},
- receive
- continue -> meck:passthrough([Job]);
- cancel -> ok
- end;
- _ ->
- meck:passthrough([Job])
- end
- end).
-
-wait_state(JobId, State) ->
- test_util:wait(
- fun() ->
- case mem3_reshard:job(JobId) of
- {ok, {Props}} ->
- case couch_util:get_value(job_state, Props) of
- State ->
- ok;
- _ ->
- timer:sleep(100),
- wait
- end;
- {error, not_found} ->
- timer:sleep(100),
- wait
- end
- end,
- 30000
- ).
-
-set_revs_limit(DbName, Limit) ->
- with_proc(fun() -> fabric:set_revs_limit(DbName, Limit, [?ADMIN_CTX]) end).
-
-get_revs_limit(DbName) ->
- with_proc(fun() -> fabric:get_revs_limit(DbName) end).
-
-get_purge_infos_limit(DbName) ->
- with_proc(fun() -> fabric:get_purge_infos_limit(DbName) end).
-
-set_purge_infos_limit(DbName, Limit) ->
- with_proc(fun() ->
- fabric:set_purge_infos_limit(DbName, Limit, [?ADMIN_CTX])
- end).
-
-purge_docs(DbName, DocIdRevs) ->
- with_proc(fun() ->
- fabric:purge_docs(DbName, DocIdRevs, [])
- end).
-
-compact(DbName) ->
- InitFileSize = get_db_file_size(DbName),
- ok = with_proc(fun() -> fabric:compact(DbName) end),
- test_util:wait(
- fun() ->
- case {compact_running(DbName), get_db_file_size(DbName)} of
- {true, _} -> wait;
- {false, FileSize} when FileSize == InitFileSize -> wait;
- {false, FileSize} when FileSize < InitFileSize -> ok
- end
- end,
- 5000,
- 200
- ).
-
-compact_running(DbName) ->
- {ok, DbInfo} = with_proc(fun() -> fabric:get_db_info(DbName) end),
- #{<<"compact_running">> := CompactRunning} = to_map(DbInfo),
- CompactRunning.
-
-get_db_file_size(DbName) ->
- {ok, DbInfo} = with_proc(fun() -> fabric:get_db_info(DbName) end),
- #{<<"sizes">> := #{<<"file">> := FileSize}} = to_map(DbInfo),
- FileSize.
-
-set_security(DbName, SecObj) ->
- with_proc(fun() -> fabric:set_security(DbName, SecObj) end).
-
-get_security(DbName) ->
- with_proc(fun() -> fabric:get_security(DbName, [?ADMIN_CTX]) end).
-
-get_db_info(DbName) ->
- with_proc(fun() ->
- {ok, Info} = fabric:get_db_info(DbName),
- maps:with(
- [
- <<"db_name">>,
- <<"doc_count">>,
- <<"props">>,
- <<"doc_del_count">>,
- <<"update_seq">>,
- <<"purge_seq">>,
- <<"disk_format_version">>
- ],
- to_map(Info)
- )
- end).
-
-get_group_info(DbName, DesignId) ->
- with_proc(fun() ->
- {ok, GInfo} = fabric:get_view_group_info(DbName, DesignId),
- maps:with(
- [
- <<"language">>, <<"purge_seq">>, <<"signature">>, <<"update_seq">>
- ],
- to_map(GInfo)
- )
- end).
-
-get_partition_info(DbName, Partition) ->
- with_proc(fun() ->
- {ok, PInfo} = fabric:get_partition_info(DbName, Partition),
- maps:with(
- [
- <<"db_name">>, <<"doc_count">>, <<"doc_del_count">>, <<"partition">>
- ],
- to_map(PInfo)
- )
- end).
-
-get_all_docs(DbName) ->
- get_all_docs(DbName, #mrargs{}).
-
-get_all_docs(DbName, #mrargs{} = QArgs0) ->
- GL = erlang:group_leader(),
- with_proc(
- fun() ->
- Cb = fun
- ({row, Props}, Acc) ->
- Doc = to_map(couch_util:get_value(doc, Props)),
- #{?ID := Id} = Doc,
- {ok, Acc#{Id => Doc}};
- ({meta, _}, Acc) ->
- {ok, Acc};
- (complete, Acc) ->
- {ok, Acc}
- end,
- QArgs = QArgs0#mrargs{include_docs = true},
- {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
- Docs
- end,
- GL
- ).
-
-get_local_docs(DbName) ->
- LocalNS = {namespace, <<"_local">>},
- maps:map(
- fun(_, Doc) ->
- maps:without([<<"_rev">>], Doc)
- end,
- get_all_docs(DbName, #mrargs{extra = [LocalNS]})
- ).
-
-without_seqs(#{} = InfoMap) ->
- maps:without([<<"update_seq">>, <<"purge_seq">>], InfoMap).
-
-without_meta_locals(#{} = Local) ->
- maps:filter(
- fun
- (<<"_local/purge-mrview-", _/binary>>, _) -> false;
- (<<"_local/shard-sync-", _/binary>>, _) -> false;
- (_, _) -> true
- end,
- Local
- ).
-
-update_seq_to_num(#{} = InfoMap) ->
- maps:map(
- fun
- (<<"update_seq">>, Seq) -> seq_to_num(Seq);
- (<<"purge_seq">>, PSeq) -> seq_to_num(PSeq);
- (_, V) -> V
- end,
- InfoMap
- ).
-
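-% Extract the integer prefix from a clustered sequence such as <<"42-g1AA...">>.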
-seq_to_num(Seq) ->
- [SeqNum, _] = binary:split(Seq, <<"-">>),
- binary_to_integer(SeqNum).
-
-to_map([_ | _] = Props) ->
- to_map({Props});
-to_map({[_ | _]} = EJson) ->
- jiffy:decode(jiffy:encode(EJson), [return_maps]).
-
-create_db(DbName, Opts) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
-delete_db(DbName) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
-with_proc(Fun) ->
- with_proc(Fun, undefined, 30000).
-
-with_proc(Fun, GroupLeader) ->
- with_proc(Fun, GroupLeader, 30000).
-
-with_proc(Fun, GroupLeader, Timeout) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- case GroupLeader of
- undefined -> ok;
- _ -> erlang:group_leader(GroupLeader, self())
- end,
- exit({with_proc_res, Fun()})
- end),
- receive
- {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
- Res;
- {'DOWN', Ref, process, Pid, Error} ->
- error(Error)
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- error({with_proc_timeout, Fun, Timeout})
- end.
-
-add_test_docs(DbName, #{} = DocSpec) ->
- Docs =
- docs(maps:get(docs, DocSpec, [])) ++
- pdocs(maps:get(pdocs, DocSpec, #{})) ++
- ddocs(mrview, maps:get(mrview, DocSpec, [])) ++
- ddocs(search, maps:get(search, DocSpec, [])) ++
- ddocs(geo, maps:get(geo, DocSpec, [])) ++
- ldocs(maps:get(local, DocSpec, [])),
- Res = update_docs(DbName, Docs),
- Docs1 = lists:map(
- fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end,
- lists:zip(Docs, Res)
- ),
- case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
- [] -> ok;
- [_ | _] = Deleted -> update_docs(DbName, Deleted)
- end,
- ok.
-
-update_docs(DbName, Docs) ->
- with_proc(fun() ->
- case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
- {accepted, Res} -> Res;
- {ok, Res} -> Res
- end
- end).
-
-delete_docs([S, E], Docs) when E >= S ->
- ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(
- fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end,
- Docs
- );
-delete_docs(_, _) ->
- [].
-
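-% Generate test docs for each partition; map keys are partition names and values
-% are doc specs, producing doc ids such as <<"PX:00000">>.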
-pdocs(#{} = PMap) ->
- maps:fold(
- fun(Part, DocSpec, DocsAcc) ->
- docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
- end,
- [],
- PMap
- ).
-
-docs(DocSpec) ->
- docs(DocSpec, <<"">>).
-
-docs(N, Prefix) when is_integer(N), N > 0 ->
- docs([0, N - 1], Prefix);
-docs([S, E], Prefix) when E >= S ->
- [doc(Prefix, I) || I <- lists:seq(S, E)];
-docs(_, _) ->
- [].
-
-ddocs(Type, N) when is_integer(N), N > 0 ->
- ddocs(Type, [0, N - 1]);
-ddocs(Type, [S, E]) when E >= S ->
- Body = ddprop(Type),
- BType = atom_to_binary(Type, utf8),
- [doc(<<"_design/", BType/binary>>, I, Body, 0) || I <- lists:seq(S, E)];
-ddocs(_, _) ->
- [].
-
-ldocs(N) when is_integer(N), N > 0 ->
- ldocs([0, N - 1]);
-ldocs([S, E]) when E >= S ->
- [doc(<<"_local/">>, I, bodyprops(), 0) || I <- lists:seq(S, E)];
-ldocs(_) ->
- [].
-
-doc(Pref, Id) ->
- Body = bodyprops(),
- doc(Pref, Id, Body, 42).
-
-doc(Pref, Id, BodyProps, AttSize) ->
- #doc{
- id = doc_id(Pref, Id),
- body = {BodyProps},
- atts = atts(AttSize)
- }.
-
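-% Zero-pad Id to five digits and append it to Pref, e.g. doc_id(<<"x">>, 7) -> <<"x00007">>.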
-doc_id(Pref, Id) ->
- IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
- <<Pref/binary, IdBin/binary>>.
-
-ddprop(mrview) ->
- [
- {<<"views">>,
- {[
- {<<"v1">>,
- {[
- {<<"map">>, <<"function(d){emit(d);}">>}
- ]}}
- ]}}
- ];
-ddprop(geo) ->
- [
- {<<"st_indexes">>,
- {[
- {<<"area">>,
- {[
- {<<"analyzer">>, <<"standard">>},
- {<<"index">>, <<"function(d){if(d.g){st_index(d.g)}}">>}
- ]}}
- ]}}
- ];
-ddprop(search) ->
- [
- {<<"indexes">>,
- {[
- {<<"types">>,
- {[
- {<<"index">>, <<"function(d){if(d.g){st_index(d.g.type)}}">>}
- ]}}
- ]}}
- ].
-
-bodyprops() ->
- [
- {<<"g">>,
- {[
- {<<"type">>, <<"Polygon">>},
- {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
- ]}}
- ].
-
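-% Build a single Size-byte binary attachment named <<"att">> (none when Size is 0).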
-atts(0) ->
- [];
-atts(Size) when is_integer(Size), Size >= 1 ->
- Data = <<<<"x">> || _ <- lists:seq(1, Size)>>,
- [
- couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])
- ].
diff --git a/src/mem3/test/eunit/mem3_ring_prop_tests.erl b/src/mem3/test/eunit/mem3_ring_prop_tests.erl
deleted file mode 100644
index 3e2310b21..000000000
--- a/src/mem3/test/eunit/mem3_ring_prop_tests.erl
+++ /dev/null
@@ -1,160 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_ring_prop_tests).
-
--ifdef(WITH_PROPER).
-
--include_lib("couch/include/couch_eunit_proper.hrl").
-
-property_test_() ->
- ?EUNIT_QUICKCHECK(60).
-
-% Properties
-
-prop_get_ring_with_connected_intervals() ->
- ?FORALL(
- {Start, End},
- oneof(ranges()),
- ?FORALL(
- Intervals,
- g_connected_intervals(Start, End),
- mem3_util:get_ring(Intervals, Start, End) =:= lists:sort(Intervals)
- )
- ).
-
-prop_get_ring_connected_plus_random_intervals() ->
- ?FORALL(
- {Intervals, Extra},
- {g_connected_intervals(1, 100), g_random_intervals(1, 100)},
- ?IMPLIES(
- sets:is_disjoint(endpoints(Intervals), endpoints(Extra)),
- begin
- AllInts = Intervals ++ Extra,
- Ring = mem3_util:get_ring(AllInts, 1, 100),
- Ring =:= lists:sort(Intervals)
- end
- )
- ).
-
-prop_get_ring_connected_with_sub_intervals() ->
- ?FORALL(
- Intervals,
- g_connected_intervals(1, 100),
- ?FORALL(
- SubIntervals,
- g_subintervals(Intervals),
- begin
- AllInts = Intervals ++ SubIntervals,
- Ring = mem3_util:get_ring(AllInts, 1, 100),
- Ring =:= lists:sort(Intervals)
- end
- )
- ).
-
-prop_get_ring_with_disconnected_intervals() ->
- ?FORALL(
- {Start, End},
- oneof(ranges()),
- ?FORALL(
- Intervals,
- g_disconnected_intervals(Start, End),
- mem3_util:get_ring(Intervals, Start, End) =:= []
- )
- ).
-
-% Generators
-
-ranges() ->
- [{1, 10}, {0, 2 bsl 31 - 1}, {2 bsl 31 - 10, 2 bsl 31 - 1}].
-
-g_connected_intervals(Begin, End) ->
- ?SIZED(Size, g_connected_intervals(Begin, End, 5 * Size)).
-
-g_connected_intervals(Begin, End, Split) when Begin =< End ->
- ?LET(
- N,
- choose(0, Split),
- begin
- if
- N == 0 ->
- [{Begin, End}];
- N > 0 ->
- Ns = lists:seq(1, N - 1),
- Bs = lists:usort([rand_range(Begin, End) || _ <- Ns]),
- Es = [B - 1 || B <- Bs],
- shuffle(lists:zip([Begin] ++ Bs, Es ++ [End]))
- end
- end
- ).
-
-g_non_trivial_connected_intervals(Begin, End, Split) ->
- ?SUCHTHAT(
- Connected,
- g_connected_intervals(Begin, End, Split),
- length(Connected) > 1
- ).
-
-g_disconnected_intervals(Begin, End) ->
- ?SIZED(Size, g_disconnected_intervals(Begin, End, Size)).
-
-g_disconnected_intervals(Begin, End, Split) when Begin =< End ->
- ?LET(
- Connected,
- g_non_trivial_connected_intervals(Begin, End, Split),
- begin
- I = rand:uniform(length(Connected)) - 1,
- {Before, [_ | After]} = lists:split(I, Connected),
- Before ++ After
- end
- ).
-
-g_subintervals(Intervals) ->
- lists:foldl(fun(R, Acc) -> split_interval(R) ++ Acc end, [], Intervals).
-
-split_interval({B, E}) when E - B >= 2 ->
- E1 = rand_range(B, E) - 1,
- B1 = E1 + 1,
- [{B, E1}, {B1, E}];
-split_interval(_Range) ->
- [].
-
-g_random_intervals(Start, End) ->
- ?LET(
- N,
- choose(1, 10),
- begin
- [
- begin
- B = rand_range(Start, End),
- E = rand_range(B, End),
- {B, E}
- end
- || _ <- lists:seq(1, N)
- ]
- end
- ).
-
-rand_range(B, B) ->
- B;
-rand_range(B, E) ->
- B + rand:uniform(E - B).
-
-shuffle(L) ->
- Tagged = [{rand:uniform(), X} || X <- L],
- [X || {_, X} <- lists:sort(Tagged)].
-
-endpoints(Ranges) ->
- {Begins, Ends} = lists:unzip(Ranges),
- sets:from_list(Begins ++ Ends).
-
--endif.
diff --git a/src/mem3/test/eunit/mem3_seeds_test.erl b/src/mem3/test/eunit/mem3_seeds_test.erl
deleted file mode 100644
index 2c9c1d383..000000000
--- a/src/mem3/test/eunit/mem3_seeds_test.erl
+++ /dev/null
@@ -1,82 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_seeds_test).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-a_test_() ->
- Tests = [
- {"empty seedlist should set status ok", fun empty_seedlist_status_ok/0},
- {"all seedlist nodes unreachable keeps status seeding", fun seedlist_misconfiguration/0},
- {"seedlist entries should be present in _nodes", fun check_nodelist/0},
- {"optional local _users db in mem3_sync:local_dbs()", fun check_local_dbs/0}
- ],
- {setup, fun setup/0, fun teardown/1, Tests}.
-
-empty_seedlist_status_ok() ->
- ok = application:start(mem3),
- try
- {ok, {Result}} = mem3_seeds:get_status(),
- ?assertEqual({[]}, couch_util:get_value(seeds, Result)),
- ?assertEqual(ok, couch_util:get_value(status, Result))
- after
- cleanup()
- end.
-
-seedlist_misconfiguration() ->
- config:set("cluster", "seedlist", "couchdb@node1.example.com,couchdb@node2.example.com", false),
- ok = application:start(mem3),
- try
- {ok, {Result}} = mem3_seeds:get_status(),
- {Seeds} = couch_util:get_value(seeds, Result),
- ?assertEqual(2, length(Seeds)),
- ?assertMatch({_}, couch_util:get_value('couchdb@node1.example.com', Seeds)),
- ?assertMatch({_}, couch_util:get_value('couchdb@node2.example.com', Seeds)),
- ?assertEqual(seeding, couch_util:get_value(status, Result))
- after
- cleanup()
- end.
-
-check_nodelist() ->
- config:set("cluster", "seedlist", "couchdb@node1.example.com,couchdb@node2.example.com", false),
- ok = application:start(mem3),
- try
- Nodes = mem3:nodes(),
- ?assert(lists:member('couchdb@node1.example.com', Nodes)),
- ?assert(lists:member('couchdb@node2.example.com', Nodes))
- after
- cleanup()
- end.
-
-check_local_dbs() ->
- LocalDbs = mem3_sync:local_dbs(),
- {ok, _} = couch_server:create(<<"_users">>, []),
- ?assertEqual(
- lists:append(LocalDbs, [<<"_users">>]),
- mem3_sync:local_dbs()
- ).
-
-cleanup() ->
- application:stop(mem3),
- Filename = config:get("mem3", "nodes_db", "_nodes") ++ ".couch",
- file:delete(filename:join([?BUILDDIR(), "tmp", "data", Filename])),
- case config:get("couch_httpd_auth", "authentication_db") of
- undefined -> ok;
- DbName -> couch_server:delete(list_to_binary(DbName), [])
- end.
-
-setup() ->
- test_util:start_couch([rexi]).
-
-teardown(Ctx) ->
- test_util:stop_couch(Ctx).
diff --git a/src/mem3/test/eunit/mem3_shards_test.erl b/src/mem3/test/eunit/mem3_shards_test.erl
deleted file mode 100644
index 6d2766fa2..000000000
--- a/src/mem3/test/eunit/mem3_shards_test.erl
+++ /dev/null
@@ -1,130 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_shards_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/src/mem3_reshard.hrl").
-% for all_docs function
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(ID, <<"_id">>).
--define(TIMEOUT, 60).
-
-setup() ->
- DbName = ?tempdb(),
- PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
- create_db(DbName, [{q, 8}, {n, 1}, {props, PartProps}]),
- {ok, DbDoc} = mem3_util:open_db_doc(DbName),
- #{dbname => DbName, dbdoc => DbDoc}.
-
-teardown(#{dbname := DbName}) ->
- delete_db(DbName).
-
-start_couch() ->
- test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-mem3_shards_db_create_props_test_() ->
- {
- "mem3 shards partition query database properties tests",
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun partitioned_shards_recreated_properly/1
- ]
- }
- }
- }.
-
-% This asserts that when the mem3_shards changes listener on the shards db
-% encounters a db doc update for a db that has a missing shard on the local
-% instance, the shard creation logic will properly propagate the db's config
-% properties.
-% SEE: apache/couchdb#3631
-partitioned_shards_recreated_properly(#{dbname := DbName, dbdoc := DbDoc}) ->
- {timeout, ?TIMEOUT,
- ?_test(begin
- #doc{body = {Body0}} = DbDoc,
- Body1 = [{<<"foo">>, <<"bar">>} | Body0],
- Shards = [Shard | _] = lists:sort(mem3:shards(DbName)),
- ShardName = Shard#shard.name,
- ?assert(is_partitioned(Shards)),
- ok = with_proc(fun() -> couch_server:delete(ShardName, []) end),
- ?assertThrow({not_found, no_db_file}, is_partitioned(Shard)),
- ok = mem3_util:update_db_doc(DbDoc#doc{body = {Body1}}),
- Shards =
- [Shard | _] = test_util:wait_value(
- fun() ->
- lists:sort(mem3:shards(DbName))
- end,
- Shards
- ),
- ?assertEqual(
- true,
- test_util:wait_value(
- fun() ->
- catch is_partitioned(Shard)
- end,
- true
- )
- )
- end)}.
-
-is_partitioned([#shard{} | _] = Shards) ->
- lists:all(fun is_partitioned/1, Shards);
-is_partitioned(#shard{name = Name}) ->
- couch_util:with_db(Name, fun couch_db:is_partitioned/1);
-is_partitioned(Db) ->
- couch_db:is_partitioned(Db).
-
-create_db(DbName, Opts) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
-delete_db(DbName) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
-with_proc(Fun) ->
- with_proc(Fun, undefined, 30000).
-
-with_proc(Fun, GroupLeader) ->
- with_proc(Fun, GroupLeader, 30000).
-
-with_proc(Fun, GroupLeader, Timeout) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- case GroupLeader of
- undefined -> ok;
- _ -> erlang:group_leader(GroupLeader, self())
- end,
- exit({with_proc_res, Fun()})
- end),
- receive
- {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
- Res;
- {'DOWN', Ref, process, Pid, Error} ->
- error(Error)
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- error({with_proc_timeout, Fun, Timeout})
- end.
diff --git a/src/mem3/test/eunit/mem3_sync_security_test.erl b/src/mem3/test/eunit/mem3_sync_security_test.erl
deleted file mode 100644
index 7f4b7b699..000000000
--- a/src/mem3/test/eunit/mem3_sync_security_test.erl
+++ /dev/null
@@ -1,57 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_security_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("mem3.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-% seconds
--define(TIMEOUT, 5).
-
-go_test_() ->
- {
- "security property sync test",
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun sync_security_ok/1
- ]
- }
- }
- }.
-
-start_couch() ->
- test_util:start_couch([fabric, mem3]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-setup() ->
- ok = meck:new(fabric, [passthrough]),
- meck:expect(fabric, all_dbs, fun() ->
- {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]}
- end).
-
-teardown(_) ->
- meck:unload().
-
-sync_security_ok(_) ->
- {timeout, ?TIMEOUT, ?_assertEqual(ok, mem3_sync_security:go())}.
diff --git a/src/mem3/test/eunit/mem3_util_test.erl b/src/mem3/test/eunit/mem3_util_test.erl
deleted file mode 100644
index 9cab89f67..000000000
--- a/src/mem3/test/eunit/mem3_util_test.erl
+++ /dev/null
@@ -1,152 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_util_test).
-
--include("mem3.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-name_shard_test() ->
- Shard1 = #shard{},
- ?assertError(function_clause, mem3_util:name_shard(Shard1, ".1234")),
-
- Shard2 = #shard{dbname = <<"testdb">>, range = [0, 100]},
- #shard{name = Name2} = mem3_util:name_shard(Shard2, ".1234"),
- ?assertEqual(<<"shards/00000000-00000064/testdb.1234">>, Name2),
-
- ok.
-
-create_partition_map_test() ->
- {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a, b, c, d]},
- Map1 = mem3_util:create_partition_map(DbName1, N1, Q1, Nodes1),
- ?assertEqual(12, length(Map1)),
-
- {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a, b, c, d]},
- [#shard{name = Name2, node = Node2}] =
- Map2 =
- mem3_util:create_partition_map(DbName2, N2, Q2, Nodes2, ".1234"),
- ?assertEqual(1, length(Map2)),
- ?assertEqual(<<"shards/00000000-ffffffff/testdb2.1234">>, Name2),
- ?assertEqual(a, Node2),
- ok.
-
-build_shards_test() ->
- DocProps1 =
- [
- {<<"changelog">>, [
- [
- <<"add">>,
- <<"00000000-1fffffff">>,
- <<"bigcouch@node.local">>
- ],
- [
- <<"add">>,
- <<"20000000-3fffffff">>,
- <<"bigcouch@node.local">>
- ],
- [
- <<"add">>,
- <<"40000000-5fffffff">>,
- <<"bigcouch@node.local">>
- ],
- [
- <<"add">>,
- <<"60000000-7fffffff">>,
- <<"bigcouch@node.local">>
- ],
- [
- <<"add">>,
- <<"80000000-9fffffff">>,
- <<"bigcouch@node.local">>
- ],
- [
- <<"add">>,
- <<"a0000000-bfffffff">>,
- <<"bigcouch@node.local">>
- ],
- [
- <<"add">>,
- <<"c0000000-dfffffff">>,
- <<"bigcouch@node.local">>
- ],
- [
- <<"add">>,
- <<"e0000000-ffffffff">>,
- <<"bigcouch@node.local">>
- ]
- ]},
- {<<"by_node">>,
- {[
- {<<"bigcouch@node.local">>, [
- <<"00000000-1fffffff">>,
- <<"20000000-3fffffff">>,
- <<"40000000-5fffffff">>,
- <<"60000000-7fffffff">>,
- <<"80000000-9fffffff">>,
- <<"a0000000-bfffffff">>,
- <<"c0000000-dfffffff">>,
- <<"e0000000-ffffffff">>
- ]}
- ]}},
- {<<"by_range">>,
- {[
- {<<"00000000-1fffffff">>, [<<"bigcouch@node.local">>]},
- {<<"20000000-3fffffff">>, [<<"bigcouch@node.local">>]},
- {<<"40000000-5fffffff">>, [<<"bigcouch@node.local">>]},
- {<<"60000000-7fffffff">>, [<<"bigcouch@node.local">>]},
- {<<"80000000-9fffffff">>, [<<"bigcouch@node.local">>]},
- {<<"a0000000-bfffffff">>, [<<"bigcouch@node.local">>]},
- {<<"c0000000-dfffffff">>, [<<"bigcouch@node.local">>]},
- {<<"e0000000-ffffffff">>, [<<"bigcouch@node.local">>]}
- ]}}
- ],
- Shards1 = mem3_util:build_shards(<<"testdb1">>, DocProps1),
- ExpectedShards1 =
- [
- {shard, <<"shards/00000000-1fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
- [0, 536870911], undefined, []},
- {shard, <<"shards/20000000-3fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
- [536870912, 1073741823], undefined, []},
- {shard, <<"shards/40000000-5fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
- [1073741824, 1610612735], undefined, []},
- {shard, <<"shards/60000000-7fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
- [1610612736, 2147483647], undefined, []},
- {shard, <<"shards/80000000-9fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
- [2147483648, 2684354559], undefined, []},
- {shard, <<"shards/a0000000-bfffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
- [2684354560, 3221225471], undefined, []},
- {shard, <<"shards/c0000000-dfffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
- [3221225472, 3758096383], undefined, []},
- {shard, <<"shards/e0000000-ffffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
- [3758096384, 4294967295], undefined, []}
- ],
- ?assertEqual(ExpectedShards1, Shards1),
- ok.
-
-%% n_val tests
-
-nval_test_() ->
- {
- setup,
- fun() ->
- meck:new([config, couch_log]),
- meck:expect(couch_log, error, 2, ok),
- meck:expect(config, get_integer, 3, 5)
- end,
- fun(_) -> meck:unload() end,
- [
- ?_assertEqual(2, mem3_util:n_val(2, 4)),
- ?_assertEqual(1, mem3_util:n_val(-1, 4)),
- ?_assertEqual(4, mem3_util:n_val(6, 4)),
- ?_assertEqual(5, mem3_util:n_val(undefined, 6))
- ]
- }.
diff --git a/src/rexi/README.md b/src/rexi/README.md
deleted file mode 100644
index b2eeaea2b..000000000
--- a/src/rexi/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-Rexi is a tailor-made RPC server application for sending [CouchDB][1] operations to nodes in a cluster. It is used in [BigCouch][2] as the remote procedure vehicle to get [fabric][6] functions to execute on remote cluster nodes.
-
-Rexi better fits the needs of the BigCouch distributed data store by dropping some unneeded overhead in rex, the RPC server that ships with Erlang/OTP. Rexi is optimized for the case when you need to spawn a bunch of remote processes. Cast messages are sent from the origin to the remote rexi server, and local processes are spawned from there, which is vastly more efficient than spawning remote processes from the origin. You still get monitoring of the remote processes, but the request-handling process doesn't get stuck trying to connect to an overloaded/dead node. 'rexi_DOWN' messages will arrive at the client eventually. This has been an extremely advantageous mix of latency and failure detection, vastly improving the performance of BigCouch.
-
-Rexi is used in conjunction with 'Fabric', which is also an application within BigCouch, but Rexi can be used on a stand-alone basis.
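-
-A minimal fan-out sketch (illustrative only; it assumes the rexi application is running on the target nodes and that `my_mod:work/1` is a hypothetical function that reports its result back with `rexi:reply/1`):
-
-    Nodes = [node() | nodes()],
-    Refs = [rexi:cast(N, {my_mod, work, [42]}) || N <- Nodes],
-    [receive
-         {Ref, Result} -> {ok, Result};
-         {Ref, {rexi_EXIT, Reason}} -> {error, Reason}
-     after 5000 -> {error, timeout}
-     end || Ref <- Refs].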
-
-### Getting Started
-Rexi requires R13B03 or higher and can be built with [rebar][7], which comes bundled in the repository.
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/BigCouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
-[6]: http://github.com/cloudant/fabric
-[7]: http://github.com/basho/rebar
diff --git a/src/rexi/include/rexi.hrl b/src/rexi/include/rexi.hrl
deleted file mode 100644
index a2d86b2ab..000000000
--- a/src/rexi/include/rexi.hrl
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(error, {
- timestamp,
- reason,
- mfa,
- nonce,
- stack
-}).
-
diff --git a/src/rexi/priv/stats_descriptions.cfg b/src/rexi/priv/stats_descriptions.cfg
deleted file mode 100644
index 93c29d95a..000000000
--- a/src/rexi/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-{[rexi, buffered], [
- {type, counter},
- {desc, <<"number of rexi messages buffered">>}
-]}.
-{[rexi, down], [
- {type, counter},
- {desc, <<"number of rexi_DOWN messages handled">>}
-]}.
-{[rexi, dropped], [
- {type, counter},
- {desc, <<"number of rexi messages dropped from buffers">>}
-]}.
-{[rexi, streams, timeout, init_stream], [
- {type, counter},
- {desc, <<"number of rexi stream initialization timeouts">>}
-]}.
-{[rexi, streams, timeout, stream], [
- {type, counter},
- {desc, <<"number of rexi stream timeouts">>}
-]}.
-{[rexi, streams, timeout, wait_for_ack], [
- {type, counter},
- {desc, <<"number of rexi stream timeouts while waiting for acks">>}
-]}.
diff --git a/src/rexi/rebar.config b/src/rexi/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/rexi/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/rexi/src/rexi.app.src b/src/rexi/src/rexi.app.src
deleted file mode 100644
index 400293219..000000000
--- a/src/rexi/src/rexi.app.src
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, rexi, [
- {description, "Lightweight RPC server"},
- {vsn, git},
- {registered, [
- rexi_sup,
- rexi_server
- ]},
- {applications, [
- kernel,
- stdlib,
- couch_log,
- couch_stats,
- config
- ]},
- {mod, {rexi_app,[]}}
-]}.
diff --git a/src/rexi/src/rexi.erl b/src/rexi/src/rexi.erl
deleted file mode 100644
index 77830996e..000000000
--- a/src/rexi/src/rexi.erl
+++ /dev/null
@@ -1,330 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi).
--export([start/0, stop/0, restart/0]).
--export([cast/2, cast/3, cast/4, kill/2, kill_all/1]).
--export([reply/1, sync_reply/1, sync_reply/2]).
--export([async_server_call/2, async_server_call/3]).
--export([stream_init/0, stream_init/1]).
--export([stream_start/1, stream_cancel/1]).
--export([stream/1, stream/2, stream/3, stream_ack/1, stream_ack/2]).
--export([stream2/1, stream2/2, stream2/3, stream_last/1, stream_last/2]).
--export([ping/0]).
-
-start() ->
- application:start(rexi).
-
-stop() ->
- application:stop(rexi).
-
-restart() ->
- stop(),
- start().
-
-%% @equiv cast(Node, self(), MFA)
--spec cast(node(), {atom(), atom(), list()}) -> reference().
-cast(Node, MFA) ->
- cast(Node, self(), MFA).
-
-%% @doc Executes apply(M, F, A) on Node.
-%% You might want to use this instead of rpc:cast/4 for two reasons. First,
-%% the Caller pid and the returned reference are inserted into the remote
-%% process' dictionary as `rexi_from', so it has a way to communicate with you.
-%% Second, the remote process is monitored. If it exits with a Reason other
-%% than normal, Caller will receive a message of the form
-%% `{Ref, {rexi_EXIT, Reason}}' where Ref is the returned reference.
--spec cast(node(), pid(), {atom(), atom(), list()}) -> reference().
-cast(Node, Caller, MFA) ->
- Ref = make_ref(),
- Msg = cast_msg({doit, {Caller, Ref}, get(nonce), MFA}),
- rexi_utils:send(rexi_utils:server_pid(Node), Msg),
- Ref.
-
-%% @doc Executes apply(M, F, A) on Node.
-%% This version accepts a sync option which causes the erlang:send/2 call to
-%% be made directly from the calling process instead of deferring to the
-%% rexi_buffer process when erlang:send/2 would block. If the sync option is
-%% omitted this call is identical to cast/3.
--spec cast(node(), pid(), {atom(), atom(), list()}, [atom()]) -> reference().
-cast(Node, Caller, MFA, Options) ->
- case lists:member(sync, Options) of
- true ->
- Ref = make_ref(),
- Msg = cast_msg({doit, {Caller, Ref}, get(nonce), MFA}),
- erlang:send(rexi_utils:server_pid(Node), Msg),
- Ref;
- false ->
- cast(Node, Caller, MFA)
- end.
-
-%% @doc Sends an async kill signal to the remote process associated with Ref.
-%% No rexi_EXIT message will be sent.
--spec kill(node(), reference()) -> ok.
-kill(Node, Ref) ->
- rexi_utils:send(rexi_utils:server_pid(Node), cast_msg({kill, Ref})),
- ok.
-
-%% @doc Sends an async kill signal to the remote processes associated with Refs.
-%% No rexi_EXIT message will be sent.
--spec kill_all([{node(), reference()}]) -> ok.
-kill_all(NodeRefs) when is_list(NodeRefs) ->
- %% use_kill_all is available since version 3.0. When performing a rolling
- %% cluster upgrade from 2.x, set this value to false, then revert it back
- %% to default (true) after all nodes have been upgraded.
- case config:get_boolean("rexi", "use_kill_all", true) of
- true ->
- PerNodeMap = lists:foldl(
- fun({Node, Ref}, Acc) ->
- maps:update_with(
- Node,
- fun(Refs) ->
- [Ref | Refs]
- end,
- [Ref],
- Acc
- )
- end,
- #{},
- NodeRefs
- ),
- maps:map(
- fun(Node, Refs) ->
- ServerPid = rexi_utils:server_pid(Node),
- rexi_utils:send(ServerPid, cast_msg({kill_all, Refs}))
- end,
- PerNodeMap
- );
- false ->
- lists:foreach(fun({Node, Ref}) -> kill(Node, Ref) end, NodeRefs)
- end,
- ok.
-
-%% @equiv async_server_call(Server, self(), Request)
--spec async_server_call(pid() | {atom(), node()}, any()) -> reference().
-async_server_call(Server, Request) ->
- async_server_call(Server, self(), Request).
-
-%% @doc Sends a properly formatted gen_server:call Request to the Server and
-%% returns the reference which the Server will include in its reply. The
-%% function acts more like cast() than call() in that the server process
-%% is not monitored. Clients who want to know if the server is alive should
-%% monitor it themselves before calling this function.
--spec async_server_call(pid() | {atom(), node()}, pid(), any()) -> reference().
-async_server_call(Server, Caller, Request) ->
- Ref = make_ref(),
- rexi_utils:send(Server, {'$gen_call', {Caller, Ref}, Request}),
- Ref.
-
-%% @doc convenience function to reply to the original rexi Caller.
--spec reply(any()) -> any().
-reply(Reply) ->
- {Caller, Ref} = get(rexi_from),
- erlang:send(Caller, {Ref, Reply}).
-
-%% @equiv sync_reply(Reply, 300000)
-sync_reply(Reply) ->
- sync_reply(Reply, 300000).
-
-%% @doc convenience function to reply to caller and wait for response. Message
-%% is of the form {OriginalRef, {self(),reference()}, Reply}, which enables the
-%% original caller to respond back.
--spec sync_reply(any(), pos_integer() | infinity) -> any().
-sync_reply(Reply, Timeout) ->
- {Caller, Ref} = get(rexi_from),
- Tag = make_ref(),
- erlang:send(Caller, {Ref, {self(), Tag}, Reply}),
- receive
- {Tag, Response} ->
- Response
- after Timeout ->
- timeout
- end.
-
-%% @equiv stream_init(300000)
-stream_init() ->
- stream_init(300000).
-
-%% @doc Initialize an RPC stream that involves sending multiple
-%% messages back to the coordinator.
-%%
-%% This should be called by rexi workers. It blocks until the
-%% coordinator responds with whether this worker should proceed.
-%% This function will either return with `ok` or call
-%% `erlang:exit/1`.
--spec stream_init(pos_integer()) -> ok.
-stream_init(Timeout) ->
- case sync_reply(rexi_STREAM_INIT, Timeout) of
- rexi_STREAM_START ->
- ok;
- rexi_STREAM_CANCEL ->
- exit(normal);
- timeout ->
- couch_stats:increment_counter(
- [rexi, streams, timeout, init_stream]
- ),
- exit(timeout);
- Else ->
- exit({invalid_stream_message, Else})
- end.
-
-%% @doc Start a worker stream
-%%
-%% If a coordinator wants to continue using a streaming worker it
-%% should use this function to inform the worker to continue
-%% sending messages. The `From` should be the value provided by
-%% the worker in the rexi_STREAM_INIT message.
--spec stream_start({pid(), any()}) -> ok.
-stream_start({Pid, _Tag} = From) when is_pid(Pid) ->
- gen_server:reply(From, rexi_STREAM_START).
-
-%% @doc Cancel a worker stream
-%%
-%% If a coordinator decides that a worker is not going to be part
-%% of the response it should use this function to cancel the worker.
-%% The `From` should be the value provided by the worker in the
-%% rexi_STREAM_INIT message.
--spec stream_cancel({pid(), any()}) -> ok.
-stream_cancel({Pid, _Tag} = From) when is_pid(Pid) ->
- gen_server:reply(From, rexi_STREAM_CANCEL).
-
-%% @equiv stream(Msg, 10, 300000)
-stream(Msg) ->
- stream(Msg, 10, 300000).
-
-%% @equiv stream(Msg, Limit, 300000)
-stream(Msg, Limit) ->
- stream(Msg, Limit, 300000).
-
-%% @doc convenience function to stream messages to caller while blocking when
-%% a specific number of messages are outstanding. Message is of the form
-%% {OriginalRef, self(), Reply}, which enables the original caller to ack.
--spec stream(any(), integer(), pos_integer() | infinity) -> any().
-stream(Msg, Limit, Timeout) ->
- try maybe_wait(Limit, Timeout) of
- {ok, Count} ->
- put(rexi_unacked, Count + 1),
- {Caller, Ref} = get(rexi_from),
- erlang:send(Caller, {Ref, self(), Msg}),
- ok
- catch
- throw:timeout ->
- couch_stats:increment_counter([rexi, streams, timeout, stream]),
- exit(timeout)
- end.
-
-%% @equiv stream2(Msg, 5, 300000)
-stream2(Msg) ->
- Limit = config:get_integer("rexi", "stream_limit", 5),
- stream2(Msg, Limit).
-
-%% @equiv stream2(Msg, Limit, 300000)
-stream2(Msg, Limit) ->
- stream2(Msg, Limit, 300000).
-
-%% @doc Stream a message back to the coordinator. It limits the
-%% number of unacked messages to Limit and throws a timeout error
-%% if it doesn't receive an ack in Timeout milliseconds. This
-%% is a combination of the old stream_start and stream functions
-%% which automatically does the stream initialization logic.
--spec stream2(any(), pos_integer(), pos_integer() | infinity) -> any().
-stream2(Msg, Limit, Timeout) ->
- maybe_init_stream(Timeout),
- try maybe_wait(Limit, Timeout) of
- {ok, Count} ->
- put(rexi_unacked, Count + 1),
- {Caller, Ref} = get(rexi_from),
- erlang:send(Caller, {Ref, self(), Msg}),
- ok
- catch
- throw:timeout ->
- couch_stats:increment_counter([rexi, streams, timeout, stream]),
- exit(timeout)
- end.
-
-%% @equiv stream_last(Msg, 300000)
-stream_last(Msg) ->
- stream_last(Msg, 300000).
-
-%% @doc Send the last message in a stream. The difference between
-%% this and stream/3 is that it uses rexi:reply/1, which doesn't include
-%% the worker pid and doesn't wait for a response from the coordinator.
-stream_last(Msg, Timeout) ->
- maybe_init_stream(Timeout),
- rexi:reply(Msg),
- ok.
-
-%% @equiv stream_ack(Client, 1)
-stream_ack(Client) ->
- erlang:send(Client, {rexi_ack, 1}).
-
-%% @doc Ack streamed messages
-stream_ack(Client, N) ->
- erlang:send(Client, {rexi_ack, N}).
-
-%% Sends a ping message to the coordinator. This is for long-running
-%% operations on a node that could otherwise exceed the rexi timeout.
-ping() ->
- {Caller, _} = get(rexi_from),
- erlang:send(Caller, {rexi, '$rexi_ping'}).
-
-%% internal functions %%
-
-cast_msg(Msg) -> {'$gen_cast', Msg}.
-
-maybe_init_stream(Timeout) ->
- case get(rexi_STREAM_INITED) of
- true ->
- ok;
- _ ->
- init_stream(Timeout)
- end.
-
-init_stream(Timeout) ->
- case sync_reply(rexi_STREAM_INIT, Timeout) of
- rexi_STREAM_START ->
- put(rexi_STREAM_INITED, true),
- ok;
- rexi_STREAM_CANCEL ->
- exit(normal);
- timeout ->
- exit(timeout);
- Else ->
- exit({invalid_stream_message, Else})
- end.
-
-maybe_wait(Limit, Timeout) ->
- case get(rexi_unacked) of
- undefined ->
- {ok, 0};
- Count when Count >= Limit ->
- wait_for_ack(Count, Timeout);
- Count ->
- drain_acks(Count)
- end.
-
-wait_for_ack(Count, Timeout) ->
- receive
- {rexi_ack, N} -> drain_acks(Count - N)
- after Timeout ->
- couch_stats:increment_counter([rexi, streams, timeout, wait_for_ack]),
- throw(timeout)
- end.
-
-drain_acks(Count) when Count < 0 ->
- erlang:error(mismatched_rexi_ack);
-drain_acks(Count) ->
- receive
- {rexi_ack, N} -> drain_acks(Count - N)
- after 0 ->
- {ok, Count}
- end.
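
The stream functions above form a small protocol between a worker and its coordinator. The following is a hedged sketch (the function names are made up; real callers such as fabric use richer worker records) of both halves: the worker initializes the stream and emits rows with `stream2/1`, while the coordinator grants the stream with `stream_start/1` and acks each row so the worker's unacked window keeps draining:

```erlang
%% Worker side (runs under rexi_server): block until the coordinator grants
%% the stream, emit each row, then close with stream_last/1.
stream_rows(Rows) ->
    ok = rexi:stream_init(),
    lists:foreach(fun(Row) -> rexi:stream2({row, Row}) end, Rows),
    rexi:stream_last(complete).

%% Coordinator-side callback as rexi_utils:recv/6 would invoke it: grant the
%% stream on rexi_STREAM_INIT and ack every row so the worker keeps sending.
handle_message(rexi_STREAM_INIT, {_Worker, From}, Acc) ->
    rexi:stream_start(From),
    {ok, Acc};
handle_message({row, Row}, {_Worker, WorkerPid}, Acc) ->
    rexi:stream_ack(WorkerPid),
    {ok, [Row | Acc]};
handle_message(complete, _Worker, Acc) ->
    {stop, Acc}.
```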
diff --git a/src/rexi/src/rexi_app.erl b/src/rexi/src/rexi_app.erl
deleted file mode 100644
index 61e7886e1..000000000
--- a/src/rexi/src/rexi_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, StartArgs) ->
- rexi_sup:start_link(StartArgs).
-
-stop(_State) ->
- ok.
diff --git a/src/rexi/src/rexi_buffer.erl b/src/rexi/src/rexi_buffer.erl
deleted file mode 100644
index 7f0079f03..000000000
--- a/src/rexi/src/rexi_buffer.erl
+++ /dev/null
@@ -1,107 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(rexi_buffer).
-
--behaviour(gen_server).
--vsn(1).
-
-% gen_server callbacks
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
--export([
- send/2,
- start_link/1
-]).
-
--record(state, {
- buffer = queue:new(),
- sender = nil,
- count = 0,
- max_count
-}).
-
-start_link(ServerId) ->
- gen_server:start_link({local, ServerId}, ?MODULE, nil, []).
-
-send(Dest, Msg) ->
- Server = list_to_atom(lists:concat([rexi_buffer, "_", get_node(Dest)])),
- gen_server:cast(Server, {deliver, Dest, Msg}).
-
-init(_) ->
- %% TODO Leverage os_mon to discover available memory in the system
- Max = list_to_integer(config:get("rexi", "buffer_count", "2000")),
- {ok, #state{max_count = Max}}.
-
-handle_call(erase_buffer, _From, State) ->
- {reply, ok, State#state{buffer = queue:new(), count = 0}, 0};
-handle_call(get_buffered_count, _From, State) ->
- {reply, State#state.count, State, 0}.
-
-handle_cast({deliver, Dest, Msg}, #state{buffer = Q, count = C} = State) ->
- couch_stats:increment_counter([rexi, buffered]),
- Q2 = queue:in({Dest, Msg}, Q),
- case should_drop(State) of
- true ->
- couch_stats:increment_counter([rexi, dropped]),
- {noreply, State#state{buffer = queue:drop(Q2)}, 0};
- false ->
- {noreply, State#state{buffer = Q2, count = C + 1}, 0}
- end.
-
-handle_info(timeout, #state{sender = nil, buffer = {[], []}, count = 0} = State) ->
- {noreply, State};
-handle_info(timeout, #state{sender = nil, count = C} = State) when C > 0 ->
- #state{buffer = Q, count = C} = State,
- {{value, {Dest, Msg}}, Q2} = queue:out_r(Q),
- NewState = State#state{buffer = Q2, count = C - 1},
- case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
- ok when C =:= 1 ->
-            % We just sent the last queued message, we'll use this opportunity
- % to hibernate the process and run a garbage collection
- {noreply, NewState, hibernate};
- ok when C > 1 ->
- % Use a zero timeout to recurse into this handler ASAP
- {noreply, NewState, 0};
- _Else ->
- % We're experiencing delays, keep buffering internally
- Sender = spawn_monitor(erlang, send, [Dest, Msg]),
- {noreply, NewState#state{sender = Sender}}
- end;
-handle_info(timeout, State) ->
- % Waiting on a sender to return
- {noreply, State};
-handle_info({'DOWN', Ref, _, Pid, _}, #state{sender = {Pid, Ref}} = State) ->
- {noreply, State#state{sender = nil}, 0}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, {state, Buffer, Sender, Count}, _Extra) ->
- Max = list_to_integer(config:get("rexi", "buffer_count", "2000")),
- {ok, #state{buffer = Buffer, sender = Sender, count = Count, max_count = Max}};
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-should_drop(#state{count = Count, max_count = Max}) ->
- Count >= Max.
-
-get_node({_, Node}) when is_atom(Node) ->
- Node;
-get_node(Pid) when is_pid(Pid) ->
- node(Pid).
diff --git a/src/rexi/src/rexi_monitor.erl b/src/rexi/src/rexi_monitor.erl
deleted file mode 100644
index 7fe66db71..000000000
--- a/src/rexi/src/rexi_monitor.erl
+++ /dev/null
@@ -1,67 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_monitor).
--export([start/1, stop/1]).
--export([wait_monitors/1]).
-
-%% @doc spawn_links a process which monitors the supplied list of items and
-%% returns the process ID. If a monitored process exits, the caller will
-%% receive a {rexi_DOWN, MonitoringPid, DeadPid, Reason} message.
--spec start([pid() | atom() | {atom(), node()}]) -> pid().
-start(Procs) ->
- Parent = self(),
- Nodes = [node() | nodes()],
- {Mon, Skip} = lists:partition(
- fun(P) -> should_monitor(P, Nodes) end,
- Procs
- ),
- spawn_link(fun() ->
- [notify_parent(Parent, P, noconnect) || P <- Skip],
- [erlang:monitor(process, P) || P <- Mon],
- wait_monitors(Parent)
- end).
-
-%% @doc Cleanly shut down the monitoring process and flush all rexi_DOWN
-%% messages from our mailbox.
--spec stop(pid()) -> ok.
-stop(MonitoringPid) ->
- MonitoringPid ! {self(), shutdown},
- flush_down_messages().
-
-%% internal functions %%
-
-notify_parent(Parent, Pid, Reason) ->
- couch_stats:increment_counter([rexi, down]),
- erlang:send(Parent, {rexi_DOWN, self(), Pid, Reason}).
-
-should_monitor(Pid, Nodes) when is_pid(Pid) ->
- lists:member(node(Pid), Nodes);
-should_monitor({_, Node}, Nodes) ->
- lists:member(Node, Nodes).
-
-wait_monitors(Parent) ->
- receive
- {'DOWN', _, process, Pid, Reason} ->
- notify_parent(Parent, Pid, Reason),
- ?MODULE:wait_monitors(Parent);
- {Parent, shutdown} ->
- ok
- end.
-
-flush_down_messages() ->
- receive
- {rexi_DOWN, _, _, _} ->
- flush_down_messages()
- after 0 ->
- ok
- end.
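
For illustration, a hypothetical coordinator can wrap its workers with such a monitor so that a dead worker or an unreachable node surfaces as a `rexi_DOWN` message rather than a silent hang (all names and the timeout below are invented for the sketch):

```erlang
%% Monitor the per-node rexi servers while gathering worker replies.
scatter_gather(Nodes, MFA) ->
    Workers = [{Node, rexi:cast(Node, MFA)} || Node <- Nodes],
    MonPid = rexi_monitor:start([rexi_utils:server_pid(Node) || Node <- Nodes]),
    try
        gather(Workers, [])
    after
        rexi_monitor:stop(MonPid),
        rexi:kill_all(Workers)
    end.

gather([], Acc) ->
    {ok, Acc};
gather(Workers, Acc) ->
    receive
        {Ref, Reply} ->
            gather(lists:keydelete(Ref, 2, Workers), [Reply | Acc]);
        {rexi_DOWN, _MonPid, _Pid, Reason} ->
            {error, Reason}
    after 5000 ->
        {error, timeout}
    end.
```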
diff --git a/src/rexi/src/rexi_server.erl b/src/rexi/src/rexi_server.erl
deleted file mode 100644
index 47c128d7b..000000000
--- a/src/rexi/src/rexi_server.erl
+++ /dev/null
@@ -1,207 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_server).
--behaviour(gen_server).
--vsn(1).
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
-]).
-
--export([start_link/1, init_p/2, init_p/3]).
-
-% for the stacktrace macro only so far
--include_lib("couch/include/couch_db.hrl").
-
--include_lib("rexi/include/rexi.hrl").
-
--record(job, {
- client :: reference(),
- worker :: reference(),
- client_pid :: pid(),
- worker_pid :: pid()
-}).
-
--record(st, {
- workers = ets:new(workers, [private, {keypos, #job.worker}]),
- clients = ets:new(clients, [private, {keypos, #job.client}]),
- errors = queue:new(),
- error_limit = 0,
- error_count = 0
-}).
-
-start_link(ServerId) ->
- gen_server:start_link({local, ServerId}, ?MODULE, [], []).
-
-init([]) ->
- couch_util:set_mqd_off_heap(?MODULE),
- {ok, #st{}}.
-
-handle_call(get_errors, _From, #st{errors = Errors} = St) ->
- {reply, {ok, lists:reverse(queue:to_list(Errors))}, St};
-handle_call(get_last_error, _From, #st{errors = Errors} = St) ->
- try
- {reply, {ok, queue:get_r(Errors)}, St}
- catch
- error:empty ->
- {reply, {error, empty}, St}
- end;
-handle_call({set_error_limit, N}, _From, #st{error_count = Len, errors = Q} = St) ->
- if
- N < Len ->
- {NewQ, _} = queue:split(N, Q);
- true ->
- NewQ = Q
- end,
- NewLen = queue:len(NewQ),
- {reply, ok, St#st{error_limit = N, error_count = NewLen, errors = NewQ}};
-handle_call(_Request, _From, St) ->
- {reply, ignored, St}.
-
-handle_cast({doit, From, MFA}, St) ->
- handle_cast({doit, From, undefined, MFA}, St);
-handle_cast({doit, {ClientPid, ClientRef} = From, Nonce, MFA}, State) ->
- {LocalPid, Ref} = spawn_monitor(?MODULE, init_p, [From, MFA, Nonce]),
- Job = #job{
- client = ClientRef,
- worker = Ref,
- client_pid = ClientPid,
- worker_pid = LocalPid
- },
- {noreply, add_job(Job, State)};
-handle_cast({kill, FromRef}, St) ->
- kill_worker(FromRef, St),
- {noreply, St};
-handle_cast({kill_all, FromRefs}, St) ->
- lists:foreach(fun(FromRef) -> kill_worker(FromRef, St) end, FromRefs),
- {noreply, St};
-handle_cast(_, St) ->
- couch_log:notice("rexi_server ignored_cast", []),
- {noreply, St}.
-
-handle_info({'DOWN', Ref, process, _, normal}, #st{workers = Workers} = St) ->
- case find_worker(Ref, Workers) of
- #job{} = Job ->
- {noreply, remove_job(Job, St)};
- false ->
- {noreply, St}
- end;
-handle_info({'DOWN', Ref, process, Pid, Error}, #st{workers = Workers} = St) ->
- case find_worker(Ref, Workers) of
- #job{worker_pid = Pid, worker = Ref, client_pid = CPid, client = CRef} = Job ->
- case Error of
- #error{reason = {_Class, Reason}, stack = Stack} ->
- notify_caller({CPid, CRef}, {Reason, Stack}),
- St1 = save_error(Error, St),
- {noreply, remove_job(Job, St1)};
- _ ->
- notify_caller({CPid, CRef}, Error),
- {noreply, remove_job(Job, St)}
- end;
- false ->
- {noreply, St}
- end;
-handle_info(_Info, St) ->
- {noreply, St}.
-
-terminate(_Reason, St) ->
- ets:foldl(
- fun(#job{worker_pid = Pid}, _) -> exit(Pid, kill) end,
- nil,
- St#st.workers
- ),
- ok.
-
-code_change(_OldVsn, #st{} = State, _Extra) ->
- {ok, State}.
-
-init_p(From, MFA) ->
- init_p(From, MFA, undefined).
-
-%% @doc initializes a process started by rexi_server.
--spec init_p(
- {pid(), reference()},
- {atom(), atom(), list()},
- string() | undefined
-) -> any().
-init_p(From, {M,F,A}, Nonce) ->
- put(rexi_from, From),
- put('$initial_call', {M,F,length(A)}),
- put(nonce, Nonce),
- try apply(M, F, A) catch exit:normal -> ok; ?STACKTRACE(Class, Reason, Stack0)
- Stack = clean_stack(Stack0),
- {ClientPid, _ClientRef} = From,
- couch_log:error(
- "rexi_server: from: ~s(~p) mfa: ~s:~s/~p ~p:~p ~100p", [
- node(ClientPid), ClientPid, M, F, length(A),
- Class, Reason, Stack]),
- exit(#error{
- timestamp = os:timestamp(),
- reason = {Class, Reason},
- mfa = {M,F,A},
- nonce = Nonce,
- stack = Stack
- })
- end.
-
-%% internal
-
-save_error(_E, #st{error_limit = 0} = St) ->
- St;
-save_error(E, #st{errors = Q, error_limit = L, error_count = C} = St) when C >= L ->
- St#st{errors = queue:in(E, queue:drop(Q))};
-save_error(E, #st{errors = Q, error_count = C} = St) ->
- St#st{errors = queue:in(E, Q), error_count = C + 1}.
-
-clean_stack(S) ->
- lists:map(
- fun
- ({M, F, A}) when is_list(A) -> {M, F, length(A)};
- (X) -> X
- end,
- S
- ).
-
-add_job(Job, #st{workers = Workers, clients = Clients} = State) ->
- ets:insert(Workers, Job),
- ets:insert(Clients, Job),
- State.
-
-remove_job(Job, #st{workers = Workers, clients = Clients} = State) ->
- ets:delete_object(Workers, Job),
- ets:delete_object(Clients, Job),
- State.
-
-find_worker(Ref, Tab) ->
- case ets:lookup(Tab, Ref) of
- [] -> false;
- [Worker] -> Worker
- end.
-
-notify_caller({Caller, Ref}, Reason) ->
- rexi_utils:send(Caller, {Ref, {rexi_EXIT, Reason}}).
-
-kill_worker(FromRef, #st{clients = Clients} = St) ->
- case find_worker(FromRef, Clients) of
- #job{worker = KeyRef, worker_pid = Pid} = Job ->
- erlang:demonitor(KeyRef),
- exit(Pid, kill),
- remove_job(Job, St),
- ok;
- false ->
- ok
- end.
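
The error bookkeeping above (`get_errors`, `get_last_error`, `set_error_limit`) is driven through plain gen_server calls against the per-node server. A hypothetical maintenance snippet, assuming the registered name is resolved with `rexi_utils:server_id/1`, might look like:

```erlang
%% Keep the last 20 worker crash reports on this node and read them back.
recent_errors() ->
    Server = rexi_utils:server_id(node()),
    ok = gen_server:call(Server, {set_error_limit, 20}),
    {ok, Errors} = gen_server:call(Server, get_errors),
    Errors.
```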
diff --git a/src/rexi/src/rexi_server_mon.erl b/src/rexi/src/rexi_server_mon.erl
deleted file mode 100644
index 9057807e6..000000000
--- a/src/rexi/src/rexi_server_mon.erl
+++ /dev/null
@@ -1,164 +0,0 @@
-% Copyright 2010-2013 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_server_mon).
--behaviour(gen_server).
--behaviour(mem3_cluster).
--vsn(1).
-
--export([
- start_link/1,
- status/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--export([
- cluster_stable/1,
- cluster_unstable/1
-]).
-
--define(CLUSTER_STABILITY_PERIOD_SEC, 15).
-
-start_link(ChildMod) ->
- Name = list_to_atom(lists:concat([ChildMod, "_mon"])),
- gen_server:start_link({local, Name}, ?MODULE, ChildMod, []).
-
-status() ->
- gen_server:call(?MODULE, status).
-
-% Mem3 cluster callbacks
-
-cluster_unstable(Server) ->
- couch_log:notice("~s : cluster unstable", [?MODULE]),
- gen_server:cast(Server, cluster_unstable),
- Server.
-
-cluster_stable(Server) ->
- gen_server:cast(Server, cluster_stable),
- Server.
-
-% gen_server callbacks
-
-init(ChildMod) ->
- {ok, _Mem3Cluster} = mem3_cluster:start_link(
- ?MODULE,
- self(),
- ?CLUSTER_STABILITY_PERIOD_SEC,
- ?CLUSTER_STABILITY_PERIOD_SEC
- ),
- start_servers(ChildMod),
- couch_log:notice("~s : started servers", [ChildMod]),
- {ok, ChildMod}.
-
-terminate(_Reason, _St) ->
- ok.
-
-handle_call(status, _From, ChildMod) ->
- case missing_servers(ChildMod) of
- [] ->
- {reply, ok, ChildMod};
- Missing ->
- {reply, {waiting, length(Missing)}, ChildMod}
- end;
-handle_call(Msg, _From, St) ->
- couch_log:notice("~s ignored_call ~w", [?MODULE, Msg]),
- {reply, ignored, St}.
-
-% If the cluster is unstable, a node was just added or removed. Check whether
-% any new servers can be started, but do not stop servers yet; defer that until
-% the cluster has stabilized.
-handle_cast(cluster_unstable, ChildMod) ->
- couch_log:notice("~s : cluster unstable", [ChildMod]),
- start_servers(ChildMod),
- {noreply, ChildMod};
-% When the cluster is stable, start any servers for new nodes and stop servers
-% for the nodes that disconnected.
-handle_cast(cluster_stable, ChildMod) ->
- couch_log:notice("~s : cluster stable", [ChildMod]),
- start_servers(ChildMod),
- stop_servers(ChildMod),
- {noreply, ChildMod};
-handle_cast(Msg, St) ->
- couch_log:notice("~s ignored_cast ~w", [?MODULE, Msg]),
- {noreply, St}.
-
-handle_info(Msg, St) ->
- couch_log:notice("~s ignored_info ~w", [?MODULE, Msg]),
- {noreply, St}.
-
-code_change(_OldVsn, nil, _Extra) ->
- {ok, rexi_server};
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-start_servers(ChildMod) ->
- lists:foreach(
- fun(Id) ->
- {ok, _} = start_server(ChildMod, Id)
- end,
- missing_servers(ChildMod)
- ).
-
-stop_servers(ChildMod) ->
- lists:foreach(
- fun(Id) ->
- ok = stop_server(ChildMod, Id)
- end,
- extra_servers(ChildMod)
- ).
-
-server_ids(ChildMod) ->
- Nodes = [node() | nodes()],
- [list_to_atom(lists:concat([ChildMod, "_", Node])) || Node <- Nodes].
-
-running_servers(ChildMod) ->
- [Id || {Id, _, _, _} <- supervisor:which_children(sup_module(ChildMod))].
-
-missing_servers(ChildMod) ->
- server_ids(ChildMod) -- running_servers(ChildMod).
-
-extra_servers(ChildMod) ->
- running_servers(ChildMod) -- server_ids(ChildMod).
-
-start_server(ChildMod, ChildId) ->
- ChildSpec = {
- ChildId,
- {ChildMod, start_link, [ChildId]},
- permanent,
- brutal_kill,
- worker,
- [ChildMod]
- },
- case supervisor:start_child(sup_module(ChildMod), ChildSpec) of
- {ok, Pid} ->
- {ok, Pid};
- Else ->
- erlang:error(Else)
- end.
-
-stop_server(ChildMod, ChildId) ->
- SupMod = sup_module(ChildMod),
- ok = supervisor:terminate_child(SupMod, ChildId),
- ok = supervisor:delete_child(SupMod, ChildId).
-
-sup_module(ChildMod) ->
- list_to_atom(lists:concat([ChildMod, "_sup"])).
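
A small sketch of using the `status/0` call above (for `rexi_server` the monitor registers itself as `rexi_server_mon`, so the call reaches that instance):

```erlang
%% Report whether every connected node has its rexi_server started yet.
check_rexi_servers() ->
    case rexi_server_mon:status() of
        ok -> all_servers_running;
        {waiting, N} -> {still_missing, N}
    end.
```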
diff --git a/src/rexi/src/rexi_server_sup.erl b/src/rexi/src/rexi_server_sup.erl
deleted file mode 100644
index 53497197f..000000000
--- a/src/rexi/src/rexi_server_sup.erl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_server_sup).
--behaviour(supervisor).
-
--export([init/1]).
-
--export([start_link/1]).
-
-start_link(Name) ->
- supervisor:start_link({local, Name}, ?MODULE, []).
-
-init([]) ->
- {ok, {{one_for_one, 1, 1}, []}}.
diff --git a/src/rexi/src/rexi_sup.erl b/src/rexi/src/rexi_sup.erl
deleted file mode 100644
index 3bea0ed15..000000000
--- a/src/rexi/src/rexi_sup.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_sup).
--behaviour(supervisor).
-
--export([start_link/1]).
--export([init/1]).
-
-start_link(Args) ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
-
-init([]) ->
- {ok,
- {{rest_for_one, 3, 10}, [
- {
- rexi_server,
- {rexi_server, start_link, [rexi_server]},
- permanent,
- 100,
- worker,
- [rexi_server]
- },
- {
- rexi_server_sup,
- {rexi_server_sup, start_link, [rexi_server_sup]},
- permanent,
- 100,
- supervisor,
- [rexi_server_sup]
- },
- {
- rexi_server_mon,
- {rexi_server_mon, start_link, [rexi_server]},
- permanent,
- 100,
- worker,
- [rexi_server_mon]
- },
- {
- rexi_buffer_sup,
- {rexi_server_sup, start_link, [rexi_buffer_sup]},
- permanent,
- 100,
- supervisor,
- [rexi_server_sup]
- },
- {
- rexi_buffer_mon,
- {rexi_server_mon, start_link, [rexi_buffer]},
- permanent,
- 100,
- worker,
- [rexi_server_mon]
- }
- ]}}.
diff --git a/src/rexi/src/rexi_utils.erl b/src/rexi/src/rexi_utils.erl
deleted file mode 100644
index d59c5ea0f..000000000
--- a/src/rexi/src/rexi_utils.erl
+++ /dev/null
@@ -1,105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_utils).
-
--export([server_id/1, server_pid/1, send/2, recv/6]).
-
-%% @doc Return a rexi_server id for the given node.
-server_id(Node) ->
- case config:get_boolean("rexi", "server_per_node", true) of
- true ->
- list_to_atom("rexi_server_" ++ atom_to_list(Node));
- _ ->
- rexi_server
- end.
-
-%% @doc Return a {server_id(node()), Node} Pid name for the given Node.
-server_pid(Node) ->
- {server_id(node()), Node}.
-
-%% @doc send a message as quickly as possible
-send(Dest, Msg) ->
- case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
- ok ->
- ok;
- _ ->
- % treat nosuspend and noconnect the same
- rexi_buffer:send(Dest, Msg)
- end.
-
-%% @doc set up the receive loop with an overall timeout
--spec recv([any()], integer(), function(), any(), timeout(), timeout()) ->
- {ok, any()} | {timeout, any()} | {error, atom()} | {error, atom(), any()}.
-recv(Refs, Keypos, Fun, Acc0, infinity, PerMsgTO) ->
- process_mailbox(Refs, Keypos, Fun, Acc0, nil, PerMsgTO);
-recv(Refs, Keypos, Fun, Acc0, GlobalTimeout, PerMsgTO) ->
- TimeoutRef = erlang:make_ref(),
- TRef = erlang:send_after(GlobalTimeout, self(), {timeout, TimeoutRef}),
- try
- process_mailbox(Refs, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO)
- after
- erlang:cancel_timer(TRef)
- end.
-
-process_mailbox(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
- case process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) of
- {ok, Acc} ->
- process_mailbox(RefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
- {new_refs, NewRefList, Acc} ->
- process_mailbox(NewRefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
- {stop, Acc} ->
- {ok, Acc};
- Error ->
- Error
- end.
-
-process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
- receive
- {timeout, TimeoutRef} ->
- {timeout, Acc0};
- {rexi, Ref, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, Worker, Acc0)
- end;
- {rexi, Ref, From, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, {Worker, From}, Acc0)
- end;
- {rexi, '$rexi_ping'} ->
- {ok, Acc0};
- {Ref, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- % this was some non-matching message which we will ignore
- {ok, Acc0};
- Worker ->
- Fun(Msg, Worker, Acc0)
- end;
- {Ref, From, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, {Worker, From}, Acc0)
- end;
- {rexi_DOWN, _, _, _} = Msg ->
- Fun(Msg, nil, Acc0)
- after PerMsgTO ->
- {timeout, Acc0}
- end.
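
A minimal sketch of driving `recv/6` from a coordinator (entirely hypothetical; real callers such as fabric pass richer worker records): `Workers` is a list of `{Node, Ref}` tuples, so Keypos 2 points at the reference incoming messages are keyed on, and the callback decides whether to keep accumulating or stop once every worker has answered:

```erlang
collect_replies(Workers) ->
    Callback = fun
        ({rexi_EXIT, Reason}, _Worker, _Acc) ->
            {error, Reason};
        (Msg, _Worker, Acc) ->
            case length(Acc) + 1 =:= length(Workers) of
                true -> {stop, [Msg | Acc]};
                false -> {ok, [Msg | Acc]}
            end
    end,
    %% 60s overall budget, at most 5s between consecutive messages.
    rexi_utils:recv(Workers, 2, Callback, [], 60000, 5000).
```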
diff --git a/src/setup/.gitignore b/src/setup/.gitignore
deleted file mode 100644
index f84f14c93..000000000
--- a/src/setup/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-ebin
-.rebar
-*~
-*.swp
diff --git a/src/setup/LICENSE b/src/setup/LICENSE
deleted file mode 100644
index 94ad231b8..000000000
--- a/src/setup/LICENSE
+++ /dev/null
@@ -1,203 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/src/setup/README.md b/src/setup/README.md
deleted file mode 100644
index 8a76d9dc5..000000000
--- a/src/setup/README.md
+++ /dev/null
@@ -1,210 +0,0 @@
-This module implements /_cluster_setup and manages the setup of a CouchDB cluster.
-
-### Testing
-
-```bash
-git clone https://git-wip-us.apache.org/repos/asf/couchdb.git
-cd couchdb
-git checkout setup
-./configure
-make
-dev/run --no-join -n 2 --admin a:b
-```
-
-Then, in a new terminal:
-
- $ src/setup/test/t.sh
-
-Between test runs, kill the `dev/run` script and reset the
-CouchDB instances with:
-
- $ rm -rf dev/lib/ dev/logs/
- $ dev/run --no-join -n 2 --admin a:b
-
-Then run the next test script.
-
-The Plan:
-
-N. End User Action
-- What happens behind the scenes.
-
-
-1. Launch CouchDB with `$ couchdb`, or init.d, or any other way, exactly
-like it is done in 1.x.x.
-- CouchDB launches and listens on 127.0.0.1:5984
-
-From here on, there are two paths: one via Fauxton (a), the other via an
-HTTP endpoint (b). Fauxton just uses the HTTP endpoint from (b), and (b)
-can also be used to set up a cluster programmatically.
-
-When using (b) you POST HTTP requests with a JSON request body (the request content type has to be set to application/json).
-
-If you have already set up a server admin account, you might need to pass the credentials to the HTTP calls using HTTP basic authentication.
-Alternatively, if you use the cURL command you can add the username and password inline, like so:
-
-```
-curl -X PUT "http://admin:password@127.0.0.1:5984/mydb"
-```
-
-2.a. Go to Fauxton. There is a “Cluster Setup” tab in the sidebar. Go
-to the tab and get presented with a form that asks you to enter an admin
-username, admin password and optionally a bind_address and port to bind
-to publicly. Submit the form with the [Enable Cluster] button.
-
-If this is a single node install that already has an admin set up, there
-is no need to ask for admin credentials here. If the bind_address is !=
-127.0.0.1, we can skip this entirely and Fauxton can show the add_node
-UI right away.
-
-- POST a JSON entity to /_cluster_setup, the entity looks like:
-```
-{
- "action":"enable_cluster",
- "username":"username",
- "password":"password",
- "bind_address":"0.0.0.0",
- "port": 5984
-}
-```
-
-This sets up the admin user on the current node and binds to 0.0.0.0:5984
-or the specified ip:port, and logs the admin user into Fauxton automatically.
-
-2.b. POST to /_cluster_setup as shown above.
-
-Repeat on all nodes.
-- keep the same username/password everywhere.
-
-
-3. Pick any one node, for simplicity use the first one, to be the
-“setup coordination node”.
-- this is a “master” node that manages the setup and requires all
- other nodes to be able to see it and vice versa. Setup won’t work
- with unavailable nodes (duh). The notion of “master” will be gone
- once the setup is finished. At that point, the system has no
-  master node. Ignore that I ever said “master”.
-
-a. Go to Fauxton / Cluster Setup. Once the cluster has been enabled, the
-UI shows an “Add Node” interface with the fields admin and node:
-- POST a JSON entity to /_cluster_setup, the entity looks like:
-```
-{
- "action":"add_node",
- "username":"username",
- "password":"password",
- "host":"192.168.1.100",
- ["port": 5984],
- "name": "node1" // as in “node1@hostname”, same as in vm.args
-}
-```
-
-In the example above, this adds the node with IP address 192.168.1.100 to the cluster.
-
-b. as in a, but without the Fauxton bits, just POST to /_cluster_setup
-- this request will do this:
- - on the “setup coordination node”:
- - check if we have an Erlang Cookie Secret. If not, generate
-      a UUID and set the erlang cookie to that UUID.
- - store the cookie in config.ini, re-set_cookie() on startup.
- - make a POST request to the node specified in the body above
- using the admin credentials in the body above:
- POST to http://username:password@node_b:5984/_cluster_setup with:
-```
- {
- "action": "receive_cookie",
- "cookie": "<secretcookie>",
- }
-```
-
- - when the request to node B returns, we know the Erlang-level
- inter-cluster communication is enabled and we can start adding
- the node on the CouchDB level. To do that, the “setup
-    coordination node” does this to its own HTTP endpoint:
- PUT /nodes/node_b:5984 or the same thing with internal APIs.
-
-- Repeat for all nodes.
-- Fauxton keeps a list of all set up nodes for users to see.
-
-
-4.a. When all nodes are added, click the [Finish Cluster Setup] button
-in Fauxton.
-- this does POST /_cluster_setup
-```
- {
- "action": "finish_cluster"
- }
-```
-
-b. Same as in a.
-
-- this manages the final setup bits, like creating the _users,
- _replicator and _metadata, _db_updates endpoints and
- whatever else is needed. // TBD: collect what else is needed.
-
-## Single node auto setup
-
-Setting the option `single_node` to `true` in the `[couchdb]` configuration section performs the single-node setup on startup, so the node is ready for use immediately.
-
-### Testing single_node auto setup
-
-Pass `--config-overrides single_node=true` and `-n 1` to `dev/run`
-
-
- $ dev/run --no-join -n 1 --admin a:b --config-overrides single_node=true
-
-
-Then, in a new terminal:
-
- $ src/setup/test/t-single_node.sh
-
-The script should show that single node is enabled.
-
-## The Setup Endpoint
-
-This is not a REST-y endpoint; it is a simple state machine operated
-by HTTP POSTs with JSON bodies that have an `action` field.
-
-### State 1: No Cluster Enabled
-
-This is right after starting a node for the first time, and any time
-before the cluster is enabled as outlined above.
-
-```
-GET /_cluster_setup
-{"state": "cluster_disabled"}
-
-POST /_cluster_setup {"action":"enable_cluster"...} -> Transition to State 2
-POST /_cluster_setup {"action":"enable_cluster"...} with empty admin user/pass or invalid host/post or host/port not available -> Error
-POST /_cluster_setup {"action":"anything_but_enable_cluster"...} -> Error
-```
-
-### State 2: Cluster enabled, admin user set, waiting for nodes to be added.
-
-```
-GET /_cluster_setup
-{"state":"cluster_enabled","nodes":[]}
-
-POST /_cluster_setup {"action":"enable_cluster"...} -> Error
-POST /_cluster_setup {"action":"add_node"...} -> Stay in State 2, but return "nodes":["node B"}] on GET
-POST /_cluster_setup {"action":"add_node"...} -> if target node not available, Error
-POST /_cluster_setup {"action":"finish_cluster"} with no nodes set up -> Error
-POST /_cluster_setup {"action":"finish_cluster"} -> Transition to State 3
-POST /_cluster_setup {"action":"delete_node"...} -> Stay in State 2, but delete node from /nodes, reflect the change in GET /_cluster_setup
-POST /_cluster_setup {"action":"delete_node","node":"unknown"} -> Error Unknown Node
-```
-
-### State 3: Cluster set up, all nodes operational
-
-```
-GET /_cluster_setup
-{"state":"cluster_finished","nodes":["node a", "node b", ...]}
-
-POST /_cluster_setup {"action":"enable_cluster"...} -> Error
-POST /_cluster_setup {"action":"finish_cluster"...} -> Stay in State 3, do nothing
-POST /_cluster_setup {"action":"add_node"...} -> Error
-POST /_cluster_setup?i_know_what_i_am_doing=true {"action":"add_node"...} -> Add node, stay in State 3.
-POST /_cluster_setup {"action":"delete_node"...} -> Stay in State 3, but delete node from /nodes, reflect the change in GET /_cluster_setup
-POST /_cluster_setup {"action":"delete_node","node":"unknown"} -> Error Unknown Node
-```
-
-// TBD: we need to persist the setup state somewhere.
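
As a rough sketch of the programmatic path (b), the same three actions can be driven from Erlang with ibrowse, the HTTP client the setup application itself uses. The host, port and credentials below are placeholders, and the `node_count` field is included because the setup module requires it for `enable_cluster`:

```erlang
%% Hypothetical end-to-end driver for the three setup actions.
setup_cluster() ->
    Post = fun(Body) ->
        {ok, _Status, _Headers, _Resp} = ibrowse:send_req(
            "http://127.0.0.1:5984/_cluster_setup",
            [{"Content-Type", "application/json"}],
            post,
            Body,
            [{basic_auth, {"admin", "password"}}]
        )
    end,
    Post(<<"{\"action\":\"enable_cluster\",\"username\":\"admin\","
           "\"password\":\"password\",\"bind_address\":\"0.0.0.0\","
           "\"port\":5984,\"node_count\":2}">>),
    Post(<<"{\"action\":\"add_node\",\"username\":\"admin\","
           "\"password\":\"password\",\"host\":\"192.168.1.100\","
           "\"port\":5984}">>),
    Post(<<"{\"action\":\"finish_cluster\"}">>),
    ok.
```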
diff --git a/src/setup/src/setup.app.src b/src/setup/src/setup.app.src
deleted file mode 100644
index ae685c971..000000000
--- a/src/setup/src/setup.app.src
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, setup,
- [
- {description, ""},
- {vsn, "1"},
- {registered, []},
- {applications, [
- kernel,
- stdlib,
- couch_epi,
- chttpd,
- couch_log
- ]},
- {mod, { setup_app, []}},
- {env, []}
- ]}.
diff --git a/src/setup/src/setup.erl b/src/setup/src/setup.erl
deleted file mode 100644
index 1757a43e7..000000000
--- a/src/setup/src/setup.erl
+++ /dev/null
@@ -1,395 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup).
-
--export([enable_cluster/1, finish_cluster/1, add_node/1, receive_cookie/1]).
--export([is_cluster_enabled/0, has_cluster_system_dbs/1, cluster_system_dbs/0]).
--export([enable_single_node/1, is_single_node_enabled/1]).
-
--include_lib("../couch/include/couch_db.hrl").
-
-require_admins(undefined, {undefined, undefined}) ->
- % no admin in CouchDB, no admin in request
- throw({error, "Cluster setup requires admin account to be configured"});
-require_admins(_, _) ->
- ok.
-
-require_node_count(undefined) ->
- throw({error, "Cluster setup requires node_count to be configured"});
-require_node_count(_) ->
- ok.
-
-error_local_bind_address() ->
- throw({error, "Cluster setup requires a remote bind_address (not 127.0.0.1 nor ::1)"}).
-
-error_invalid_bind_address(InvalidBindAddress) ->
- throw(
- {error,
-            io_lib:format(
- "Setup requires a valid IP bind_address. " ++
- "~p is invalid.",
- [InvalidBindAddress]
- )}
- ).
-
-require_remote_bind_address(OldBindAddress, NewBindAddress) ->
- case {OldBindAddress, NewBindAddress} of
- {"127.0.0.1", undefined} -> error_local_bind_address();
- {_, <<"127.0.0.1">>} -> error_local_bind_address();
- {"::1", undefined} -> error_local_bind_address();
- {_, <<"::1">>} -> error_local_bind_address();
- {_, undefined} -> ok;
- {_, PresentNewBindAddress} -> require_valid_bind_address(PresentNewBindAddress)
- end.
-
-require_valid_bind_address(BindAddress) ->
- ListBindAddress = binary_to_list(BindAddress),
- case inet_parse:address(ListBindAddress) of
- {ok, _} -> ok;
- {error, _} -> error_invalid_bind_address(ListBindAddress)
- end.
-
-is_cluster_enabled() ->
- % bind_address != 127.0.0.1 AND admins != empty
- BindAddress = config:get("chttpd", "bind_address"),
- Admins = config:get("admins"),
- case {BindAddress, Admins} of
- {"127.0.0.1", _} -> false;
- {_, []} -> false;
- {_, _} -> true
- end.
-
-is_single_node_enabled(Dbs) ->
- % admins != empty AND dbs exist
- Admins = config:get("admins"),
- HasDbs = has_cluster_system_dbs(Dbs),
- case {Admins, HasDbs} of
- {[], _} -> false;
- {_, false} -> false;
- {_, _} -> true
- end.
-
-cluster_system_dbs() ->
- ["_users", "_replicator"].
-
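-% Returns true only when every db in the list can be opened through fabric,
-% i.e. all of the expected cluster system databases already exist.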
-has_cluster_system_dbs([]) ->
- true;
-has_cluster_system_dbs([Db | Dbs]) ->
- case catch fabric:get_db_info(Db) of
- {ok, _} -> has_cluster_system_dbs(Dbs);
- _ -> false
- end.
-
-enable_cluster(Options) ->
- case couch_util:get_value(remote_node, Options, undefined) of
- undefined ->
- enable_cluster_int(Options, is_cluster_enabled());
- _ ->
- enable_cluster_http(Options)
- end.
-
-get_remote_request_options(Options) ->
- case couch_util:get_value(remote_current_user, Options, undefined) of
- undefined ->
- [];
- _ ->
- [
- {basic_auth, {
- binary_to_list(couch_util:get_value(remote_current_user, Options)),
- binary_to_list(couch_util:get_value(remote_current_password, Options))
- }}
- ]
- end.
-
-enable_cluster_http(Options) ->
- % POST to nodeB/_setup
- RequestOptions = get_remote_request_options(Options),
- AdminUsername = couch_util:get_value(username, Options),
- AdminPasswordHash = config:get("admins", binary_to_list(AdminUsername)),
-
- Body = ?JSON_ENCODE(
- {[
- {<<"action">>, <<"enable_cluster">>},
- {<<"username">>, AdminUsername},
- {<<"password_hash">>, ?l2b(AdminPasswordHash)},
- {<<"bind_address">>, couch_util:get_value(bind_address, Options)},
- {<<"port">>, couch_util:get_value(port, Options)},
- {<<"node_count">>, couch_util:get_value(node_count, Options)}
- ]}
- ),
-
- Headers = [
- {"Content-Type", "application/json"}
- ],
-
- RemoteNode = couch_util:get_value(remote_node, Options),
- Port = get_port(couch_util:get_value(port, Options, 5984)),
-
- Url = binary_to_list(<<"http://", RemoteNode/binary, ":", Port/binary, "/_cluster_setup">>),
-
- case ibrowse:send_req(Url, Headers, post, Body, RequestOptions) of
- {ok, "201", _, _} ->
- ok;
- Else ->
- {error, Else}
- end.
-
-enable_cluster_int(_Options, true) ->
- {error, cluster_enabled};
-enable_cluster_int(Options, false) ->
- % if no admin in config and no admin in req -> error
- CurrentAdmins = config:get("admins"),
- NewCredentials = {
- proplists:get_value(username, Options),
- case proplists:get_value(password_hash, Options) of
- undefined -> proplists:get_value(password, Options);
- Pw -> Pw
- end
- },
- ok = require_admins(CurrentAdmins, NewCredentials),
- % if bind_address == 127.0.0.1 and no bind_address in req -> error
- CurrentBindAddress = config:get("chttpd", "bind_address"),
- NewBindAddress = proplists:get_value(bind_address, Options),
- ok = require_remote_bind_address(CurrentBindAddress, NewBindAddress),
- NodeCount = couch_util:get_value(node_count, Options),
- ok = require_node_count(NodeCount),
- Port = proplists:get_value(port, Options),
-
- setup_node(NewCredentials, NewBindAddress, NodeCount, Port),
- couch_log:debug("Enable Cluster: ~p~n", [Options]).
-
-set_admin(Username, Password) ->
- config:set("admins", binary_to_list(Username), binary_to_list(Password), #{sensitive => true}).
-
-setup_node(NewCredentials, NewBindAddress, NodeCount, Port) ->
- case NewCredentials of
- {undefined, undefined} ->
- ok;
- {Username, Password} ->
- set_admin(Username, Password)
- end,
-
- ok = require_valid_bind_address(NewBindAddress),
- case NewBindAddress of
- undefined ->
- config:set("chttpd", "bind_address", "0.0.0.0");
- NewBindAddress ->
- config:set("chttpd", "bind_address", binary_to_list(NewBindAddress))
- end,
-
- % for single node setups, set n=1, for larger setups, don’t
- % exceed n=3 as a default
- config:set_integer("cluster", "n", min(NodeCount, 3)),
-
- case Port of
- undefined ->
- ok;
- Port when is_binary(Port) ->
- config:set("chttpd", "port", binary_to_list(Port));
- Port when is_integer(Port) ->
- config:set_integer("chttpd", "port", Port)
- end.
-
-finish_cluster(Options) ->
- % ensure that uuid is set
- couch_server:get_uuid(),
-
- ok = wait_connected(),
- ok = sync_admins(),
- ok = sync_uuid(),
- ok = sync_auth_secret(),
- Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
- finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)).
-
-wait_connected() ->
- Nodes = other_nodes(),
- Result = test_util:wait(fun() ->
- case disconnected(Nodes) of
- [] -> ok;
- _ -> wait
- end
- end),
- case Result of
- timeout ->
- Reason = "Cluster setup timed out waiting for nodes to connect",
- throw({setup_error, Reason});
- ok ->
- ok
- end.
-
-other_nodes() ->
- mem3:nodes() -- [node()].
-
-disconnected(Nodes) ->
- lists:filter(
- fun(Node) ->
- case net_adm:ping(Node) of
- pong -> false;
- pang -> true
- end
- end,
- Nodes
- ).
-
-sync_admins() ->
- ok = lists:foreach(
- fun({User, Pass}) ->
- sync_admin(User, Pass)
- end,
- config:get("admins")
- ).
-
-sync_admin(User, Pass) ->
- sync_config("admins", User, Pass).
-
-sync_uuid() ->
- Uuid = config:get("couchdb", "uuid"),
- sync_config("couchdb", "uuid", Uuid).
-
-sync_auth_secret() ->
- Secret = config:get("chttpd_auth", "secret"),
- sync_config("chttpd_auth", "secret", Secret).
-
-sync_config(Section, Key, Value) ->
- {Results, Errors} = rpc:multicall(
- other_nodes(),
- config,
- set,
- [Section, Key, Value]
- ),
- case validate_multicall(Results, Errors) of
- ok ->
- ok;
- error ->
- couch_log:error(
- "~p sync_admin results ~p errors ~p",
- [?MODULE, Results, Errors]
- ),
- Reason = "Cluster setup unable to sync admin passwords",
- throw({setup_error, Reason})
- end.
-
-validate_multicall(Results, Errors) ->
- AllOk = lists:all(
- fun
- (ok) -> true;
- (_) -> false
- end,
- Results
- ),
- case AllOk andalso Errors == [] of
- true ->
- ok;
- false ->
- error
- end.
-
-finish_cluster_int(_Dbs, true) ->
- {error, cluster_finished};
-finish_cluster_int(Dbs, false) ->
- lists:foreach(fun fabric:create_db/1, Dbs).
-
-enable_single_node(Options) ->
- % if no admin in config and no admin in req -> error
- CurrentAdmins = config:get("admins"),
- NewCredentials = {
- proplists:get_value(username, Options),
- case proplists:get_value(password_hash, Options) of
- undefined -> proplists:get_value(password, Options);
- Pw -> Pw
- end
- },
- ok = require_admins(CurrentAdmins, NewCredentials),
- % skip bind_address validation, anything is fine
- NewBindAddress = proplists:get_value(bind_address, Options),
- Port = proplists:get_value(port, Options),
-
- setup_node(NewCredentials, NewBindAddress, 1, Port),
- Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
- finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)),
- couch_log:debug("Enable Single Node: ~p~n", [Options]).
-
-add_node(Options) ->
- add_node_int(Options, is_cluster_enabled()).
-
-add_node_int(_Options, false) ->
- {error, cluster_not_enabled};
-add_node_int(Options, true) ->
- couch_log:debug("add node_int: ~p~n", [Options]),
- ErlangCookie = erlang:get_cookie(),
-
- % POST to nodeB/_setup
- RequestOptions = [
- {basic_auth, {
- binary_to_list(proplists:get_value(username, Options)),
- binary_to_list(proplists:get_value(password, Options))
- }}
- ],
-
- Body = ?JSON_ENCODE(
- {[
- {<<"action">>, <<"receive_cookie">>},
- {<<"cookie">>, atom_to_binary(ErlangCookie, utf8)}
- ]}
- ),
-
- Headers = [
- {"Content-Type", "application/json"}
- ],
-
- Host = proplists:get_value(host, Options),
- Port = get_port(proplists:get_value(port, Options, 5984)),
- Name = proplists:get_value(name, Options, get_default_name(Port)),
-
- Url = binary_to_list(<<"http://", Host/binary, ":", Port/binary, "/_cluster_setup">>),
-
- case ibrowse:send_req(Url, Headers, post, Body, RequestOptions) of
- {ok, "201", _, _} ->
- % when done, PUT :5986/nodes/nodeB
- create_node_doc(Host, Name);
- Else ->
- Else
- end.
-
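-% Normalise the port to a binary so it can be spliced into the setup URL.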
-get_port(Port) when is_integer(Port) ->
- list_to_binary(integer_to_list(Port));
-get_port(Port) when is_list(Port) ->
- list_to_binary(Port);
-get_port(Port) when is_binary(Port) ->
- Port.
-
-create_node_doc(Host, Name) ->
- {ok, Db} = couch_db:open_int(<<"_nodes">>, []),
- Doc = {[{<<"_id">>, <<Name/binary, "@", Host/binary>>}]},
- Options = [],
- CouchDoc = couch_doc:from_json_obj(Doc),
-
- couch_db:update_doc(Db, CouchDoc, Options).
-
-get_default_name(Port) ->
- case Port of
- % shortcut for easier development
- <<"15984">> ->
- <<"node1">>;
- <<"25984">> ->
- <<"node2">>;
- <<"35984">> ->
- <<"node3">>;
-        % by default, all nodes have the name `couchdb`
- _ ->
- <<"couchdb">>
- end.
-
-receive_cookie(Options) ->
- Cookie = proplists:get_value(cookie, Options),
- erlang:set_cookie(node(), binary_to_atom(Cookie, latin1)).
diff --git a/src/setup/src/setup_app.erl b/src/setup/src/setup_app.erl
deleted file mode 100644
index 330450131..000000000
--- a/src/setup/src/setup_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
- setup_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/setup/src/setup_epi.erl b/src/setup/src/setup_epi.erl
deleted file mode 100644
index df717dbc3..000000000
--- a/src/setup/src/setup_epi.erl
+++ /dev/null
@@ -1,48 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- setup.
-
-providers() ->
- [
- {chttpd_handlers, setup_httpd_handlers}
- ].
-
-services() ->
- [].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/setup/src/setup_httpd.erl b/src/setup/src/setup_httpd.erl
deleted file mode 100644
index 418a72845..000000000
--- a/src/setup/src/setup_httpd.erl
+++ /dev/null
@@ -1,186 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_httpd).
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_setup_req/1]).
-
-handle_setup_req(#httpd{method = 'POST'} = Req) ->
- ok = chttpd:verify_is_server_admin(Req),
- couch_httpd:validate_ctype(Req, "application/json"),
- Setup = get_body(Req),
- couch_log:notice("Setup: ~p~n", [remove_sensitive(Setup)]),
- Action = binary_to_list(couch_util:get_value(<<"action">>, Setup, <<"missing">>)),
- case handle_action(Action, Setup) of
- ok ->
- chttpd:send_json(Req, 201, {[{ok, true}]});
- {error, Message} ->
- couch_httpd:send_error(Req, 400, <<"bad_request">>, Message)
- end;
-handle_setup_req(#httpd{method = 'GET'} = Req) ->
- ok = chttpd:verify_is_server_admin(Req),
- Dbs = chttpd:qs_json_value(Req, "ensure_dbs_exist", setup:cluster_system_dbs()),
- couch_log:notice("Dbs: ~p~n", [Dbs]),
- SingleNodeConfig = config:get_boolean("couchdb", "single_node", false),
- case SingleNodeConfig of
- true ->
- chttpd:send_json(Req, 200, {[{state, single_node_enabled}]});
- _ ->
- case config:get_integer("cluster", "n", 3) of
- 1 ->
- case setup:is_single_node_enabled(Dbs) of
- false ->
- chttpd:send_json(Req, 200, {[{state, single_node_disabled}]});
- true ->
- chttpd:send_json(Req, 200, {[{state, single_node_enabled}]})
- end;
- _ ->
- case setup:is_cluster_enabled() of
- false ->
- chttpd:send_json(Req, 200, {[{state, cluster_disabled}]});
- true ->
- case setup:has_cluster_system_dbs(Dbs) of
- false ->
- chttpd:send_json(Req, 200, {[{state, cluster_enabled}]});
- true ->
- chttpd:send_json(Req, 200, {[{state, cluster_finished}]})
- end
- end
- end
- end;
-handle_setup_req(#httpd{} = Req) ->
- chttpd:send_method_not_allowed(Req, "GET,POST").
-
-get_options(Options, Setup) ->
- ExtractValues = fun({Tag, Option}, OptionsAcc) ->
- case couch_util:get_value(Option, Setup) of
- undefined -> OptionsAcc;
- Value -> [{Tag, Value} | OptionsAcc]
- end
- end,
- lists:foldl(ExtractValues, [], Options).
-
-handle_action("enable_cluster", Setup) ->
- Options = get_options(
- [
- {username, <<"username">>},
- {password, <<"password">>},
- {password_hash, <<"password_hash">>},
- {bind_address, <<"bind_address">>},
- {port, <<"port">>},
- {remote_node, <<"remote_node">>},
- {remote_current_user, <<"remote_current_user">>},
- {remote_current_password, <<"remote_current_password">>},
- {node_count, <<"node_count">>}
- ],
- Setup
- ),
- case setup:enable_cluster(Options) of
- {error, cluster_enabled} ->
- {error, <<"Cluster is already enabled">>};
- _ ->
- ok
- end;
-handle_action("finish_cluster", Setup) ->
- couch_log:notice("finish_cluster: ~p~n", [remove_sensitive(Setup)]),
-
- Options = get_options(
- [
- {ensure_dbs_exist, <<"ensure_dbs_exist">>}
- ],
- Setup
- ),
- case setup:finish_cluster(Options) of
- {error, cluster_finished} ->
- {error, <<"Cluster is already finished">>};
- Else ->
- couch_log:notice("finish_cluster: ~p~n", [Else]),
- ok
- end;
-handle_action("enable_single_node", Setup) ->
- couch_log:notice("enable_single_node: ~p~n", [remove_sensitive(Setup)]),
-
- Options = get_options(
- [
- {ensure_dbs_exist, <<"ensure_dbs_exist">>},
- {username, <<"username">>},
- {password, <<"password">>},
- {password_hash, <<"password_hash">>},
- {bind_address, <<"bind_address">>},
- {port, <<"port">>}
- ],
- Setup
- ),
- case setup:enable_single_node(Options) of
- {error, cluster_finished} ->
- {error, <<"Cluster is already finished">>};
- Else ->
- couch_log:notice("Else: ~p~n", [Else]),
- ok
- end;
-handle_action("add_node", Setup) ->
- couch_log:notice("add_node: ~p~n", [remove_sensitive(Setup)]),
-
- Options = get_options(
- [
- {username, <<"username">>},
- {password, <<"password">>},
- {host, <<"host">>},
- {port, <<"port">>},
- {name, <<"name">>}
- ],
- Setup
- ),
- case setup:add_node(Options) of
- {error, cluster_not_enabled} ->
- {error, <<"Cluster is not enabled.">>};
- {error, {conn_failed, {error, econnrefused}}} ->
- {error, <<"Add node failed. Invalid Host and/or Port.">>};
- {error, wrong_credentials} ->
-            {error, <<"Add node failed. Invalid admin credentials.">>};
- {error, Message} ->
- {error, Message};
- _ ->
- ok
- end;
-handle_action("remove_node", Setup) ->
- couch_log:notice("remove_node: ~p~n", [remove_sensitive(Setup)]);
-handle_action("receive_cookie", Setup) ->
- couch_log:notice("receive_cookie: ~p~n", [remove_sensitive(Setup)]),
- Options = get_options(
- [
- {cookie, <<"cookie">>}
- ],
- Setup
- ),
- case setup:receive_cookie(Options) of
- {error, Error} ->
- {error, Error};
- _ ->
- ok
- end;
-handle_action(_, _) ->
- couch_log:notice("invalid_action: ~n", []),
-    {error, <<"Invalid Action">>}.
-
-get_body(Req) ->
- case catch couch_httpd:json_body_obj(Req) of
- {Body} ->
- Body;
- Else ->
- couch_log:notice("Body Fail: ~p~n", [Else]),
-            couch_httpd:send_error(Req, 400, <<"bad_request">>, <<"Missing JSON body">>)
- end.
-
-remove_sensitive(KVList) ->
- lists:keyreplace(<<"password">>, 1, KVList, {<<"password">>, <<"****">>}).
diff --git a/src/setup/src/setup_httpd_handlers.erl b/src/setup/src/setup_httpd_handlers.erl
deleted file mode 100644
index 994c217e8..000000000
--- a/src/setup/src/setup_httpd_handlers.erl
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1]).
-
-url_handler(<<"_cluster_setup">>) -> fun setup_httpd:handle_setup_req/1;
-url_handler(_) -> no_match.
-
-db_handler(_) -> no_match.
-
-design_handler(_) -> no_match.
diff --git a/src/setup/src/setup_sup.erl b/src/setup/src/setup_sup.erl
deleted file mode 100644
index e80ad8ef0..000000000
--- a/src/setup/src/setup_sup.erl
+++ /dev/null
@@ -1,44 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_sup).
-
--behaviour(supervisor).
-
-%% API
--export([start_link/0]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-%% ===================================================================
-%% API functions
-%% ===================================================================
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% ===================================================================
-%% Supervisor callbacks
-%% ===================================================================
-
-init([]) ->
- case config:get_boolean("couchdb", "single_node", false) of
- true ->
- setup:finish_cluster([]);
- false ->
- ok
- end,
- {ok, {{one_for_one, 5, 10}, couch_epi:register_service(setup_epi, [])}}.
diff --git a/src/setup/test/t-frontend-setup.sh b/src/setup/test/t-frontend-setup.sh
deleted file mode 100755
index e025cfba2..000000000
--- a/src/setup/test/t-frontend-setup.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh -ex
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-echo "To test, comment out the fake_uuid line in dev/run"
-
-HEADERS="-HContent-Type:application/json"
-# show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-
-# Enable Cluster on node A
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_cluster","username":"foo","password":"baz","bind_address":"0.0.0.0","node_count":2}' $HEADERS
-
-# Enable Cluster on node B
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_cluster","remote_node":"127.0.0.1","port":"25984","remote_current_user":"a","remote_current_password":"b","username":"foo","password":"baz","bind_address":"0.0.0.0","node_count":2}' $HEADERS
-
-# Add node B on node A
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"add_node","username":"foo","password":"baz","host":"127.0.0.1","port":25984,"name":"node2"}' $HEADERS
-
-# Show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-
-# Show db doesn’t exist on node A
-curl a:b@127.0.0.1:15984/foo
-
-# Show db doesn’t exist on node B
-curl a:b@127.0.0.1:25984/foo
-
-# Create database (on node A)
-curl -X PUT a:b@127.0.0.1:15984/foo
-
-# Show db does exist on node A
-curl a:b@127.0.0.1:15984/foo
-
-# Show db does exist on node B
-curl a:b@127.0.0.1:25984/foo
-
-# Finish cluster
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"finish_cluster"}' $HEADERS
-
-# Show system dbs exist on node A
-curl a:b@127.0.0.1:15984/_users
-curl a:b@127.0.0.1:15984/_replicator
-curl a:b@127.0.0.1:15984/_global_changes
-
-# Show system dbs exist on node B
-curl a:b@127.0.0.1:25984/_users
-curl a:b@127.0.0.1:25984/_replicator
-curl a:b@127.0.0.1:25984/_global_changes
-
-# Number of nodes is set to 2
-curl a:b@127.0.0.1:25984/_node/node2@127.0.0.1/_config/cluster/n
-
-# uuid and auth secret are the same
-curl a:b@127.0.0.1:15984/_node/node1@127.0.0.1/_config/couchdb/uuid
-curl a:b@127.0.0.1:15984/_node/node2@127.0.0.1/_config/couchdb/uuid
-
-curl a:b@127.0.0.1:15984/_node/node1@127.0.0.1/_config/couch_httpd_auth/secret
-curl a:b@127.0.0.1:15984/_node/node2@127.0.0.1/_config/couch_httpd_auth/secret
-
-
-echo "YAY ALL GOOD"
diff --git a/src/setup/test/t-single-node-auto-setup.sh b/src/setup/test/t-single-node-auto-setup.sh
deleted file mode 100755
index 0276990f5..000000000
--- a/src/setup/test/t-single-node-auto-setup.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh -ex
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-HEADERS="-HContent-Type:application/json"
-
-# Show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-curl a:b@127.0.0.1:15984/_all_dbs
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# Check the state again with a custom ensure_dbs_exist list
-curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_replicator","_users"]'
-
-echo "YAY ALL GOOD"
diff --git a/src/setup/test/t-single-node.sh b/src/setup/test/t-single-node.sh
deleted file mode 100755
index d49043773..000000000
--- a/src/setup/test/t-single-node.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/sh -ex
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-HEADERS="-HContent-Type:application/json"
-# show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# Enable Cluster on single node
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_single_node","username":"foo","password":"baz","bind_address":"127.0.0.1"}' $HEADERS
-
-# Show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-curl a:b@127.0.0.1:15984/_all_dbs
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# Delete a database
-curl -X DELETE a:b@127.0.0.1:15984/_global_changes
-
-# Should show single_node_disabled
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# Check the state again with a custom ensure_dbs_exist list
-curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_replicator","_users"]'
-
-# delete all the things
-curl -X DELETE a:b@127.0.0.1:15984/_replicator
-curl -X DELETE a:b@127.0.0.1:15984/_users
-
-# setup only creating _users
-curl -g a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_single_node","username":"foo","password":"baz","bind_address":"127.0.0.1","ensure_dbs_exist":["_users"]}' $HEADERS
-
-# check it
-curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_users"]'
-
-echo "YAY ALL GOOD"
diff --git a/src/setup/test/t.sh b/src/setup/test/t.sh
deleted file mode 100755
index 6bd74cdd7..000000000
--- a/src/setup/test/t.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/sh -ex
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-HEADERS="-HContent-Type:application/json"
-# show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-
-# Enable Cluster on node A
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_cluster","username":"foo","password":"baz","bind_address":"0.0.0.0","node_count":2}' $HEADERS
-
-# Enable Cluster on node B
-curl a:b@127.0.0.1:25984/_cluster_setup -d '{"action":"enable_cluster","username":"foo","password":"baz","bind_address":"0.0.0.0","node_count":2}' $HEADERS
-
-# Add node B on node A
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"add_node","username":"foo","password":"baz","host":"127.0.0.1","port":25984,"name":"node2"}' $HEADERS
-
-# Show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-
-# Show db doesn’t exist on node A
-curl a:b@127.0.0.1:15984/foo
-
-# Show db doesn’t exist on node B
-curl a:b@127.0.0.1:25984/foo
-
-# Create database (on node A)
-curl -X PUT a:b@127.0.0.1:15984/foo
-
-# Show db does exist on node A
-curl a:b@127.0.0.1:15984/foo
-
-# Show db does exist on node B
-curl a:b@127.0.0.1:25984/foo
-
-# Finish cluster
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"finish_cluster"}' $HEADERS
-
-# Show system dbs exist on node A
-curl a:b@127.0.0.1:15984/_users
-curl a:b@127.0.0.1:15984/_replicator
-curl a:b@127.0.0.1:15984/_metadata
-curl a:b@127.0.0.1:15984/_global_changes
-
-# Show system dbs exist on node B
-curl a:b@127.0.0.1:25984/_users
-curl a:b@127.0.0.1:25984/_replicator
-curl a:b@127.0.0.1:25984/_metadata
-curl a:b@127.0.0.1:25984/_global_changes
-
-# Number of nodes is set to 2
-curl a:b@127.0.0.1:25984/_node/node2@127.0.0.1/_config/cluster/n
-
-echo "YAY ALL GOOD"
diff --git a/src/smoosh/README.md b/src/smoosh/README.md
deleted file mode 100644
index 9f9a48074..000000000
--- a/src/smoosh/README.md
+++ /dev/null
@@ -1,140 +0,0 @@
-Smoosh
-======
-
-Smoosh is CouchDB's auto-compaction daemon. It is notified when
-databases and views are updated and may then elect to enqueue them for
-compaction.
-
-API
----
-
-All API functions are in smoosh.erl and only the exported functions in
-this module should be called from outside of the smoosh application.
-
-Additionally, smoosh responds to config changes dynamically and these
-changes are the principal means of interacting with smoosh.
-
-Top-Level Settings
-------------------
-
-The main settings one interacts with are:
-
-<dl>
-<dt>db_channels<dd>A comma-separated list of channel names for
-databases.
-<dt>view_channels<dd>A comma-separated list of channel names for
-views.
-<dt>staleness<dd>The number of minutes that the (expensive) priority
-calculation can be stale for before it is recalculated. Defaults to 5.
-</dl>
-
-Sometimes it's necessary to use the following:
-
-<dl>
-<dt>cleanup_index_files</dt><dd>Whether smoosh cleans up the files
-for indexes that have been deleted. Defaults to false and probably
-shouldn't be changed unless the cluster is running low on disk space,
-and only after considering the ramifications.</dd>
-<dt>wait_secs</dt><dd>The time a channel waits before starting compactions
-to allow time to observe the system and make a smarter decision about what
-to compact first. Hardly ever changed from the default. Default 30 (seconds).
-</dd>
-</dl>
-
-Channel Settings
-----------------
-
-A channel has several important settings that control runtime
-behavior.
-
-<dl>
-<dt>capacity<dd>The maximum number of items the channel can hold (lowest priority item is removed to make room for new items). Defaults to 9999.
-<dt>concurrency<dd>The maximum number of jobs that can run concurrently. Defaults to 1.
-<dt>max_priority<dd>The item must have a priority lower than this to be enqueued. Defaults to infinity.
-<dt>max_size<dd>The item must be no larger than this many bytes in length to be enqueued. Defaults to infinity.
-<dt>min_priority<dd>The item must have a priority at least this high to be enqueued. Defaults to 5.0 for ratio and 16 MB for slack.
-<dt>min_changes<dd>The minimum number of changes since last compaction before the item will be enqueued. Defaults to 0. Currently only works for databases.
-<dt>min_size<dd>The item must be at least this many bytes in length to be enqueued. Defaults to 1 MB (1048576 bytes).
-<dt>priority<dd>The method used to calculate priority. Can be ratio (calculated as disk_size/data_size) or slack (calculated as disk_size-data_size). Defaults to ratio.
-</dl>
-
-Structure
----------
-
-Smoosh consists of a central gen_server (smoosh_server) which manages
-a number of subordinate smoosh_channel gen_servers. This is not
-properly managed by OTP yet.
-
-Compaction Scheduling Algorithm
--------------------------------
-
-Smoosh decides whether to compact a database or view by evaluating the
-item against the selection criteria of each _channel_ in the order
-they are configured. By default there are two channels for databases
-("ratio_dbs" and "slack_dbs"), and two channels for views ("ratio_views"
-and "slack_views")
-
-Smoosh will enqueue the new item to the first channel that accepts
-it. If none accept it, the item is not enqueued for compaction.
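-
-As a rough illustration only -- this is not the actual smoosh_server code, and
-`accepts/2` is a made-up stand-in for the per-channel checks -- the selection
-logic amounts to walking the configured channels in order and stopping at the
-first one that accepts the item:
-
-    find_channel(_Item, []) ->
-        not_enqueued;
-    find_channel(Item, [Channel | Rest]) ->
-        case accepts(Channel, Item) of
-            true -> {ok, Channel};
-            false -> find_channel(Item, Rest)
-        end.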
-
-Notes on the data_size value
-----------------------------
-
-Every database and view shard has a data_size value. In CouchDB this
-accurately reflects the post-compaction file size. In DbCore, it is
-the size of the file that we bill for. It excludes the b+tree and
-database footer overhead. We also bill customers for the uncompressed
-size of their documents, though we store them compressed on disk.
-These two systems were developed independently (ours predates
-CouchDB's) and DbCore only calculates the billing size value.
-
-Because of the way our data_size is currently calculated, it can
-sometimes be necessary to enqueue databases and views with very low
-ratios. Due to this, it is also currently impossible to tell how
-optimally compacted a cluster is.
-
-Example config commands
------------------------
-
-Change the set of database channels:
-
- config:set("smoosh", "db_channels", "small_dbs,medium_dbs,large_dbs").
-
-Change the set of database channels on all live nodes in the cluster:
-
- rpc:multicall(config, set, ["smoosh", "db_channels", "small_dbs,medium_dbs,large_dbs"]).
-
-Change the concurrency of the ratio_dbs database channel to 2:
-
- config:set("smoosh.ratio_dbs", "concurrency", "2").
-
-Change it on all live nodes in the cluster:
-
- rpc:multicall(config, set, ["smoosh.ratio_dbs", "concurrency", "2"]).
-
-Example API commands
---------------------
-
-smoosh:status()
-
-This prints the state of each channel; how many jobs they are
-currently running and how many jobs are enqueued (as well as the
-lowest and highest priority of those enqueued items). The idea is to
-provide, at a glance, sufficient insight into smoosh that an operator
-can assess whether smoosh is adequately targeting the reclaimable
-space in the cluster. In general, a healthy status output will have
-items in the ratio_dbs and ratio_views channels. Owing to the default
-settings, the slack_dbs and slack_views will almost certainly have
-items in them. Historically, we've not found that the slack channels,
-on their own, are particularly adept at keeping things well compacted.
-
-smoosh:enqueue_all_dbs(), smoosh:enqueue_all_views()
-
-These functions do just what they say, but should not generally need to
-be called; smoosh is supposed to be autonomous. Call them if you get
-alerted to a disk space issue, as they might well help. If they do, that
-indicates a bug in smoosh, as it should already have enqueued eligible
-shards once they met the configured settings.
-
-
-
diff --git a/src/smoosh/operator_guide.md b/src/smoosh/operator_guide.md
deleted file mode 100644
index fafee30d4..000000000
--- a/src/smoosh/operator_guide.md
+++ /dev/null
@@ -1,398 +0,0 @@
-# An operator's guide to smoosh
-
-Smoosh is the auto-compactor for the databases. It automatically selects and
-processes the compaction of database shards on each node.
-
-## Smoosh Channels
-
-Smoosh works using the concept of channels. A channel is essentially a queue of pending
-compactions. There are separate sets of channels for database and view compactions. Each
-channel is assigned a configuration which defines whether a compaction ends up in
-the channel's queue and how compactions are prioritised within that queue.
-
-Smoosh takes each channel and works through the compactions queued in each in priority
-order. Each channel is processed concurrently, so the priority levels only matter within
-a given channel.
-
-Finally, each channel has an assigned number of active compactions, which defines how
-many compactions happen for that channel in parallel. For example, a cluster with
-a lot of database churn but few views might require more active compactions for the
-database channel(s).
-
-It's important to remember that a channel is local to a dbcore node; that is,
-each node maintains and processes an independent set of compactions.
-
-### Channel configuration options
-
-#### Channel types
-
-Each channel has a basic type for the algorithm it uses to select pending
-compactions for its queue and how it prioritises them.
-
-The two queue types are:
-
-* **ratio**: this uses the ratio `total_bytes / user_bytes` as its driving
-calculation. The result _X_ must be greater than some configurable value _Y_ for a
-compaction to be added to the queue. Compactions are then prioritised for
-higher values of _X_.
-
-* **slack**: this uses `total_bytes - user_bytes` as its driving calculation.
-The result _X_ must be greater than some configurable value _Y_ for a compaction
-to be added to the queue. Compactions are prioritised for higher values of _X_.
-
-In both cases, _Y_ is set using the `min_priority` configuration variable. The
-calculation of _X_ is described in [Priority calculation](#priority-calculation), below.
-
-Both algorithms operate on two main measures:
-
-* **user_bytes**: this is the amount of data the user has in the file. It
-doesn't include storage overhead: old revisions, on-disk btree structure and
-so on.
-
-* **total_bytes**: the size of the file on disk.
-
-Channel type is set using the `priority` configuration setting.
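-
-For example, a shard with `total_bytes` of 120 GB and `user_bytes` of 20 GB has
-a ratio of 6.0 and a slack of 100 GB, so with the default `min_priority` values
-(5.0 for ratio, 16 MB for slack) either channel type would accept it.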
-
-#### Further configuration options
-
-Beyond its basic type, there are several other configuration options which
-can be applied to a queue.
-
-*All options MUST be set as strings.* See the [smoosh readme][srconfig] for
-all settings and their defaults.
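-
-For example, even numeric settings are passed as strings (the value below is
-hypothetical, shown only to illustrate the point):
-
-```
-config:set("smoosh.ratio_dbs", "min_priority", "2.0").
-```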
-
-#### Priority calculation
-
-The algorithm type and certain configuration options feed into the priority
-calculation.
-
-The priority is calculated when a compaction is enqueued. As each channel
-has a different configuration, each channel will end up with a different
-priority value. The enqueue code checks each channel in turn to see whether the
-compaction passes its configured priority threshold (`min_priority`). Once
-a channel is found that can accept the compaction, the compaction is added
-to that channel's queue and the enqueue process stops. Therefore the
-ordering of channels has a bearing on which channel a compaction ends up in.
-
-If you want to follow this along, the call order is all in `smoosh_server`,
-`enqueue_request -> find_channel -> get_priority`.
-
-The priority calculation is probably the easiest way to understand the effects
-of configuration variables. It's defined in `smoosh_server#get_priority/3`,
-currently [here][ss].
-
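-In essence -- a simplified sketch, not the real `get_priority/3`, which also
-honours settings such as `min_size` and `min_changes` -- the two channel types
-boil down to:
-
-```
-priority(ratio, TotalBytes, UserBytes) when UserBytes > 0 ->
-    TotalBytes / UserBytes;
-priority(slack, TotalBytes, UserBytes) ->
-    TotalBytes - UserBytes.
-```
-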
-[ss]: https://github.com/apache/couchdb-smoosh/blob/master/src/smoosh_server.erl#L277
-[srconfig]: https://github.com/apache/couchdb-smoosh#channel-settings
-
-#### Background Detail
-
-`user_bytes` is called `data_size` in `db_info` blocks. It is the total of all bytes
-that are used to store docs and their attachments.
-
-Since `.couch` files are append only, every update adds data to the file. When
-you update a btree, a new leaf node is written, as are all the nodes back up to
-the root. In this update, old data is never overwritten, and those parts of the
-file are no longer live; this includes old btree nodes and document bodies.
-Compaction takes this file and writes a new file that only contains live data.
-
-`total_bytes` is the number of bytes in the file as reported by `ls -al filename`.
-
-#### Flaws
-
-An important flaw in this calculation is that `total_bytes` takes into account
-the compression of data on disk, whereas `user_bytes` does not. This can give
-unexpected results to calculations, as the values are not directly comparable.
-
-However, it's the best measure we currently have.
-
-[Even more info](https://github.com/apache/couchdb-smoosh#notes-on-the-data_size-value).
-
-#### State diagram
-
-Below is a diagram of smoosh's initial state during the recovery process.
-
-```
-stateDiagram
- [*] --> init
- init --> start_recovery: send_after(?START_DELAY_IN_MSEC, self(), start_recovery)
- note right of start_recovery
- activated = false
- paused = true
- end note
- start_recovery --> activate: send_after(?ACTIVATE_DELAY_IN_MSEC, self(), activate)
- note right of activate
- state has been recovered
- activated = true
- paused = true
- end note
- activate --> schedule_unpause
- schedule_unpause --> [*]: after 30 sec, paused = false and compaction of new jobs begin
-```
-
-![Smoosh State Recovery Process Diagram](recovery_process_diagram.jpeg)
-
-### Defining a channel
-
-Defining a channel is done via normal dbcore configuration, with some
-convention as to the parameter names.
-
-Channel configuration is defined using `smoosh.channel_name` top level config
-options. Defining a channel is just setting the various options you want
-for the channel, then bringing it into smoosh's sets of active channels by
-adding it to either `db_channels` or `view_channels`.
-
-This means that smoosh channels can be defined either for a single node or
-globally across a cluster, by setting the configuration locally or globally,
-respectively. In the example below, we set up a new global channel.
-
-It's important to choose good channel names. There are some conventional ones:
-
-* `ratio_dbs`: a ratio channel for dbs, usually using the default settings.
-* `slack_dbs`: a slack channel for dbs, usually using the default settings.
-* `ratio_views`: a ratio channel for views, usually using the default settings.
-* `slack_views`: a slack channel for views, usually using the default settings.
-
-These four are defined by default if there are no others set ([source][source1]).
-
-[source1]: https://github.com/apache/couchdb-smoosh/blob/master/src/smoosh_server.erl#L75
-
-And some standard names for ones we often have to add:
-
-* `big_dbs`: a ratio channel for only enqueuing large database shards. What
- _large_ means is very workload specific.
-
-Channels have certain defaults for their configuration, defined in the
-[smoosh readme][srconfig]. It's only necessary to set up how this channel
-differs from those defaults. Below, we just need to set the `min_size` and
-`concurrency` settings, and allow the `priority` to default to `ratio`
-along with the other defaults.
-
-```bash
-# Define the new channel
-(couchdb@db1.foo.bar)3> rpc:multicall(config, set, ["smoosh.big_dbs", "min_size", "20000000000"]).
-{[ok,ok,ok],[]}
-(couchdb@db1.foo.bar)3> rpc:multicall(config, set, ["smoosh.big_dbs", "concurrency", "2"]).
-{[ok,ok,ok],[]}
-
-# Add the channel to the db_channels set -- note we need to get the original
-# value first so we can add the new one to the existing list!
-(couchdb@db1.foo.bar)5> rpc:multicall(config, get, ["smoosh", "db_channels"]).
-{["ratio_dbs","ratio_dbs","ratio_dbs"],[]}
-(couchdb@db1.foo.bar)6> rpc:multicall(config, set, ["smoosh", "db_channels", "ratio_dbs,big_dbs"]).
-{[ok,ok,ok],[]}
-```
-
-### Viewing active channels
-
-```bash
-(couchdb@db3.foo.bar)3> rpc:multicall(config, get, ["smoosh", "db_channels"]).
-{["ratio_dbs,big_dbs","ratio_dbs,big_dbs","ratio_dbs,big_dbs"],[]}
-(couchdb@db3.foo.bar)4> rpc:multicall(config, get, ["smoosh", "view_channels"]).
-{["ratio_views","ratio_views","ratio_views"],[]}
-```
-
-### Removing a channel
-
-```bash
-# Remove it from the active set
-(couchdb@db1.foo.bar)5> rpc:multicall(config, get, ["smoosh", "db_channels"]).
-{["ratio_dbs,big_dbs", "ratio_dbs,big_dbs", "ratio_dbs,big_dbs"],[]}
-(couchdb@db1.foo.bar)6> rpc:multicall(config, set, ["smoosh", "db_channels", "ratio_dbs"]).
-{[ok,ok,ok],[]}
-
-# Delete the config -- you need to delete each value individually
-(couchdb@db1.foo.bar)3> rpc:multicall(config, delete, ["smoosh.big_dbs", "concurrency"]).
-{[ok,ok,ok],[]}
-(couchdb@db1.foo.bar)3> rpc:multicall(config, delete, ["smoosh.big_dbs", "min_size"]).
-{[ok,ok,ok],[]}
-```
-
-### Getting channel configuration
-
-As far as I know, you have to get each setting separately:
-
-```
-(couchdb@db1.foo.bar)1> rpc:multicall(config, get, ["smoosh.big_dbs", "concurrency"]).
-{["2","2","2"],[]}
-
-```
-
-### Setting channel configuration
-
-The same as defining a channel, you just need to set the new value:
-
-```
-(couchdb@db1.foo.bar)2> rpc:multicall(config, set, ["smoosh.ratio_dbs", "concurrency", "1"]).
-{[ok,ok,ok],[]}
-```
-
-It sometimes takes a little while to take effect.
-
-
-
-## Standard operating procedures
-
-There are a few standard things that operators often have to do when responding
-to pages.
-
-In addition to the below, in some circumstances it's useful to define new
-channels with certain properties (`big_dbs` is a common one) if smoosh isn't
-selecting and prioritising compactions that well.
-
-### Checking smoosh's status
-
-You can see the queued items for each channel by going into `remsh` on a node
-and using:
-
-```
-> smoosh:status().
-{ok,[{"ratio_dbs",
- [{active,1},
- {starting,0},
- {waiting,[{size,522},
- {min,{5.001569007970237,{1378,394651,323864}}},
- {max,{981756.5441159063,{1380,370286,655752}}}]}]},
- {"slack_views",
- [{active,1},
- {starting,0},
- {waiting,[{size,819},
- {min,{16839814,{1375,978920,326458}}},
- {max,{1541336279,{1380,370205,709896}}}]}]},
- {"slack_dbs",
- [{active,1},
- {starting,0},
- {waiting,[{size,286},
- {min,{19004944,{1380,295245,887295}}},
- {max,{48770817098,{1380,370185,876596}}}]}]},
- {"ratio_views",
- [{active,1},
- {starting,0},
- {waiting,[{size,639},
- {min,{5.0126340031149335,{1380,186581,445489}}},
- {max,{10275.555632057285,{1380,370411,421477}}}]}]}]}
-```
-
-This gives you the node-local status for each queue.
-
-Under each channel there is some information about the channel:
-
-* `active`: number of current compactions in the channel.
-* `starting`: number of compactions starting-up.
-* `waiting`: number of queued compactions.
- * `min` and `max` give an idea of the queued jobs' effectiveness. The values
- for these are obviously dependent on whether the queue is ratio or slack.
-
-For ratio queues, the default minimum for smoosh to enqueue a compaction is 5. In
-the example above, we can guess that 981,756 is quite high. This could be a
-small database, however, so it doesn't necessarily mean useful compactions
-from the point of view of reclaiming disk space.
-
-For this example, we can see that there are quite a lot of queued compactions,
-but we don't know which would be most effective to run to reclaim disk space.
-It's also worth noting that the waiting queue sizes are only meaningful
-relative to other factors on the cluster (e.g., db number and size).
-
-
-### Smoosh IOQ priority
-
-This is a global setting which affects all channels. Increasing it allows each
-active compaction to (hopefully) proceed faster as the compaction work is of
-a higher priority relative to other jobs. Decreasing it (hopefully) has the
-converse effect.
-
-By this point you'll [know whether smoosh is backing up](#checking-smooshs-status).
-If it's falling behind (big queues), try increasing compaction priority.
-
-Smoosh's IOQ priority is controlled via the `ioq` -> `compaction` queue.
-
-```
-> rpc:multicall(config, get, ["ioq", "compaction"]).
-{[undefined,undefined,undefined],[]}
-
-```
-
-Priority by convention runs 0 to 1, though the priority can be any positive
-number. The default for compaction is 0.01; pretty low.
-
-If it looks like smoosh has a bunch of work that it's not getting
-through, priority can be increased. However, be careful that this
-doesn't adversely impact the customer experience. If it will, and
-it's urgent, at least drop them a warning.
-
-```
-> rpc:multicall(config, set, ["ioq", "compaction", "0.5"]).
-{[ok,ok,ok],[]}
-```
-
-In general, this should be a temporary measure. For some clusters,
-a change from the default may be required to help smoosh keep up
-with particular workloads.
-
-### Granting specific channels more workers
-
-Giving smoosh a higher concurrency for a given channel can allow a backlog
-in that channel to catch up.
-
-Again, some clusters run best with specific channels having more workers.
-
-From [assessing disk space](#assess-the-space-on-the-disk), you should
-know whether the biggest offenders are db or view files. From this,
-you can infer whether it's worth giving a specific smoosh channel a
-higher concurrency.
-
-The current setting can be seen for a channel like so:
-
-```
-> rpc:multicall(config, get, ["smoosh.ratio_dbs", "concurrency"]).
-{["2","2","2"], []}
-```
-
-`undefined` means the default is used.
-
-If we knew that DB files were the major consumers of disk space, we might
-want to increase the concurrency of a `_dbs` channel. Experience shows `ratio_dbs` is often best,
-but evaluate this based on the current status.
-
-If we want to increase the ratio_dbs setting:
-
-```
-> rpc:multicall(config, set, ["smoosh.ratio_dbs", "concurrency", "2"]).
-{[ok,ok,ok],[]}
-```
-
-### Suspending smoosh
-
-If smoosh itself is causing issues, it's possible to suspend its operation.
-This differs from either `application:stop(smoosh).` or setting all channels'
-concurrency to zero because it both pauses ongoing compactions and keeps
-the channel queues intact.
-
-If, for example, a node's compactions are causing disk space issues, smoosh
-could be suspended while working out which channel is causing the problem. For
-example, a big_dbs channel might be creating huge compaction-in-progress
-files if there's not much in the shard to compact away.
-
-It's therefore useful when testing whether smoosh is causing a
-problem.
-
-```
-# suspend
-smoosh:suspend().
-
-# resume a suspended smoosh
-smoosh:resume().
-```
-
-Suspend is currently pretty literal: `erlang:suspend_process(Pid, [unless_suspending])`
-is called for each compaction process in each channel. `resume_process` is called
-for resume.
-
-### Restarting Smoosh
-
-Restarting Smoosh is a long shot and a brute-force approach, in the hope that
-when Smoosh rescans the DBs it makes the right decisions. If required to take
-this step, contact rnewson or davisp so that they can inspect Smoosh and see the bug.
-
-```
-> exit(whereis(smoosh_server), kill), smoosh:enqueue_all_dbs(), smoosh:enqueue_all_views().
-```
diff --git a/src/smoosh/rebar.config b/src/smoosh/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/smoosh/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/smoosh/recovery_process_diagram.jpeg b/src/smoosh/recovery_process_diagram.jpeg
deleted file mode 100644
index 300db5cd0..000000000
--- a/src/smoosh/recovery_process_diagram.jpeg
+++ /dev/null
Binary files differ
diff --git a/src/smoosh/src/smoosh.app.src b/src/smoosh/src/smoosh.app.src
deleted file mode 100644
index 4549c6610..000000000
--- a/src/smoosh/src/smoosh.app.src
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, smoosh, [
- {description, "Auto-compaction daemon"},
- {vsn, git},
- {registered, [smoosh_server]},
- {applications, [
- kernel,
- stdlib,
- couch_log,
- config,
- couch_event,
- couch,
- mem3
- ]},
- {mod, {smoosh_app, []}},
- {env, []}
-]}.
diff --git a/src/smoosh/src/smoosh.erl b/src/smoosh/src/smoosh.erl
deleted file mode 100644
index 950500ffa..000000000
--- a/src/smoosh/src/smoosh.erl
+++ /dev/null
@@ -1,84 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--export([suspend/0, resume/0, enqueue/1, status/0]).
--export([enqueue_all_dbs/0, enqueue_all_dbs/1, enqueue_all_views/0]).
-
-suspend() ->
- smoosh_server:suspend().
-
-resume() ->
- smoosh_server:resume().
-
-enqueue(Object) ->
- smoosh_server:enqueue(Object).
-
-sync_enqueue(Object) ->
- smoosh_server:sync_enqueue(Object).
-
-sync_enqueue(Object, Timeout) ->
- smoosh_server:sync_enqueue(Object, Timeout).
-
-status() ->
- smoosh_server:status().
-
-enqueue_all_dbs() ->
- fold_local_shards(
- fun(#shard{name = Name}, _Acc) ->
- sync_enqueue(Name)
- end,
- ok
- ).
-
-enqueue_all_dbs(Timeout) ->
- fold_local_shards(
- fun(#shard{name = Name}, _Acc) ->
- sync_enqueue(Name, Timeout)
- end,
- ok
- ).
-
-enqueue_all_views() ->
- fold_local_shards(
- fun(#shard{name = Name}, _Acc) ->
- catch enqueue_views(Name)
- end,
- ok
- ).
-
-fold_local_shards(Fun, Acc0) ->
- mem3:fold_shards(
- fun(Shard, Acc1) ->
- case node() == Shard#shard.node of
- true ->
- Fun(Shard, Acc1);
- false ->
- Acc1
- end
- end,
- Acc0
- ).
-
-enqueue_views(ShardName) ->
- DbName = mem3:dbname(ShardName),
- {ok, DDocs} = fabric:design_docs(DbName),
- [sync_enqueue({ShardName, id(DDoc)}) || DDoc <- DDocs].
-
-id(#doc{id = Id}) ->
- Id;
-id({Props}) ->
- couch_util:get_value(<<"_id">>, Props).
diff --git a/src/smoosh/src/smoosh_app.erl b/src/smoosh/src/smoosh_app.erl
deleted file mode 100644
index eba3579fe..000000000
--- a/src/smoosh/src/smoosh_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
- smoosh_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/smoosh/src/smoosh_channel.erl b/src/smoosh/src/smoosh_channel.erl
deleted file mode 100644
index 952f4fd50..000000000
--- a/src/smoosh/src/smoosh_channel.erl
+++ /dev/null
@@ -1,548 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_channel).
--behaviour(gen_server).
--vsn(1).
--include_lib("couch/include/couch_db.hrl").
-
-% public api.
--export([start_link/1, close/1, suspend/1, resume/1, activate/1, get_status/1]).
--export([enqueue/3, last_updated/2, flush/1, is_key/2, is_activated/1, persist/1]).
-
-% gen_server api.
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2
-]).
-
--define(VSN, 1).
--define(CHECKPOINT_INTERVAL_IN_MSEC, 180000).
-
--ifndef(TEST).
--define(START_DELAY_IN_MSEC, 60000).
--define(ACTIVATE_DELAY_IN_MSEC, 30000).
--else.
--define(START_DELAY_IN_MSEC, 0).
--define(ACTIVATE_DELAY_IN_MSEC, 0).
--endif.
-
-% records.
-
-% When the state is set to activated = true, the channel has completed the state
-% recovery process that occurs on (re)start and is accepting new compaction jobs.
-% Note: if activated = false and a request for a new compaction job is received,
-% smoosh will enqueue this new job after the state recovery process has finished.
-% When the state is set to paused = false, the channel is actively compacting any
-% compaction jobs that are scheduled.
-% See operator_guide.md --> State diagram.
-
--record(state, {
- active = [],
- name,
- waiting,
- paused = true,
- starting = [],
- activated = false,
- requests = []
-}).
-
-% public functions.
-
-start_link(Name) ->
- gen_server:start_link(?MODULE, Name, []).
-
-suspend(ServerRef) ->
- gen_server:call(ServerRef, suspend).
-
-resume(ServerRef) ->
- gen_server:call(ServerRef, resume_and_activate).
-
-activate(ServerRef) ->
- gen_server:call(ServerRef, activate).
-
-enqueue(ServerRef, Object, Priority) ->
- gen_server:cast(ServerRef, {enqueue, Object, Priority}).
-
-last_updated(ServerRef, Object) ->
- gen_server:call(ServerRef, {last_updated, Object}).
-
-get_status(ServerRef) ->
- gen_server:call(ServerRef, status).
-
-close(ServerRef) ->
- gen_server:call(ServerRef, close).
-
-flush(ServerRef) ->
- gen_server:call(ServerRef, flush).
-
-is_key(ServerRef, Key) ->
- gen_server:call(ServerRef, {is_key, Key}).
-
-is_activated(ServerRef) ->
- gen_server:call(ServerRef, is_activated).
-
-persist(ServerRef) ->
- gen_server:call(ServerRef, persist).
-
-% gen_server functions.
-
-init(Name) ->
- erlang:send_after(60 * 1000, self(), check_window),
- process_flag(trap_exit, true),
- Waiting = smoosh_priority_queue:new(Name),
- State = #state{name = Name, waiting = Waiting, paused = true, activated = false},
- erlang:send_after(?START_DELAY_IN_MSEC, self(), start_recovery),
- {ok, State}.
-
-handle_call({last_updated, Object}, _From, State) ->
- LastUpdated = smoosh_priority_queue:last_updated(Object, State#state.waiting),
- {reply, LastUpdated, State};
-handle_call(suspend, _From, State) ->
- #state{active = Active} = State,
- [
- catch erlang:suspend_process(Pid, [unless_suspending])
- || {_, Pid} <- Active
- ],
- {reply, ok, State#state{paused = true}};
-handle_call(resume_and_activate, _From, State) ->
- #state{active = Active} = State,
- [catch erlang:resume_process(Pid) || {_, Pid} <- Active],
- {reply, ok, State#state{paused = false, activated = true}};
-handle_call(activate, _From, State) ->
- {reply, ok, State#state{activated = true}};
-handle_call(status, _From, State) ->
- {reply,
- {ok, [
- {active, length(State#state.active)},
- {starting, length(State#state.starting)},
- {waiting, smoosh_priority_queue:info(State#state.waiting)}
- ]},
- State};
-handle_call(close, _From, State) ->
- {stop, normal, ok, State};
-handle_call(flush, _From, #state{waiting = Q} = State) ->
- {reply, ok, State#state{waiting = smoosh_priority_queue:flush(Q)}};
-handle_call({is_key, Key}, _From, #state{waiting = Waiting} = State) ->
- {reply, smoosh_priority_queue:is_key(Key, Waiting), State};
-handle_call(is_activated, _From, #state{activated = Activated} = State0) ->
- {reply, Activated, State0};
-handle_call(persist, _From, State) ->
- persist_queue(State),
- {reply, ok, State}.
-
-handle_cast({enqueue, _Object, 0}, #state{} = State) ->
- {noreply, State};
-handle_cast({enqueue, Object, Priority}, #state{activated = true} = State) ->
- {noreply, maybe_start_compaction(add_to_queue(Object, Priority, State))};
-handle_cast({enqueue, Object, Priority}, #state{activated = false, requests = Requests} = State0) ->
- Level = smoosh_utils:log_level("compaction_log_level", "debug"),
- couch_log:Level(
- "~p Channel is not activated yet. Adding ~p to requests with priority ~p.", [
- ?MODULE,
- Object,
- Priority
- ]
- ),
- {noreply, State0#state{requests = [{Object, Priority} | Requests]}}.
-
-% We accept noproc here because we may have monitored a compaction pid that had
-% already finished (for example, after a restart).
-handle_info({'DOWN', Ref, _, Job, Reason}, State) when
- Reason == normal;
- Reason == noproc
-->
- #state{active = Active, starting = Starting} = State,
- {noreply,
- maybe_start_compaction(
- State#state{
- active = lists:keydelete(Job, 2, Active),
- starting = lists:keydelete(Ref, 1, Starting)
- }
- )};
-handle_info({'DOWN', Ref, _, Job, Reason}, State) ->
- #state{active = Active0, starting = Starting0} = State,
- case lists:keytake(Job, 2, Active0) of
- {value, {Key, _Pid}, Active1} ->
- State1 = maybe_remonitor_cpid(
- State#state{active = Active1},
- Key,
- Reason
- ),
- {noreply, maybe_start_compaction(State1)};
- false ->
- case lists:keytake(Ref, 1, Starting0) of
- {value, {_, Key}, Starting1} ->
- couch_log:warning("failed to start compaction of ~p: ~p", [
- smoosh_utils:stringify(Key),
- Reason
- ]),
- {ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [Key]),
- {noreply, maybe_start_compaction(State#state{starting = Starting1})};
- false ->
- {noreply, State}
- end
- end;
-handle_info({Ref, {ok, Pid}}, State) when is_reference(Ref) ->
- case lists:keytake(Ref, 1, State#state.starting) of
- {value, {_, Key}, Starting1} ->
- Level = smoosh_utils:log_level("compaction_log_level", "notice"),
- couch_log:Level(
- "~s: Started compaction for ~s",
- [State#state.name, smoosh_utils:stringify(Key)]
- ),
- erlang:monitor(process, Pid),
- erlang:demonitor(Ref, [flush]),
- {noreply, State#state{
- active = [{Key, Pid} | State#state.active],
- starting = Starting1
- }};
- false ->
- {noreply, State}
- end;
-handle_info(check_window, State) ->
- #state{paused = Paused, name = Name} = State,
- StrictWindow = smoosh_utils:get(Name, "strict_window", "false"),
- FinalState =
- case {not Paused, smoosh_utils:in_allowed_window(Name)} of
- {false, false} ->
- % already in desired state
- State;
- {true, true} ->
- % already in desired state
- State;
- {false, true} ->
- % resume is always safe even if we did not previously suspend
- {reply, ok, NewState} = handle_call(resume_and_activate, nil, State),
- NewState;
- {true, false} ->
- if
- StrictWindow =:= "true" ->
- {reply, ok, NewState} = handle_call(suspend, nil, State),
- NewState;
- true ->
- State#state{paused = true}
- end
- end,
- erlang:send_after(60 * 1000, self(), check_window),
- {noreply, FinalState};
-handle_info(start_recovery, #state{name = Name, waiting = Waiting0} = State0) ->
- RecActive = recover(active_file_name(Name)),
- Waiting1 = lists:foldl(
- fun(DbName, Acc) ->
- case couch_db:exists(DbName) andalso couch_db:is_compacting(DbName) of
- true ->
- Priority = smoosh_server:get_priority(Name, DbName),
- smoosh_priority_queue:in(DbName, Priority, Priority, Acc);
- false ->
- Acc
- end
- end,
- Waiting0,
- RecActive
- ),
- State1 = maybe_start_compaction(State0#state{paused = false, waiting = Waiting1}),
- Level = smoosh_utils:log_level("compaction_log_level", "debug"),
- couch_log:Level(
- "~p Previously active compaction jobs (if any) have been successfully recovered and restarted.",
- [?MODULE]
- ),
- erlang:send_after(?ACTIVATE_DELAY_IN_MSEC, self(), activate),
- {noreply, State1#state{paused = true}};
-handle_info(activate, State) ->
- {noreply, activate_channel(State)};
-handle_info(persist, State) ->
- persist_queue(State),
- erlang:send_after(?CHECKPOINT_INTERVAL_IN_MSEC, self(), persist),
- {noreply, State};
-handle_info(pause, State) ->
- {noreply, State#state{paused = true}};
-handle_info(unpause, State) ->
- {noreply, maybe_start_compaction(State#state{paused = false})}.
-
-terminate(_Reason, _State) ->
- ok.
-
-persist_queue(State) ->
- write_state_to_file(State).
-
-recover(FilePath) ->
- case do_recover(FilePath) of
- {ok, List} ->
- List;
- error ->
- []
- end.
-
-do_recover(FilePath) ->
- case file:read_file(FilePath) of
- {ok, Content} ->
- <<Vsn, Binary/binary>> = Content,
- try parse_state(Vsn, ?VSN, Binary) of
- Term ->
- Level = smoosh_utils:log_level("compaction_log_level", "debug"),
- couch_log:Level(
- "~p Successfully restored state file ~s", [?MODULE, FilePath]
- ),
- {ok, Term}
- catch
- error:Reason ->
- couch_log:error(
- "~p Invalid state file (~p). Deleting ~s", [?MODULE, Reason, FilePath]
- ),
- file:delete(FilePath),
- error
- end;
- {error, enoent} ->
- Level = smoosh_utils:log_level("compaction_log_level", "debug"),
- couch_log:Level(
- "~p (~p) State file ~s does not exist. Not restoring.", [?MODULE, enoent, FilePath]
- ),
- error;
- {error, Reason} ->
- couch_log:error(
- "~p Cannot read the state file (~p). Deleting ~s", [?MODULE, Reason, FilePath]
- ),
- file:delete(FilePath),
- error
- end.
-
-parse_state(1, ?VSN, Binary) ->
- erlang:binary_to_term(Binary, [safe]);
-parse_state(Vsn, ?VSN, _) ->
- error({unsupported_version, Vsn}).
-
-write_state_to_file(#state{name = Name, active = Active, starting = Starting, waiting = Waiting}) ->
- Active1 = lists:foldl(
- fun({DbName, _}, Acc) ->
- [DbName | Acc]
- end,
- [],
- Active
- ),
- Starting1 = lists:foldl(
- fun({_, DbName}, Acc) ->
- [DbName | Acc]
- end,
- [],
- Starting
- ),
- smoosh_utils:write_to_file(Active1, active_file_name(Name), ?VSN),
- smoosh_utils:write_to_file(Starting1, starting_file_name(Name), ?VSN),
- smoosh_priority_queue:write_to_file(Waiting).
-
-active_file_name(Name) ->
- filename:join(config:get("smoosh", "state_dir", "."), Name ++ ".active").
-
-starting_file_name(Name) ->
- filename:join(config:get("smoosh", "state_dir", "."), Name ++ ".starting").
-
-% private functions.
-
-add_to_queue(Key, Priority, State) ->
- #state{active = Active, waiting = Q} = State,
- case lists:keymember(Key, 1, Active) of
- true ->
- State;
- false ->
- Capacity = list_to_integer(smoosh_utils:get(State#state.name, "capacity", "9999")),
- Level = smoosh_utils:log_level("compaction_log_level", "notice"),
- couch_log:Level(
- "~s: adding ~p to internal compactor queue with priority ~p",
- [State#state.name, Key, Priority]
- ),
- State#state{
- waiting = smoosh_priority_queue:in(Key, Priority, Priority, Capacity, Q)
- }
- end.
-
-maybe_activate(#state{activated = true} = State) ->
- State;
-maybe_activate(State) ->
- activate_channel(State).
-
-activate_channel(#state{name = Name, waiting = Waiting0, requests = Requests0} = State0) ->
- RecStarting = recover(starting_file_name(Name)),
- Starting = lists:foldl(
- fun(DbName, Acc) ->
- case couch_db:exists(DbName) of
- true ->
- Priority = smoosh_server:get_priority(Name, DbName),
- smoosh_priority_queue:in(DbName, Priority, Priority, Acc);
- false ->
- Acc
- end
- end,
- Waiting0,
- RecStarting
- ),
- Waiting1 = smoosh_priority_queue:recover(Starting),
- Requests1 = lists:reverse(Requests0),
- Waiting2 = lists:foldl(
- fun({DbName, Priority}, Acc) ->
- case couch_db:exists(DbName) of
- true ->
- smoosh_priority_queue:in(DbName, Priority, Priority, Acc);
- false ->
- Acc
- end
- end,
- Waiting1,
- Requests1
- ),
- State1 = maybe_start_compaction(State0#state{
- waiting = Waiting2, paused = false, activated = true, requests = []
- }),
- handle_info(persist, State1),
- schedule_unpause(),
- State1#state{paused = true}.
-
-maybe_start_compaction(#state{paused = true} = State) ->
- State;
-maybe_start_compaction(State) ->
- Concurrency = list_to_integer(
- smoosh_utils:get(
- State#state.name,
- "concurrency",
- "1"
- )
- ),
- if
- length(State#state.active) + length(State#state.starting) < Concurrency ->
- case smoosh_priority_queue:out(State#state.waiting) of
- false ->
- maybe_activate(State);
- {Key, Priority, Q} ->
- try
- State2 =
- case start_compact(State, Key) of
- false ->
- State;
- State1 ->
- Level = smoosh_utils:log_level(
- "compaction_log_level",
- "notice"
- ),
- couch_log:Level(
- "~s: Starting compaction for ~s (priority ~p)",
- [State#state.name, smoosh_utils:stringify(Key), Priority]
- ),
- State1
- end,
- maybe_start_compaction(State2#state{waiting = Q})
- catch
- Class:Exception ->
- couch_log:warning(
- "~s: ~p ~p for ~s",
- [
- State#state.name,
- Class,
- Exception,
- smoosh_utils:stringify(Key)
- ]
- ),
- maybe_start_compaction(State#state{waiting = Q})
- end
- end;
- true ->
- State
- end.
-
-start_compact(State, DbName) when is_list(DbName) ->
- start_compact(State, ?l2b(DbName));
-start_compact(State, DbName) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- start_compact(State, Db)
- after
- couch_db:close(Db)
- end;
-start_compact(State, {Shard, GroupId}) ->
- case smoosh_utils:ignore_db({Shard, GroupId}) of
- false ->
- DbName = mem3:dbname(Shard),
- {ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, Shard, GroupId
- ),
- spawn(fun() -> cleanup_index_files(DbName, Shard) end),
- Ref = erlang:monitor(process, Pid),
- Pid ! {'$gen_call', {self(), Ref}, compact},
- State#state{starting = [{Ref, {Shard, GroupId}} | State#state.starting]};
- _ ->
- false
- end;
-start_compact(State, Db) ->
- case smoosh_utils:ignore_db(Db) of
- false ->
- DbPid = couch_db:get_pid(Db),
- Key = couch_db:name(Db),
- case couch_db:get_compactor_pid(Db) of
- nil ->
- Ref = erlang:monitor(process, DbPid),
- DbPid ! {'$gen_call', {self(), Ref}, start_compact},
- State#state{starting = [{Ref, Key} | State#state.starting]};
- % Compaction is already running, so monitor existing compaction pid.
- CPid ->
- Level = smoosh_utils:log_level("compaction_log_level", "notice"),
- couch_log:Level(
- "Db ~s continuing compaction",
- [smoosh_utils:stringify(Key)]
- ),
- erlang:monitor(process, CPid),
- State#state{active = [{Key, CPid} | State#state.active]}
- end;
- _ ->
- false
- end.
-
-maybe_remonitor_cpid(State, DbName, Reason) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- case couch_db:get_compactor_pid_sync(Db) of
- nil ->
- couch_log:warning(
- "exit for compaction of ~p: ~p",
- [smoosh_utils:stringify(DbName), Reason]
- ),
- {ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [DbName]),
- State;
- CPid ->
- Level = smoosh_utils:log_level("compaction_log_level", "notice"),
- couch_log:Level(
- "~s compaction already running. Re-monitor Pid ~p",
- [smoosh_utils:stringify(DbName), CPid]
- ),
- erlang:monitor(process, CPid),
- State#state{active = [{DbName, CPid} | State#state.active]}
- end;
-% Not a database compaction, so skip the compactor pid check.
-maybe_remonitor_cpid(State, Key, Reason) ->
- couch_log:warning(
- "exit for compaction of ~p: ~p",
- [smoosh_utils:stringify(Key), Reason]
- ),
- {ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [Key]),
- State.
-
-schedule_unpause() ->
- WaitSecs = list_to_integer(config:get("smoosh", "wait_secs", "30")),
- erlang:send_after(WaitSecs * 1000, self(), unpause).
-
-cleanup_index_files(DbName, _Shard) ->
- case config:get("smoosh", "cleanup_index_files", "false") of
- "true" ->
- fabric:cleanup_index_files(DbName);
- _ ->
- ok
- end.
diff --git a/src/smoosh/src/smoosh_priority_queue.erl b/src/smoosh/src/smoosh_priority_queue.erl
deleted file mode 100644
index 30dcf4d20..000000000
--- a/src/smoosh/src/smoosh_priority_queue.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_priority_queue).
-
--export([new/1, recover/1]).
-
--export([last_updated/2, is_key/2, in/4, in/5, out/1, size/1, info/1]).
-
--export([flush/1]).
-
--export([from_list/2, to_list/1]).
-
--export([is_empty/1]).
-
--export([file_name/1, write_to_file/1]).
-
--define(VSN, 1).
-
--record(priority_queue, {
- name,
- map,
- tree
-}).
-
-new(Name) ->
- #priority_queue{name = Name, map = maps:new(), tree = gb_trees:empty()}.
-
-recover(#priority_queue{name = Name, map = Map0} = Q) ->
- case do_recover(file_name(Q)) of
- {ok, Terms} ->
- Map = maps:merge(Map0, Terms),
- Tree = maps:fold(
- fun(Key, {TreeKey, Value}, TreeAcc) ->
- gb_trees:enter(TreeKey, {Key, Value}, TreeAcc)
- end,
- gb_trees:empty(),
- Map
- ),
- #priority_queue{name = Name, map = Map, tree = Tree};
- error ->
- Q
- end.
-
-write_to_file(#priority_queue{map = Map} = Q) ->
- smoosh_utils:write_to_file(Map, file_name(Q), ?VSN).
-
-flush(#priority_queue{name = Name} = Q) ->
- Q#priority_queue{name = Name, map = maps:new(), tree = gb_trees:empty()}.
-
-last_updated(Key, #priority_queue{map = Map}) ->
- case maps:find(Key, Map) of
- {ok, {_Priority, {LastUpdatedMTime, _MInt}}} ->
- LastUpdatedMTime;
- error ->
- false
- end.
-
-is_key(Key, #priority_queue{map = Map}) ->
- maps:is_key(Key, Map).
-
-in(Key, Value, Priority, Q) ->
- in(Key, Value, Priority, infinity, Q).
-
-in(Key, Value, Priority, Capacity, #priority_queue{name = Name, map = Map, tree = Tree}) ->
- Tree1 =
- case maps:find(Key, Map) of
- {ok, TreeKey} ->
- gb_trees:delete_any(TreeKey, Tree);
- error ->
- Tree
- end,
- Now = {erlang:monotonic_time(), erlang:unique_integer([monotonic])},
- TreeKey1 = {Priority, Now},
- Tree2 = gb_trees:enter(TreeKey1, {Key, Value}, Tree1),
- Map1 = maps:put(Key, TreeKey1, Map),
- truncate(Capacity, #priority_queue{name = Name, map = Map1, tree = Tree2}).
-
-out(#priority_queue{name = Name, map = Map, tree = Tree}) ->
- case gb_trees:is_empty(Tree) of
- true ->
- false;
- false ->
- {_, {Key, Value}, Tree1} = gb_trees:take_largest(Tree),
- Map1 = maps:remove(Key, Map),
- Q = #priority_queue{name = Name, map = Map1, tree = Tree1},
- {Key, Value, Q}
- end.
-
-size(#priority_queue{tree = Tree}) ->
- gb_trees:size(Tree).
-
-info(#priority_queue{tree = Tree} = Q) ->
- [
- {size, ?MODULE:size(Q)}
- | case gb_trees:is_empty(Tree) of
- true ->
- [];
- false ->
- {Min, _, _} = gb_trees:take_smallest(Tree),
- {Max, _, _} = gb_trees:take_largest(Tree),
- [{min, Min}, {max, Max}]
- end
- ].
-
-from_list(Orddict, #priority_queue{name = Name}) ->
- Map = maps:from_list(Orddict),
- Tree = gb_trees:from_orddict(Orddict),
- #priority_queue{name = Name, map = Map, tree = Tree}.
-
-to_list(#priority_queue{tree = Tree}) ->
- gb_trees:to_list(Tree).
-
-is_empty(#priority_queue{tree = Tree}) ->
- gb_trees:is_empty(Tree).
-
-file_name(#priority_queue{name = Name}) ->
- filename:join(config:get("smoosh", "state_dir", "."), Name ++ ".waiting").
-
-truncate(infinity, Q) ->
- Q;
-truncate(Capacity, Q) when Capacity > 0 ->
- truncate(Capacity, ?MODULE:size(Q), Q).
-
-truncate(Capacity, Size, Q) when Size =< Capacity ->
- Q;
-truncate(Capacity, Size, #priority_queue{name = Name, map = Map, tree = Tree}) when Size > 0 ->
- {_, {Key, _}, Tree1} = gb_trees:take_smallest(Tree),
- Q1 = #priority_queue{name = Name, map = maps:remove(Key, Map), tree = Tree1},
- truncate(Capacity, ?MODULE:size(Q1), Q1).
-
-do_recover(FilePath) ->
- case file:read_file(FilePath) of
- {ok, Content} ->
- <<Vsn, Binary/binary>> = Content,
- try parse_queue(Vsn, ?VSN, Binary) of
- Bin ->
- Level = smoosh_utils:log_level("compaction_log_level", "debug"),
- couch_log:Level(
- "~p Successfully restored state file ~s", [?MODULE, FilePath]
- ),
- {ok, Bin}
- catch
- error:Reason ->
- couch_log:error(
- "~p Invalid queue file (~p). Deleting ~s", [?MODULE, Reason, FilePath]
- ),
- file:delete(FilePath),
- error
- end;
- {error, enoent} ->
- Level = smoosh_utils:log_level("compaction_log_level", "debug"),
- couch_log:Level(
- "~p (~p) Queue file ~s does not exist. Not restoring.", [?MODULE, enoent, FilePath]
- ),
- error;
- {error, Reason} ->
- couch_log:error(
- "~p Cannot read the queue file (~p). Deleting ~s", [?MODULE, Reason, FilePath]
- ),
- file:delete(FilePath),
- error
- end.
-
-parse_queue(1, ?VSN, Binary) ->
- erlang:binary_to_term(Binary, [safe]);
-parse_queue(Vsn, ?VSN, _) ->
- error({unsupported_version, Vsn}).
diff --git a/src/smoosh/src/smoosh_server.erl b/src/smoosh/src/smoosh_server.erl
deleted file mode 100644
index 50d80ce37..000000000
--- a/src/smoosh/src/smoosh_server.erl
+++ /dev/null
@@ -1,640 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_server).
--behaviour(gen_server).
--vsn(4).
--behaviour(config_listener).
--include_lib("couch/include/couch_db.hrl").
-
-% public api.
--export([
- start_link/0,
- suspend/0,
- resume/0,
- enqueue/1,
- sync_enqueue/1,
- sync_enqueue/2,
- handle_db_event/3,
- status/0
-]).
-
--define(SECONDS_PER_MINUTE, 60).
-
-% gen_server api.
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- terminate/2
-]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
-% exported but for internal use.
--export([enqueue_request/2]).
--export([get_priority/2]).
-
-% exported for testing and debugging
--export([get_channel/1]).
-
--ifdef(TEST).
--define(RELISTEN_DELAY, 50).
--else.
--define(RELISTEN_DELAY, 5000).
--endif.
-
-% private records.
-
--record(state, {
- db_channels = [],
- view_channels = [],
- tab,
- event_listener,
- waiting = maps:new()
-}).
-
--record(channel, {
- name,
- pid
-}).
-
-% public functions.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-suspend() ->
- gen_server:call(?MODULE, suspend).
-
-resume() ->
- gen_server:call(?MODULE, resume).
-
-status() ->
- gen_server:call(?MODULE, status).
-
-enqueue(Object) ->
- gen_server:cast(?MODULE, {enqueue, Object}).
-
-sync_enqueue(Object) ->
- gen_server:call(?MODULE, {enqueue, Object}).
-
-sync_enqueue(Object, Timeout) ->
- gen_server:call(?MODULE, {enqueue, Object}, Timeout).
-
-handle_db_event(DbName, local_updated, St) ->
- smoosh_server:enqueue(DbName),
- {ok, St};
-handle_db_event(DbName, updated, St) ->
- smoosh_server:enqueue(DbName),
- {ok, St};
-handle_db_event(DbName, {index_commit, IdxName}, St) ->
- smoosh_server:enqueue({DbName, IdxName}),
- {ok, St};
-handle_db_event(DbName, {index_collator_upgrade, IdxName}, St) ->
- smoosh_server:enqueue({DbName, IdxName}),
- {ok, St};
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
-
-% for testing and debugging only
-get_channel(ChannelName) ->
- gen_server:call(?MODULE, {get_channel, ChannelName}).
-
-% gen_server functions.
-
-init([]) ->
- process_flag(trap_exit, true),
- ok = config:listen_for_changes(?MODULE, nil),
- {ok, Pid} = start_event_listener(),
- DbChannels = smoosh_utils:split(
- config:get("smoosh", "db_channels", "upgrade_dbs,ratio_dbs,slack_dbs")
- ),
- ViewChannels = smoosh_utils:split(
- config:get("smoosh", "view_channels", "upgrade_views,ratio_views,slack_views")
- ),
- Tab = ets:new(channels, [{keypos, #channel.name}]),
- {ok,
- create_missing_channels(#state{
- db_channels = DbChannels,
- view_channels = ViewChannels,
- event_listener = Pid,
- tab = Tab
- })}.
-
-handle_config_change("smoosh", "db_channels", L, _, _) ->
- {ok, gen_server:cast(?MODULE, {new_db_channels, smoosh_utils:split(L)})};
-handle_config_change("smoosh", "view_channels", L, _, _) ->
- {ok, gen_server:cast(?MODULE, {new_view_channels, smoosh_utils:split(L)})};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, stop, _State) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(
- ?RELISTEN_DELAY,
- whereis(?MODULE),
- restart_config_listener
- ).
-
-handle_call(status, _From, State) ->
- Acc = ets:foldl(fun get_channel_status/2, [], State#state.tab),
- {reply, {ok, Acc}, State};
-handle_call({enqueue, Object}, _From, State) ->
- {noreply, NewState} = handle_cast({enqueue, Object}, State),
- {reply, ok, NewState};
-handle_call(suspend, _From, State) ->
- ets:foldl(
- fun(#channel{name = Name, pid = P}, _) ->
- Level = smoosh_utils:log_level("compaction_log_level", "debug"),
- couch_log:Level("Suspending ~p", [Name]),
- smoosh_channel:suspend(P)
- end,
- 0,
- State#state.tab
- ),
- {reply, ok, State};
-handle_call(resume, _From, State) ->
- ets:foldl(
- fun(#channel{name = Name, pid = P}, _) ->
- Level = smoosh_utils:log_level("compaction_log_level", "debug"),
- couch_log:Level("Resuming ~p", [Name]),
- smoosh_channel:resume(P)
- end,
- 0,
- State#state.tab
- ),
- {reply, ok, State};
-handle_call({get_channel, ChannelName}, _From, #state{tab = Tab} = State) ->
- {reply, {ok, channel_pid(Tab, ChannelName)}, State}.
-
-handle_cast({new_db_channels, Channels}, State) ->
- [
- smoosh_channel:close(channel_pid(State#state.tab, C))
- || C <- State#state.db_channels -- Channels
- ],
- {noreply, create_missing_channels(State#state{db_channels = Channels})};
-handle_cast({new_view_channels, Channels}, State) ->
- [
- smoosh_channel:close(channel_pid(State#state.tab, C))
- || C <- State#state.view_channels -- Channels
- ],
- {noreply, create_missing_channels(State#state{view_channels = Channels})};
-handle_cast({enqueue, Object}, State) ->
- #state{waiting = Waiting} = State,
- case maps:is_key(Object, Waiting) of
- true ->
- {noreply, State};
- false ->
- {_Pid, Ref} = spawn_monitor(?MODULE, enqueue_request, [State, Object]),
- {noreply, State#state{waiting = maps:put(Object, Ref, Waiting)}}
- end.
-
-handle_info({'EXIT', Pid, Reason}, #state{event_listener = Pid} = State) ->
- Level = smoosh_utils:log_level("compaction_log_level", "notice"),
- couch_log:Level("update notifier died ~p", [Reason]),
- {ok, Pid1} = start_event_listener(),
- {noreply, State#state{event_listener = Pid1}};
-handle_info({'EXIT', Pid, Reason}, State) ->
- Level = smoosh_utils:log_level("compaction_log_level", "notice"),
- couch_log:Level("~p ~p died ~p", [?MODULE, Pid, Reason]),
- case ets:match_object(State#state.tab, #channel{pid = Pid, _ = '_'}) of
- [#channel{name = Name}] ->
- ets:delete(State#state.tab, Name);
- _ ->
- ok
- end,
- {noreply, create_missing_channels(State)};
-handle_info({'DOWN', Ref, _, _, _}, State) ->
- Waiting = maps:filter(
- fun(_Key, Value) -> Value =/= Ref end,
- State#state.waiting
- ),
- {noreply, State#state{waiting = Waiting}};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-terminate(_Reason, State) ->
- ets:foldl(
- fun(#channel{pid = P}, _) -> smoosh_channel:close(P) end,
- 0,
- State#state.tab
- ),
- ok.
-
-code_change(_OldVsn, {state, DbChannels, ViewChannels, Tab, EventListener, Waiting}, _Extra) ->
- {ok, #state{
- db_channels = DbChannels,
- view_channels = ViewChannels,
- tab = Tab,
- event_listener = EventListener,
- waiting = Waiting
- }};
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-% private functions.
-
-get_channel_status(#channel{name = Name, pid = P}, Acc0) when is_pid(P) ->
- try gen_server:call(P, status) of
- {ok, Status} ->
- [{Name, Status} | Acc0];
- _ ->
- Acc0
- catch
- _:_ ->
- Acc0
- end;
-get_channel_status(_, Acc0) ->
- Acc0.
-
-start_event_listener() ->
- couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]).
-
-enqueue_request(State, Object) ->
- try
- case find_channel(State, Object) of
- false ->
- ok;
- {ok, Pid, Priority} ->
- smoosh_channel:enqueue(Pid, Object, Priority)
- end
- catch
- ?STACKTRACE(Class, Exception, Stack)
- couch_log:warning("~s: ~p ~p for ~s : ~p",
- [?MODULE, Class, Exception,
- smoosh_utils:stringify(Object), Stack])
- end.
-
-find_channel(#state{} = State, {Shard, GroupId}) ->
- find_channel(State#state.tab, State#state.view_channels, {Shard, GroupId});
-find_channel(#state{} = State, DbName) ->
- find_channel(State#state.tab, State#state.db_channels, DbName).
-
-find_channel(_Tab, [], _Object) ->
- false;
-find_channel(Tab, [Channel | Rest], Object) ->
- Pid = channel_pid(Tab, Channel),
- LastUpdated = smoosh_channel:last_updated(Pid, Object),
- StalenessInSec =
- config:get_integer("smoosh", "staleness", 5) *
- ?SECONDS_PER_MINUTE,
- Staleness = erlang:convert_time_unit(StalenessInSec, seconds, native),
- Now = erlang:monotonic_time(),
- Activated = smoosh_channel:is_activated(Pid),
- StaleEnough = LastUpdated =:= false orelse Now - LastUpdated > Staleness,
- case Activated andalso StaleEnough of
- true ->
- case smoosh_utils:ignore_db(Object) of
- true ->
- find_channel(Tab, Rest, Object);
- _ ->
- case get_priority(Channel, Object) of
- 0 ->
- find_channel(Tab, Rest, Object);
- Priority ->
- {ok, Pid, Priority}
- end
- end;
- false ->
- find_channel(Tab, Rest, Object)
- end.
-
-channel_pid(Tab, Channel) ->
- [#channel{pid = Pid}] = ets:lookup(Tab, Channel),
- Pid.
-
-create_missing_channels(State) ->
- create_missing_channels(State#state.tab, State#state.db_channels),
- create_missing_channels(State#state.tab, State#state.view_channels),
- State.
-
-create_missing_channels(_Tab, []) ->
- ok;
-create_missing_channels(Tab, [Channel | Rest]) ->
- case ets:lookup(Tab, Channel) of
- [] ->
- {ok, Pid} = smoosh_channel:start_link(Channel),
- true = ets:insert(Tab, [#channel{name = Channel, pid = Pid}]);
- _ ->
- ok
- end,
- create_missing_channels(Tab, Rest).
-
-get_priority(Channel, {Shard, GroupId}) ->
- case couch_index_server:get_index(couch_mrview_index, Shard, GroupId) of
- {ok, Pid} ->
- try
- {ok, ViewInfo} = couch_index:get_info(Pid),
- {SizeInfo} = couch_util:get_value(sizes, ViewInfo),
- DiskSize = couch_util:get_value(file, SizeInfo),
- ActiveSize = couch_util:get_value(active, SizeInfo),
- NeedsUpgrade = needs_upgrade(ViewInfo),
- get_priority(Channel, DiskSize, ActiveSize, NeedsUpgrade)
- catch
- exit:{timeout, _} ->
- 0
- end;
- {not_found, _Reason} ->
- 0;
- {error, Reason} ->
- couch_log:warning(
- "Failed to get group_pid for ~p ~p ~p: ~p",
- [Channel, Shard, GroupId, Reason]
- ),
- 0
- end;
-get_priority(Channel, DbName) when is_list(DbName) ->
- get_priority(Channel, ?l2b(DbName));
-get_priority(Channel, DbName) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- get_priority(Channel, Db)
- after
- couch_db:close(Db)
- end;
-get_priority(Channel, Db) ->
- {ok, DocInfo} = couch_db:get_db_info(Db),
- {SizeInfo} = couch_util:get_value(sizes, DocInfo),
- DiskSize = couch_util:get_value(file, SizeInfo),
- ActiveSize = couch_util:get_value(active, SizeInfo),
- NeedsUpgrade = needs_upgrade(DocInfo),
- case db_changed(Channel, DocInfo) of
- true -> get_priority(Channel, DiskSize, ActiveSize, NeedsUpgrade);
- false -> 0
- end.
-
-get_priority(Channel, DiskSize, DataSize, NeedsUpgrade) ->
- Priority = get_priority(Channel),
- MinSize = to_number(Channel, "min_size", "1048576"),
- MaxSize = to_number(Channel, "max_size", "infinity"),
- DefaultMinPriority =
- case Priority of
- "slack" -> "536870912";
- _ -> "2.0"
- end,
- MinPriority = to_number(Channel, "min_priority", DefaultMinPriority),
- MaxPriority = to_number(Channel, "max_priority", "infinity"),
- if
- Priority =:= "upgrade", NeedsUpgrade ->
- 1;
- DiskSize =< MinSize ->
- 0;
- DiskSize > MaxSize ->
- 0;
- DataSize =:= 0 ->
- MinPriority;
- Priority =:= "ratio", DiskSize / DataSize =< MinPriority ->
- 0;
- Priority =:= "ratio", DiskSize / DataSize > MaxPriority ->
- 0;
- Priority =:= "ratio" ->
- DiskSize / DataSize;
- Priority =:= "slack", DiskSize - DataSize =< MinPriority ->
- 0;
- Priority =:= "slack", DiskSize - DataSize > MaxPriority ->
- 0;
- Priority =:= "slack" ->
- DiskSize - DataSize;
- true ->
- 0
- end.
-
-db_changed(Channel, Info) ->
- case couch_util:get_value(compacted_seq, Info) of
- undefined ->
- true;
- CompactedSeq ->
- MinChanges = list_to_integer(
- smoosh_utils:get(Channel, "min_changes", "0")
- ),
- UpdateSeq = couch_util:get_value(update_seq, Info),
- UpdateSeq - CompactedSeq >= MinChanges
- end.
-
-to_number(Channel, Name, Default) ->
- case smoosh_utils:get(Channel, Name, Default) of
- "infinity" ->
- infinity;
- Value ->
- try
- list_to_float(Value)
- catch
- error:badarg ->
- list_to_integer(Value)
- end
- end.
-
-get_priority("ratio_dbs") ->
- "ratio";
-get_priority("ratio_views") ->
- "ratio";
-get_priority("slack_dbs") ->
- "slack";
-get_priority("slack_views") ->
- "slack";
-get_priority("upgrade_dbs") ->
- "upgrade";
-get_priority("upgrade_views") ->
- "upgrade";
-get_priority(Channel) ->
- smoosh_utils:get(Channel, "priority", "ratio").
-
-needs_upgrade(Props) ->
- db_needs_upgrade(Props) orelse view_needs_upgrade(Props).
-
-db_needs_upgrade(Props) ->
- DiskVersion = couch_util:get_value(disk_format_version, Props),
- case couch_util:get_value(engine, Props) of
- couch_bt_engine ->
- (couch_bt_engine_header:latest(DiskVersion) =:= false);
- _ ->
- false
- end.
-
-view_needs_upgrade(Props) ->
- case couch_util:get_value(collator_versions, Props) of
- undefined ->
- false;
- Versions when is_list(Versions) ->
- Enabled = couch_mrview_util:compact_on_collator_upgrade(),
- Enabled andalso length(Versions) >= 2
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- Ctx = test_util:start_couch([couch_log]),
- meck:new([config, couch_index, couch_index_server], [passthrough]),
- Pid = list_to_pid("<0.0.0>"),
- meck:expect(couch_index_server, get_index, 3, {ok, Pid}),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-setup() ->
- Shard = <<"shards/00000000-1fffffff/test.1529510412">>,
- GroupId = <<"_design/ddoc">>,
- {ok, Shard, GroupId}.
-
-teardown(_) ->
- ok.
-
-config_change_test_() ->
- {
- "Test config updates",
- {
- foreach,
- fun() -> test_util:start_couch([smoosh]) end,
- fun test_util:stop_couch/1,
- [
- fun t_restart_config_listener/1
- ]
- }
- }.
-
-get_priority_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun t_ratio_view/1,
- fun t_slack_view/1,
- fun t_no_data_view/1,
- fun t_below_min_priority_view/1,
- fun t_below_min_size_view/1,
- fun t_timeout_view/1,
- fun t_missing_view/1,
- fun t_invalid_view/1
- ]
- }
- }.
-
-t_restart_config_listener(_) ->
- ?_test(begin
- ConfigMonitor = config_listener_mon(),
- ?assert(is_process_alive(ConfigMonitor)),
- test_util:stop_sync(ConfigMonitor),
- ?assertNot(is_process_alive(ConfigMonitor)),
- NewConfigMonitor = test_util:wait(fun() ->
- case config_listener_mon() of
- undefined -> wait;
- Pid -> Pid
- end
- end),
- ?assert(is_process_alive(NewConfigMonitor))
- end).
-
-t_ratio_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 5242880}, {active, 524288}]}}]}
- end),
- ?assertEqual(10.0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_slack_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 1073741824}, {active, 536870911}]}}]}
- end),
- ?assertEqual(2.0000000037252903, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(536870913, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_no_data_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 5242880}, {active, 0}]}}]}
- end),
- ?assertEqual(2.0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(536870912, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(2.0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_below_min_priority_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 5242880}, {active, 1048576}]}}]}
- end),
- ?assertEqual(5.0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_below_min_size_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 1048576}, {active, 512000}]}}]}
- end),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_timeout_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- exit({timeout, get_info})
- end),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_missing_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index_server, get_index, 3, {not_found, missing}),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_invalid_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index_server, get_index, 3, {error, undef}),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-config_listener_mon() ->
- IsConfigMonitor = fun(P) ->
- [M | _] = string:tokens(couch_debug:process_name(P), ":"),
- M =:= "config_listener_mon"
- end,
- [{_, MonitoredBy}] = process_info(whereis(?MODULE), [monitored_by]),
- case lists:filter(IsConfigMonitor, MonitoredBy) of
- [Pid] -> Pid;
- [] -> undefined
- end.
-
--endif.
diff --git a/src/smoosh/src/smoosh_sup.erl b/src/smoosh/src/smoosh_sup.erl
deleted file mode 100644
index abd55a2eb..000000000
--- a/src/smoosh/src/smoosh_sup.erl
+++ /dev/null
@@ -1,38 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_sup).
-
--behaviour(supervisor).
-
-%% API
--export([start_link/0]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-%% ===================================================================
-%% API functions
-%% ===================================================================
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% ===================================================================
-%% Supervisor callbacks
-%% ===================================================================
-
-init([]) ->
- {ok, {{one_for_one, 5, 10}, [?CHILD(smoosh_server, worker)]}}.
diff --git a/src/smoosh/src/smoosh_utils.erl b/src/smoosh/src/smoosh_utils.erl
deleted file mode 100644
index 354b3df57..000000000
--- a/src/smoosh/src/smoosh_utils.erl
+++ /dev/null
@@ -1,108 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_utils).
--include_lib("couch/include/couch_db.hrl").
-
--export([get/2, get/3, split/1, stringify/1, ignore_db/1]).
--export([in_allowed_window/1, write_to_file/3]).
--export([log_level/2]).
-
-get(Channel, Key) ->
- ?MODULE:get(Channel, Key, undefined).
-
-get(Channel, Key, Default) ->
- config:get("smoosh." ++ Channel, Key, Default).
-
-split(CSV) ->
- re:split(CSV, "\\s*,\\s*", [{return, list}, trim]).
-
-stringify({DbName, GroupId}) ->
- io_lib:format("~s ~s", [DbName, GroupId]);
-stringify(DbName) ->
- io_lib:format("~s", [DbName]).
-
-ignore_db({DbName, _GroupName}) ->
- ignore_db(DbName);
-ignore_db(DbName) when is_binary(DbName) ->
- ignore_db(?b2l(DbName));
-ignore_db(DbName) when is_list(DbName) ->
- case config:get("smoosh.ignore", DbName, false) of
- "true" ->
- true;
- _ ->
- false
- end;
-ignore_db(Db) ->
- ignore_db(couch_db:name(Db)).
-
-in_allowed_window(Channel) ->
- From = parse_time(get(Channel, "from"), {00, 00}),
- To = parse_time(get(Channel, "to"), {24, 00}),
- in_allowed_window(From, To).
-
-in_allowed_window(From, To) ->
- {_, {HH, MM, _}} = calendar:universal_time(),
- case From < To of
- true ->
- ({HH, MM} >= From) andalso ({HH, MM} < To);
- false ->
- ({HH, MM} >= From) orelse ({HH, MM} < To)
- end.
-
-file_delete(Path) ->
- case file:delete(Path) of
- Ret when Ret =:= ok; Ret =:= {error, enoent} ->
- ok;
- Error ->
- Error
- end.
-
-throw_on_error(_Args, ok) ->
- ok;
-throw_on_error(Args, {error, Reason}) ->
- throw({error, {Reason, Args}}).
-
-write_to_file(Content, FileName, VSN) ->
- Level = log_level("compaction_log_level", "debug"),
- couch_log:Level("~p Writing state ~s", [?MODULE, FileName]),
- OnDisk = <<VSN, (erlang:term_to_binary(Content, [compressed, {minor_version, 1}]))/binary>>,
- TmpFileName = FileName ++ ".tmp",
- try
- throw_on_error(TmpFileName, file_delete(TmpFileName)),
- throw_on_error(TmpFileName, file:write_file(TmpFileName, OnDisk, [sync])),
- throw_on_error(FileName, file_delete(FileName)),
- throw_on_error([TmpFileName, FileName], file:rename(TmpFileName, FileName))
- catch
- throw:Error ->
- Error
- end.
-
-parse_time(undefined, Default) ->
- Default;
-parse_time(String, Default) ->
- case string:tokens(String, ":") of
- [HH, MM] ->
- try
- {list_to_integer(HH), list_to_integer(MM)}
- catch
- error:badarg ->
- couch_log:error("Malformed compaction schedule configuration: ~s", [String]),
- Default
- end;
- _Else ->
- couch_log:error("Malformed compaction schedule configuration: ~s", [String]),
- Default
- end.
-
-log_level(Key, Default) when is_list(Key), is_list(Default) ->
- list_to_existing_atom(config:get("smoosh", Key, Default)).
diff --git a/src/smoosh/test/exunit/scheduling_window_test.exs b/src/smoosh/test/exunit/scheduling_window_test.exs
deleted file mode 100644
index 9da4a3150..000000000
--- a/src/smoosh/test/exunit/scheduling_window_test.exs
+++ /dev/null
@@ -1,79 +0,0 @@
-defmodule SmooshSchedulingWindowTest do
- use Couch.Test.ExUnit.Case
-
- setup_all(context) do
- test_ctx = :test_util.start_couch([])
-
- on_exit(fn ->
- :config.delete('smoosh.test_channel', 'from')
- :config.delete('smoosh.test_channel', 'to')
- :test_util.stop_couch(test_ctx)
- end)
-
- context
- end
-
- test "in_allowed_window returns true by default", _context do
- assert :smoosh_utils.in_allowed_window('nonexistent_channel') == true
- end
-
- test "in_allowed_window ignores bad input", _context do
- :config.set('smoosh.test_channel', 'from', 'midnight', false)
- :config.set('smoosh.test_channel', 'to', 'infinity', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == true
- end
-
- test "in_allowed_window returns false when now < from < to", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, 18_000)
- to = DateTime.add(now, 36_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == false
- end
-
- test "in_allowed_window returns true when from < now < to", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, -18_000)
- to = DateTime.add(now, 18_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == true
- end
-
- test "in_allowed_window returns false when from < to < now", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, -36_000)
- to = DateTime.add(now, -18_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == false
- end
-
- test "in_allowed_window returns true when to < from < now", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, -18_000)
- to = DateTime.add(now, -36_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == true
- end
-
- test "in_allowed_window returns false when to < now < from", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, 18_000)
- to = DateTime.add(now, -18_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == false
- end
-
- test "in_allowed_window returns true when now < to < from", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, 36_000)
- to = DateTime.add(now, 18_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == true
- end
-end
diff --git a/src/smoosh/test/exunit/test_helper.exs b/src/smoosh/test/exunit/test_helper.exs
deleted file mode 100644
index 314050085..000000000
--- a/src/smoosh/test/exunit/test_helper.exs
+++ /dev/null
@@ -1,2 +0,0 @@
-ExUnit.configure(formatters: [JUnitFormatter, ExUnit.CLIFormatter])
-ExUnit.start()
diff --git a/src/smoosh/test/smoosh_priority_queue_tests.erl b/src/smoosh/test/smoosh_priority_queue_tests.erl
deleted file mode 100644
index 289804ca5..000000000
--- a/src/smoosh/test/smoosh_priority_queue_tests.erl
+++ /dev/null
@@ -1,167 +0,0 @@
--module(smoosh_priority_queue_tests).
-
--include_lib("proper/include/proper.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
--define(PROP_PREFIX, "prop_").
-
--define(CAPACITY, 3).
-
--define(RANDOM_CHANNEL, lists:flatten(io_lib:format("~p", [erlang:timestamp()]))).
-
-setup() ->
- Ctx = test_util:start_couch(),
- Ctx.
-
-teardown(Ctx) ->
- test_util:stop_couch(Ctx).
-
-smoosh_priority_queue_test_() ->
- {
- "smoosh priority queue test",
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- fun prop_inverse_test_/0,
- fun no_halt_on_corrupted_file_test/0,
- fun no_halt_on_missing_file_test/0
- ]
- }
- }.
-
-%% ==========
-%% Tests
-%% ----------
-
-%% define all tests to be able to run them individually
-prop_inverse_test_() ->
- ?_test(begin
- test_property(prop_inverse)
- end).
-
-no_halt_on_corrupted_file_test() ->
- ?_test(begin
- Name = ?RANDOM_CHANNEL,
- Q = smoosh_priority_queue:new(Name),
- FilePath = smoosh_priority_queue:file_name(Q),
- ok = file:write_file(FilePath, <<"garbage">>),
- ?assertEqual(Q, smoosh_priority_queue:recover(Q)),
- ok
- end).
-
-no_halt_on_missing_file_test() ->
- ?_test(begin
- Name = ?RANDOM_CHANNEL,
- Q = smoosh_priority_queue:new(Name),
- FilePath = smoosh_priority_queue:file_name(Q),
- ok = file:delete(FilePath),
- ?assertEqual(Q, smoosh_priority_queue:recover(Q)),
- ok
- end).
-
-%% ==========
-%% Properties
-%% ----------
-
-prop_inverse() ->
- ?FORALL(
- Q,
- queue(),
- begin
- List = smoosh_priority_queue:to_list(Q),
- equal(Q, smoosh_priority_queue:from_list(List, Q))
- end
- ).
-
-%% ==========
-%% Generators
-%% ----------
-
-key() ->
- proper_types:oneof([proper_types:binary(), {proper_types:binary(), proper_types:binary()}]).
-value() ->
- proper_types:oneof([proper_types:binary(), {proper_types:binary(), proper_types:binary()}]).
-priority() -> integer().
-item() -> {key(), value(), priority()}.
-
-items_list() ->
- ?LET(L, list(item()), L).
-
-simple_queue() ->
- ?LET(
- L,
- items_list(),
- from_list(L)
- ).
-
-with_deleted() ->
- ?LET(
- Q,
- ?LET(
- {{K0, V0, P0}, Q0},
- {item(), simple_queue()},
- smoosh_priority_queue:in(K0, V0, P0, ?CAPACITY, Q0)
- ),
- frequency([
- {1, Q},
- {2, element(3, smoosh_priority_queue:out(Q))}
- ])
- ).
-
-queue() ->
- with_deleted().
-
-%% ==========================
-%% Proper related boilerplate
-%% --------------------------
-
-test_property(Property) when is_atom(Property) ->
- test_property({atom_to_list(Property), Property});
-test_property({Id, Property}) ->
- Name = string:sub_string(Id, length(?PROP_PREFIX) + 1),
- Opts = [long_result, {numtests, 1000}, {to_file, user}],
- {Name, {timeout, 60, fun() -> test_it(Property, Opts) end}}.
-
-test_it(Property, Opts) ->
- case proper:quickcheck(?MODULE:Property(), Opts) of
- true ->
- true;
- Else ->
- erlang:error(
- {propertyFailed, [
- {module, ?MODULE},
- {property, Property},
- {result, Else}
- ]}
- )
- end.
-
-%% ================
-%% Helper functions
-%% ----------------
-
-new() ->
- Q = smoosh_priority_queue:new("foo"),
- smoosh_priority_queue:recover(Q).
-
-from_list(List) ->
- lists:foldl(
- fun({Key, Value, Priority}, Queue) ->
- smoosh_priority_queue:in(Key, Value, Priority, ?CAPACITY, Queue)
- end,
- new(),
- List
- ).
-
-equal(Q1, Q2) ->
- out_all(Q1) =:= out_all(Q2).
-
-out_all(Q) ->
- out_all(Q, []).
-out_all(Q0, Acc) ->
- case smoosh_priority_queue:out(Q0) of
- {K, V, Q1} -> out_all(Q1, [{K, V} | Acc]);
- false -> lists:reverse(Acc)
- end.
diff --git a/src/smoosh/test/smoosh_tests.erl b/src/smoosh/test/smoosh_tests.erl
deleted file mode 100644
index adabc8c49..000000000
--- a/src/smoosh/test/smoosh_tests.erl
+++ /dev/null
@@ -1,129 +0,0 @@
--module(smoosh_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(KILOBYTE, binary:copy(<<"x">>, 1024)).
-
-%% ==========
-%% Setup
-%% ----------
-
-setup(ChannelType) ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
- {ok, ChannelPid} = smoosh_server:get_channel(ChannelType),
- smoosh_channel:flush(ChannelPid),
- ok = config:set(config_section(ChannelType), "min_size", "200000", false),
- DbName.
-
-teardown(ChannelType, DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok = config:delete(config_section(DbName), "min_size", false),
- {ok, ChannelPid} = smoosh_server:get_channel(ChannelType),
- smoosh_channel:flush(ChannelPid),
- ok.
-
-config_section(ChannelType) ->
- "smoosh." ++ ChannelType.
-
-%% ==========
-%% Tests
-%% ----------
-
-smoosh_test_() ->
- {
- "Testing smoosh",
- {
- setup,
- fun() -> test_util:start_couch([smoosh]) end,
- fun test_util:stop/1,
- [
- channels_tests(),
- persistence_tests()
- ]
- }
- }.
-
-persistence_tests() ->
- Tests = [
- fun should_persist_queue/2
- ],
- {
- "Should persist queue state",
- [
- make_test_case("ratio_dbs", Tests)
- ]
- }.
-
-channels_tests() ->
- Tests = [
- fun should_enqueue/2
- ],
- {
- "Various channels tests",
- [
- make_test_case("ratio_dbs", Tests)
- ]
- }.
-
-make_test_case(Type, Funs) ->
- {foreachx, fun setup/1, fun teardown/2, [{Type, Fun} || Fun <- Funs]}.
-
-should_enqueue(ChannelType, DbName) ->
- ?_test(begin
- ok = grow_db_file(DbName, 300),
- ok = wait_enqueue(ChannelType, DbName),
- ?assert(is_enqueued(ChannelType, DbName)),
- ok
- end).
-
-should_persist_queue(ChannelType, DbName) ->
- ?_test(begin
- {ok, ChannelPid} = smoosh_server:get_channel(ChannelType),
- ok = grow_db_file(DbName, 300),
- ok = wait_enqueue(ChannelType, DbName),
- ok = smoosh_channel:persist(ChannelPid),
- Q0 = channel_queue(ChannelType),
- ok = application:stop(smoosh),
- ok = application:start(smoosh),
- Q1 = channel_queue(ChannelType),
- ?assertEqual(Q0, Q1),
- ok
- end).
-
-grow_db_file(DbName, SizeInKb) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- FilePath = couch_db:get_filepath(Db),
- {ok, Fd} = file:open(FilePath, [append]),
- Bytes = binary:copy(?KILOBYTE, SizeInKb),
- file:write(Fd, Bytes),
- ok = file:close(Fd),
- Doc = couch_doc:from_json_obj(
- {[
- {<<"_id">>, ?l2b(?docid())},
- {<<"value">>, ?l2b(?docid())}
- ]}
- ),
- {ok, _} = couch_db:update_docs(Db, [Doc], []),
- couch_db:close(Db),
- ok.
-
-is_enqueued(ChannelType, DbName) ->
- {ok, ChannelPid} = smoosh_server:get_channel(ChannelType),
- smoosh_channel:is_key(ChannelPid, DbName).
-
-wait_enqueue(ChannelType, DbName) ->
- test_util:wait(fun() ->
- case is_enqueued(ChannelType, DbName) of
- false ->
- wait;
- true ->
- ok
- end
- end).
-
-channel_queue(ChannelType) ->
- Q0 = smoosh_priority_queue:new(ChannelType),
- smoosh_priority_queue:recover(Q0).
diff --git a/src/weatherreport/.gitignore b/src/weatherreport/.gitignore
deleted file mode 100644
index d6cf1d58f..000000000
--- a/src/weatherreport/.gitignore
+++ /dev/null
@@ -1,13 +0,0 @@
-doc/
-deps/
-ebin/*
-log/
-edoc/
-index.html
-weatherreport
-*.png
-pkg/
-erl_crash.dump
-.eunit/
-*~
-#*#
diff --git a/src/weatherreport/.manifest b/src/weatherreport/.manifest
deleted file mode 100644
index 73b293867..000000000
--- a/src/weatherreport/.manifest
+++ /dev/null
@@ -1,5 +0,0 @@
-src
-weatherreport
-doc
-LICENSE
-README.md
diff --git a/src/weatherreport/LICENSE b/src/weatherreport/LICENSE
deleted file mode 100644
index e454a5258..000000000
--- a/src/weatherreport/LICENSE
+++ /dev/null
@@ -1,178 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
diff --git a/src/weatherreport/README.md b/src/weatherreport/README.md
deleted file mode 100644
index 09f66421f..000000000
--- a/src/weatherreport/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Weather Report
-
-`weatherreport` is an escript and set of tools that diagnose common problems that could affect a CouchDB node or cluster.
-
-## Overview
-
-Here is a basic example of using `weatherreport` followed immediately by the command's output:
-
-```bash
-$ ./weatherreport --etc /path/to/etc
-[warning] Cluster member node3@127.0.0.1 is not connected to this node. Please check whether it is down.
-```
-
-## Usage
-
-In most cases, you can just run the `weatherreport` command as given at the top of this README. However, sometimes you might want extra detail or want to run only specific checks. For that, there are command-line options. Execute `weatherreport --help` to learn more about these options:
-
-```bash
-weatherreport --help
-Usage: weatherreport [-c <path>] [-d <level>] [-e] [-h] [-l] [check_name ...]
-
- -c, --etc Path to the CouchDB configuration directory
- -d, --level Minimum message severity level (default: notice)
- -l, --list Describe available diagnostic tasks
- -e, --expert Perform more detailed diagnostics
- -h, --help Display help/usage
- check_name A specific check to run
-```
-
-To get an idea of what checks will be run, use the `--list` option:
-
-```bash
-weatherreport --list
-Available diagnostic checks:
-
- custodian Shard safety/liveness checks
- disk Data directory permissions and atime
- internal_replication Check the number of pending internal replication jobs
- ioq Check the total number of active IOQ requests
- mem3_sync Check there is a registered mem3_sync process
- membership Cluster membership validity
- memory_use Measure memory usage
- message_queues Check for processes with large mailboxes
- node_stats Check useful erlang statistics for diagnostics
- nodes_connected Cluster node liveness
- process_calls Check for large numbers of processes with the same current/initial call
- process_memory Check for processes with high memory usage
- safe_to_rebuild Check whether the node can safely be taken out of service
- search Check the local search node is responsive
- tcp_queues Measure the length of tcp queues in the kernel
-```
-
-If you want all the gory details about what WeatherReport is doing, you can run the checks at a more verbose logging level with the `--level` option:
-
-```bash
-$ ./weatherreport --etc /path/to/etc -d debug
-[debug] Not connected to the local cluster node, trying to connect. alive:false connect_failed:undefined
-[debug] Starting distributed Erlang.
-[debug] Connected to local cluster node 'node1@127.0.0.1'.
-[debug] Local RPC: mem3:nodes([]) [5000]
-[debug] Local RPC: os:getpid([]) [5000]
-[debug] Running shell command: ps -o pmem,rss -p 73905
-[debug] Shell command output:
-%MEM RSS
- 0.3 25116
-
-
-[debug] Local RPC: erlang:nodes([]) [5000]
-[debug] Local RPC: mem3:nodes([]) [5000]
-[warning] Cluster member node3@127.0.0.1 is not connected to this node. Please check whether it is down.
-[info] Process is using 0.3% of available RAM, totalling 25116 KB of real memory.
-```
-
-Most of the time you'll want to use the default, but any syslog severity name will do (from most to least verbose): `debug, info, notice, warning, error, critical, alert, emergency`.
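-
-For example, to restrict output to warnings and more severe messages, set the level accordingly (a hypothetical invocation; the exact output depends on your cluster):
-
-```bash
-$ ./weatherreport --etc /path/to/etc -d warning
-[warning] Cluster member node3@127.0.0.1 is not connected to this node. Please check whether it is down.
-```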
-
-Finally, if you want to run just a single diagnostic or a list of specific ones, you can pass their name(s):
-
-```bash
-$ ./weatherreport --etc /path/to/etc nodes_connected
-[warning] Cluster member node3@127.0.0.1 is not connected to this node. Please check whether it is down.
-```
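-
-The `-e`/`--expert` flag from the help output asks checks that support it to gather more detail (for example, `process_calls` reports information about individual processes in expert mode). A hypothetical invocation combining it with a specific check:
-
-```bash
-$ ./weatherreport --etc /path/to/etc --expert process_calls
-```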
diff --git a/src/weatherreport/how_to_add_a_check.md b/src/weatherreport/how_to_add_a_check.md
deleted file mode 100644
index b78640e61..000000000
--- a/src/weatherreport/how_to_add_a_check.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# How to add a check
-
-A new check can be added by creating a new file in the src/ directory named
-`weatherreport_check_NAME.erl` where `NAME` is a short descriptive check name
-(e.g. memory_use).
-
-The file must be an erlang module which implements the `weatherreport_check`
-behaviour. This requires the following four functions to be implemented (see
-the documentation of the `weatherreport_check` module for more details):
-
- - `description/0` Return a short description of what the check does. This will
- be printed to the console when `weatherreport` is run with the `-l` option.
-
- - `valid/0` Check that running the diagnostic check is valid. Any preconditions
- required by the check (e.g. cluster connectivity) should be carried out here.
- If a check has no prerequisites then this function can just return `true`.
-
- - `check/1` The function that actually performs the check, receiving the list
-   of command-line options. Typically this will involve either calls to the
-   local OS (via `weatherreport_util:run_command/1`), calls to the local
-   cluster node (via `weatherreport_node:local_command/3`) or calls to the
-   cluster (via `weatherreport_node:cluster_command/3`). This function should
-   return a list of tuples of the form `{LogLevel, Message}` where `LogLevel`
-   is an atom that specifies a supported log level (e.g. `warning` or `info`)
-   and `Message` is any erlang term that is matched by the `format/1` function.
-
- - `format/1` This function is used to format the messages returned by `check/1`
-   and its clauses must match all possible messages returnable by `check/1`. It
-   should return a tuple of the form `{String, Args}` where `String` is the
-   format string and `Args` is the list of formatting arguments. The format
-   string should be a human-readable description of the message.
-
-## Annotated example
-
-The following annotated example is based on `weatherreport_check_memory_use.erl`;
-the file header and license are omitted.
-
-```erlang
-%% @doc Diagnostic that checks the current memory usage. If memory
-%% usage is high, a warning message will be sent, otherwise only
-%% informational messages.
-```
-
-The module begins with an edoc declaration which provides a full description of
-the check. Any relevant details which cannot be communicated in the one-line
-string returned by the `description/0` function should be included here.
-
-```erlang
--module(weatherreport_check_memory_use).
--behaviour(weatherreport_check).
-
--export([description/0,
-         valid/0,
-         check/1,
-         format/1]).
-```
-
-The module name is specified, the `weatherreport_check` behaviour is set and the
-functions required by that behaviour are exported.
-
-```erlang
--spec description() -> string().
-description() ->
- "Measure memory usage".
-```
-
-Define `description/0` which returns a concise description for inclusion in
-command line output.
-
-```erlang
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-```
-
-Define `valid/0` which is used to check that we can connect to the local cluster
-node. Connectivity to the local node is required in this check so that the OS
-process ID can be obtained.
-
-```erlang
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
-    Pid = weatherreport_node:pid(),
-    Output = weatherreport_util:run_command("ps -o pmem,rss -p " ++ Pid),
-    [_, _, Percent, RealSize | _] = string:tokens(Output, "/n \n"),
- Messages = [{info, {process_usage, Percent, RealSize}}],
- case weatherreport_util:binary_to_float(list_to_binary(Percent)) >= 90 of
- false ->
- Messages;
- true ->
- [{critical, {high_memory, Percent}} | Messages]
- end.
-```
-
-The actual code that carries out the check. Note that an `info` message is
-always returned and a `critical` message is prepended to the `Messages` list
-only if memory usage exceeds a hard-coded threshold. Note also that there are
-two message forms: `{process_usage, Percent, RealSize}` and
-`{high_memory, Percent}`. When `format/1` is defined it must match both of
-these message forms.
-
-```erlang
--spec format(term()) -> {io:format(), [term()]}.
-format({high_memory, Percent}) ->
- {"Memory usage is HIGH: ~s% of available RAM", [Percent]};
-format({process_usage, Percent, Real}) ->
- {"Process is using ~s% of available RAM, totalling ~s KB of real memory.", [Percent, Real]}.
-```
-
-Finally `format/1` is defined. There are two function clauses, one to match each
-of the message forms that can be returned by `check/1`. The tuple returned by this
-function will eventually be used to generate the text displayed in the console
-output.
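-
-Once the new module is built into the escript, the check can be exercised from the command line under its short name (the module name with the `weatherreport_check_` prefix stripped, as shown by `--list`). A hypothetical session using the example check above:
-
-```bash
-$ ./weatherreport --list
-$ ./weatherreport --etc /path/to/etc memory_use -d debug
-```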
diff --git a/src/weatherreport/rebar.config b/src/weatherreport/rebar.config
deleted file mode 100644
index 983755adf..000000000
--- a/src/weatherreport/rebar.config
+++ /dev/null
@@ -1,31 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% Modified to handle dependencies for weatherreport
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-{escript_shebang, "#!/usr/bin/env escript\n"}.
-{escript_comment, "%% -nocookie\n"}.
-
-{escript_incl_apps, [config, couch_log, couch_stats]}.
diff --git a/src/weatherreport/src/weatherreport.app.src b/src/weatherreport/src/weatherreport.app.src
deleted file mode 100644
index 6674fa537..000000000
--- a/src/weatherreport/src/weatherreport.app.src
+++ /dev/null
@@ -1,39 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic.app.src to weatherreport.app.src and
-%% modified to work with Apache CouchDB.
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-{application, weatherreport, [
- {description, "Diagnostic tools for Apache CouchDB"},
- {vsn, git},
- {registered, []},
- {applications, [
- kernel,
- stdlib,
- inets
- ]}
- ]}.
diff --git a/src/weatherreport/src/weatherreport.erl b/src/weatherreport/src/weatherreport.erl
deleted file mode 100644
index 8a46b4a87..000000000
--- a/src/weatherreport/src/weatherreport.erl
+++ /dev/null
@@ -1,203 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic.erl to weatherreport.erl and modified
-%% to work with Apache CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc <p>The <code>weatherreport</code> module is the entry point for
-%% the escript. It is responsible for parsing command-line arguments
-%% and switches, printing the available checks, listing the help text,
-%% or running all or the specified checks, depending on the command
-%% line.</p>
-%%
-%% <p>The <code>getopt</code> application and module is used
-%% for command-line parsing. The defined switches and arguments are:</p>
-%% <pre>$ ./weatherreport --etc etc [-d level] [-l] [-h] [check_name...]</pre>
-%%
-%% <table class="options">
-%% <tr><td><code>--etc etc</code></td><td>the location of the CouchDB
-%% configuration directory</td></tr>
-%% <tr><td><code>-d, --level level</code>&#160;&#160;</td><td>the severity of
-%% messages you want to see, defaulting to 'notice'. Equivalent to
-%% syslog severity levels.</td></tr>
-%% <tr><td><code>-l, --list</code></td><td>lists available checks,
-%% that is, modules that implement <code>weatherreport_check</code>. A
-%% "short name" will be given for ease-of-use.</td></tr>
-%% <tr><td><code>-h, --help</code></td><td> - print command usage
-%% ("help")</td></tr>
-%% <tr><td><code>check_name</code></td><td>when given, a specific
-%% check or list of checks to run</td></tr>
-%% </table>
-%% @end
--module(weatherreport).
--export([main/1]).
-
--define(OPTS, [
- {etc, $c, "etc", string, "Path to the CouchDB configuration directory"},
- {level, $d, "level", {atom, notice}, "Minimum message severity level (default: notice)"},
- {expert, $e, "expert", undefined, "Perform more detailed diagnostics"},
- {usage, $h, "help", undefined, "Display help/usage"},
- {list, $l, "list", undefined, "Describe available diagnostic tasks"},
- {all_nodes, $a, "all-nodes", undefined, "Run weatherreport on all cluster nodes"},
- {timeout, $t, "timeout", integer, "Timeout value (in ms) for each diagnostic check"}
-]).
-
--define(USAGE_OPTS, [
- O
- || O <- ?OPTS,
- element(5, O) =/= undefined
-]).
-
-%% @doc The main entry point for the weatherreport escript.
--spec main(CommandLineArguments :: [string()]) -> any().
-main(Args) ->
- application:load(weatherreport),
-
- case weatherreport_getopt:parse(?OPTS, Args) of
- {ok, {Opts, NonOptArgs}} ->
- case process_opts(Opts) of
- list -> list_checks();
- usage -> usage();
- run -> run(NonOptArgs)
- end;
- {error, Error} ->
- io:format("Invalid option sequence given: ~w~n", [Error]),
- usage()
- end.
-
-list_checks() ->
- Descriptions = [
- {weatherreport_util:short_name(Mod), Mod:description()}
- || Mod <- weatherreport_check:modules()
- ],
- io:format("Available diagnostic checks:~n~n"),
- lists:foreach(
- fun({Mod, Desc}) ->
- io:format(" ~.20s ~s~n", [Mod, Desc])
- end,
- lists:sort(Descriptions)
- ).
-
-usage() ->
- weatherreport_getopt:usage(?USAGE_OPTS, "weatherreport ", "[check_name ...]", [
- {"check_name", "A specific check to run"}
- ]).
-
-run(InputChecks) ->
- case weatherreport_config:prepare() of
- {error, Reason} ->
- io:format("Fatal error: ~s~n", [Reason]),
- halt(1);
- _ ->
- ok
- end,
- Checks =
- case InputChecks of
- [] ->
- weatherreport_check:modules();
- _ ->
- ShortNames = [
- {weatherreport_util:short_name(Mod), Mod}
- || Mod <- weatherreport_check:modules()
- ],
- element(1, lists:foldr(fun validate_checks/2, {[], ShortNames}, InputChecks))
- end,
- Messages =
- case application:get_env(weatherreport, all_nodes) of
- {ok, true} ->
- weatherreport_runner:run(Checks, all);
- _ ->
- weatherreport_runner:run(Checks)
- end,
- case Messages of
- [] ->
- io:format("No diagnostic messages to report.~n"),
- halt(0);
- _ ->
- %% Print the most critical messages first
- FilteredMessages = lists:filter(
- fun({_, Level, _, _}) ->
- weatherreport_log:should_log(Level)
- end,
- Messages
- ),
- SortedMessages = lists:sort(
- fun({_, ALevel, _, _}, {_, BLevel, _, _}) ->
- weatherreport_log:level(ALevel) =< weatherreport_log:level(BLevel)
- end,
- FilteredMessages
- ),
- case SortedMessages of
- [] ->
- io:format("No diagnostic messages to report.~n"),
- halt(0);
- _ ->
- lists:foreach(fun weatherreport_check:print/1, SortedMessages),
- weatherreport_util:flush_stdout(),
- halt(1)
- end,
- halt(1)
- end.
-
-validate_checks(Check, {Mods, SNames}) ->
- case lists:keyfind(Check, 1, SNames) of
- {Check, Mod} ->
- {[Mod | Mods], lists:delete({Check, Mod}, SNames)};
- _ ->
- io:format("Unknown check '~s' specified, skipping.~n", [Check]),
- {Mods, SNames}
- end.
-
-process_opts(Opts) ->
- process_opts(Opts, run).
-
-process_opts([], Result) ->
- Result;
-process_opts([H | T], Result) ->
- process_opts(T, process_option(H, Result)).
-
-process_option({etc, Path}, Result) ->
- application:set_env(weatherreport, etc, filename:absname(Path)),
- Result;
-process_option({level, Level}, Result) ->
- application:set_env(weatherreport, log_level, Level),
- Result;
-process_option({timeout, Timeout}, Result) ->
- application:set_env(weatherreport, timeout, Timeout),
- Result;
-process_option(expert, Result) ->
- application:set_env(weatherreport, expert, true),
- Result;
-process_option(all_nodes, Result) ->
- application:set_env(weatherreport, all_nodes, true),
- Result;
-%% Help should have precedence over listing checks
-process_option(list, usage) ->
- usage;
-process_option(list, _) ->
- list;
-process_option(usage, _) ->
- usage.
diff --git a/src/weatherreport/src/weatherreport_check.erl b/src/weatherreport/src/weatherreport_check.erl
deleted file mode 100644
index 65ce1a416..000000000
--- a/src/weatherreport/src/weatherreport_check.erl
+++ /dev/null
@@ -1,113 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic_check.erl to weatherreport_check.erl
-%% and modified to work with Apache CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc <p>Enforces a common API among all diagnostic modules and
-%% provides some automation around their execution.</p>
-%% <h2>Behaviour Specification</h2>
-%%
-%% <h3>description/0</h3>
-%% <pre>-spec description() -> iodata().</pre>
-%% <p>A short description of what the diagnostic does, which will be
-%% printed when the script is given the <code>-l</code> flag.</p>
-%%
-%% <h3>valid/0</h3>
-%% <pre>-spec valid() -> boolean().</pre>
-%% <p>Whether the diagnostic is valid to run. For example, some checks
-%% require connectivity to the cluster node and hence call {@link
-%% weatherreport_node:can_connect/0. weatherreport_node:can_connect()}.</p>
-%%
-%% <h3>check/1</h3>
-%% <pre>-spec check(list()) -> [{atom(), term()}].</pre>
-%% <p>Runs the diagnostic with the list of command-line options,
-%% returning a list of pairs, where the first is a severity level and
-%% the second is any term that is understood by the
-%% <code>format/1</code> callback.</p>
-%%
-%% <h3>format/1</h3>
-%% <pre>-spec format(term()) -> iodata() | {io:format(), [term()]}.</pre>
-%% <p>Formats terms that were returned from <code>check/0</code> for
-%% output to the console. Valid return values are an iolist (string,
-%% binary, etc) or a pair of a format string and a list of terms, as
-%% you would pass to {@link io:format/2. io:format/2}.</p>
-%% @end
-
--module(weatherreport_check).
--export([behaviour_info/1]).
--export([
- check/2,
- modules/0,
- print/1
-]).
-
-%% @doc The behaviour definition for diagnostic modules.
--spec behaviour_info(atom()) -> 'undefined' | [{atom(), arity()}].
-behaviour_info(callbacks) ->
- [
- {description, 0},
- {valid, 0},
- {check, 1},
- {format, 1}
- ];
-behaviour_info(_) ->
- undefined.
-
-%% @doc Runs the diagnostic in the given module, if it is valid. Returns a
-%% list of messages that will be printed later using print/1.
--spec check(Module :: module(), list()) -> [{atom(), module(), term()}].
-check(Module, Opts) ->
- case Module:valid() of
- true ->
- [{Level, Module, Message} || {Level, Message} <- Module:check(Opts)];
- _ ->
- []
- end.
-
-%% @doc Collects a list of diagnostic modules included in the
-%% weatherreport application.
--spec modules() -> [module()].
-modules() ->
- {ok, Mods} = application:get_key(weatherreport, modules),
- [
- M
- || M <- Mods,
- Attr <- M:module_info(attributes),
- {behaviour, [?MODULE]} =:= Attr orelse {behavior, [?MODULE]} =:= Attr
- ].
-
-%% @doc Formats and prints the given message. The diagnostic
-%% module's format/1 function will be called to provide a
-%% human-readable message. It should return an iolist() or a 2-tuple
-%% consisting of a format string and a list of terms.
--spec print({Node :: atom(), Level :: atom(), Module :: module(), Data :: term()}) -> ok.
-print({Node, Level, Mod, Data}) ->
- case Mod:format(Data) of
- {Format, Terms} ->
- weatherreport_log:log(Node, Level, Format, Terms);
- String ->
- weatherreport_log:log(Node, Level, String)
- end.
diff --git a/src/weatherreport/src/weatherreport_check_custodian.erl b/src/weatherreport/src/weatherreport_check_custodian.erl
deleted file mode 100644
index 924d1c94f..000000000
--- a/src/weatherreport/src/weatherreport_check_custodian.erl
+++ /dev/null
@@ -1,84 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% @doc Diagnostic that performs safety and liveness checks on
-%% cluster shards. Shard safety is determined by the availability of
-%% the nodes that contain copies of that shard. A shard is considered
-%% unsafe if one or more nodes containing copies are unavailable.
-%% Shard liveness is similar but also requires nodes containing copies
-%% to be actively participating in the cluster. If one or more nodes
-%% containing copies are in maintenance mode then liveness is impaired.
-%% Messages are also returned for any databases where there are
-%% conflicting shard maps.
-
--module(weatherreport_check_custodian).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--include_lib("eunit/include/eunit.hrl").
-
--spec description() -> string().
-description() ->
- "Shard safety/liveness checks".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
-n_to_level(2) ->
- warning;
-n_to_level(1) ->
- error;
-n_to_level(0) ->
- critical;
-n_to_level(_) ->
- info.
-
-report_to_message({DbName, ShardRange, {Type, N}}) ->
- {n_to_level(N), {Type, N, DbName, ShardRange}};
-report_to_message({DbName, {conflicted, N}}) ->
- {warning, {conflicted, N, DbName}}.
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- case custodian:report() of
- [] ->
- [{info, ok}];
- Report ->
- lists:map(fun(R) -> report_to_message(R) end, Report)
- end.
-
--spec format(term()) -> {io:format(), [term()]}.
-format(ok) ->
- {"All shards available and alive.", []};
-format({Type, N, DbName, ShardRange}) ->
- {"~w ~w shards for Db: ~s Range: ~w.", [N, Type, DbName, ShardRange]};
-format({conflicted, 1, DbName}) ->
- {"1 conflicted shard map for Db: ~s", [DbName]};
-format({conflicted, N, DbName}) ->
- {"~w conflicted shard maps for Db: ~s", [N, DbName]}.
diff --git a/src/weatherreport/src/weatherreport_check_disk.erl b/src/weatherreport/src/weatherreport_check_disk.erl
deleted file mode 100644
index 5361ae632..000000000
--- a/src/weatherreport/src/weatherreport_check_disk.erl
+++ /dev/null
@@ -1,195 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic_check_disk.erl to
-%% weatherreport_check_disk.erl and modified to work with Apache
-%% CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks permissions on data directories and
-%% whether noatime is set. It will only check data directories of
-%% known storage backends.
--module(weatherreport_check_disk).
--behaviour(weatherreport_check).
-
-%% The file that we will attempt to create and read under each data directory.
--define(TEST_FILE, "weatherreport.tmp").
-
-%% A dependent chain of permissions checking functions.
--define(CHECKPERMFUNS, [
- fun check_is_dir/1,
- fun check_is_writeable/1,
- fun check_is_readable/1,
- fun check_is_file_readable/1,
- fun check_atime/1
-]).
-
--include_lib("kernel/include/file.hrl").
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--spec description() -> string().
-description() ->
- "Data directory permissions and atime".
-
--spec valid() -> true.
-valid() ->
- true.
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- DataDirs = weatherreport_config:data_directories(),
- %% Add additional disk checks in the function below
- lists:flatmap(
- fun(Dir) ->
- check_directory_permissions(Dir)
- end,
- DataDirs
- ).
-
--spec format(term()) -> {io:format(), [term()]}.
-format({disk_full, DataDir}) ->
-    {
-        "Disk containing data directory ~s is full! "
-        "Please check that it is set to the correct location and that there are not "
-        "other files using up space intended for CouchDB.",
-        [DataDir]
-    };
-format({no_data_dir, DataDir}) ->
- {"Data directory ~s does not exist. Please create it.", [DataDir]};
-format({no_write, DataDir}) ->
- User = weatherreport_config:user(),
- {"No write access to data directory ~s. Please make it writeable by the '~s' user.", [
- DataDir, User
- ]};
-format({no_read, DataDir}) ->
- User = weatherreport_config:user(),
- {"No read access to data directory ~s. Please make it readable by the '~s' user.", [
- DataDir, User
- ]};
-format({write_check, File}) ->
- {"Write-test file ~s is a directory! Please remove it so this test can continue.", [File]};
-format({atime, Dir}) ->
- {
- "Data directory ~s is not mounted with 'noatime'. "
- "Please remount its disk with the 'noatime' flag to improve performance.",
- [Dir]
- }.
-
-%%% Private functions
-
-check_directory_permissions(Directory) ->
- check_directory(Directory, ?CHECKPERMFUNS).
-
-%% Run a list of check functions against the given directory,
-%% returning the first non-ok result.
-check_directory(_, []) ->
- [];
-check_directory(Directory, [Check | Checks]) ->
- case Check(Directory) of
- ok ->
- check_directory(Directory, Checks);
- Message ->
- [Message]
- end.
-
-%% Check if the path is actually a directory
-check_is_dir(Directory) ->
- case filelib:is_dir(Directory) of
- true ->
- ok;
- _ ->
- {error, {no_data_dir, Directory}}
- end.
-
-%% Check if the directory is writeable
-check_is_writeable(Directory) ->
- File = filename:join([Directory, ?TEST_FILE]),
- case file:write_file(File, <<"ok">>) of
- ok ->
- ok;
- {error, Error} when Error == enoent orelse Error == eacces ->
- {error, {no_write, Directory}};
- {error, enospc} ->
- {critical, {disk_full, Directory}};
- {error, eisdir} ->
- {error, {write_check, File}}
- end.
-
-%% Check if the directory is readable
-check_is_readable(Directory) ->
- case file:read_file_info(Directory) of
- {ok, #file_info{access = Access}} when
- Access == read orelse
- Access == read_write
- ->
- ok;
- {error, eacces} ->
- {error, {no_read, Directory}};
- {error, Error} when
- Error == enoent orelse
- Error == enotdir
- ->
- {error, {no_data_dir, Directory}};
- _ ->
- {error, {no_read, Directory}}
- end.
-
-%% Check if the file we created is readable
-check_is_file_readable(Directory) ->
- File = filename:join([Directory, ?TEST_FILE]),
- case file:read_file(File) of
- {error, Error} when
- Error == eacces orelse
- Error == enotdir
- ->
- {error, {no_read, Directory}};
- {error, enoent} ->
- {error, {write_check, File}};
- _ ->
- ok
- end.
-
-%% Check if the directory is mounted with 'noatime'
-check_atime(Directory) ->
- File = filename:join([Directory, ?TEST_FILE]),
- weatherreport_util:run_command("touch -at 201401010000.00 " ++ File),
- {ok, FileInfo1} = file:read_file_info(File),
- {ok, S} = file:open(File, [read]),
- io:get_line(S, ''),
- file:close(S),
- {ok, FileInfo2} = file:read_file_info(File),
- file:delete(File),
- case (FileInfo1#file_info.atime =/= FileInfo2#file_info.atime) of
- true ->
- {notice, {atime, Directory}};
- _ ->
- ok
- end.
diff --git a/src/weatherreport/src/weatherreport_check_internal_replication.erl b/src/weatherreport/src/weatherreport_check_internal_replication.erl
deleted file mode 100644
index 5dc0bfa64..000000000
--- a/src/weatherreport/src/weatherreport_check_internal_replication.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks the current size of the mem3_sync
-%% backlog. The size is printed as an info message if under a defined
-%% threshold, or as a warning if above the threshold.
--module(weatherreport_check_internal_replication).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--define(THRESHOLD, 1000000).
-
--spec description() -> string().
-description() ->
- "Check the number of pending internal replication jobs".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec total_to_level(integer()) -> atom().
-total_to_level(Total) when Total > ?THRESHOLD ->
- warning;
-total_to_level(_Total) ->
- info.
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- Backlog = mem3_sync:get_backlog(),
- [{total_to_level(Backlog), Backlog}].
-
--spec format(term()) -> {io:format(), [term()]}.
-format(Backlog) ->
- {"Total number of pending internal replication jobs: ~w", [Backlog]}.
diff --git a/src/weatherreport/src/weatherreport_check_ioq.erl b/src/weatherreport/src/weatherreport_check_ioq.erl
deleted file mode 100644
index a0e0b0e60..000000000
--- a/src/weatherreport/src/weatherreport_check_ioq.erl
+++ /dev/null
@@ -1,101 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks the total number of IOQ requests. If
-%% the total exceeds a configured threshold, a warning message will be
-%% sent, otherwise only an informational message.
--module(weatherreport_check_ioq).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--define(THRESHOLD, 500).
-
--spec description() -> string().
-description() ->
- "Check the total number of active IOQ requests".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec total_to_level(integer()) -> atom().
-total_to_level(Total) when Total > ?THRESHOLD ->
- warning;
-total_to_level(_Total) ->
- info.
-
--spec sum_channels(list(), non_neg_integer()) -> non_neg_integer().
-sum_channels([], Acc) ->
- Acc;
-sum_channels([{_Name, Value} | Rest], Acc) ->
- sum_channels(Rest, Acc + lists:sum(Value)).
-
--spec sum_queues(list(), non_neg_integer()) -> non_neg_integer().
-sum_queues([], Acc) ->
- Acc;
-sum_queues([{channels, {Channels}} | Rest], Acc) ->
- sum_queues(Rest, sum_channels(Channels, Acc));
-sum_queues([{_Name, Value} | Rest], Acc) ->
- sum_queues(Rest, Acc + Value).
-
--spec check(list()) -> [{atom(), term()}].
-check(Opts) ->
- case erlang:function_exported(ioq, get_queue_lengths, 0) of
- true ->
- case ioq:get_queue_lengths() of
- Queues when is_map(Queues) ->
- Total = maps:fold(
- fun(_Key, Val, Acc) ->
- Val + Acc
- end,
- 0,
- Queues
- ),
- [{total_to_level(Total), {ioq_requests, Total, Queues}}];
- Error ->
- [{warning, {ioq_requests_unknown, Error}}]
- end;
- false ->
- check_legacy(Opts)
- end.
-
--spec check_legacy(list()) -> [{atom(), term()}].
-check_legacy(_Opts) ->
- case ioq:get_disk_queues() of
- Queues when is_list(Queues) ->
- Total = sum_queues(Queues, 0),
- [{total_to_level(Total), {ioq_requests, Total, Queues}}];
- Error ->
- [{warning, {ioq_requests_unknown, Error}}]
- end.
-
--spec format(term()) -> {io:format(), [term()]}.
-format({ioq_requests_unknown, Error}) ->
- {"Could not determine total number of IOQ requests: ~w~n", [Error]};
-format({ioq_requests, Total, Queues}) ->
- {"Total number of active IOQ requests is: ~w ~w", [Total, Queues]}.
diff --git a/src/weatherreport/src/weatherreport_check_mem3_sync.erl b/src/weatherreport/src/weatherreport_check_mem3_sync.erl
deleted file mode 100644
index cabca5d50..000000000
--- a/src/weatherreport/src/weatherreport_check_mem3_sync.erl
+++ /dev/null
@@ -1,57 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks for the presence of the mem3_sync
-%% registered process. If this is not found a warning message will be
-%% sent, otherwise only informational messages.
--module(weatherreport_check_mem3_sync).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--spec description() -> string().
-description() ->
- "Check there is a registered mem3_sync process".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- case erlang:whereis(mem3_sync) of
- undefined ->
- [{warning, mem3_sync_not_found}];
- Pid ->
- [{info, {mem3_sync_found, Pid}}]
- end.
-
--spec format(term()) -> {io:format(), [term()]}.
-format(mem3_sync_not_found) ->
- {"No mem3_sync process found on local node.", []};
-format({mem3_sync_found, Pid}) ->
- {"mem3_sync process found on local node with pid ~w", [Pid]}.
diff --git a/src/weatherreport/src/weatherreport_check_membership.erl b/src/weatherreport/src/weatherreport_check_membership.erl
deleted file mode 100644
index 8fff33c10..000000000
--- a/src/weatherreport/src/weatherreport_check_membership.erl
+++ /dev/null
@@ -1,68 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic_check_ring_membership.erl to
-%% weatherreport_check_membership.erl and modified to work with Apache
-%% CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks whether the local node is a member of
-%% the cluster. Non-membership can arise when the node name in vm.args
-%% has changed but the node has not been renamed in the cluster.
--module(weatherreport_check_membership).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--include_lib("eunit/include/eunit.hrl").
-
--spec description() -> string().
-description() ->
- "Cluster membership validity".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- NodeName = node(),
- Members = mem3:nodes(),
- case lists:member(NodeName, Members) of
- true ->
- [];
- false ->
- [{warning, {not_ring_member, NodeName}}]
- end.
-
--spec format(term()) -> {io:format(), [term()]}.
-format({not_ring_member, Nodename}) ->
- {"Local node ~w is not a member of the cluster. Please check that the -name setting in vm.args is correct.",
- [Nodename]}.
diff --git a/src/weatherreport/src/weatherreport_check_memory_use.erl b/src/weatherreport/src/weatherreport_check_memory_use.erl
deleted file mode 100644
index 04c021381..000000000
--- a/src/weatherreport/src/weatherreport_check_memory_use.erl
+++ /dev/null
@@ -1,69 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic_check_memory_use.erl to
-%% weatherreport_check_memory_use.erl and modified to work with Apache
-%% CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks the current memory usage. If memory
-%% usage is high, a warning message will be sent, otherwise only
-%% informational messages.
--module(weatherreport_check_memory_use).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--spec description() -> string().
-description() ->
- "Measure memory usage".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- Pid = weatherreport_node:pid(),
- Output = weatherreport_util:run_command("ps -o pmem,rss -p " ++ Pid),
- [_, _, Percent, RealSize | _] = string:tokens(Output, "/n \n"),
- Messages = [{info, {process_usage, Percent, RealSize}}],
- case weatherreport_util:binary_to_float(list_to_binary(Percent)) >= 90 of
- false ->
- Messages;
- true ->
- [{critical, {high_memory, Percent}} | Messages]
- end.
-
--spec format(term()) -> {io:format(), [term()]}.
-format({high_memory, Percent}) ->
- {"Memory usage is HIGH: ~s% of available RAM", [Percent]};
-format({process_usage, Percent, Real}) ->
- {"Process is using ~s% of available RAM, totalling ~s KB of real memory.", [Percent, Real]}.
diff --git a/src/weatherreport/src/weatherreport_check_message_queues.erl b/src/weatherreport/src/weatherreport_check_message_queues.erl
deleted file mode 100644
index e55e9eb52..000000000
--- a/src/weatherreport/src/weatherreport_check_message_queues.erl
+++ /dev/null
@@ -1,60 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks for processes with large mailboxes
-%% and sends a warning message if one or more processes exceed the
-%% threshold.
--module(weatherreport_check_message_queues).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--define(THRESHOLD, 1000).
-
--spec description() -> string().
-description() ->
- "Check for processes with large mailboxes".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec check(list()) -> [{atom(), term()}].
-check(Opts) ->
- weatherreport_util:check_proc_count(
- message_queue_len,
- ?THRESHOLD,
- Opts
- ).
-
--spec format(term()) -> {io:format(), [term()]}.
-format({high, {Pid, MBoxSize, Info, Pinfo}}) ->
- {"Process ~w has excessive mailbox size of ~w: ~w ~w", [Pid, MBoxSize, Info, Pinfo]};
-format({high, {Pid, MBoxSize, Info}}) ->
- {"Process ~w has excessive mailbox size of ~w: ~w", [Pid, MBoxSize, Info]};
-format({ok, {Pid, MBoxSize, Info}}) ->
- {"Process ~w has mailbox size of ~w: ~w", [Pid, MBoxSize, Info]}.
diff --git a/src/weatherreport/src/weatherreport_check_node_stats.erl b/src/weatherreport/src/weatherreport_check_node_stats.erl
deleted file mode 100644
index 6c3353dc6..000000000
--- a/src/weatherreport/src/weatherreport_check_node_stats.erl
+++ /dev/null
@@ -1,68 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks various erlang VM statistics that are
-%% useful for diagnostics. A warning message is printed if certain stats
-%% rise above pre-determined thresholds, otherwise an info message is sent.
--module(weatherreport_check_node_stats).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--define(SAMPLES, 10).
--define(T_RUN_QUEUE, 40).
--define(T_PROCESS_COUNT, 100000).
-
--spec description() -> string().
-description() ->
- "Check useful erlang statistics for diagnostics".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec sum_absolute_stats({list(), list()}, list()) -> list().
-sum_absolute_stats({AbsStats, _}, AbsSum) ->
- [{K, V + proplists:get_value(K, AbsSum, 0)} || {K, V} <- AbsStats].
-
--spec mean_to_message({atom(), integer()}) -> {atom(), {atom(), integer()}}.
-mean_to_message({run_queue, Mean}) when Mean > ?T_RUN_QUEUE ->
- {warning, {run_queue, Mean}};
-mean_to_message({process_count, Mean}) when Mean > ?T_PROCESS_COUNT ->
- {warning, {process_count, Mean}};
-mean_to_message({Statistic, Mean}) ->
- {info, {Statistic, Mean}}.
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- SumOfStats = recon:node_stats(?SAMPLES, 100, fun sum_absolute_stats/2, []),
- MeanStats = [{K, erlang:round(V / ?SAMPLES)} || {K, V} <- SumOfStats],
- lists:map(fun mean_to_message/1, MeanStats).
-
--spec format(term()) -> {io:format(), [term()]}.
-format({Statistic, Value}) ->
- {"Mean ~w over one second is ~w", [Statistic, Value]}.
diff --git a/src/weatherreport/src/weatherreport_check_nodes_connected.erl b/src/weatherreport/src/weatherreport_check_nodes_connected.erl
deleted file mode 100644
index 389054209..000000000
--- a/src/weatherreport/src/weatherreport_check_nodes_connected.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic_check_nodes_connected.erl to
-%% weatherreport_check_nodes_connected.erl and modified to work with
-%% Apache CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic check that detects cluster members that are down.
--module(weatherreport_check_nodes_connected).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--spec description() -> string().
-description() ->
- "Cluster node liveness".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- NodeName = node(),
- ConnectedNodes = [NodeName | erlang:nodes()],
- Members = mem3:nodes(),
- [
- {warning, {node_disconnected, N}}
- || N <- Members,
- N =/= NodeName,
- lists:member(N, ConnectedNodes) == false
- ].
-
--spec format(term()) -> {io:format(), [term()]}.
-format({node_disconnected, Node}) ->
- {"Cluster member ~s is not connected to this node. Please check whether it is down.", [Node]}.
diff --git a/src/weatherreport/src/weatherreport_check_process_calls.erl b/src/weatherreport/src/weatherreport_check_process_calls.erl
deleted file mode 100644
index b6a228aeb..000000000
--- a/src/weatherreport/src/weatherreport_check_process_calls.erl
+++ /dev/null
@@ -1,168 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks for large numbers of processes sharing
-%% the same current or initial function call
--module(weatherreport_check_process_calls).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--define(THRESHOLD, 1000).
-
--spec description() -> string().
-description() ->
- "Check for large numbers of processes with the same current/initial call".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec total_to_level(integer()) -> atom().
-total_to_level(Total) when Total > ?THRESHOLD ->
- notice;
-total_to_level(_Total) ->
- info.
-
-fold_processes([], Acc, _Lim, _CallType, _Opts) ->
- Acc;
-fold_processes(_, Acc, 0, _CallType, _Opts) ->
- Acc;
-fold_processes([{Count, undefined} | T], Acc, Lim, CallType, Opts) ->
- Level = total_to_level(Count),
- Message = {Level, {process_count, {CallType, Count, undefined}}},
- fold_processes(T, [Message | Acc], Lim - 1, CallType, Opts);
-fold_processes([{Count, {M, F, A}} | T], Acc, Lim, CallType, Opts) ->
- Level = total_to_level(Count),
- Message =
- case proplists:get_value(expert, Opts) of
- true ->
- PidFun = list_to_atom("find_by_" ++ CallType ++ "_call"),
- Pids = erlang:apply(recon, PidFun, [M, F]),
- Pinfos = lists:map(
- fun(Pid) ->
- Pinfo = recon:info(Pid),
- {Pid, Pinfo}
- end,
- lists:sublist(Pids, 10)
- ),
- {Level, {process_count, {CallType, Count, M, F, A, Pinfos}}};
- _ ->
- {Level, {process_count, {CallType, Count, M, F, A}}}
- end,
- fold_processes(T, [Message | Acc], Lim - 1, CallType, Opts).
-
--spec check(list()) -> [{atom(), term()}].
-check(Opts) ->
- CurrentCallCounts = show_current_call_counts(),
- CurrentCallMessages = fold_processes(
- CurrentCallCounts,
- [],
- 10,
- "current",
- Opts
- ),
- FirstCallCounts = show_first_call_counts(),
- lists:reverse(
- fold_processes(
- FirstCallCounts,
- CurrentCallMessages,
- 10,
- "first",
- Opts
- )
- ).
-
--spec format(term()) -> {io:format(), [term()]}.
-format({process_count, {CallType, Count, undefined}}) ->
- {"~w processes with ~s call ~w", [Count, CallType, undefined]};
-format({process_count, {CallType, Count, M, F, A}}) ->
- {"~w processes with ~s call ~w:~w/~w", [Count, CallType, M, F, A]};
-format({process_count, {CallType, Count, M, F, A, Pinfos}}) ->
- {"~w processes with ~s call ~w:~w/~w ~w", [Count, CallType, M, F, A, Pinfos]}.
-
-%% @doc Show the list of first calls sorted by the number of
-%% processes that had that initial call.
--spec show_first_call_counts() -> [{Count, {Module, Function, Arity}}] when
- Count :: pos_integer(),
- Module :: atom(),
- Function :: atom(),
- Arity :: non_neg_integer().
-show_first_call_counts() ->
- Res = lists:foldl(
- fun(Pid, Acc) ->
- dict:update_counter(first_call(Pid), 1, Acc)
- end,
- dict:new(),
- processes()
- ),
- Rev = [{Count, Call} || {Call, Count} <- dict:to_list(Res)],
- lists:reverse(lists:sort(Rev)).
-
-%% @doc Show the list of current calls sorted by the number of
-%% processes that had that current call.
--spec show_current_call_counts() -> [{Count, {Module, Function, Arity}}] when
- Count :: pos_integer(),
- Module :: atom(),
- Function :: atom(),
- Arity :: non_neg_integer().
-show_current_call_counts() ->
- Res = lists:foldl(
- fun(Pid, Acc) ->
- case process_info(Pid, current_function) of
- {current_function, Call} ->
- dict:update_counter(Call, 1, Acc);
- undefined ->
- Acc
- end
- end,
- dict:new(),
- processes()
- ),
- Rev = [{Count, Call} || {Call, Count} <- dict:to_list(Res)],
- lists:reverse(lists:sort(Rev)).
-
-%% @doc Find the first function call for a Pid taking into account cases
-%% where '$initial_call' is set in the process dictionary.
--spec first_call(Pid) -> {Module, Function, Arity} when
- Pid :: pid(),
- Module :: atom(),
- Function :: atom(),
- Arity :: non_neg_integer().
-first_call(Pid) ->
- IC =
- case process_info(Pid, initial_call) of
- {initial_call, IC0} -> IC0;
- undefined -> undefined
- end,
- Dict =
- case process_info(Pid, dictionary) of
- {dictionary, Dict0} -> Dict0;
- undefined -> []
- end,
- MaybeCall = proplists:get_value('$initial_call', Dict, IC),
- proplists:get_value(initial_call, Dict, MaybeCall).
diff --git a/src/weatherreport/src/weatherreport_check_process_memory.erl b/src/weatherreport/src/weatherreport_check_process_memory.erl
deleted file mode 100644
index 4c7b2c76f..000000000
--- a/src/weatherreport/src/weatherreport_check_process_memory.erl
+++ /dev/null
@@ -1,60 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks for processes with high memory usage
-%% and sends a warning message if one or more processes exceed the
-%% threshold.
--module(weatherreport_check_process_memory).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--define(THRESHOLD, 104857600).
-
--spec description() -> string().
-description() ->
- "Check for processes with high memory usage".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec check(list()) -> [{atom(), term()}].
-check(Opts) ->
- weatherreport_util:check_proc_count(
- memory,
- ?THRESHOLD,
- Opts
- ).
-
--spec format(term()) -> {io:format(), [term()]}.
-format({high, {Pid, Memory, Info, Pinfo}}) ->
- {"Process ~w has excessive memory usage of ~w: ~w ~w", [Pid, Memory, Info, Pinfo]};
-format({high, {Pid, Memory, Info}}) ->
- {"Process ~w has excessive memory usage of ~w: ~w", [Pid, Memory, Info]};
-format({ok, {Pid, Memory, Info}}) ->
- {"Process ~w has memory usage of ~w: ~w", [Pid, Memory, Info]}.
diff --git a/src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl b/src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl
deleted file mode 100644
index 86bb1f9c7..000000000
--- a/src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl
+++ /dev/null
@@ -1,121 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks whether the current node can be
-%% safely rebuilt (i.e. taken out of service).
--module(weatherreport_check_safe_to_rebuild).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--spec description() -> string().
-description() ->
- "Check whether the node can safely be taken out of service".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
-%% @doc Check if rebuilding a node is safe. Safe in this context means
-%% that no shard would end up with N<Threshold when the node is offline
--spec safe_to_rebuild(atom(), integer()) -> [list()].
-safe_to_rebuild(Node, RawThreshold) ->
- Threshold =
- case config:get("couchdb", "maintenance_mode") of
- "true" ->
- RawThreshold - 1;
- _ ->
- RawThreshold
- end,
- BelowThreshold = fun
- ({_, _, {_, C}}) when C =< Threshold -> true;
- (_) -> false
- end,
- ToKV = fun({Db, Range, Status}) -> {[Db, Range], Status} end,
-
- ShardsInDanger = dict:from_list(
- lists:map(
- ToKV,
- lists:filter(BelowThreshold, custodian:report())
- )
- ),
-
- mem3_shards:fold(
- fun(Shard, Acc) ->
- case Shard of
- {shard, _, Node, Db, [Start, End], _} ->
- case dict:find([Db, [Start, End]], ShardsInDanger) of
- {_, _} ->
- PrettyRange = [
- couch_util:to_hex(<<Start:32/integer>>),
- couch_util:to_hex(<<End:32/integer>>)
- ],
- PrettyShard = lists:flatten(
- io_lib:format("~s ~s-~s", [Db | PrettyRange])
- ),
- [PrettyShard | Acc];
- _ ->
- Acc
- end;
- _ ->
- Acc
- end
- end,
- []
- ).
-
--spec shards_to_message(atom(), list()) -> {atom(), {atom(), list()}}.
-shards_to_message(n1, []) ->
- {info, {n1, []}};
-shards_to_message(n1, Shards) ->
- {error, {n1, Shards}};
-shards_to_message(n0, []) ->
- {info, {n0, []}};
-shards_to_message(n0, Shards) ->
- {crit, {n0, Shards}}.
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- N0Shards = safe_to_rebuild(node(), 1),
- N1Shards = lists:subtract(safe_to_rebuild(node(), 2), N0Shards),
- [shards_to_message(n0, N0Shards), shards_to_message(n1, N1Shards)].
-
--spec format(term()) -> {io:format(), [term()]}.
-format({n1, []}) ->
- {"This node can be rebuilt without causing any shards to become N=1", []};
-format({n1, Shards}) ->
- {
- "Rebuilding this node will leave the following shards with only one live copy: ~s",
- [string:join(Shards, ", ")]
- };
-format({n0, []}) ->
- {"This node can be rebuilt without causing any shards to become N=0", []};
-format({n0, Shards}) ->
- {
- "Rebuilding this node will leave the following shard with NO live copies: ~s",
- [string:join(Shards, ", ")]
- }.
diff --git a/src/weatherreport/src/weatherreport_check_search.erl b/src/weatherreport/src/weatherreport_check_search.erl
deleted file mode 100644
index b7986db2b..000000000
--- a/src/weatherreport/src/weatherreport_check_search.erl
+++ /dev/null
@@ -1,60 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks that the local clouseau node is responsive.
-%% If clouseau is unresponsive then search will not work. An info
-%% message is returned if clouseau responds to pings and an error
-%% otherwise.
--module(weatherreport_check_search).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--spec description() -> string().
-description() ->
- "Check the local search node is responsive".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- SearchNode = 'clouseau@127.0.0.1',
- case net_adm:ping(SearchNode) of
- pong ->
- [{info, {clouseau_ok, SearchNode}}];
- Error ->
- % only warning since search is not enabled by default
- [{warning, {clouseau_error, SearchNode, Error}}]
- end.
-
--spec format(term()) -> {io:format(), [term()]}.
-format({clouseau_ok, SearchNode}) ->
- {"Local search node at ~w responding ok", [SearchNode]};
-format({clouseau_error, SearchNode, Error}) ->
- {"Local search node at ~w not responding: ~w", [SearchNode, Error]}.
diff --git a/src/weatherreport/src/weatherreport_check_tcp_queues.erl b/src/weatherreport/src/weatherreport_check_tcp_queues.erl
deleted file mode 100644
index cc502031b..000000000
--- a/src/weatherreport/src/weatherreport_check_tcp_queues.erl
+++ /dev/null
@@ -1,92 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Diagnostic that checks the current tcp recv and send queues.
-%% If the queues are high a warning message will be sent; otherwise
-%% only an informational message is returned.
--module(weatherreport_check_tcp_queues).
--behaviour(weatherreport_check).
-
--export([
- description/0,
- valid/0,
- check/1,
- format/1
-]).
-
--define(THRESHOLD, 1000000).
-
--spec description() -> string().
-description() ->
- "Measure the length of tcp queues in the kernel".
-
--spec valid() -> boolean().
-valid() ->
- weatherreport_node:can_connect().
-
-%% @doc Converts the raw text output of netstat into the sum of the
-%% tcp recv and send queues.
--spec sum_queues(string()) -> {integer(), integer()}.
-sum_queues(Netstats) ->
- sum_queues(string:tokens(Netstats, "\n"), {0, 0}).
-
-%% @doc Converts the rows of text output of netstat into the sum of
-%% the tcp recv and send queues. Note that this function is tightly coupled
-%% to the output of the netstat command provided by the system OS (tested
-%% with netstat 1.42).
--spec sum_queues([string()], {integer(), integer()}) -> {integer(), integer()}.
-sum_queues([], Acc) ->
- Acc;
-sum_queues([Row | Rest], {SumRecvQ, SumSendQ}) ->
- {RecvQ, SendQ} =
- case string:tokens(Row, " ") of
- [[$t, $c, $p | _] | _] = Cols ->
- {Rq, Sq} = {lists:nth(2, Cols), lists:nth(3, Cols)},
- {list_to_integer(Rq), list_to_integer(Sq)};
- _ ->
- {0, 0}
- end,
- sum_queues(Rest, {RecvQ + SumRecvQ, SendQ + SumSendQ}).
-
-%% @doc Converts the sum of queue lengths to a log message at the appropriate
-%% level, given ?THRESHOLD.
--spec sum_to_message(integer(), string()) -> {atom(), term()}.
-sum_to_message(Sum, Prefix) when Sum > ?THRESHOLD ->
- {warning, {list_to_atom(Prefix ++ "_high"), Sum}};
-sum_to_message(Sum, Prefix) ->
- {info, {list_to_atom(Prefix ++ "_ok"), Sum}}.
-
--spec check(list()) -> [{atom(), term()}].
-check(_Opts) ->
- Netstats = weatherreport_util:run_command("netstat"),
- {SumRecvQ, SumSendQ} = sum_queues(Netstats),
- [sum_to_message(SumRecvQ, "recv_q"), sum_to_message(SumSendQ, "send_q")].
-
--spec format(term()) -> {io:format(), [term()]}.
-format({recv_q_high, QLen}) ->
- {"Total TCP Recv-Q is HIGH: ~w", [QLen]};
-format({recv_q_ok, QLen}) ->
- {"Total TCP Recv-Q is ok: ~w", [QLen]};
-format({send_q_high, QLen}) ->
- {"Total TCP Send-Q is HIGH: ~w", [QLen]};
-format({send_q_ok, QLen}) ->
- {"Total TCP Send-Q is ok: ~w", [QLen]}.
diff --git a/src/weatherreport/src/weatherreport_config.erl b/src/weatherreport/src/weatherreport_config.erl
deleted file mode 100644
index 6cf9fd533..000000000
--- a/src/weatherreport/src/weatherreport_config.erl
+++ /dev/null
@@ -1,200 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic_config.erl to weatherreport_config.erl
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Provides convenient access to configuration values. When
-%% the {@link weatherreport. weatherreport} module calls {@link
-%% prepare/0. prepare/0}, CouchDB's <code>default.ini</code>,
-%% <code>local.ini</code> and <code>vm.args</code> files will be
-%% parsed and memoized.
-%% @end
-
--module(weatherreport_config).
-
--export([
- prepare/0,
- data_directories/0,
- get_vm_env/1,
- etc_dir/0,
- timeout/0,
- node_name/0,
- cookie/0,
- user/0
-]).
-
-%% @doc Prepares the appropriate configuration so the weatherreport script
-%% can run. This is called by the weatherreport module and you do
-%% not need to invoke it.
--spec prepare() -> ok | {error, iodata()}.
-prepare() ->
- prepare([fun load_app_config/0, fun load_vm_args/0]).
-
-prepare([]) ->
- ok;
-prepare([Fun | T]) ->
- case Fun() of
- {error, Reason} ->
- {error, Reason};
- _ ->
- prepare(T)
- end.
-
-%% @doc Determines where CouchDB is configured to store data. Returns a
-%% list of paths to directories defined by storage backends.
--spec data_directories() -> [file:filename()].
-data_directories() ->
- [config:get("couchdb", "view_index_dir"), config:get("couchdb", "database_dir")].
-
-%% @doc Get an -env flag out of the vm.args file.
--spec get_vm_env(string()) -> string() | undefined.
-get_vm_env(Key) ->
- case application:get_env(weatherreport, vm_env) of
- undefined ->
- undefined;
- {ok, PList} ->
- proplists:get_value(Key, PList)
- end.
-
-%% @doc Determines the user/uid that the script is running as.
--spec user() -> string().
-user() ->
- case weatherreport_util:run_command("whoami") of
- [] ->
- undefined;
- Resp ->
- [_Newline | Resp1] = lists:reverse(Resp),
- lists:reverse(Resp1)
- end.
-
-%% @doc The specified timeout value for diagnostic checks run via RPC
--spec timeout() -> integer().
-timeout() ->
- case application:get_env(weatherreport, timeout) of
- {ok, Timeout} ->
- Timeout;
- _ ->
- 300000
- end.
-
-%% @doc The CouchDB configuration directory.
--spec etc_dir() -> file:filename().
-etc_dir() ->
- case application:get_env(weatherreport, etc) of
- undefined ->
- ExecDir = filename:absname(filename:dirname(escript:script_name())),
- filename:join(ExecDir, "../etc");
- {ok, Path} ->
- filename:absname(Path, "/")
- end.
-
-%% @doc The local node name. Includes whether the node uses short
-%% or long nodenames for distributed Erlang.
--spec node_name() -> {shortnames | longnames, Name :: string()}.
-node_name() ->
- case application:get_env(weatherreport, node_name) of
- undefined ->
- undefined;
- {ok, Node} ->
- Node
- end.
-
-%% @doc The node's distributed Erlang cookie.
--spec cookie() -> atom().
-cookie() ->
- case application:get_env(weatherreport, cookie) of
- undefined ->
- undefined;
- {ok, Cookie} ->
- list_to_atom(Cookie)
- end.
-
-load_app_config() ->
- Etc = ?MODULE:etc_dir(),
- IniFiles = [
- filename:join(Etc, "default.ini"),
- filename:join(Etc, "local.ini")
- ],
- weatherreport_log:log(node(), debug, "Reading config from files: ~p", [IniFiles]),
- {ok, _Pid} = config:start_link(IniFiles),
- weatherreport_log:log(node(), debug, "Local node config: ~p~n", [config:all()]).
-
-load_vm_args() ->
- VmArgs =
- case init:get_argument(vm_args) of
- {ok, [[X]]} ->
- X;
- _ ->
- %% This is a backup. If for some reason -vm_args isn't specified
- %% then assume it lives in the same dir as app.config
- filename:absname("./vm.args", ?MODULE:etc_dir())
- end,
-
- case file:read_file(VmArgs) of
- {error, Reason} ->
- {error, io_lib:format("Could not read ~s, received error ~w!", [VmArgs, Reason])};
- {ok, Binary} ->
- load_vm_args(Binary)
- end.
-
-load_vm_args(Bin) when is_binary(Bin) ->
- load_vm_args(re:split(Bin, "\s*\r?\n\s*", [{return, list}, trim]));
-load_vm_args([]) ->
- ok;
-load_vm_args([[$# | _] | T]) ->
- load_vm_args(T);
-load_vm_args(["" | T]) ->
- load_vm_args(T);
-load_vm_args(["-sname " ++ NodeName | T]) ->
- application:set_env(weatherreport, node_name, {shortnames, string:strip(NodeName)}),
- load_vm_args(T);
-load_vm_args(["-name " ++ NodeName | T]) ->
- application:set_env(weatherreport, node_name, {longnames, string:strip(NodeName)}),
- load_vm_args(T);
-load_vm_args(["-setcookie " ++ Cookie | T]) ->
- application:set_env(weatherreport, cookie, string:strip(Cookie)),
- load_vm_args(T);
-load_vm_args(["-env " ++ Env | T]) ->
- [Key, Value] = re:split(Env, "\s+", [{return, list}, trim]),
- add_or_insert_env(vm_env, {Key, Value}),
- load_vm_args(T);
-load_vm_args([[$+ | EmuFlags] | T]) ->
- [Flag | Rest] = re:split(EmuFlags, "\s+", [{return, list}, trim]),
- add_or_insert_env(emu_flags, {[$+ | Flag], Rest}),
- load_vm_args(T);
-load_vm_args([[$- | InitFlags] | T]) ->
- [Flag | Rest] = re:split(InitFlags, "\s+", [{return, list}, trim]),
- add_or_insert_env(init_flags, {[$- | Flag], Rest}),
- load_vm_args(T);
-load_vm_args([Line | _]) ->
- {error, io_lib:format("Erroneous line in vm.args: ~s", [Line])}.
-
-add_or_insert_env(Key, Value) ->
- case application:get_env(weatherreport, Key) of
- undefined ->
- application:set_env(weatherreport, Key, [Value]);
- {ok, List} ->
- application:set_env(weatherreport, Key, [Value | List])
- end.
diff --git a/src/weatherreport/src/weatherreport_getopt.erl b/src/weatherreport/src/weatherreport_getopt.erl
deleted file mode 100644
index 736112630..000000000
--- a/src/weatherreport/src/weatherreport_getopt.erl
+++ /dev/null
@@ -1,655 +0,0 @@
-%%%-------------------------------------------------------------------
-%%% @author Juan Jose Comellas <juanjo@comellas.org>
-%%% @copyright (C) 2009 Juan Jose Comellas
-%%% @doc Parses command line options with a format similar to that of GNU getopt.
-%%% @end
-%%%
-%%% This source file is subject to the New BSD License. You should have received
-%%% a copy of the New BSD license with this software. If not, it can be
-%%% retrieved from: http://www.opensource.org/licenses/bsd-license.php
-%%%-------------------------------------------------------------------
--module(weatherreport_getopt).
--author('juanjo@comellas.org').
-
--export([parse/2, usage/2, usage/3, usage/4]).
-
--export_type([
- arg_type/0,
- arg_value/0,
- arg_spec/0,
- simple_option/0,
- compound_option/0,
- option/0,
- option_spec/0
-]).
-
--define(TAB_LENGTH, 8).
-%% Indentation of the help messages in number of tabs.
--define(INDENTATION, 3).
-
-%% Position of each field in the option specification tuple.
--define(OPT_NAME, 1).
--define(OPT_SHORT, 2).
--define(OPT_LONG, 3).
--define(OPT_ARG, 4).
--define(OPT_HELP, 5).
-
--define(IS_OPT_SPEC(Opt), (tuple_size(Opt) =:= ?OPT_HELP)).
-
-%% Atom indicating the data type that an argument can be converted to.
--type arg_type() :: 'atom' | 'binary' | 'boolean' | 'float' | 'integer' | 'string'.
-%% Data type that an argument can be converted to.
--type arg_value() :: atom() | binary() | boolean() | float() | integer() | string().
-%% Argument specification.
--type arg_spec() :: arg_type() | {arg_type(), arg_value()} | undefined.
-%% Option type and optional default argument.
--type simple_option() :: atom().
--type compound_option() :: {atom(), arg_value()}.
--type option() :: simple_option() | compound_option().
-%% Command line option specification.
--type option_spec() :: {
- Name :: atom(),
- Short :: char() | undefined,
- Long :: string() | undefined,
- ArgSpec :: arg_spec(),
- Help :: string() | undefined
-}.
-%% Output streams
--type output_stream() :: 'standard_io' | 'standard_error'.
-
-%% @doc Parse the command line options and arguments returning a list of tuples
-%% and/or atoms using the Erlang convention for sending options to a
-%% function.
--spec parse([option_spec()], string() | [string()]) ->
- {ok, {[option()], [string()]}} | {error, {Reason :: atom(), Data :: any()}}.
-parse(OptSpecList, CmdLine) ->
- try
- Args =
- if
- is_integer(hd(CmdLine)) ->
- string:tokens(CmdLine, " \t\n");
- true ->
- CmdLine
- end,
- parse(OptSpecList, [], [], 0, Args)
- catch
- throw:{error, {_Reason, _Data}} = Error ->
- Error
- end.
-
--spec parse([option_spec()], [option()], [string()], integer(), [string()]) ->
- {ok, {[option()], [string()]}}.
-%% Process the option terminator.
-parse(OptSpecList, OptAcc, ArgAcc, _ArgPos, ["--" | Tail]) ->
- %% Any argument present after the terminator is not considered an option.
- {ok, {lists:reverse(append_default_options(OptSpecList, OptAcc)), lists:reverse(ArgAcc, Tail)}};
-%% Process long options.
-parse(OptSpecList, OptAcc, ArgAcc, ArgPos, ["--" ++ OptArg = OptStr | Tail]) ->
- parse_long_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Tail, OptStr, OptArg);
-%% Process short options.
-parse(OptSpecList, OptAcc, ArgAcc, ArgPos, ["-" ++ ([_Char | _] = OptArg) = OptStr | Tail]) ->
- parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Tail, OptStr, OptArg);
-%% Process non-option arguments.
-parse(OptSpecList, OptAcc, ArgAcc, ArgPos, [Arg | Tail]) ->
- case find_non_option_arg(OptSpecList, ArgPos) of
- {value, OptSpec} when ?IS_OPT_SPEC(OptSpec) ->
- parse(OptSpecList, add_option_with_arg(OptSpec, Arg, OptAcc), ArgAcc, ArgPos + 1, Tail);
- false ->
- parse(OptSpecList, OptAcc, [Arg | ArgAcc], ArgPos, Tail)
- end;
-parse(OptSpecList, OptAcc, ArgAcc, _ArgPos, []) ->
- %% Once we have completed gathering the options we add the ones that were
- %% not present but had default arguments in the specification.
- {ok, {lists:reverse(append_default_options(OptSpecList, OptAcc)), lists:reverse(ArgAcc)}}.
-
-%% @doc Parse a long option, add it to the option accumulator and continue
-%% parsing the rest of the arguments recursively.
-%% A long option can have the following syntax:
-%% --foo Single option 'foo', no argument
-%% --foo=bar Single option 'foo', argument "bar"
-%% --foo bar Single option 'foo', argument "bar"
--spec parse_long_option(
- [option_spec()], [option()], [string()], integer(), [string()], string(), string()
-) ->
- {ok, {[option()], [string()]}}.
-parse_long_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, OptArg) ->
- case split_assigned_arg(OptArg) of
- {Long, Arg} ->
- %% Get option that has its argument within the same string
- %% separated by an equal ('=') character (e.g. "--port=1000").
- parse_long_option_assigned_arg(
- OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, Long, Arg
- );
- Long ->
- case lists:keyfind(Long, ?OPT_LONG, OptSpecList) of
- {Name, _Short, Long, undefined, _Help} ->
- parse(OptSpecList, [Name | OptAcc], ArgAcc, ArgPos, Args);
- {_Name, _Short, Long, _ArgSpec, _Help} = OptSpec ->
- %% The option argument string is empty, but the option requires
- %% an argument, so we look into the next string in the list.
- %% e.g ["--port", "1000"]
- parse_long_option_next_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptSpec);
- false ->
- throw({error, {invalid_option, OptStr}})
- end
- end.
-
-%% @doc Parse an option where the argument is 'assigned' in the same string using
-%% the '=' character, add it to the option accumulator and continue parsing the
-%% rest of the arguments recursively. This syntax is only valid for long options.
--spec parse_long_option_assigned_arg(
- [option_spec()],
- [option()],
- [string()],
- integer(),
- [string()],
- string(),
- string(),
- string()
-) ->
- {ok, {[option()], [string()]}}.
-parse_long_option_assigned_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, Long, Arg) ->
- case lists:keyfind(Long, ?OPT_LONG, OptSpecList) of
- {_Name, _Short, Long, ArgSpec, _Help} = OptSpec ->
- case ArgSpec of
- undefined ->
- throw({error, {invalid_option_arg, OptStr}});
- _ ->
- parse(
- OptSpecList,
- add_option_with_assigned_arg(OptSpec, Arg, OptAcc),
- ArgAcc,
- ArgPos,
- Args
- )
- end;
- false ->
- throw({error, {invalid_option, OptStr}})
- end.
-
-%% @doc Split an option string that may contain an option with its argument
-%% separated by an equal ('=') character (e.g. "port=1000").
--spec split_assigned_arg(string()) -> {Name :: string(), Arg :: string()} | string().
-split_assigned_arg(OptStr) ->
- split_assigned_arg(OptStr, OptStr, []).
-
-split_assigned_arg(_OptStr, "=" ++ Tail, Acc) ->
- {lists:reverse(Acc), Tail};
-split_assigned_arg(OptStr, [Char | Tail], Acc) ->
- split_assigned_arg(OptStr, Tail, [Char | Acc]);
-split_assigned_arg(OptStr, [], _Acc) ->
- OptStr.
-
-%% @doc Retrieve the argument for an option from the next string in the list of
-%% command-line parameters or set the value of the argument from the argument
-%% specification (for boolean and integer arguments), if possible.
-parse_long_option_next_arg(
- OptSpecList, OptAcc, ArgAcc, ArgPos, Args, {Name, _Short, _Long, ArgSpec, _Help} = OptSpec
-) ->
- ArgSpecType = arg_spec_type(ArgSpec),
- case Args =:= [] orelse is_implicit_arg(ArgSpecType, hd(Args)) of
- true ->
- parse(OptSpecList, add_option_with_implicit_arg(OptSpec, OptAcc), ArgAcc, ArgPos, Args);
- false ->
- [Arg | Tail] = Args,
- try
- parse(
- OptSpecList, [{Name, to_type(ArgSpecType, Arg)} | OptAcc], ArgAcc, ArgPos, Tail
- )
- catch
- error:_ ->
- throw({error, {invalid_option_arg, {Name, Arg}}})
- end
- end.
-
-%% @doc Parse a short option, add it to the option accumulator and continue
-%% parsing the rest of the arguments recursively.
-%% A short option can have the following syntax:
-%% -a Single option 'a', no argument or implicit boolean argument
-%% -a foo Single option 'a', argument "foo"
-%% -afoo Single option 'a', argument "foo"
-%% -abc Multiple options: 'a'; 'b'; 'c'
-%% -bcafoo Multiple options: 'b'; 'c'; 'a' with argument "foo"
-%% -aaa Multiple repetitions of option 'a' (only valid for options with integer arguments)
--spec parse_short_option(
- [option_spec()], [option()], [string()], integer(), [string()], string(), string()
-) ->
- {ok, {[option()], [string()]}}.
-parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, OptArg) ->
- parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, first, OptArg).
-
-parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, OptPos, [Short | Arg]) ->
- case lists:keyfind(Short, ?OPT_SHORT, OptSpecList) of
- {Name, Short, _Long, undefined, _Help} ->
- parse_short_option(
- OptSpecList, [Name | OptAcc], ArgAcc, ArgPos, Args, OptStr, first, Arg
- );
- {_Name, Short, _Long, ArgSpec, _Help} = OptSpec ->
- %% The option has a specification, so it requires an argument.
- case Arg of
- [] ->
- %% The option argument string is empty, but the option requires
- %% an argument, so we look into the next string in the list.
- parse_short_option_next_arg(
- OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptSpec, OptPos
- );
- _ ->
- case is_valid_arg(ArgSpec, Arg) of
- true ->
- parse(
- OptSpecList,
- add_option_with_arg(OptSpec, Arg, OptAcc),
- ArgAcc,
- ArgPos,
- Args
- );
- _ ->
- NewOptAcc =
- case OptPos of
- first -> add_option_with_implicit_arg(OptSpec, OptAcc);
- _ -> add_option_with_implicit_incrementable_arg(OptSpec, OptAcc)
- end,
- parse_short_option(
- OptSpecList, NewOptAcc, ArgAcc, ArgPos, Args, OptStr, next, Arg
- )
- end
- end;
- false ->
- throw({error, {invalid_option, OptStr}})
- end;
-parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, _OptStr, _OptPos, []) ->
- parse(OptSpecList, OptAcc, ArgAcc, ArgPos, Args).
-
-%% @doc Retrieve the argument for an option from the next string in the list of
-%% command-line parameters or set the value of the argument from the argument
-%% specification (for boolean and integer arguments), if possible.
-parse_short_option_next_arg(
- OptSpecList,
- OptAcc,
- ArgAcc,
- ArgPos,
- Args,
- {Name, _Short, _Long, ArgSpec, _Help} = OptSpec,
- OptPos
-) ->
- case Args =:= [] orelse is_implicit_arg(ArgSpec, hd(Args)) of
- true when OptPos =:= first ->
- parse(OptSpecList, add_option_with_implicit_arg(OptSpec, OptAcc), ArgAcc, ArgPos, Args);
- true ->
- parse(
- OptSpecList,
- add_option_with_implicit_incrementable_arg(OptSpec, OptAcc),
- ArgAcc,
- ArgPos,
- Args
- );
- false ->
- [Arg | Tail] = Args,
- try
- parse(OptSpecList, [{Name, to_type(ArgSpec, Arg)} | OptAcc], ArgAcc, ArgPos, Tail)
- catch
- error:_ ->
- throw({error, {invalid_option_arg, {Name, Arg}}})
- end
- end.
-
-%% @doc Find the option for the discrete argument in the position specified by
-%% the Pos argument.
--spec find_non_option_arg([option_spec()], integer()) -> {value, option_spec()} | false.
-find_non_option_arg([{_Name, undefined, undefined, _ArgSpec, _Help} = OptSpec | _Tail], 0) ->
- {value, OptSpec};
-find_non_option_arg([{_Name, undefined, undefined, _ArgSpec, _Help} | Tail], Pos) ->
- find_non_option_arg(Tail, Pos - 1);
-find_non_option_arg([_Head | Tail], Pos) ->
- find_non_option_arg(Tail, Pos);
-find_non_option_arg([], _Pos) ->
- false.
-
-%% @doc Append options that were not present in the command line arguments with
-%% their default arguments.
--spec append_default_options([option_spec()], [option()]) -> [option()].
-append_default_options([{Name, _Short, _Long, {_Type, DefaultArg}, _Help} | Tail], OptAcc) ->
- append_default_options(
- Tail,
- case lists:keymember(Name, 1, OptAcc) of
- false ->
- [{Name, DefaultArg} | OptAcc];
- _ ->
- OptAcc
- end
- );
-%% For options with no default argument.
-append_default_options([_Head | Tail], OptAcc) ->
- append_default_options(Tail, OptAcc);
-append_default_options([], OptAcc) ->
- OptAcc.
-
-%% @doc Add an option with argument converting it to the data type indicated by the
-%% argument specification.
--spec add_option_with_arg(option_spec(), string(), [option()]) -> [option()].
-add_option_with_arg({Name, _Short, _Long, ArgSpec, _Help} = OptSpec, Arg, OptAcc) ->
- case is_valid_arg(ArgSpec, Arg) of
- true ->
- try
- [{Name, to_type(ArgSpec, Arg)} | OptAcc]
- catch
- error:_ ->
- throw({error, {invalid_option_arg, {Name, Arg}}})
- end;
- false ->
- add_option_with_implicit_arg(OptSpec, OptAcc)
- end.
-
-%% @doc Add an option with argument that was part of an assignment expression
-%% (e.g. "--verbose=3") converting it to the data type indicated by the
-%% argument specification.
--spec add_option_with_assigned_arg(option_spec(), string(), [option()]) -> [option()].
-add_option_with_assigned_arg({Name, _Short, _Long, ArgSpec, _Help}, Arg, OptAcc) ->
- try
- [{Name, to_type(ArgSpec, Arg)} | OptAcc]
- catch
- error:_ ->
- throw({error, {invalid_option_arg, {Name, Arg}}})
- end.
-
-%% @doc Add an option that required an argument but did not have one. Some data
-%% types (boolean, integer) allow implicit or assumed arguments.
--spec add_option_with_implicit_arg(option_spec(), [option()]) -> [option()].
-add_option_with_implicit_arg({Name, _Short, _Long, ArgSpec, _Help}, OptAcc) ->
- case arg_spec_type(ArgSpec) of
- boolean ->
- %% Special case for boolean arguments: if there is no argument we
- %% set the value to 'true'.
- [{Name, true} | OptAcc];
- integer ->
- %% Special case for integer arguments: if the option had not been set
- %% before we set the value to 1. This is needed to support options like
- %% "-v" to return something like {verbose, 1}.
- [{Name, 1} | OptAcc];
- _ ->
- throw({error, {missing_option_arg, Name}})
- end.
-
-%% @doc Add an option with an implicit or assumed argument.
--spec add_option_with_implicit_incrementable_arg(option_spec() | arg_spec(), [option()]) ->
- [option()].
-add_option_with_implicit_incrementable_arg({Name, _Short, _Long, ArgSpec, _Help}, OptAcc) ->
- case arg_spec_type(ArgSpec) of
- boolean ->
- %% Special case for boolean arguments: if there is no argument we
- %% set the value to 'true'.
- [{Name, true} | OptAcc];
- integer ->
-            %% Special case for integer arguments: if the option had not been set
-            %% before, we set the value to 1; otherwise we increment the previous
-            %% value the option had. This is needed to support options like "-vvv"
-            %% to return something like {verbose, 3}.
- case OptAcc of
- [{Name, Count} | Tail] ->
- [{Name, Count + 1} | Tail];
- _ ->
- [{Name, 1} | OptAcc]
- end;
- _ ->
- throw({error, {missing_option_arg, Name}})
- end.
-
-%% @doc Retrieve the data type from an argument specification.
--spec arg_spec_type(arg_spec()) -> arg_type() | undefined.
-arg_spec_type({Type, _DefaultArg}) ->
- Type;
-arg_spec_type(Type) when is_atom(Type) ->
- Type.
-
-%% @doc Convert an argument string to its corresponding data type.
--spec to_type(arg_spec() | arg_type(), string()) -> arg_value().
-to_type({Type, _DefaultArg}, Arg) ->
- to_type(Type, Arg);
-to_type(binary, Arg) ->
- list_to_binary(Arg);
-to_type(atom, Arg) ->
- list_to_atom(Arg);
-to_type(integer, Arg) ->
- list_to_integer(Arg);
-to_type(float, Arg) ->
- list_to_float(Arg);
-to_type(boolean, Arg) ->
- LowerArg = string:to_lower(Arg),
- case is_arg_true(LowerArg) of
- true ->
- true;
- _ ->
- case is_arg_false(LowerArg) of
- true ->
- false;
- false ->
- erlang:error(badarg)
- end
- end;
-to_type(_Type, Arg) ->
- Arg.
-
--spec is_arg_true(string()) -> boolean().
-is_arg_true(Arg) ->
- (Arg =:= "true") orelse (Arg =:= "t") orelse
- (Arg =:= "yes") orelse (Arg =:= "y") orelse
- (Arg =:= "on") orelse (Arg =:= "enabled") orelse
- (Arg =:= "1").
-
--spec is_arg_false(string()) -> boolean().
-is_arg_false(Arg) ->
- (Arg =:= "false") orelse (Arg =:= "f") orelse
- (Arg =:= "no") orelse (Arg =:= "n") orelse
- (Arg =:= "off") orelse (Arg =:= "disabled") orelse
- (Arg =:= "0").
-
--spec is_valid_arg(arg_spec(), nonempty_string()) -> boolean().
-is_valid_arg({Type, _DefaultArg}, Arg) ->
- is_valid_arg(Type, Arg);
-is_valid_arg(boolean, Arg) ->
- is_boolean_arg(Arg);
-is_valid_arg(integer, Arg) ->
- is_non_neg_integer_arg(Arg);
-is_valid_arg(float, Arg) ->
- is_non_neg_float_arg(Arg);
-is_valid_arg(_Type, _Arg) ->
- true.
-
--spec is_implicit_arg(arg_spec(), nonempty_string()) -> boolean().
-is_implicit_arg({Type, _DefaultArg}, Arg) ->
- is_implicit_arg(Type, Arg);
-is_implicit_arg(boolean, Arg) ->
- not is_boolean_arg(Arg);
-is_implicit_arg(integer, Arg) ->
- not is_integer_arg(Arg);
-is_implicit_arg(_Type, _Arg) ->
- false.
-
--spec is_boolean_arg(string()) -> boolean().
-is_boolean_arg(Arg) ->
- LowerArg = string:to_lower(Arg),
- is_arg_true(LowerArg) orelse is_arg_false(LowerArg).
-
--spec is_integer_arg(string()) -> boolean().
-is_integer_arg("-" ++ Tail) ->
- is_non_neg_integer_arg(Tail);
-is_integer_arg(Arg) ->
- is_non_neg_integer_arg(Arg).
-
--spec is_non_neg_integer_arg(string()) -> boolean().
-is_non_neg_integer_arg([Head | Tail]) when Head >= $0, Head =< $9 ->
- is_non_neg_integer_arg(Tail);
-is_non_neg_integer_arg([_Head | _Tail]) ->
- false;
-is_non_neg_integer_arg([]) ->
- true.
-
--spec is_non_neg_float_arg(string()) -> boolean().
-is_non_neg_float_arg([Head | Tail]) when (Head >= $0 andalso Head =< $9) orelse Head =:= $. ->
- is_non_neg_float_arg(Tail);
-is_non_neg_float_arg([_Head | _Tail]) ->
- false;
-is_non_neg_float_arg([]) ->
- true.
-
-%% @doc Show a message on standard_error indicating the command line options and
-%% arguments that are supported by the program.
--spec usage([option_spec()], string()) -> ok.
-usage(OptSpecList, ProgramName) ->
- usage(OptSpecList, ProgramName, standard_error).
-
-%% @doc Show a message on standard_error or standard_io indicating the command line options and
-%% arguments that are supported by the program.
--spec usage([option_spec()], string(), output_stream() | string()) -> ok.
-usage(OptSpecList, ProgramName, OutputStream) when is_atom(OutputStream) ->
- io:format(
- OutputStream,
- "Usage: ~s~s~n~n~s~n",
- [ProgramName, usage_cmd_line(OptSpecList), usage_options(OptSpecList)]
- );
-%% @doc Show a message on standard_error indicating the command line options and
-%% arguments that are supported by the program. The CmdLineTail argument
-%% is a string that is added to the end of the usage command line.
-usage(OptSpecList, ProgramName, CmdLineTail) ->
- usage(OptSpecList, ProgramName, CmdLineTail, standard_error).
-
-%% @doc Show a message on standard_error or standard_io indicating the command line options and
-%% arguments that are supported by the program. The CmdLineTail argument
-%% is a string that is added to the end of the usage command line.
--spec usage([option_spec()], string(), string(), output_stream() | [{string(), string()}]) -> ok.
-usage(OptSpecList, ProgramName, CmdLineTail, OutputStream) when is_atom(OutputStream) ->
- io:format(
- OutputStream,
- "Usage: ~s~s ~s~n~n~s~n",
- [ProgramName, usage_cmd_line(OptSpecList), CmdLineTail, usage_options(OptSpecList)]
- );
-%% @doc Show a message on standard_error indicating the command line options and
-%% arguments that are supported by the program. The CmdLineTail and OptionsTail
-%% arguments are a string that is added to the end of the usage command line
-%% and a list of tuples that are added to the end of the options' help lines.
-usage(OptSpecList, ProgramName, CmdLineTail, OptionsTail) ->
- usage(OptSpecList, ProgramName, CmdLineTail, OptionsTail, standard_error).
-
-%% @doc Show a message on standard_error or standard_io indicating the command line options and
-%% arguments that are supported by the program. The CmdLineTail and OptionsTail
-%% arguments are a string that is added to the end of the usage command line
-%% and a list of tuples that are added to the end of the options' help lines.
--spec usage([option_spec()], string(), string(), [{string(), string()}], output_stream()) -> ok.
-usage(OptSpecList, ProgramName, CmdLineTail, OptionsTail, OutputStream) ->
- UsageOptions = lists:foldl(
- fun({Prefix, Help}, Acc) ->
- add_option_help(Prefix, Help, Acc)
- end,
- usage_options_reverse(OptSpecList, []),
- OptionsTail
- ),
- io:format(
- OutputStream,
- "Usage: ~s~s ~s~n~n~s~n",
- [
- ProgramName,
- usage_cmd_line(OptSpecList),
- CmdLineTail,
- lists:flatten(lists:reverse(UsageOptions))
- ]
- ).
-
-%% @doc Return a string with the syntax for the command line options and
-%% arguments.
--spec usage_cmd_line([option_spec()]) -> string().
-usage_cmd_line(OptSpecList) ->
- usage_cmd_line(OptSpecList, []).
-
-usage_cmd_line([{Name, Short, Long, ArgSpec, _Help} | Tail], Acc) ->
- CmdLine =
- case ArgSpec of
- undefined ->
- if
- %% For options with short form and no argument.
- Short =/= undefined ->
- [$\s, $[, $-, Short, $]];
- %% For options with only long form and no argument.
- Long =/= undefined ->
- [$\s, $[, $-, $-, Long, $]];
- true ->
- []
- end;
- _ ->
- if
- %% For options with short form and argument.
- Short =/= undefined ->
- [$\s, $[, $-, Short, $\s, $<, atom_to_list(Name), $>, $]];
- %% For options with only long form and argument.
- Long =/= undefined ->
- [$\s, $[, $-, $-, Long, $\s, $<, atom_to_list(Name), $>, $]];
- %% For options with neither short nor long form and argument.
- true ->
- [$\s, $<, atom_to_list(Name), $>]
- end
- end,
- usage_cmd_line(Tail, [CmdLine | Acc]);
-usage_cmd_line([], Acc) ->
- lists:flatten(lists:reverse(Acc)).
-
-%% @doc Return a string with the help message for each of the options and
-%% arguments.
--spec usage_options([option_spec()]) -> string().
-usage_options(OptSpecList) ->
- lists:flatten(lists:reverse(usage_options_reverse(OptSpecList, []))).
-
-usage_options_reverse([{Name, Short, Long, _ArgSpec, Help} | Tail], Acc) ->
- Prefix =
- case Long of
- undefined ->
- case Short of
- %% Neither short nor long form (non-option argument).
- undefined ->
- [$<, atom_to_list(Name), $>];
- %% Only short form.
- _ ->
- [$-, Short]
- end;
- _ ->
- case Short of
- %% Only long form.
- undefined ->
- [$-, $- | Long];
- %% Both short and long form.
- _ ->
- [$-, Short, $,, $\s, $-, $- | Long]
- end
- end,
- usage_options_reverse(Tail, add_option_help(Prefix, Help, Acc));
-usage_options_reverse([], Acc) ->
- Acc.
-
-%% @doc Add the help message corresponding to an option specification to a list
-%% with the correct indentation.
--spec add_option_help(Prefix :: string(), Help :: string(), Acc :: string()) -> string().
-add_option_help(Prefix, Help, Acc) when is_list(Help), Help =/= [] ->
- FlatPrefix = lists:flatten(Prefix),
- case ((?INDENTATION * ?TAB_LENGTH) - 2 - length(FlatPrefix)) of
- TabSize when TabSize > 0 ->
- Tab = lists:duplicate(ceiling(TabSize / ?TAB_LENGTH), $\t),
- [[$\s, $\s, FlatPrefix, Tab, Help, $\n] | Acc];
- _ ->
- % The indentation for the option description is 3 tabs (i.e. 24 characters)
- % IMPORTANT: Change the number of tabs below if you change the
- % value of the INDENTATION macro.
- [[$\t, $\t, $\t, Help, $\n], [$\s, $\s, FlatPrefix, $\n] | Acc]
- end;
-add_option_help(_Opt, _Prefix, Acc) ->
- Acc.
-
-%% @doc Return the smallest integral value not less than the argument.
--spec ceiling(float()) -> integer().
-ceiling(X) ->
- T = erlang:trunc(X),
- case (X - T) of
- % Neg when Neg < 0 ->
- % T;
- Pos when Pos > 0 ->
- T + 1;
- _ ->
- T
- end.
diff --git a/src/weatherreport/src/weatherreport_log.erl b/src/weatherreport/src/weatherreport_log.erl
deleted file mode 100644
index 7a511aa1c..000000000
--- a/src/weatherreport/src/weatherreport_log.erl
+++ /dev/null
@@ -1,78 +0,0 @@
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-
--module(weatherreport_log).
--export([
- level/1,
- log/3,
- log/4,
- should_log/1
-]).
-
-level(debug) ->
- 7;
-level(info) ->
- 6;
-level(notice) ->
- 5;
-level(warn) ->
- 4;
-level(warning) ->
- 4;
-level(err) ->
- 3;
-level(error) ->
- 3;
-level(crit) ->
- 2;
-level(alert) ->
- 1;
-level(emerg) ->
- 0;
-level(panic) ->
- 0;
-level(I) when is_integer(I), I >= 0, I =< 7 ->
- I;
-level(_BadLevel) ->
- 3.
-
-log(Node, Level, Format, Terms) ->
- case should_log(Level) of
- true ->
- Prefix = get_prefix(Node, Level),
- Message = io_lib:format(Format, Terms),
- io:format("~s ~s~n", [Prefix, Message]);
- false ->
- ok
- end.
-
-log(Node, Level, String) ->
- case should_log(Level) of
- true ->
- Prefix = get_prefix(Node, Level),
- io:format("~s ~s~n", [Prefix, String]);
- false ->
- ok
- end.
-
-should_log(Level) ->
- AppLevel =
- case application:get_env(weatherreport, log_level) of
- undefined -> info;
- {ok, L0} -> L0
- end,
- level(AppLevel) >= level(Level).
-
-get_prefix(Node, Level) ->
- io_lib:format("[~w] [~w]", [Node, Level]).
diff --git a/src/weatherreport/src/weatherreport_node.erl b/src/weatherreport/src/weatherreport_node.erl
deleted file mode 100644
index d108d0f7f..000000000
--- a/src/weatherreport/src/weatherreport_node.erl
+++ /dev/null
@@ -1,221 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic_node.erl to weatherreport_node.erl and
-%% modified to work with Apache CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Functions that help diagnostics interact with the local
-%% node or other members of the cluster.
--module(weatherreport_node).
-
--export([
- can_connect/0,
- can_connect_all/0,
- pid/0,
- local_command/2,
- local_command/3,
- local_command/4,
- multicall/5,
- nodename/0
-]).
-
-%% @doc Calls the given module and 0-arity function on the local
-%% node and returns the result of that call.
-%% @equiv local_command(Module, Function, [])
-%% @see can_connect/0.
--spec local_command(Module :: atom(), Function :: atom()) -> term().
-local_command(Module, Function) ->
- local_command(Module, Function, []).
-
-%% @doc Calls the given module and function with the given arguments
-%% on the local node and returns the result of that call.
-%% @equiv local_command(Module, Function, Args, 5000)
-%% @see can_connect/0
--spec local_command(Module :: atom(), Function :: atom(), Args :: [term()]) -> term().
-local_command(Module, Function, Args) ->
- local_command(Module, Function, Args, weatherreport_config:timeout()).
-
-%% @doc Calls the given module and function with the given arguments
-%% on the local node and returns the result of that call,
-%% returning an error if the call doesn't complete within the given
-%% timeout.
-%% @equiv rpc:call(NodeName, Module, Function, Args, Timeout)
-%% @see can_connect/0
--spec local_command(Module :: atom(), Function :: atom(), Args :: [term()], Timeout :: integer()) ->
- term().
-local_command(Module, Function, Args, Timeout) ->
- case is_cluster_node() of
- true ->
- weatherreport_log:log(
- node(),
- debug,
- "Local function call: ~p:~p(~p)",
- [Module, Function, Args]
- ),
- erlang:apply(Module, Function, Args);
- _ ->
- weatherreport_log:log(
- node(),
- debug,
- "Local RPC: ~p:~p(~p) [~p]",
- [Module, Function, Args, Timeout]
- ),
- rpc:call(nodename(), Module, Function, Args, Timeout)
- end.
-
-%% @doc Call rpc:multicall/5 from the local cluster node rather than the
-%% escript.
--spec multicall(
- [node()], Module :: atom(), Function :: atom(), Args :: [term()], Timeout :: integer()
-) -> term().
-multicall(Nodes, Module, Function, Args, Timeout) ->
- case local_command(rpc, multicall, [Nodes, Module, Function, Args, Timeout]) of
- {badrpc, Reason} ->
- {[{badrpc, Reason}], []};
- Resp ->
- Resp
- end.
-
-%% @doc Retrieves the operating system's process ID of the local
-%% node.
-%% @equiv local_command(os, getpid)
-%% @see can_connect/0
--spec pid() -> string().
-pid() ->
- local_command(os, getpid).
-
-%% @doc Attempts to connect to the local node if it is not
-%% already connected, and returns whether the connection was successful.
--spec can_connect() -> true | false.
-can_connect() ->
- case is_connected() or is_cluster_node() of
- true ->
- true;
- false ->
- weatherreport_log:log(
- node(),
- debug,
- "Not connected to the local cluster node, trying to connect. alive:~p connect_failed:~p",
- [is_alive(), connect_failed()]
- ),
- maybe_connect()
- end.
-
--spec can_connect_all() -> true | false.
-can_connect_all() ->
- case is_connected() of
- true ->
- case weatherreport_check_nodes_connected:check([]) of
- [] -> true;
- _ -> false
- end;
- false ->
- false
- end.
-
-nodename() ->
- Name =
- case weatherreport_config:node_name() of
- undefined ->
- atom_to_list(node());
- {_, NodeName} ->
- NodeName
- end,
- case string:tokens(Name, "@") of
- [_Node, _Host] ->
- list_to_atom(Name);
- [Node] ->
- [_, Host] = string:tokens(atom_to_list(node()), "@"),
- list_to_atom(lists:concat([Node, "@", Host]))
- end.
-
-%% Private functions
-is_cluster_node() ->
- nodename() =:= node().
-
-is_connected() ->
- is_alive() andalso connect_failed() =/= true.
-
-maybe_connect() ->
- case connect_failed() of
- true -> false;
- _ -> try_connect()
- end.
-
-try_connect() ->
- TargetNode = nodename(),
- case is_alive() of
- true -> ok;
- _ -> start_net()
- end,
- case {net_kernel:hidden_connect_node(TargetNode), net_adm:ping(TargetNode)} of
- {true, pong} ->
- application:set_env(weatherreport, connect_failed, false),
- weatherreport_log:log(
- node(),
- debug,
- "Connected to local cluster node ~p.",
- [TargetNode]
- ),
- true;
- _ ->
- application:set_env(weatherreport, connect_failed, true),
- weatherreport_log:log(
- node(),
- warning,
- "Could not connect to the local cluster node ~p, some checks will not run.",
- [TargetNode]
- ),
- false
- end.
-
-connect_failed() ->
- case application:get_env(weatherreport, connect_failed) of
- {ok, true} -> true;
- undefined -> undefined;
- _ -> false
- end.
-
-start_net() ->
- weatherreport_log:log(node(), debug, "Starting distributed Erlang."),
- {Type, NodeName} = weatherreport_config:node_name(),
- ThisNode = append_node_suffix(NodeName, "_diag"),
- {ok, _} = net_kernel:start([ThisNode, Type]),
- case weatherreport_config:cookie() of
- undefined ->
- % Don't set cookie to undefined so we can pick up the ~/.erlang.cookie
- ok;
- Cookie when is_atom(Cookie) ->
- erlang:set_cookie(node(), Cookie)
- end.
-
-append_node_suffix(Name, Suffix) ->
- case string:tokens(Name, "@") of
- [Node, Host] ->
- list_to_atom(lists:concat([Node, Suffix, os:getpid(), "@", Host]));
- [Node] ->
- list_to_atom(lists:concat([Node, Suffix, os:getpid()]))
- end.
diff --git a/src/weatherreport/src/weatherreport_runner.erl b/src/weatherreport/src/weatherreport_runner.erl
deleted file mode 100644
index 77518d690..000000000
--- a/src/weatherreport/src/weatherreport_runner.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% weatherreport - automated diagnostic tools for CouchDB
-%%
-%% Copyright (c) 2014 Cloudant
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc <p>The <code>weatherreport_runner</code> module provides
-%% utility functions for running checks either on a single node or
-%% multiple nodes.
-
--module(weatherreport_runner).
-
--export([run/1, run/2, format/1]).
-
-%% @doc Run the supplied list of checks on the local node
--spec run([Module :: atom()]) -> [tuple()].
-run(Checks) ->
- weatherreport_node:can_connect(),
- run(Checks, [weatherreport_node:nodename()]).
-
-%% @doc Run the supplied list of checks on the supplied list of cluster nodes
--spec run([Module :: atom()], [node()] | all) -> [tuple()].
-run(Checks, all) ->
- weatherreport_node:can_connect(),
- case weatherreport_node:local_command(mem3, nodes, []) of
- ClusterNodes when is_list(ClusterNodes) ->
- run(Checks, ClusterNodes);
- Error ->
- [{node(), critical, weatherreport_runner, {checks_failed, Error}}]
- end;
-run(Checks, Nodes) ->
- CheckOpts = get_check_options(),
- lists:flatten(
- lists:foldl(
- fun(Mod, Acc) ->
- {Resps, BadNodes} = weatherreport_node:multicall(
- Nodes,
- erlang,
- apply,
- [fun() -> {node(), weatherreport_check:check(Mod, CheckOpts)} end, []],
- weatherreport_config:timeout()
- ),
- TransformFailedCheck = fun(Node) ->
- {node(), crit, weatherreport_runner, {check_failed, Mod, Node}}
- end,
- FailedChecks = [TransformFailedCheck(Node) || Node <- BadNodes],
- TransformResponse = fun
- ({badrpc, Error}) ->
- [{node(), crit, weatherreport_runner, {badrpc, Mod, Error}}];
- ({Node, Messages}) ->
- [{Node, Lvl, Module, Msg} || {Lvl, Module, Msg} <- Messages]
- end,
- Responses = [TransformResponse(Resp) || Resp <- Resps],
- [Responses ++ FailedChecks | Acc]
- end,
- [],
- Checks
- )
- ).
-
-%% @doc Part of the weatherreport_check behaviour. This means that any messages
-%% returned by this module can be handled via the existing message reporting
-%% code.
-format({checks_failed, Error}) ->
- {"Could not run checks - received error: ~w", [Error]};
-format({check_failed, Check, Node}) ->
- {"Could not run check ~w on cluster node ~w", [Check, Node]};
-format({badrpc, Check, Error}) ->
- {"Bad rpc call executing check ~w: ~w", [Check, Error]}.
-
-%% Private functions
-get_check_options() ->
- Expert =
- case application:get_env(weatherreport, expert) of
- {ok, true} ->
- true;
- _ ->
- false
- end,
- [{expert, Expert}].
diff --git a/src/weatherreport/src/weatherreport_util.erl b/src/weatherreport/src/weatherreport_util.erl
deleted file mode 100644
index ef42505e9..000000000
--- a/src/weatherreport/src/weatherreport_util.erl
+++ /dev/null
@@ -1,115 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% derived from riaknostic - automated diagnostic tools for Riak
-%%
-%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% File renamed from riaknostic_util.erl to weatherreport_util.erl
-%% Copyright (c) 2014 Cloudant
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Utility functions for weatherreport.
-%% @end
--module(weatherreport_util).
--export([
- short_name/1,
- run_command/1,
- binary_to_float/1,
- flush_stdout/0,
- check_proc_count/3
-]).
-
-%% @doc Converts a check module name into a short name that can be
-%% used to refer to a check on the command line. For example,
-%% <code>weatherreport_check_memory_use</code> becomes
-%% <code>"memory_use"</code>.
--spec short_name(module()) -> iodata() | unicode:charlist().
-short_name(Mod) when is_atom(Mod) ->
- re:replace(atom_to_list(Mod), "weatherreport_check_", "", [{return, list}]).
-
-%% @doc Runs a shell command and returns the output. stderr is
-%% redirected to stdout so its output will be included.
--spec run_command(Command :: iodata()) -> StdOut :: iodata().
-run_command(Command) ->
- weatherreport_log:log(
- node(),
- debug,
- "Running shell command: ~s",
- [Command]
- ),
- Port = erlang:open_port({spawn, Command}, [exit_status, stderr_to_stdout]),
- do_read(Port, []).
-
-do_read(Port, Acc) ->
- receive
- {Port, {data, StdOut}} ->
- weatherreport_log:log(
- node(),
- debug,
- "Shell command output: ~n~s~n",
- [StdOut]
- ),
- do_read(Port, Acc ++ StdOut);
- {Port, {exit_status, _}} ->
- %%port_close(Port),
- Acc;
- Other ->
- io:format("~w", [Other]),
- do_read(Port, Acc)
- end.
-
-%% @doc Converts a binary containing a text representation of a float
-%% into a float type.
--spec binary_to_float(binary()) -> float().
-binary_to_float(Bin) ->
- list_to_float(binary_to_list(Bin)).
-
-flush_stdout() ->
- timer:sleep(1000).
-
-%% @doc Utility function to check processes based on an attribute returned
-%% by recon:proc_count/2.
--spec check_proc_count(atom(), integer(), list()) -> [{atom(), term()}].
-check_proc_count(Key, Threshold, Opts) ->
- Processes = recon:proc_count(Key, 10),
- procs_to_messages(Processes, Threshold, [], Opts).
-
-%% @doc Utility function to convert the list of process info returned by
-%% recon:proc_count/2 into a list of diagnostic messages.
--spec procs_to_messages(list(), integer(), list(), list()) -> [{atom(), term()}].
-procs_to_messages([], _Threshold, Acc, _Opts) ->
- Acc;
-procs_to_messages([{Pid, Value, Info} | T], Threshold, Acc, Opts) ->
- Level =
- case Value > Threshold of
- true -> warning;
- _ -> info
- end,
- Message =
- case {Level, proplists:get_value(expert, Opts)} of
- {warning, true} ->
- Pinfo = recon:info(Pid),
- {warning, {high, {Pid, Value, Info, Pinfo}}};
- {warning, _} ->
- {warning, {high, {Pid, Value, Info}}};
- {info, _} ->
- {info, {ok, {Pid, Value, Info}}}
- end,
- procs_to_messages(T, Threshold, [Message | Acc], Opts).
diff --git a/support/build_js.escript b/support/build_js.escript
deleted file mode 100644
index 5f1e92015..000000000
--- a/support/build_js.escript
+++ /dev/null
@@ -1,90 +0,0 @@
-%% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*-
-%% ex: ft=erlang ts=4 sw=4 et
-
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-%%
-%%
-
--export([main/1]).
-
-
-main([]) ->
-
- CouchConfig = case filelib:is_file(os:getenv("COUCHDB_CONFIG")) of
- true ->
- {ok, Result} = file:consult(os:getenv("COUCHDB_CONFIG")),
- Result;
- false ->
- []
- end,
-
- SMVsn = case lists:keyfind(spidermonkey_version, 1, CouchConfig) of
- {_, Vsn} -> Vsn;
- undefined -> "1.8.5"
- end,
-
- JsFiles = [
- "share/server/json2.js",
- "share/server/dreyfus.js",
- "share/server/filter.js",
- "share/server/mimeparse.js",
- "share/server/render.js",
- "share/server/state.js",
- "share/server/util.js",
- "share/server/validate.js",
- "share/server/views.js",
- "share/server/loop.js"
- ],
-
- CoffeeFiles = [
- "share/server/json2.js",
- "share/server/dreyfus.js",
- "share/server/filter.js",
- "share/server/mimeparse.js",
- "share/server/render.js",
- "share/server/state.js",
- "share/server/util.js",
- "share/server/validate.js",
- "share/server/views.js",
- "share/server/coffee-script.js",
- "share/server/loop.js"
- ],
-
- ExtraFiles = case SMVsn of
- "1.8.5" ->
- [
- "share/server/rewrite_fun.js"
- ];
- _ ->
- [
- "share/server/60/esprima.js",
- "share/server/60/escodegen.js",
- "share/server/60/rewrite_fun.js"
- ]
- end,
-
- Pre = "(function () {\n",
- Post = "})();\n",
-
- Concat = fun(Files, To) ->
- AccBin = lists:foldl(fun(Path, Acc) ->
- {ok, Bin} = file:read_file(Path),
- [Bin | Acc]
- end, [], Files),
- FinalBin = iolist_to_binary(Pre ++ lists:reverse(AccBin) ++ Post),
- file:write_file(To, FinalBin)
- end,
-
- ok = Concat(ExtraFiles ++ JsFiles, "share/server/main.js"),
- ok = Concat(ExtraFiles ++ CoffeeFiles, "share/server/main-coffee.js"),
- ok.
diff --git a/test/bench/benchbulk.sh b/test/bench/benchbulk.sh
deleted file mode 100755
index 55c72e47f..000000000
--- a/test/bench/benchbulk.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash -e
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-
-# usage: time benchbulk.sh
-# it takes about 30 seconds to run on my old MacBook with bulksize 1000
-
-BULKSIZE=100
-DOCSIZE=10
-INSERTS=10
-ROUNDS=10
-DBURL="http://127.0.0.1:5984/benchbulk"
-POSTURL="$DBURL/_bulk_docs"
-
-function make_bulk_docs() {
- ROW=0
- SIZE=$(($1-1))
- START=$2
- BODYSIZE=$3
-
- BODY=$(printf "%0${BODYSIZE}d")
-
- echo '{"docs":['
- while [ $ROW -lt $SIZE ]; do
- printf '{"_id":"%020d", "body":"'$BODY'"},' $(($ROW + $START))
- let ROW=ROW+1
- done
- printf '{"_id":"%020d", "body":"'$BODY'"}' $(($ROW + $START))
- echo ']}'
-}
-
-echo "Making $INSERTS bulk inserts of $BULKSIZE docs each"
-
-echo "Attempt to delete db at $DBURL"
-curl -X DELETE $DBURL -w\\n
-
-echo "Attempt to create db at $DBURL"
-curl -X PUT $DBURL -w\\n
-
-echo "Running $ROUNDS rounds of $INSERTS concurrent inserts to $POSTURL"
-RUN=0
-while [ $RUN -lt $ROUNDS ]; do
-
- POSTS=0
- while [ $POSTS -lt $INSERTS ]; do
- STARTKEY=$[ POSTS * BULKSIZE + RUN * BULKSIZE * INSERTS ]
- echo "startkey $STARTKEY bulksize $BULKSIZE"
- DOCS=$(make_bulk_docs $BULKSIZE $STARTKEY $DOCSIZE)
- # echo $DOCS
- echo $DOCS | curl -T - -H Content-Type:application/json -X POST $POSTURL -w%{http_code}\ %{time_total}\ sec\\n >/dev/null 2>&1 &
- let POSTS=POSTS+1
- done
-
- echo "waiting"
- wait
- let RUN=RUN+1
-done
-
-curl $DBURL -w\\n
diff --git a/test/build/test-configure-distclean.sh b/test/build/test-configure-distclean.sh
deleted file mode 100755
index ed01faab2..000000000
--- a/test/build/test-configure-distclean.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-rm -rf apache-couchdb apache-couchdb-pristine
-./configure
-make release
-cp -r apache-couchdb apache-couchdb-pristine
-cd apache-couchdb
- ./configure
- make distclean
-cd ..
-
-echo "********************************************"
-echo "If you see anything here"
-diff -r apache-couchdb apache-couchdb-pristine
-echo "and here, something is wrong"
-echo "********************************************"
diff --git a/test/build/test-configure.sh b/test/build/test-configure.sh
deleted file mode 100755
index 1309f6f2e..000000000
--- a/test/build/test-configure.sh
+++ /dev/null
@@ -1,372 +0,0 @@
-#!/bin/sh
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# requires shunit2 to be in $PATH
-# http://shunit2.googlecode.com/
-# uses `checkbashisms` if in $PATH
-
-
-SHUNIT2=`which shunit2`
-
-if [ -z "$SHUNIT2" -o ! -x "$SHUNIT2" ]; then
- echo
- echo "Error: This test script requires the shunit2 script to be in \$PATH".
- echo "You can download shunit2 from http://shunit2.googlecode.com or via"
- echo "your preferred package manager."
- echo
- exit 1
-fi
-
-CHECKBASHISMS=`which checkbashisms`
-
-if [ -n "$CHECKBASHISMS" -a -x "$CHECKBASHISMS" ]; then
- echo "Checking for bash-isms"
-
- echo " in ./configure"
- $CHECKBASHISMS -npfx configure
- if [ $? -ne 0 ]; then
- echo "./configure includes bashisms, do not release"
- fi
- echo " done"
-
- echo " in ./build-aux/couchdb-build-release.sh"
- $CHECKBASHISMS -npfx ./build-aux/couchdb-build-release.sh
- if [ $? -ne 0 ]; then
- echo "./build-aux/couchdb-build-release.sh includes bashisms, do not release"
- fi
- echo " done"
-fi
-
-
-# shunit2 tests
-CMD="./configure --test "
-
-test_defaults() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
- RESULT=`$CMD`
- assertEquals "test defaults" "$EXPECT" "$RESULT"
-}
-
-test_prefix() {
- EXPECT="/opt/local /opt/local /opt/local/bin /opt/local/libexec /opt/local/etc /opt/local/share /opt/local/share /opt/local/var /opt/local/var/run /opt/local/share/doc/apache-couchdb /opt/local/lib /opt/local/var/lib/couchdb /opt/local/var/lib/couchdb /opt/local/var/log /opt/local/share/man /opt/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --prefix=/opt/local`
- assertEquals "test prefix" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --prefix /opt/local`
- assertEquals "test prefix" "$EXPECT" "$RESULT"
-}
-
-test_prefix_error() {
- EXPECT='ERROR: "--prefix" requires a non-empty argument.'
-
- RESULT=`$CMD --prefix= 2>&1`
- assertEquals "test prefix error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --prefix 2>&1`
- assertEquals "test prefix error" "$EXPECT" "$RESULT"
-}
-
-
-test_exec_prefix() {
- EXPECT="/usr/local /opt/local /opt/local/bin /opt/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /opt/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --exec-prefix=/opt/local`
- assertEquals "test exec_prefix" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --exec-prefix /opt/local`
- assertEquals "test exec_prefix" "$EXPECT" "$RESULT"
-}
-
-test_exec_prefix_eval() {
- EXPECT="/horse/local /horse/local /horse/local/bin /horse/local/libexec /horse/local/etc /horse/local/share /horse/local/share /horse/local/var /horse/local/var/run /horse/local/share/doc/apache-couchdb /horse/local/lib /horse/local/var/lib/couchdb /horse/local/var/lib/couchdb /horse/local/var/log /horse/local/share/man /horse/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --prefix=/horse/local --exec-prefix=\\${prefix}`
- assertEquals "test exec_prefix" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --prefix /horse/local --exec-prefix \\${prefix}`
- assertEquals "test exec_prefix" "$EXPECT" "$RESULT"
-}
-
-test_exec_prefix_error() {
- EXPECT='ERROR: "--exec-prefix" requires a non-empty argument.'
-
- RESULT=`$CMD --exec-prefix= 2>&1`
- assertEquals "test exec_prefix error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --exec-prefix 2>&1`
- assertEquals "test exec_prefix error" "$EXPECT" "$RESULT"
-}
-
-test_bindir() {
- EXPECT="/usr/local /usr/local /my/funky/bindir /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --bindir=/my/funky/bindir`
- assertEquals "test bindir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --bindir /my/funky/bindir`
- assertEquals "test bindir" "$EXPECT" "$RESULT"
-}
-
-test_bindir_error() {
- EXPECT='ERROR: "--bindir" requires a non-empty argument.'
-
- RESULT=`$CMD --bindir= 2>&1`
- assertEquals "test bindir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --bindir 2>&1`
- assertEquals "test bindir error" "$EXPECT" "$RESULT"
-}
-
-test_libexecdir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /opt/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --libexecdir=/opt/local/libexec`
- assertEquals "test libexecdir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --libexecdir /opt/local/libexec`
- assertEquals "test libexecdir" "$EXPECT" "$RESULT"
-}
-
-test_libexecdir_error() {
- EXPECT='ERROR: "--libexecdir" requires a non-empty argument.'
-
- RESULT=`$CMD --libexecdir= 2>&1`
- assertEquals "test libexecdir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --libexecdir 2>&1`
- assertEquals "test libexecdir error" "$EXPECT" "$RESULT"
-}
-
-test_sysconfdir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /opt/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --sysconfdir=/opt/local/etc`
- assertEquals "test sysconfdir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --sysconfdir /opt/local/etc`
- assertEquals "test sysconfdir" "$EXPECT" "$RESULT"
-}
-
-test_sysconfdir_error() {
- EXPECT='ERROR: "--sysconfdir" requires a non-empty argument.'
-
- RESULT=`$CMD --sysconfdir= 2>&1`
- assertEquals "test sysconfdir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --sysconfdir 2>&1`
- assertEquals "test sysconfdir error" "$EXPECT" "$RESULT"
-}
-
-test_datarootdir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /opt/local/share /opt/local/share /usr/local/var /usr/local/var/run /opt/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /opt/local/share/man /opt/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --datarootdir=/opt/local/share`
- assertEquals "test datarootdir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --datarootdir /opt/local/share`
- assertEquals "test datarootdir" "$EXPECT" "$RESULT"
-}
-
-test_datarootdir_error() {
- EXPECT='ERROR: "--datarootdir" requires a non-empty argument.'
-
- RESULT=`$CMD --datarootdir= 2>&1`
- assertEquals "test datarootdir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --datarootdir 2>&1`
- assertEquals "test datarootdir error" "$EXPECT" "$RESULT"
-}
-
-test_localstatedir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /horse/local/var /horse/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /horse/local/var/lib/couchdb /horse/local/var/lib/couchdb /horse/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --localstatedir=/horse/local/var`
- assertEquals "test localstatedir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --localstatedir /horse/local/var`
- assertEquals "test localstatedir" "$EXPECT" "$RESULT"
-}
-
-test_localstatedir_error() {
- EXPECT='ERROR: "--localstatedir" requires a non-empty argument.'
-
- RESULT=`$CMD --localstatedir= 2>&1`
- assertEquals "test localstatedir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --localstatedir 2>&1`
- assertEquals "test localstatedir error" "$EXPECT" "$RESULT"
-}
-
-test_runstatedir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /horse/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --runstatedir=/horse/local/var/run`
- assertEquals "test runstatedir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --runstatedir /horse/local/var/run`
- assertEquals "test runstatedir" "$EXPECT" "$RESULT"
-}
-
-test_runstatedir_error() {
- EXPECT='ERROR: "--runstatedir" requires a non-empty argument.'
-
- RESULT=`$CMD --runstatedir= 2>&1`
- assertEquals "test runstatedir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --runstatedir 2>&1`
- assertEquals "test runstatedir error" "$EXPECT" "$RESULT"
-}
-
-test_docdir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /horse/local/share/doc /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /horse/local/share/doc/html"
-
- RESULT=`$CMD --docdir=/horse/local/share/doc`
- assertEquals "test docdir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --docdir /horse/local/share/doc`
- assertEquals "test docdir" "$EXPECT" "$RESULT"
-}
-
-test_docdir_error() {
- EXPECT='ERROR: "--docdir" requires a non-empty argument.'
-
- RESULT=`$CMD --docdir= 2>&1`
- assertEquals "test docdir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --docdir 2>&1`
- assertEquals "test docdir error" "$EXPECT" "$RESULT"
-}
-
-test_libdir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /horse/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --libdir=/horse/local/lib`
- assertEquals "test libdir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --libdir /horse/local/lib`
- assertEquals "test libdir" "$EXPECT" "$RESULT"
-}
-
-test_libdir_error() {
- EXPECT='ERROR: "--libdir" requires a non-empty argument.'
-
- RESULT=`$CMD --libdir= 2>&1`
- assertEquals "test libdir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --libdir 2>&1`
- assertEquals "test libdir error" "$EXPECT" "$RESULT"
-}
-
-test_database_dir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /horse/local/var/lib /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --databasedir=/horse/local/var/lib`
- assertEquals "test databasedir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --databasedir /horse/local/var/lib`
- assertEquals "test databasedir" "$EXPECT" "$RESULT"
-}
-
-test_database_dir_error() {
- EXPECT='ERROR: "--databasedir" requires a non-empty argument.'
-
- RESULT=`$CMD --databasedir= 2>&1`
- assertEquals "test databasedir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --databasedir 2>&1`
- assertEquals "test databasedir error" "$EXPECT" "$RESULT"
-}
-
-test_view_dir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /horse/local/var/lib /usr/local/var/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --viewindexdir=/horse/local/var/lib`
- assertEquals "test viewindexdir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --viewindexdir /horse/local/var/lib`
- assertEquals "test viewindexdir" "$EXPECT" "$RESULT"
-}
-
-test_view_dir_error() {
- EXPECT='ERROR: "--viewindexdir" requires a non-empty argument.'
-
- RESULT=`$CMD --viewindexdir= 2>&1`
- assertEquals "test viewindexdir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --viewindexdir 2>&1`
- assertEquals "test viewindexdir error" "$EXPECT" "$RESULT"
-}
-
-test_logdir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /horse/log /usr/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --logdir=/horse/log`
- assertEquals "test logdir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --logdir /horse/log`
- assertEquals "test logdir" "$EXPECT" "$RESULT"
-}
-
-test_logdir_error() {
- EXPECT='ERROR: "--logdir" requires a non-empty argument.'
-
- RESULT=`$CMD --logdir= 2>&1`
- assertEquals "test logdir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --logdir 2>&1`
- assertEquals "test logdir error" "$EXPECT" "$RESULT"
-}
-
-test_mandir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /horse/local/share/man /usr/local/share/doc/apache-couchdb/html"
-
- RESULT=`$CMD --mandir=/horse/local/share/man`
- assertEquals "test mandir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --mandir /horse/local/share/man`
- assertEquals "test mandir" "$EXPECT" "$RESULT"
-}
-
-test_mandir_error() {
- EXPECT='ERROR: "--mandir" requires a non-empty argument.'
-
- RESULT=`$CMD --mandir= 2>&1`
- assertEquals "test mandir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --mandir 2>&1`
- assertEquals "test mandir error" "$EXPECT" "$RESULT"
-}
-
-test_htmldir() {
- EXPECT="/usr/local /usr/local /usr/local/bin /usr/local/libexec /usr/local/etc /usr/local/share /usr/local/share /usr/local/var /usr/local/var/run /usr/local/share/doc/apache-couchdb /usr/local/lib /usr/local/var/lib/couchdb /usr/local/var/lib/couchdb /usr/local/var/log /usr/local/share/man /horse/local/share/doc/html"
-
- RESULT=`$CMD --htmldir=/horse/local/share/doc/html`
- assertEquals "test htmldir" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --htmldir /horse/local/share/doc/html`
- assertEquals "test htmldir" "$EXPECT" "$RESULT"
-}
-
-test_htmldir_error() {
- EXPECT='ERROR: "--htmldir" requires a non-empty argument.'
-
- RESULT=`$CMD --htmldir= 2>&1`
- assertEquals "test htmldir error" "$EXPECT" "$RESULT"
-
- RESULT=`$CMD --htmldir 2>&1`
- assertEquals "test htmldir error" "$EXPECT" "$RESULT"
-}
-
-# source the shunit2
-. $SHUNIT2
diff --git a/test/build/test-make-clean.sh b/test/build/test-make-clean.sh
deleted file mode 100755
index ce6366fef..000000000
--- a/test/build/test-make-clean.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-rm -rf apache-couchdb*
-./configure
-make release
-cd apache-couchdb
- ./configure
-cd ..
-
-cp -r apache-couchdb apache-couchdb-pristine
-
-cd apache-couchdb
- make
- make clean
-cd ..
-
-echo "********************************************"
-echo "If you see anything here"
-diff -r apache-couchdb apache-couchdb-pristine
-echo "and here, something is wrong"
-echo "********************************************"
diff --git a/test/elixir/.formatter.exs b/test/elixir/.formatter.exs
deleted file mode 100644
index 742e82394..000000000
--- a/test/elixir/.formatter.exs
+++ /dev/null
@@ -1,6 +0,0 @@
-# Used by "mix format"
-[
- inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"],
- line_length: 90,
- rename_deprecated_at: "1.5.0"
-]
diff --git a/test/elixir/.gitignore b/test/elixir/.gitignore
deleted file mode 100644
index 2e39defe6..000000000
--- a/test/elixir/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-_build/
-deps/
diff --git a/test/elixir/Makefile b/test/elixir/Makefile
deleted file mode 100644
index 67ce2b427..000000000
--- a/test/elixir/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
-
-all:
- make -C ${SELF_DIR}../.. elixir
diff --git a/test/elixir/README.md b/test/elixir/README.md
deleted file mode 100644
index 51f83ef36..000000000
--- a/test/elixir/README.md
+++ /dev/null
@@ -1,256 +0,0 @@
-# Elixir Test Suite
-
-Proof of concept porting the JS test suite to Elixir.
-
-Currently the basics.js suite has been partially ported over.
-
-To run the suite:
-
-```
-mix deps.get
-mix test --trace
-```
-
-## Set CouchDB credentials
-
-By default the Elixir tests require CouchDB running at http://127.0.0.1:15984 with credentials `adm:pass`.
-You can override those using the following:
-
-```
-$ EX_USERNAME=myusername EX_PASSWORD=password EX_COUCH_URL=http://my-couchdb.com mix test
-```
-
-## Tests to port
-
-X means done, - means partially done
-
- - [X] Port all_docs.js
- - [X] Port attachment_names.js
- - [X] Port attachment_paths.js
- - [X] Port attachment_ranges.js
- - [X] Port attachments.js
- - [X] Port attachments_multipart.js
- - [X] Port attachment_views.js
- - [X] Port auth_cache.js
- - [X] Port basics.js
- - [X] Port batch_save.js
- - [X] Port bulk_docs.js
- - [X] Port changes.js
- - [X] Port coffee.js
- - [X] Port compact.js
- - [X] Port config.js
- - [X] Port conflicts.js
- - [X] Port cookie_auth.js
- - [X] Port copy_doc.js
- - [X] Port design_docs.js
- - [X] Port design_docs_query.js
- - [X] Port design_options.js
- - [X] Port design_paths.js
- - [X] Port erlang_views.js
- - [X] Port etags_head.js
- - [ ] ~~Port etags_views.js~~ (skipped in js test suite)
- - [X] Port form_submit.js
- - [X] Port http.js
- - [X] Port invalid_docids.js
- - [X] Port jsonp.js
- - [X] Port large_docs.js
- - [X] Port list_views.js
- - [X] Port lorem_b64.txt
- - [X] Port lorem.txt
- - [X] Port lots_of_docs.js
- - [X] Port method_override.js
- - [X] Port multiple_rows.js
- - [X] Port proxyauth.js
- - [X] Port purge.js
- - [X] Port reader_acl.js
- - [X] Port recreate_doc.js
- - [X] Port reduce_builtin.js
- - [X] Port reduce_false.js
- - [ ] ~~Port reduce_false_temp.js~~
- - [X] Port reduce.js
- - [X] Port replication.js
- - [ ] Port replicator_db_bad_rep_id.js
- - [ ] Port replicator_db_by_doc_id.js
- - [ ] Port replicator_db_compact_rep_db.js
- - [ ] Port replicator_db_continuous.js
- - [ ] Port replicator_db_credential_delegation.js
- - [ ] Port replicator_db_field_validation.js
- - [ ] Port replicator_db_filtered.js
- - [ ] Port replicator_db_identical_continuous.js
- - [ ] Port replicator_db_identical.js
- - [ ] Port replicator_db_invalid_filter.js
- - [ ] Port replicator_db_security.js
- - [ ] Port replicator_db_simple.js
- - [ ] Port replicator_db_successive.js
- - [ ] Port replicator_db_survives.js
- - [ ] Port replicator_db_swap_rep_db.js
- - [ ] Port replicator_db_update_security.js
- - [ ] Port replicator_db_user_ctx.js
- - [ ] Port replicator_db_write_auth.js
- - [X] Port rev_stemming.js
- - [X] Port rewrite.js
- - [X] Port rewrite_js.js
- - [X] Port security_validation.js
- - [X] Port show_documents.js
- - [ ] Port stats.js
- - [X] Port update_documents.js
- - [X] Port users_db.js
- - [X] Port users_db_security.js
- - [X] Port utf8.js
- - [X] Port uuids.js
- - [X] Port view_collation.js
- - [X] Port view_collation_raw.js
- - [X] Port view_compaction.js
- - [X] Port view_conflicts.js
- - [X] Port view_errors.js
- - [X] Port view_include_docs.js
- - [X] Port view_multi_key_all_docs.js
- - [X] Port view_multi_key_design.js
- - [ ] ~~Port view_multi_key_temp.js~~
- - [X] Port view_offsets.js
- - [X] Port view_pagination.js
- - [X] Port view_sandboxing.js
- - [X] Port view_update_seq.js
-
-# Using ExUnit to write unit tests
-
-Elixir has a number of benefits which make writing unit tests easier.
-For example, it is trivial to do code generation of tests.
-Below we present a few use cases where code generation is really helpful.
-
-## How to write ExUnit tests
-
-1. Create a new file in the test/exunit/ directory (the file name should match *_test.exs)
-2. If it is the first file in that directory, also create test_helper.exs (see src/couch/test/exunit/test_helper.exs for an example)
-3. Define a test module which does `use Couch.Test.ExUnit.Case`
-4. Define test cases in the module (a minimal module is sketched below)
-
-You can run tests either:
-- using make: `make exunit`
-- using mix: BUILDDIR=`pwd` ERL_LIBS=`pwd`/src MIX_ENV=test mix test --trace
-
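-For illustration, a minimal test module could look like the following (the module and test names are made up for this sketch; only `use Couch.Test.ExUnit.Case` comes from the test harness):
-
-```
-defmodule Couch.Test.HelloExUnit do
-  use Couch.Test.ExUnit.Case
-
-  test "a trivial assertion" do
-    assert 1 + 1 == 2
-  end
-end
-```
-
-Real tests usually attach a setup chain via the `setup:` tag; see the setup examples later in this document.
-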
-## Generating tests from spec
-
-Sometimes we have data in a structured format and want
-to generate test cases from that data. This is easy in Elixir.
-For example, suppose we have the following spec (saved as `roles.json`):
-```
-{
- "{db_name}/_view_cleanup": {
- "roles": ["_admin"]
- }
-}
-```
-We can use this spec to generate test cases:
-```
-defmodule GenerateTestsFromSpec do
- use ExUnit.Case
- require Record
- Record.defrecordp :user_ctx, Record.extract(:user_ctx, from_lib: "couch/include/couch_db.hrl")
- Record.defrecordp :httpd, Record.extract(:httpd, from_lib: "couch/include/couch_db.hrl")
-
- {:ok, spec_bin} = File.read("roles.json")
- spec = :jiffy.decode(spec_bin, [:return_maps])
- Enum.each spec, fn {path, path_spec} ->
- roles = path_spec["roles"]
- @roles roles
- @path_parts String.split(path, "/")
- test "Access with `#{inspect(roles)}` roles" do
- req = httpd(path_parts: @path_parts, user_ctx: user_ctx(roles: @roles))
- :chttpd_auth_request.authorize_request(req)
- end
- end
-end
-```
-As a result we would get:
-```
-GenerateTestsFromSpec
- * test Access with `["_admin"]` roles (0.00ms)
-```
-
-## Test all possible combinations
-
-Sometimes we want to test all possible combinations of parameters.
-This can be accomplished using something like the following:
-
-```
-defmodule Permutations do
- use ExUnit.Case
- pairs = :couch_tests_combinatorics.product([
- [:remote, :local], [:remote, :local]
- ])
- for [source, dest] <- pairs do
- @source source
- @dest dest
- test "Replication #{source} -> #{dest}" do
- assert :ok == :ok
- end
- end
-end
-```
-
-This would produce the following tests:
-```
-Permutations
- * test Replication remote -> remote (0.00ms)
- * test Replication local -> remote (0.00ms)
- * test Replication remote -> local (0.00ms)
- * test Replication local -> local (0.00ms)
-```
-
-## Reusing common setups
-
-The setup functions are quite similar across many tests, so it makes
-sense to reuse them. The idea is to add shared setup functions to either:
-- test/elixir/lib/setup/common.ex
-- test/elixir/lib/setup/<something>.ex
-
-A setup function looks like the following:
-```
-defmodule Foo do
- alias Couch.Test.Setup.Step
-
- def httpd_with_admin(setup) do
- setup
- |> Step.Start.new(:start, extra_apps: [:chttpd])
- |> Step.User.new(:admin, roles: [:server_admin])
- end
-end
-```
-
-These parts of a setup chain can be invoked as follows:
-```
-defmodule Couch.Test.CRUD do
- use Couch.Test.ExUnit.Case
- alias Couch.Test.Utils
-
- alias Couch.Test.Setup
-
- alias Couch.Test.Setup.Step
-
- def with_db(context, setup) do
- setup =
- setup
- |> Setup.Common.httpd_with_db()
- |> Setup.run()
-
- context =
- Map.merge(context, %{
- db_name: setup |> Setup.get(:db) |> Step.Create.DB.name(),
- base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url(),
- user: setup |> Setup.get(:admin) |> Step.User.name()
- })
-
- {context, setup}
- end
-
- describe "Database CRUD using Fabric API" do
- @describetag setup: &__MODULE__.with_db/2
- test "Create DB", ctx do
- IO.puts("base_url: #{ctx.base_url}")
- IO.puts("admin: #{ctx.user}")
- IO.puts("db_name: #{ctx.db_name}")
- end
- end
-end
-```
diff --git a/test/elixir/config/config.exs b/test/elixir/config/config.exs
deleted file mode 100644
index 966ae83bb..000000000
--- a/test/elixir/config/config.exs
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is responsible for configuring your application
-# and its dependencies with the aid of the Mix.Config module.
-use Mix.Config
-
-# This configuration is loaded before any dependency and is restricted
-# to this project. If another project depends on this project, this
-# file won't be loaded nor affect the parent project. For this reason,
-# if you want to provide default values for your application for
-# 3rd-party users, it should be done in your "mix.exs" file.
-
-# You can configure your application as:
-#
-# config :foo, key: :value
-#
-# and access this configuration in your application as:
-#
-# Application.get_env(:foo, :key)
-#
-# You can also configure a 3rd-party app:
-#
-# config :logger, level: :info
-#
-
-# It is also possible to import configuration files, relative to this
-# directory. For example, you can emulate configuration per environment
-# by uncommenting the line below and defining dev.exs, test.exs and such.
-# Configuration from the imported file will override the ones defined
-# here (which is why it is important to import them last).
-#
-# import_config "#{Mix.env}.exs"
diff --git a/test/elixir/config/test.exs b/test/elixir/config/test.exs
deleted file mode 100644
index 4b28ea99b..000000000
--- a/test/elixir/config/test.exs
+++ /dev/null
@@ -1,3 +0,0 @@
-config :logger,
- backends: [:console],
- compile_time_purge_level: :debug
diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex
deleted file mode 100644
index d9751c416..000000000
--- a/test/elixir/lib/couch.ex
+++ /dev/null
@@ -1,190 +0,0 @@
-defmodule Couch.Session do
- @moduledoc """
- CouchDB session helpers.
- """
-
- defstruct [:cookie, :error]
-
- def new(cookie, error \\ "") do
- %Couch.Session{cookie: cookie, error: error}
- end
-
- def logout(sess) do
- headers = [
- "Content-Type": "application/x-www-form-urlencoded",
- "X-CouchDB-WWW-Authenticate": "Cookie",
- Cookie: sess.cookie
- ]
-
- Couch.delete!("/_session", headers: headers)
- end
-
- def info(sess) do
- headers = [
- "Content-Type": "application/x-www-form-urlencoded",
- "X-CouchDB-WWW-Authenticate": "Cookie",
- Cookie: sess.cookie
- ]
-
- Couch.get("/_session", headers: headers).body
- end
-
- def get(sess, url, opts \\ []), do: go(sess, :get, url, opts)
- def get!(sess, url, opts \\ []), do: go!(sess, :get, url, opts)
- def put(sess, url, opts \\ []), do: go(sess, :put, url, opts)
- def put!(sess, url, opts \\ []), do: go!(sess, :put, url, opts)
- def post(sess, url, opts \\ []), do: go(sess, :post, url, opts)
- def post!(sess, url, opts \\ []), do: go!(sess, :post, url, opts)
- def delete(sess, url, opts \\ []), do: go(sess, :delete, url, opts)
- def delete!(sess, url, opts \\ []), do: go!(sess, :delete, url, opts)
-
- # Skipping head/patch/options for YAGNI. Feel free to add
- # if the need arises.
- def go(%Couch.Session{} = sess, method, url, opts) do
- parse_response = Keyword.get(opts, :parse_response, true)
- opts = opts
- |> Keyword.merge(cookie: sess.cookie)
- |> Keyword.delete(:parse_response)
- if parse_response do
- Couch.request(method, url, opts)
- else
- Rawresp.request(method, url, opts)
- end
- end
-
- def go!(%Couch.Session{} = sess, method, url, opts) do
- parse_response = Keyword.get(opts, :parse_response, true)
- opts = opts
- |> Keyword.merge(cookie: sess.cookie)
- |> Keyword.delete(:parse_response)
- if parse_response do
- Couch.request!(method, url, opts)
- else
- Rawresp.request!(method, url, opts)
- end
- end
-end
-
-defmodule Couch do
- use HTTPotion.Base
-
- @moduledoc """
- CouchDB library to power the test suite.
- """
-
- # These constants are supplied to the underlying HTTP client and control
- # how long we will wait before timing out a test. The inactivity timeout
- # specifically fires during an active HTTP response and defaults to 10_000
- # if not specified. We're defining it to a different value than the
- # request_timeout largely just so we know which timeout fired.
- @request_timeout 60_000
- @inactivity_timeout 55_000
-
- def process_url("http://" <> _ = url) do
- url
- end
-
- def process_url(url) do
- base_url = System.get_env("EX_COUCH_URL") || "http://127.0.0.1:15984"
- base_url <> url
- end
-
- def process_request_headers(headers, _body, options) do
- headers = Keyword.put(headers, :"User-Agent", "couch-potion")
-
- headers =
- if headers[:"Content-Type"] do
- headers
- else
- Keyword.put(headers, :"Content-Type", "application/json")
- end
-
- case Keyword.get(options, :cookie) do
- nil ->
- headers
-
- cookie ->
- Keyword.put(headers, :Cookie, cookie)
- end
- end
-
- def process_options(options) do
- options
- |> set_auth_options()
- |> set_inactivity_timeout()
- |> set_request_timeout()
- end
-
- def process_request_body(body) do
- if is_map(body) do
- :jiffy.encode(body)
- else
- body
- end
- end
-
- def process_response_body(_headers, body) when body == [] do
- ""
- end
-
- def process_response_body(headers, body) do
- content_type = headers[:"Content-Type"]
-
- if !!content_type and String.match?(content_type, ~r/application\/json/) do
- body |> IO.iodata_to_binary() |> :jiffy.decode([:return_maps])
- else
- process_response_body(body)
- end
- end
-
- def set_auth_options(options) do
- if Keyword.get(options, :cookie) == nil do
- headers = Keyword.get(options, :headers, [])
- if headers[:basic_auth] != nil or headers[:authorization] != nil
- or List.keymember?(headers, :"X-Auth-CouchDB-UserName", 0) do
- options
- else
- username = System.get_env("EX_USERNAME") || "adm"
- password = System.get_env("EX_PASSWORD") || "pass"
- Keyword.put(options, :basic_auth, {username, password})
- end
- else
- options
- end
- end
-
- def set_inactivity_timeout(options) do
- Keyword.update(
- options,
- :ibrowse,
- [{:inactivity_timeout, @inactivity_timeout}],
- fn ibrowse ->
- Keyword.put_new(ibrowse, :inactivity_timeout, @inactivity_timeout)
- end
- )
- end
-
- def set_request_timeout(options) do
- timeout = Application.get_env(:httpotion, :default_timeout, @request_timeout)
- Keyword.put_new(options, :timeout, timeout)
- end
-
- def login(userinfo) do
- [user, pass] = String.split(userinfo, ":", parts: 2)
- login(user, pass)
- end
-
- def login(user, pass, expect \\ :success) do
- resp = Couch.post("/_session", body: %{:username => user, :password => pass})
-
- if expect == :success do
- true = resp.body["ok"]
- cookie = resp.headers[:"set-cookie"]
- [token | _] = String.split(cookie, ";")
- %Couch.Session{cookie: token}
- else
- true = Map.has_key?(resp.body, "error")
- %Couch.Session{error: resp.body["error"]}
- end
- end
-end
diff --git a/test/elixir/lib/couch/db_test.ex b/test/elixir/lib/couch/db_test.ex
deleted file mode 100644
index 652fa6bb6..000000000
--- a/test/elixir/lib/couch/db_test.ex
+++ /dev/null
@@ -1,557 +0,0 @@
-defmodule Couch.DBTest do
- @moduledoc false
-
- import ExUnit.Callbacks, only: [on_exit: 1]
- import ExUnit.Assertions, only: [assert: 1, assert: 2]
-
- def set_db_context(context) do
- context =
- case context do
- %{:with_db_name => true} ->
- Map.put(context, :db_name, random_db_name())
-
- %{:with_db_name => db_name} when is_binary(db_name) ->
- Map.put(context, :db_name, db_name)
-
- %{:with_random_db => db_name} when is_binary(db_name) ->
- context
- |> Map.put(:db_name, random_db_name(db_name))
- |> Map.put(:with_db, true)
-
- %{:with_partitioned_db => true} ->
- context
- |> Map.put(:db_name, random_db_name())
- |> Map.put(:query, %{partitioned: true})
- |> Map.put(:with_db, true)
-
- %{:with_db => true} ->
- Map.put(context, :db_name, random_db_name())
-
- %{:with_db => db_name} when is_binary(db_name) ->
- Map.put(context, :db_name, db_name)
-
- _ ->
- context
- end
-
- if Map.has_key?(context, :with_db) do
- {:ok, _} = create_db(context[:db_name], query: context[:query])
- on_exit(fn -> delete_db(context[:db_name]) end)
- end
-
- context
- end
-
- def set_config_context(context) do
- if is_list(context[:config]) do
- Enum.each(context[:config], fn cfg ->
- set_config(cfg)
- end)
- end
-
- context
- end
-
- def set_user_context(context) do
- case Map.get(context, :user) do
- nil ->
- context
-
- user when is_list(user) ->
- user = create_user(user)
-
- on_exit(fn ->
- query = %{:rev => user["_rev"]}
- resp = Couch.delete("/_users/#{user["_id"]}", query: query)
- assert HTTPotion.Response.success?(resp)
- end)
-
- context = Map.put(context, :user, user)
- userinfo = user["name"] <> ":" <> user["password"]
- Map.put(context, :userinfo, userinfo)
- end
- end
-
- def random_db_name do
- random_db_name("random-test-db")
- end
-
- def random_db_name(prefix) do
- time = :erlang.monotonic_time()
- umi = :erlang.unique_integer([:monotonic])
- "#{prefix}-#{time}-#{umi}"
- end
-
- def set_config({section, key, value}) do
- existing = set_config_raw(section, key, value)
-
- on_exit(fn ->
- Enum.each(existing, fn {node, prev_value} ->
- if prev_value != "" do
- url = "/_node/#{node}/_config/#{section}/#{key}"
- headers = ["X-Couch-Persist": "false"]
- body = :jiffy.encode(prev_value)
- resp = Couch.put(url, headers: headers, body: body)
- assert resp.status_code == 200
- else
- url = "/_node/#{node}/_config/#{section}/#{key}"
- headers = ["X-Couch-Persist": "false"]
- resp = Couch.delete(url, headers: headers)
- assert resp.status_code == 200
- end
- end)
- end)
- end
-
- def set_config_raw(section, key, value) do
- resp = Couch.get("/_membership")
-
- Enum.map(resp.body["all_nodes"], fn node ->
- url = "/_node/#{node}/_config/#{section}/#{key}"
- headers = ["X-Couch-Persist": "false"]
- body = :jiffy.encode(value)
- resp = Couch.put(url, headers: headers, body: body)
- assert resp.status_code == 200
- {node, resp.body}
- end)
- end
-
- def prepare_user_doc(user) do
- required = [:name, :password]
-
- Enum.each(required, fn key ->
- assert Keyword.has_key?(user, key), "User missing key: #{key}"
- end)
-
- id = Keyword.get(user, :id)
- name = Keyword.get(user, :name)
- password = Keyword.get(user, :password)
- roles = Keyword.get(user, :roles, [])
-
- assert is_binary(name), "User name must be a string"
- assert is_binary(password), "User password must be a string"
- assert is_list(roles), "Roles must be a list of strings"
-
- Enum.each(roles, fn role ->
- assert is_binary(role), "Roles must be a list of strings"
- end)
-
- %{
- "_id" => id || "org.couchdb.user:" <> name,
- "type" => "user",
- "name" => name,
- "roles" => roles,
- "password" => password
- }
- end
-
- def create_user(user) do
- user_doc = prepare_user_doc(user)
- resp = Couch.get("/_users/#{user_doc["_id"]}")
-
- user_doc =
- case resp.status_code do
- 404 ->
- user_doc
-
- sc when sc >= 200 and sc < 300 ->
- Map.put(user_doc, "_rev", resp.body["_rev"])
- end
-
- resp = Couch.post("/_users", body: user_doc)
- assert HTTPotion.Response.success?(resp)
- assert resp.body["ok"]
- Map.put(user_doc, "_rev", resp.body["rev"])
- end
-
- def create_db(db_name, opts \\ []) do
- retry_until(fn ->
- resp = Couch.put("/#{db_name}", opts)
- assert resp.status_code in [201, 202]
- assert resp.body == %{"ok" => true}
- {:ok, resp}
- end)
- end
-
- def delete_db(db_name) do
- resp = Couch.delete("/#{db_name}")
- assert resp.status_code in [200, 202, 404]
- {:ok, resp}
- end
-
- def create_doc(db_name, body) do
- resp = Couch.post("/#{db_name}", body: body)
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
- {:ok, resp}
- end
-
- def info(db_name) do
- resp = Couch.get("/#{db_name}")
- assert resp.status_code == 200
- resp.body
- end
-
- def save(db_name, document) do
- resp = Couch.put("/#{db_name}/#{document["_id"]}", body: document)
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
- Map.put(document, "_rev", resp.body["rev"])
- end
-
- def bulk_save(db_name, docs) do
- resp =
- Couch.post(
- "/#{db_name}/_bulk_docs",
- body: %{
- docs: docs
- }
- )
-
- assert resp.status_code in [201, 202]
- resp
- end
-
- def query(
- db_name,
- map_fun,
- reduce_fun \\ nil,
- options \\ nil,
- keys \\ nil,
- language \\ "javascript"
- ) do
- l_map_function =
- if language == "javascript" do
- "#{map_fun} /* avoid race cond #{now(:ms)} */"
- else
- map_fun
- end
-
- view = %{
- :map => l_map_function
- }
-
- view =
- if reduce_fun != nil do
- Map.put(view, :reduce, reduce_fun)
- else
- view
- end
-
- {view, request_options} =
- if options != nil and Map.has_key?(options, :options) do
- {Map.put(view, :options, options.options), Map.delete(options, :options)}
- else
- {view, options}
- end
-
- ddoc_name = "_design/temp_#{now(:ms)}"
-
- ddoc = %{
- _id: ddoc_name,
- language: language,
- views: %{
- view: view
- }
- }
-
- request_options =
- if keys != nil and is_list(keys) do
- Map.merge(request_options || %{}, %{:keys => :jiffy.encode(keys)})
- else
- request_options
- end
-
- resp =
- Couch.put(
- "/#{db_name}/#{ddoc_name}",
- headers: ["Content-Type": "application/json"],
- body: ddoc
- )
-
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/#{ddoc_name}/_view/view", query: request_options)
- assert resp.status_code == 200
-
- Couch.delete("/#{db_name}/#{ddoc_name}")
-
- resp.body
- end
-
- def compact(db_name) do
- resp = Couch.post("/#{db_name}/_compact")
- assert resp.status_code == 202
-
- retry_until(
- fn -> Map.get(info(db_name), "compact_running") == false end,
- 200,
- 10_000
- )
-
- resp.body
- end
-
- def replicate(src, tgt, options \\ []) do
- username = System.get_env("EX_USERNAME") || "adm"
- password = System.get_env("EX_PASSWORD") || "pass"
-
- {userinfo, options} = Keyword.pop(options, :userinfo)
-
- userinfo =
- if userinfo == nil do
- "#{username}:#{password}"
- else
- userinfo
- end
-
- src = set_user(src, userinfo)
- tgt = set_user(tgt, userinfo)
-
- defaults = [headers: [], body: %{}, timeout: 30_000]
- options = defaults |> Keyword.merge(options) |> Enum.into(%{})
-
- %{body: body} = options
- body = [source: src, target: tgt] |> Enum.into(body)
- options = Map.put(options, :body, body)
-
- resp = Couch.post("/_replicate", Enum.to_list(options))
- assert HTTPotion.Response.success?(resp), "#{inspect(resp)}"
- resp.body
- end
-
- defp set_user(uri, userinfo) do
- case URI.parse(uri) do
- %{scheme: nil} ->
- uri
-
- %{userinfo: nil} = uri ->
- URI.to_string(Map.put(uri, :userinfo, userinfo))
-
- _ ->
- uri
- end
- end
-
- def view(db_name, view_name, options \\ nil, keys \\ nil) do
- [view_root, view_name] = String.split(view_name, "/")
-
- resp =
- case keys do
- nil ->
- Couch.get("/#{db_name}/_design/#{view_root}/_view/#{view_name}", query: options)
-
- _ ->
- Couch.post("/#{db_name}/_design/#{view_root}/_view/#{view_name}", query: options,
- body: %{"keys" => keys}
- )
- end
-
- assert resp.status_code in [200, 201]
- resp
- end
-
- def sample_doc_foo do
- %{
- _id: "foo",
- bar: "baz"
- }
- end
-
- # Generate range of docs with strings as keys
- def make_docs(id_range) do
- for id <- id_range, str_id = Integer.to_string(id) do
- %{"_id" => str_id, "integer" => id, "string" => str_id}
- end
- end
-
- # Generate range of docs based on a template
- def make_docs(id_range, template_doc) do
- for id <- id_range, str_id = Integer.to_string(id) do
- Map.merge(template_doc, %{"_id" => str_id})
- end
- end
-
- # Generate range of docs with atoms as keys, which are more
- # idiomatic, and are encoded by jiffy to binaries
- def create_docs(id_range) do
- for id <- id_range, str_id = Integer.to_string(id) do
- %{_id: str_id, integer: id, string: str_id}
- end
- end
-
- def request_stats(path_steps, is_test) do
- path =
- List.foldl(
- path_steps,
- "/_node/_local/_stats",
- fn p, acc ->
- "#{acc}/#{p}"
- end
- )
-
- path =
- if is_test do
- path <> "?flush=true"
- else
- path
- end
-
- Couch.get(path).body
- end
-
- def retry_until(condition, sleep \\ 100, timeout \\ 30_000) do
- retry_until(condition, now(:ms), sleep, timeout)
- end
-
- defp retry_until(condition, start, sleep, timeout) do
- now = now(:ms)
-
- if now > start + timeout do
- raise "timed out after #{now - start} ms"
- else
- try do
- if result = condition.() do
- result
- else
- raise ExUnit.AssertionError
- end
- rescue
- ExUnit.AssertionError ->
- :timer.sleep(sleep)
- retry_until(condition, start, sleep, timeout)
- end
- end
- end
-
- defp now(:ms) do
- case elem(:os.type, 0) do
- :win32 ->
- div(:erlang.system_time(), 1_000)
- _ ->
- div(:erlang.system_time(), 1_000_000)
- end
- end
-
- @spec rev(map(), map()) :: map()
- def rev(doc = %{_id: id}, %{"id" => id, "rev" => rev}) do
- Map.put(doc, :_rev, rev)
- end
-
- @spec rev([map()], [map()]) :: [map()]
- def rev(docs, rows) when length(docs) == length(rows) do
- for {doc, row} <- Enum.zip(docs, rows), do: rev(doc, row)
- end
-
- def pretty_inspect(resp) do
- opts = [pretty: true, width: 20, limit: :infinity, printable_limit: :infinity]
- inspect(resp, opts)
- end
-
- def run_on_modified_server(settings, fun) do
- resp = Couch.get("/_membership")
- assert resp.status_code == 200
- nodes = resp.body["all_nodes"]
-
- prev_settings =
- Enum.map(settings, fn setting ->
- prev_setting_node =
- Enum.reduce(nodes, %{}, fn node, acc ->
- resp =
- Couch.put(
- "/_node/#{node}/_config/#{setting.section}/#{setting.key}",
- headers: ["X-Couch-Persist": false],
- body: :jiffy.encode(setting.value)
- )
-
- assert resp.status_code == 200
- Map.put(acc, node, resp.body)
- end)
-
- Map.put(setting, :nodes, Map.to_list(prev_setting_node))
- end)
-
- try do
- fun.()
- after
- Enum.each(prev_settings, fn setting ->
- Enum.each(setting.nodes, fn node_value ->
- node = elem(node_value, 0)
- value = elem(node_value, 1)
-
- if value == ~s(""\\n) or value == "" or value == nil do
- resp =
- Couch.delete(
- "/_node/#{node}/_config/#{setting.section}/#{setting.key}",
- headers: ["X-Couch-Persist": false]
- )
-
- assert resp.status_code == 200
- else
- resp =
- Couch.put(
- "/_node/#{node}/_config/#{setting.section}/#{setting.key}",
- headers: ["X-Couch-Persist": false],
- body: :jiffy.encode(value)
- )
-
- assert resp.status_code == 200
- end
- end)
- end)
- end
- end
-
- def restart_cluster do
- resp = Couch.get("/_membership")
- assert resp.status_code == 200
- nodes = resp.body["all_nodes"]
-
- nodes_ports =
- Enum.reduce(nodes, [], fn node, acc ->
- port = node_to_port(node)
- [{node, port} | acc]
- end)
-
- tasks =
- Enum.map(nodes_ports, fn {node, port} ->
- Task.async(fn -> restart_node(node, port) end)
- end)
-
- Task.yield_many(tasks, length(nodes) * 5000)
- end
-
- def restart_node(node \\ "node1@127.0.0.1") do
- port = node_to_port(node)
- restart_node(node, port)
- end
-
- defp restart_node(node, port) do
- url = "http://127.0.0.1:#{port}/_node/#{node}/_restart"
- resp = Couch.post(url)
- assert HTTPotion.Response.success?(resp)
- assert resp.body["ok"]
- # make sure the node went down. We assume the node can't bounce back
- # quickly enough to introduce a race here
- retry_until(fn -> !node_is_running(port) end)
- # wait until the node is back
- retry_until(fn -> node_is_running(port) end, 500, 30_000)
- end
-
- defp node_is_running(port) do
- url = "http://127.0.0.1:#{port}/_up"
- resp = Couch.get(url)
-
- case HTTPotion.Response.success?(resp) do
- true -> resp.status_code in 200..399
- false -> false
- end
- end
-
- defp node_to_port(node) do
- url = "/_node/#{node}/_config/chttpd/port"
- resp = Couch.get(url)
- assert HTTPotion.Response.success?(resp)
- resp.body
- end
-end
diff --git a/test/elixir/lib/couch_raw.ex b/test/elixir/lib/couch_raw.ex
deleted file mode 100644
index 62a0bbd0e..000000000
--- a/test/elixir/lib/couch_raw.ex
+++ /dev/null
@@ -1,105 +0,0 @@
-defmodule Rawresp do
- use HTTPotion.Base
-
- @moduledoc """
- HTTP client that provides the raw response as its result
- """
- @request_timeout 60_000
- @inactivity_timeout 55_000
-
- def process_url("http://" <> _ = url) do
- url
- end
-
- def process_url(url) do
- base_url = System.get_env("EX_COUCH_URL") || "http://127.0.0.1:15984"
- base_url <> url
- end
-
- def process_request_headers(headers, _body, options) do
- headers =
- headers
- |> Keyword.put(:"User-Agent", "couch-potion")
-
- headers =
- if headers[:"Content-Type"] do
- headers
- else
- Keyword.put(headers, :"Content-Type", "application/json")
- end
-
- case Keyword.get(options, :cookie) do
- nil ->
- headers
-
- cookie ->
- Keyword.put(headers, :Cookie, cookie)
- end
- end
-
- def process_options(options) do
- options
- |> set_auth_options()
- |> set_inactivity_timeout()
- |> set_request_timeout()
- end
-
- def process_request_body(body) do
- if is_map(body) do
- :jiffy.encode(body)
- else
- body
- end
- end
-
- def set_auth_options(options) do
- if Keyword.get(options, :cookie) == nil do
- headers = Keyword.get(options, :headers, [])
-
- if headers[:basic_auth] != nil or headers[:authorization] != nil do
- options
- else
- username = System.get_env("EX_USERNAME") || "adm"
- password = System.get_env("EX_PASSWORD") || "pass"
- Keyword.put(options, :basic_auth, {username, password})
- end
- else
- options
- end
- end
-
- def set_inactivity_timeout(options) do
- Keyword.update(
- options,
- :ibrowse,
- [{:inactivity_timeout, @inactivity_timeout}],
- fn ibrowse ->
- Keyword.put_new(ibrowse, :inactivity_timeout, @inactivity_timeout)
- end
- )
- end
-
- def set_request_timeout(options) do
- timeout = Application.get_env(:httpotion, :default_timeout, @request_timeout)
- Keyword.put_new(options, :timeout, timeout)
- end
-
- def login(userinfo) do
- [user, pass] = String.split(userinfo, ":", parts: 2)
- login(user, pass)
- end
-
- def login(user, pass, expect \\ :success) do
- resp = Couch.post("/_session", body: %{:username => user, :password => pass})
-
- if expect == :success do
- true = resp.body["ok"]
- cookie = resp.headers[:"set-cookie"]
- [token | _] = String.split(cookie, ";")
- %Couch.Session{cookie: token}
- else
- true = Map.has_key?(resp.body, "error")
- %Couch.Session{error: resp.body["error"]}
- end
- end
-end
diff --git a/test/elixir/lib/ex_unit.ex b/test/elixir/lib/ex_unit.ex
deleted file mode 100644
index 8503cd991..000000000
--- a/test/elixir/lib/ex_unit.ex
+++ /dev/null
@@ -1,48 +0,0 @@
-defmodule Couch.Test.ExUnit.Case do
- @moduledoc """
- Template for ExUnit test case. It can be used as follows:
- ```
- defmodule Couch.Test.CRUD do
- use Couch.Test.ExUnit.Case
- ...
- def with_db(context, setup) do
- setup = setup
- |> Step.Start.new(:start, extra_apps: [:chttpd])
- |> Setup.run
- context = Map.merge(context, %{
- base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url
- })
- {context, setup}
- end
- describe "Group of tests" do
- @describetag setup: &__MODULE__.with_db/2
- test "Single test in a group", ctx do
- ctx.base_url
- end
- ...
- end
- ```
- """
-
- use ExUnit.CaseTemplate
- alias Couch.Test.Setup
-
- using do
- quote do
- require Logger
- use ExUnit.Case
- end
- end
-
- setup context do
- on_exit(fn ->
- :meck.unload()
- end)
-
- case context do
- %{:setup => setup_fun} ->
- {:ok, Setup.setup(context, setup_fun)}
- _ -> {:ok, context}
- end
- end
-end
\ No newline at end of file
diff --git a/test/elixir/lib/setup.ex b/test/elixir/lib/setup.ex
deleted file mode 100644
index 037988521..000000000
--- a/test/elixir/lib/setup.ex
+++ /dev/null
@@ -1,97 +0,0 @@
-defmodule Couch.Test.Setup do
- @moduledoc """
- Allows setup functions to be chained.
- Example usage:
-
- ```
- alias Couch.Test.Utils
- def with_db_name(context, setup) do
- setup =
- setup
- |> Step.Start.new(:start, extra_apps: [:chttpd])
- |> Step.User.new(:admin, roles: [:server_admin])
- |> Setup.run()
-
- context =
- Map.merge(context, %{
- db_name: Utils.random_name("db"),
- base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url(),
- user: setup |> Setup.get(:admin) |> Step.User.name()
- })
- {context, setup}
- end
-
- @tag setup: &__MODULE__.with_db_name/2
- test "Create", %{db_name: db_name, user: user} do
- ...
- end
- ```
- """
- import ExUnit.Callbacks, only: [on_exit: 1]
- import ExUnit.Assertions, only: [assert: 2]
- require Logger
-
- alias Couch.Test.Setup
- alias Couch.Test.Setup.Step
- defstruct stages: [], by_type: %{}, state: %{}
-
- def step(%Setup{stages: stages} = setup, id, step) do
- %{setup | stages: [{id, step} | stages]}
- end
-
- defp setup_step({id, step}, %Setup{state: state, by_type: by_type} = setup) do
- %module{} = step
- # credo:disable-for-next-line Credo.Check.Warning.LazyLogging
- Logger.debug("Calling 'setup/2' for '#{module}'")
- step = module.setup(setup, step)
- state = Map.put(state, id, step)
- by_type = Map.update(by_type, module, [id], fn ids -> [id | ids] end)
- on_exit(fn ->
- # credo:disable-for-next-line Credo.Check.Warning.LazyLogging
- Logger.debug("Calling 'teardown/3' for '#{module}'")
- try do
- module.teardown(setup, step)
- :ok
- catch
- _ -> :ok
- _, _ -> :ok
- end
- end)
- {{id, step}, %{setup | state: state, by_type: by_type}}
- end
-
- def run(%Setup{stages: stages} = setup) do
- {stages, setup} = stages
- |> Enum.reverse
- |> Enum.map_reduce(setup, &setup_step/2)
- %{setup | stages: stages}
- end
-
- def setup(ctx) do
- Map.get(ctx, :__setup)
- end
-
- def setup(ctx, setup_fun) do
- setup = %Setup{} |> Step.Config.new(:test_config, config_file: nil)
- {ctx, setup} = setup_fun.(ctx, setup)
- assert not Map.has_key?(ctx, :__setup), "Key `__setup` is reserved for internal purposes"
- Map.put(ctx, :__setup, setup)
- end
-
- def completed?(%Setup{by_type: by_type}, step) do
- Map.has_key?(by_type, step)
- end
-
- def all_for(%Setup{by_type: by_type, state: state}, step_module) do
- Map.take(state, by_type[step_module] || [])
- end
-
- def reduce_for(setup, step_module, acc, fun) do
- Enum.reduce(all_for(setup, step_module), acc, fun)
- end
-
- def get(%Setup{state: state}, id) do
- state[id]
- end
-
-end
\ No newline at end of file
diff --git a/test/elixir/lib/setup/common.ex b/test/elixir/lib/setup/common.ex
deleted file mode 100644
index e81f109c9..000000000
--- a/test/elixir/lib/setup/common.ex
+++ /dev/null
@@ -1,27 +0,0 @@
-defmodule Couch.Test.Setup.Common do
- @moduledoc """
- A set of common setup pipelines for reuse
-
- - httpd_with_admin - chttpd is started and a new admin is created
- - httpd_with_db - httpd_with_admin plus a new database is created
- """
- alias Couch.Test.Setup.Step
-
- def httpd_with_admin(setup) do
- setup
- |> Step.Start.new(:start, extra_apps: [:chttpd])
- |> Step.User.new(:admin, roles: [:server_admin])
- end
-
- def httpd_with_db(setup) do
- setup
- |> httpd_with_admin()
- |> Step.Create.DB.new(:db)
- end
-
- def with_db(setup) do
- setup
- |> Step.Start.new(:start, extra_apps: [:fabric])
- |> Step.Create.DB.new(:db)
- end
-end
\ No newline at end of file
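A hedged sketch of how the removed Common pipelines composed with the Setup/Step API shown above; the module and test names here are illustrative and not part of this diff.

```elixir
# Illustrative only: wiring Common.httpd_with_db/1 into an ExUnit case built
# on the removed Couch.Test.ExUnit.Case template.
defmodule Couch.Test.CommonPipelineExample do
  use Couch.Test.ExUnit.Case
  alias Couch.Test.Setup
  alias Couch.Test.Setup.Common
  alias Couch.Test.Setup.Step

  def with_db(context, setup) do
    # Start chttpd, create an admin, create a database, then run the pipeline.
    setup = setup |> Common.httpd_with_db() |> Setup.run()

    context =
      Map.merge(context, %{
        db_name: setup |> Setup.get(:db) |> Step.Create.DB.name()
      })

    {context, setup}
  end

  describe "Common pipelines" do
    @describetag setup: &__MODULE__.with_db/2

    test "database created by httpd_with_db is reachable", ctx do
      assert Couch.get("/#{ctx.db_name}").status_code == 200
    end
  end
end
```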
diff --git a/test/elixir/lib/step.ex b/test/elixir/lib/step.ex
deleted file mode 100644
index 316d765aa..000000000
--- a/test/elixir/lib/step.ex
+++ /dev/null
@@ -1,44 +0,0 @@
-defmodule Couch.Test.Setup.Step do
- @moduledoc """
- A behaviour module for implementing custom setup steps for future reuse.
-
- Every module implementing this behaviour must implement the following three functions:
- - new
- - setup
- - teardown
-
- Here is an example of a custom step
- ```
- defmodule Couch.Test.Setup.Step.Foo do
-
- alias Couch.Test.Setup
-
- defstruct [:foo_data, :foo_arg]
-
- def new(setup, id, arg: arg) do
- setup |> Setup.step(id, %__MODULE__{foo_arg: arg})
- end
-
- def setup(_setup, %__MODULE__{foo_arg: arg} = step) do
- ...
- foo_data = ...
- %{step | foo_data: foo_data}
- end
-
- def teardown(_setup, _step) do
- end
-
- def get_data(%__MODULE__{foo_data: data}) do
- data
- end
- end
- ```
- """
- @type t :: struct()
- @callback new(setup :: %Couch.Test.Setup{}, id :: atom(), args: Keyword.t()) ::
- %Couch.Test.Setup{}
- @callback setup(setup :: %Couch.Test.Setup{}, step :: t()) ::
- t()
- @callback teardown(setup :: %Couch.Test.Setup{}, step :: t()) ::
- any()
-end
\ No newline at end of file
diff --git a/test/elixir/lib/step/config.ex b/test/elixir/lib/step/config.ex
deleted file mode 100644
index 41d559908..000000000
--- a/test/elixir/lib/step/config.ex
+++ /dev/null
@@ -1,33 +0,0 @@
-defmodule Couch.Test.Setup.Step.Config do
- @moduledoc """
- This setup reads configuration for a test run.
- It is not supposed to be called manually.
- """
-
- alias Couch.Test.Setup
-
- defstruct [:config, :config_file]
-
- def new(setup, id, config_file: config_file) do
- setup |> Setup.step(id, %__MODULE__{config_file: config_file})
- end
-
- def setup(_setup, %__MODULE__{config_file: _config_file} = step) do
- # TODO we would need to access config file here
- %{step | config: %{
- backdoor: %{
- protocol: "http"
- },
- clustered: %{
- protocol: "http"
- }
- }}
- end
-
- def teardown(_setup, _step) do
- end
-
- def get(%__MODULE__{config: config}) do
- config
- end
-end
diff --git a/test/elixir/lib/step/create_db.ex b/test/elixir/lib/step/create_db.ex
deleted file mode 100644
index d38e6722f..000000000
--- a/test/elixir/lib/step/create_db.ex
+++ /dev/null
@@ -1,53 +0,0 @@
-defmodule Couch.Test.Setup.Step.Create.DB do
- @moduledoc """
- This setup step creates a database with the given name.
- If a name is not provided, a random name is used.
-
- Example
- setup
- ...
- |> Setup.Step.Create.DB.new(:db)
- ...
- |> Setup.run
- ...
-
- db_name = setup |> Setup.get(:db) |> Setup.Step.Create.DB.name
- """
- alias Couch.Test.Setup
- alias Couch.Test.Setup.Step
- alias Couch.Test.Utils
-
- defstruct [:name]
-
- import ExUnit.Assertions, only: [assert: 2]
-
- import Utils
-
- @admin {:user_ctx, user_ctx(roles: ["_admin"])}
-
- def new(setup, id) do
- new(setup, id, name: Utils.random_name("db"))
- end
-
- def new(setup, id, name: name) do
- setup |> Setup.step(id, %__MODULE__{name: name})
- end
-
- def setup(setup, %__MODULE__{name: name} = step) do
- assert Setup.completed?(setup, Step.Start), "Require `Start` step"
- assert :fabric in Step.Start.apps(), "Fabric is not started"
- res = :fabric.create_db(name, [@admin])
- assert res in [:ok, :accepted], "Cannot create `#{name}` database"
- step
- end
-
- def teardown(_setup, %__MODULE__{name: name} = _step) do
- :fabric.delete_db(name, [@admin])
- :ok
- end
-
- def name(%__MODULE__{name: name}) do
- name
- end
-
-end
diff --git a/test/elixir/lib/step/start.ex b/test/elixir/lib/step/start.ex
deleted file mode 100644
index ea7c70f5a..000000000
--- a/test/elixir/lib/step/start.ex
+++ /dev/null
@@ -1,85 +0,0 @@
-defmodule Couch.Test.Setup.Step.Start do
- @moduledoc """
- Step to start a set of couchdb applications. By default it starts the
- list of applications from the DEFAULT_APPS macro defined in `test_util.erl`.
- At the time of writing this list included:
- - inets
- - ibrowse
- - ssl
- - config
- - couch_epi
- - couch_event
- - couch
-
- It is possible to specify an additional list of applications to start.
-
- This setup also maintains `clustered_url` and `backdoor_url` for later use.
- The value of `clustered_url` is nil if the :chttpd app is not included in extra_apps.
-
- Example
- setup
- |> Setup.Step.Start.new(:start, extra_apps: [:fabric, :chttpd])
- ...
- |> Setup.run
- ...
-
- started_apps = Setup.Step.Start.apps
- clustered_url = setup |> Setup.get(:start) |> Setup.Step.Start.clustered_url
- backdoor_url = setup |> Setup.get(:start) |> Setup.Step.Start.backdoor_url
- """
- alias Couch.Test.Setup
- alias Couch.Test.Setup.Step
-
- defstruct [:test_ctx, :extra_apps, :clustered_url, :backdoor_url]
-
- def new(setup, id, extra_apps: extra_apps) do
- setup |> Setup.step(id, %__MODULE__{extra_apps: extra_apps || []})
- end
-
- def setup(setup, %__MODULE__{extra_apps: extra_apps} = step) do
- test_config = setup |> Setup.get(:test_config) |> Step.Config.get()
- protocol = test_config[:backdoor][:protocol] || "http"
- test_ctx = :test_util.start_couch(extra_apps)
- addr = :config.get('couch_httpd', 'bind_address', '127.0.0.1')
- port = :mochiweb_socket_server.get(:couch_httpd, :port)
- backdoor_url = "#{protocol}://#{addr}:#{port}"
- clustered_url =
- if :chttpd in extra_apps do
- protocol = test_config[:clustered][:protocol] || "http"
- addr = :config.get('chttpd', 'bind_address', '127.0.0.1')
- port = :mochiweb_socket_server.get(:chttpd, :port)
- "#{protocol}://#{addr}:#{port}"
- else
- nil
- end
- %{step |
- test_ctx: test_ctx,
- clustered_url: clustered_url,
- backdoor_url: backdoor_url
- }
- end
-
- def teardown(_setup, %__MODULE__{test_ctx: test_ctx}) do
- :test_util.stop_couch(test_ctx)
- end
-
- def backdoor_url(%__MODULE__{backdoor_url: url}) do
- url
- end
-
- def clustered_url(%__MODULE__{clustered_url: url}) do
- url
- end
-
- def extra_apps(%__MODULE__{extra_apps: apps}) do
- apps
- end
-
- @doc """
- Returns the list of currently running applications
- """
- def apps() do
- for {x, _, _} <- Application.started_applications, do: x
- end
-
-end
\ No newline at end of file
diff --git a/test/elixir/lib/step/user.ex b/test/elixir/lib/step/user.ex
deleted file mode 100644
index 49ef0feae..000000000
--- a/test/elixir/lib/step/user.ex
+++ /dev/null
@@ -1,103 +0,0 @@
-defmodule Couch.Test.Setup.Step.User do
- @moduledoc """
- Step to create a user with a given list of roles.
- The :server_admin role is special: it puts the user into the
- `admins` section of the config instead of the users database.
-
- Example
- setup
- |> Setup.Step.User.new(:admin, roles: [:server_admin])
- ...
- |> Setup.run
- ...
-
- user = setup |> Setup.get(:admin) |> Step.User.name()
- """
-
- alias Couch.Test.Setup
- alias Couch.Test.Utils
-
- import ExUnit.Callbacks, only: [on_exit: 1]
-
- defstruct [:roles, :name, :password, :users_db]
-
- import ExUnit.Assertions, only: [assert: 1, assert: 2]
-
- import Utils
-
- @admin {:user_ctx, user_ctx(roles: ["_admin"])}
-
- def new(setup, id, roles: roles) do
- setup |> Setup.step(id, %__MODULE__{roles: roles || []})
- end
-
- def setup(_setup, %__MODULE__{roles: roles} = step) do
- users_db = IO.chardata_to_string(
- :config.get('chttpd_auth', 'authentication_db', '_users'))
- if not Utils.db_exists?(users_db) do
- on_exit fn ->
- :fabric.delete_db(users_db, [@admin])
- end
- res = :fabric.create_db(users_db, [@admin])
- assert res in [:ok, :accepted], "Cannot create `users` database #{users_db}"
- end
-
- if :server_admin in roles do
- name = Utils.random_name("admin")
- pass = Utils.random_password()
- :config.set(
- 'admins', String.to_charlist(name), String.to_charlist(pass), false)
- %{step |
- name: name,
- password: pass,
- users_db: users_db
- }
- else
- name = Utils.random_name("admin")
- pass = Utils.random_password()
- doc_id = "org.couchdb.user:#{name}"
- user_doc = :couch_doc.from_json_obj(%{
- _id: doc_id,
- name: name,
- type: "user",
- roles: roles,
- password: pass
- })
- res = :fabric.update_doc(users_db, user_doc, [@admin])
- assert res in [:ok, :accepted], "Cannot create user document"
- %{step |
- name: name,
- password: pass,
- users_db: users_db,
- roles: roles
- }
- end
- end
-
- def teardown(_setup, %__MODULE__{name: name, users_db: users_db, roles: roles} = _step) do
- if :server_admin in roles do
- :config.delete("admins", String.to_charlist(name), false)
- else
- doc_id = "org.couchdb.user:#{name}"
- assert {:ok, doc_info(revs: [rev | _])} = :fabric.get_doc_info(users_db, doc_id, [])
- doc = :couch_doc.from_json_obj(%{
- _id: doc_id,
- _rev: rev,
- _deleted: true
- })
- assert {:ok, _resp} = :fabric.update_doc(users_db, doc, [@admin])
- end
- :ok
- end
-
- def name(%__MODULE__{name: name}) do
- name
- end
- def password(%__MODULE__{password: pass}) do
- pass
- end
- def credentials(%__MODULE__{name: name, password: pass}) do
- {name, pass}
- end
-
-end
diff --git a/test/elixir/lib/suite.ex b/test/elixir/lib/suite.ex
deleted file mode 100644
index c30332cb2..000000000
--- a/test/elixir/lib/suite.ex
+++ /dev/null
@@ -1,222 +0,0 @@
-defmodule Couch.Test.Suite do
- @moduledoc """
- Common code to configure the ExUnit runner.
- It replaces the usual invocation of `ExUnit.start()` in the
- integration tests' `test_helper.exs` with:
- ```
- Couch.Test.Suite.start()
- ```
- """
- @doc """
- This helper function can be used to create `suite.elixir`
- as follows:
- ```
- tests =
- Couch.Test.Suite.list()
- |> Enum.sort()
- |> Couch.Test.Suite.group_by()
-
- IO.puts(Couch.Test.Suite.pretty_print(tests))
-
- ```
- """
- def list() do
- test_paths = Keyword.get(Mix.Project.config(), :test_paths, [])
- Enum.reduce(test_paths, [], fn directory, acc ->
- list(directory) ++ acc
- end)
- end
-
- @doc """
- This helper function can be used to create `suite.elixir`
- as follows:
- ```
- tests =
- Couch.Test.Suite.list(["test/elixir/test"])
- |> Enum.sort()
- |> Couch.Test.Suite.group_by()
-
- IO.puts(Couch.Test.Suite.pretty_print(tests))
- ```
- """
- def list(directory) do
- ensure_exunit_started()
- Enum.reduce(test_files(directory), [], fn file_path, acc ->
- tests_in_file(file_path) ++ acc
- end)
- end
-
- @doc """
- This helper function is used in a snippet to create `suite.elixir`;
- see list/1.
- """
- def group_by(tests) do
- tests |> Enum.group_by(&module_name/1, &test_name/1)
- end
-
- @doc """
- This helper function is used in a snippet to create `suite.elixir`;
- see list/1.
- """
- def pretty_print(tests) do
- tests = Enum.join(Enum.sort(Enum.map(tests, fn {module_name, test_names} ->
- test_names = test_names
- |> Enum.map(fn x -> ~s("#{x}") end) |> Enum.join(",\n ")
- ~s( "#{module_name}": [\n #{test_names}\n ])
- end)), ",\n")
- "%{\n#{tests}\n}"
- end
-
- def start(exclude \\ []) do
- # If a build number is detected, assume we are running on Jenkins
- # and skip certain tests that fail on Jenkins.
- default_exclude =
- case System.get_env("BUILD_NUMBER") !== nil do
- true -> [:pending, :skip_on_jenkins]
- false -> [:pending]
- end
-
- current_exclude = Keyword.get(ExUnit.configuration(), :exclude, [])
- {ignores, current_exclude} = from_file(current_exclude)
-
- current_include = Keyword.get(ExUnit.configuration(), :include, [])
- {suite, current_include} = from_file(current_include)
-
- only_test_ids =
- case suite -- ignores do
- [] ->
- nil
-
- test_ids ->
- to_tests(test_ids)
- end
-
- ExUnit.configure(
- exclude: Enum.uniq(default_exclude ++ current_exclude ++ exclude),
- include: current_include,
- formatters: [JUnitFormatter, ExUnit.CLIFormatter],
- only_test_ids: only_test_ids
- )
-
- ExUnit.start()
- end
-
- # Helpers for start/0
-
- defp split_files(opts) do
- {files, opts} =
- Enum.split_with(opts, fn x ->
- String.ends_with?(Atom.to_string(x), ".elixir")
- end)
-
- {Enum.map(files, &Atom.to_string/1), opts}
- end
-
- defp read_from_file(file_name) do
- {map, _} = Code.eval_file(file_name)
-
- map
- |> Enum.reduce([], fn {module, tests}, acc ->
- Enum.map(tests, &{module, &1}) ++ acc
- end)
- end
-
- defp from_file(opts) do
- case split_files(opts) do
- {[], opts} ->
- {[], opts}
-
- {[file_name], opts} ->
- {read_from_file(file_name), opts}
-
- {_, _} ->
- throw("Only one file is supported in --exclude or --include")
- end
- end
-
- defp to_tests(ids) do
- MapSet.new(
- Enum.map(ids, fn {module_name, test_name} ->
- {String.to_atom("Elixir.#{module_name}"), String.to_atom("test #{test_name}")}
- end)
- )
- end
-
- # Helpers for list/0
-
- defp ensure_exunit_started() do
- if not Process.get(EXUNIT_STARTED, false) do
- started? =
- Application.started_applications()
- |> Enum.map(&Kernel.elem(&1, 0))
- |> Enum.member?(:ex_unit)
-
- if not started? do
- ExUnit.start(autorun: false)
- Process.put(EXUNIT_STARTED, true)
- end
- end
- end
-
- defp test_files(directory) do
- files = Path.wildcard(Path.join(directory, "*_test.exs"))
- Enum.filter(files, &File.regular?/1)
- end
-
- defp test_helpers(directory) do
- files = Path.wildcard(Path.join(directory, "*_helpers.exs"))
- Enum.filter(files, &File.regular?/1)
- end
-
- def tests_in_file(file_path) do
- ensure_exunit_started()
- Code.compiler_options(ignore_module_conflict: true)
-
- Enum.each(
- test_helpers(Path.dirname(file_path)), &require_file/1
- )
-
- tests =
- Enum.reduce(require_file(file_path), [], fn {module_name, _}, acc ->
- if :erlang.function_exported(module_name, :__ex_unit__, 0) do
- module_name.__ex_unit__().tests ++ acc
- else
- acc
- end
- end)
-
- Code.unrequire_files([file_path])
- tests
- end
-
- def require_file(file_path) do
- drop_stderr(fn ->
- Code.require_file(file_path)
- end)
- end
-
- defp drop_stderr(fun) do
- {:ok, pid} = StringIO.open("")
- original_pid = Process.whereis(:standard_error)
-
- try do
- Process.unregister(:standard_error)
- Process.register(pid, :standard_error)
- fun.()
- after
- Process.unregister(:standard_error)
- Process.register(original_pid, :standard_error)
- StringIO.close(pid)
- end
- end
-
- defp test_name(test) do
- String.replace_leading(Atom.to_string(test.name), "test ", "")
- end
-
- defp module_name(test) do
- test.module
- |> Atom.to_string()
- |> String.replace_leading("Elixir.", "")
- end
-end
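For context, the `*.elixir` files consumed by `from_file/1` above are plain Elixir terms evaluated with `Code.eval_file/1`: a map of module names (without the `Elixir.` prefix) to lists of test names (without the leading `test ` that ExUnit prepends). A hedged sketch of such a file, mirroring the shape that `pretty_print/1` emits; the module and test names are illustrative.

```elixir
# suite.elixir (illustrative contents) -- the shape produced by
# Couch.Test.Suite.pretty_print/1 and read back by read_from_file/1.
%{
  "AllDocsTest": [
    "All Docs tests",
    "GET with one key"
  ],
  "AttachmentNamesTest": [
    "saves attachment names successfully"
  ]
}
```

Passing such a file via `--include suite.elixir` (or `--exclude`) appears to be the intended invocation, since `split_files/1` picks out options ending in `.elixir`; this reading is an inference from the code above rather than documented behaviour.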
diff --git a/test/elixir/lib/utils.ex b/test/elixir/lib/utils.ex
deleted file mode 100644
index 3ecf878e7..000000000
--- a/test/elixir/lib/utils.ex
+++ /dev/null
@@ -1,61 +0,0 @@
-defmodule Couch.Test.Utils do
- require Record
- @moduledoc "Helper functions for testing"
- @project_root "#{__DIR__}/../../../"
- Record.defrecord :user_ctx, Record.extract(
- :user_ctx, from: "#{@project_root}/src/couch/include/couch_db.hrl")
-
- Record.defrecord :doc_info, Record.extract(
- :doc_info, from: "#{@project_root}/src/couch/include/couch_db.hrl")
-
- def random_name(prefix) do
- time = :erlang.monotonic_time()
- umi = :erlang.unique_integer([:monotonic])
- "#{prefix}-#{time}-#{umi}"
- end
-
- def random_password() do
- rand_bytes = :crypto.strong_rand_bytes(16)
- rand_bytes
- |> :base64.encode()
- |> String.slice(0..16)
- end
-
- def db_exists?(db_name) do
- try do
- :fabric.get_db_info(db_name)
- catch
- :error, :database_does_not_exist -> false
- end
- end
-
- @doc """
- In some cases we need to access record definition at compile time.
- We cannot use Record.defrecord in such cases. This helper function
- can be used instead. Use it as follows:
- ```
- defmodule Foo do
- admin_ctx = {:user_ctx, Utils.erlang_record(
- :user_ctx, "couch/include/couch_db.hrl", roles: ["_admin"])}
- end
- ```
-
- Longer term we should wrap Erlang records as is done for user_ctx at
- the beginning of Utils.ex. Then we would be able to use them at
- compile time in other modules.
- ```
- Record.defrecord :user_ctx, Record.extract(
- :user_ctx, from_lib: "couch/include/couch_db.hrl")
- ```
- """
- def erlang_record(name, from_lib, opts \\ []) do
- record_info = Record.extract(name, from_lib: from_lib)
- index = [name | Keyword.keys(record_info)] |> Enum.with_index
- draft = [name | Keyword.values(record_info)] |> List.to_tuple
- opts
- |> Enum.reduce(draft, fn
- {k, v}, acc -> put_elem(acc, index[k], v)
- end)
- end
-
-end
\ No newline at end of file
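A hedged sketch of the compile-time use that the removed `erlang_record/3` enables, mirroring the snippet in its moduledoc; the surrounding module name is illustrative, and the field layout of `user_ctx` is assumed rather than copied from `couch_db.hrl`.

```elixir
# Illustrative only: Record.defrecord-generated macros cannot be used at this
# point at compile time, so build the raw user_ctx record tuple instead and
# embed it in a module attribute.
defmodule Couch.Test.ErlangRecordExample do
  alias Couch.Test.Utils

  @admin {:user_ctx,
          Utils.erlang_record(
            :user_ctx,
            "couch/include/couch_db.hrl",
            roles: ["_admin"]
          )}

  # Options list in the shape the :fabric calls above expect, e.g.
  # :fabric.create_db(db_name, admin_opts()).
  def admin_opts(), do: [@admin]
end
```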
diff --git a/test/elixir/run.cmd b/test/elixir/run.cmd
deleted file mode 100644
index f1789adce..000000000
--- a/test/elixir/run.cmd
+++ /dev/null
@@ -1,7 +0,0 @@
-@ECHO OFF
-
-cd %~dp0
-call mix local.hex --force
-call mix local.rebar --force
-call mix deps.get
-call mix test --trace %*
diff --git a/test/elixir/test/all_docs_test.exs b/test/elixir/test/all_docs_test.exs
deleted file mode 100644
index 3d07e12e8..000000000
--- a/test/elixir/test/all_docs_test.exs
+++ /dev/null
@@ -1,317 +0,0 @@
-defmodule AllDocsTest do
- use CouchTestCase
-
- @moduletag :all_docs
-
- @moduledoc """
- Test CouchDB _all_docs
- This is a port of the all_docs.js suite
- """
-
- # TODO: do we need to bring this in?
- # var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"}, {w: 3});
-
- @tag :with_db
- test "All Docs tests", context do
- db_name = context[:db_name]
- resp1 = Couch.post("/#{db_name}", body: %{:_id => "0", :a => 1, :b => 1}).body
- resp2 = Couch.post("/#{db_name}", body: %{:_id => "3", :a => 4, :b => 16}).body
- resp3 = Couch.post("/#{db_name}", body: %{:_id => "1", :a => 2, :b => 4}).body
- resp4 = Couch.post("/#{db_name}", body: %{:_id => "2", :a => 3, :b => 9}).body
-
- assert resp1["ok"]
- assert resp2["ok"]
- assert resp3["ok"]
- assert resp4["ok"]
-
- revs = [resp1["rev"], resp2["rev"], resp3["rev"], resp4["rev"]]
-
- # Check _all_docs
- resp = Couch.get("/#{db_name}/_all_docs").body
- rows = resp["rows"]
- assert resp["total_rows"] == length(rows)
-
- Enum.each(rows, fn row ->
- assert row["id"] >= "0" && row["id"] <= "4"
- end)
-
- # Check _all_docs with descending=true
- resp = Couch.get("/#{db_name}/_all_docs", query: %{:descending => true}).body
- rows = resp["rows"]
- assert resp["total_rows"] == length(rows)
-
- # Check _all_docs offset
- retry_until(fn ->
- resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => "\"2\""}).body
- assert resp["offset"] == 2
- end)
-
- # Confirm that queries may assume raw collation
- resp =
- Couch.get(
- "/#{db_name}/_all_docs",
- query: %{
- :startkey => "\"org.couchdb.user:\"",
- :endkey => "\"org.couchdb.user;\""
- }
- )
-
- assert Enum.empty?(resp.body["rows"])
-
- # Check that all docs show up in the changes feed; order can vary
- resp = Couch.get("/#{db_name}/_changes").body
-
- Enum.each(resp["results"], fn row ->
- assert Enum.member?(revs, hd(row["changes"])["rev"]),
- "doc #{row["id"]} should be in changes"
- end)
-
- # Check that deletions also show up right
- doc1 = Couch.get("/#{db_name}/1").body
- assert Couch.delete("/#{db_name}/1", query: %{:rev => doc1["_rev"]}).body["ok"]
- changes = Couch.get("/#{db_name}/_changes").body["results"]
- assert length(changes) == 4
-
- retry_until(fn ->
- deleted = Enum.filter(changes, fn row -> row["deleted"] end)
- assert length(deleted) == 1
- assert hd(deleted)["id"] == "1"
- end)
-
- # (remember old seq)
- orig_doc = Enum.find(changes, fn row -> row["id"] == "3" end)
- # Perform an update
- doc3 = Couch.get("/#{db_name}/3").body
- doc3 = Map.put(doc3, :updated, "totally")
- assert Couch.put("/#{db_name}/3", body: doc3).body["ok"]
-
- # The update should make doc id 3 have another seq num
- # (not necessarily higher or the last one, though)
- changes = Couch.get("/#{db_name}/_changes").body["results"]
- assert length(changes) == 4
- updated_doc = Enum.find(changes, fn row -> row["id"] == "3" end)
- assert orig_doc["seq"] != updated_doc["seq"], "seq num should be different"
-
- # Ok, now let's see what happens with include docs
- changes =
- Couch.get("/#{db_name}/_changes", query: %{:include_docs => true}).body["results"]
-
- assert length(changes) == 4
- updated_doc = Enum.find(changes, fn row -> row["id"] == doc3["_id"] end)
- assert updated_doc["doc"]["updated"] == "totally"
-
- deleted_doc = Enum.find(changes, fn row -> row["deleted"] end)
- assert deleted_doc["doc"]["_deleted"]
-
- # Test _all_docs with keys
- rows =
- Couch.post(
- "/#{db_name}/_all_docs",
- query: %{:include_docs => true},
- body: %{:keys => ["1"]}
- ).body["rows"]
-
- row = hd(rows)
- assert length(rows) == 1
- assert row["key"] == "1"
- assert row["id"] == "1"
- assert row["value"]["deleted"]
- assert row["doc"] == :null
-
- # Add conflicts
- conflicted_doc1 = %{
- :_id => "3",
- :_rev => "2-aa01552213fafa022e6167113ed01087",
- :value => "X"
- }
-
- conflicted_doc2 = %{
- :_id => "3",
- :_rev => "2-ff01552213fafa022e6167113ed01087",
- :value => "Z"
- }
-
- assert Couch.put(
- "/#{db_name}/3",
- query: %{:new_edits => false},
- body: conflicted_doc1
- ).body["ok"]
-
- assert Couch.put(
- "/#{db_name}/3",
- query: %{:new_edits => false},
- body: conflicted_doc2
- ).body["ok"]
-
- win_rev = Couch.get("/#{db_name}/3").body
-
- changes =
- Couch.get(
- "/#{db_name}/_changes",
- query: %{:include_docs => true, :conflicts => true, :style => "all_docs"}
- ).body["results"]
-
- doc3 = Enum.find(changes, fn row -> row["id"] == "3" end)
- assert doc3["id"] == "3"
- assert length(doc3["changes"]) == 3
- assert win_rev["_rev"] == hd(doc3["changes"])["rev"]
- assert is_list(doc3["doc"]["_conflicts"])
- assert length(doc3["doc"]["_conflicts"]) == 2
-
- rows =
- Couch.get(
- "/#{db_name}/_all_docs",
- query: %{:include_docs => true, :conflicts => true}
- ).body["rows"]
-
- assert length(rows) == 3
- change = hd(tl(tl(rows)))
- assert change["key"] == "3"
- assert change["id"] == "3"
- assert change["value"]["rev"] == win_rev["_rev"]
- assert change["doc"]["_rev"] == win_rev["_rev"]
- assert change["doc"]["_id"] == "3"
- assert is_list(change["doc"]["_conflicts"])
- assert length(change["doc"]["_conflicts"]) == 2
-
- # Test that _all_docs collates sanely
- assert Couch.post("/#{db_name}", body: %{:_id => "Z", :foo => "Z"}).body["ok"]
- assert Couch.post("/#{db_name}", body: %{:_id => "a", :foo => "a"}).body["ok"]
-
- rows =
- Couch.get(
- "/#{db_name}/_all_docs",
- query: %{:startkey => "\"Z\"", :endkey => "\"Z\""}
- ).body["rows"]
-
- assert length(rows) == 1
- end
-
- @tag :with_db
- test "GET with one key", context do
- db_name = context[:db_name]
-
- {:ok, _} = create_doc(
- db_name,
- %{
- _id: "foo",
- bar: "baz"
- }
- )
-
- {:ok, _} = create_doc(
- db_name,
- %{
- _id: "foo2",
- bar: "baz2"
- }
- )
-
- resp = Couch.get(
- "/#{db_name}/_all_docs",
- query: %{
- :key => "\"foo\"",
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
-
- @tag :with_db
- test "POST with empty body", context do
- db_name = context[:db_name]
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..2)})
- assert resp.status_code in [201, 202]
-
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- body: %{}
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 3
- end
-
- @tag :with_db
- test "POST with keys and limit", context do
- db_name = context[:db_name]
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
- assert resp.status_code in [201, 202]
-
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- body: %{
- :keys => [1, 2],
- :limit => 1
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- @tag :with_db
- test "POST with query parameter and JSON body", context do
- db_name = context[:db_name]
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
- assert resp.status_code in [201, 202]
-
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- query: %{
- :limit => 1
- },
- body: %{
- :keys => [1, 2]
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- @tag :with_db
- test "POST edge case with colliding parameters - query takes precedence", context do
- db_name = context[:db_name]
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
- assert resp.status_code in [201, 202]
-
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- query: %{
- :limit => 1
- },
- body: %{
- :keys => [1, 2],
- :limit => 2
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- @tag :with_db
- test "POST boolean", context do
- db_name = context[:db_name]
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
- assert resp.status_code in [201, 202]
-
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- body: %{
- :stable => true,
- :update => true
- }
- )
-
- assert resp.status_code == 200
- end
-end
diff --git a/test/elixir/test/attachment_names_test.exs b/test/elixir/test/attachment_names_test.exs
deleted file mode 100644
index a89b26548..000000000
--- a/test/elixir/test/attachment_names_test.exs
+++ /dev/null
@@ -1,112 +0,0 @@
-defmodule AttachmentNamesTest do
- use CouchTestCase
-
- @moduletag :attachments
-
- @good_doc """
- {
- "_id": "good_doc",
- "_attachments": {
- "Kолян.txt": {
- "content_type": "application/octet-stream",
- "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
- """
-
- @bin_att_doc %{
- _id: "bin_doc",
- _attachments: %{
- footxt: %{
- content_type: "text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
-
- @bin_data "JHAPDO*AU£PN ){(3u[d 93DQ9¡€])} ææøo'∂ƒæ≤çæππ•¥∫¶®#†π¶®¥π€ª®˙π8np"
-
- @leading_underscores_att """
- {
- "_id": "bin_doc2",
- "_attachments": {
- "_foo.txt": {
- "content_type": "text/plain",
- "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
- """
-
- @moduledoc """
- Test CouchDB attachment names
- This is a port of the attachment_names.js suite
- """
-
- @tag :with_db
- test "saves attachment names successfully", context do
- db_name = context[:db_name]
- filename = URI.encode("Kолян.txt", &URI.char_unreserved?(&1))
- resp = Couch.post("/#{db_name}", body: @good_doc)
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- resp = Couch.get("/#{db_name}/good_doc/#{filename}")
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "application/octet-stream"
- assert resp.headers["Etag"] == ~s("aEI7pOYCRBLTRQvvqYrrJQ==")
-
- resp = Couch.post("/#{db_name}", body: @bin_att_doc)
- assert(resp.status_code == 201)
-
- # standalone docs
- resp =
- Couch.put(
- "/#{db_name}/bin_doc3/attachmenttxt",
- body: @bin_data,
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert(resp.status_code == 201)
-
- # bulk docs
- docs = %{
- docs: [@bin_att_doc]
- }
-
- resp =
- Couch.post(
- "/#{db_name}/_bulk_docs",
- body: docs
- )
-
- assert(resp.status_code == 201)
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc2",
- body: @leading_underscores_att
- )
-
- assert resp.status_code == 400
-
- assert resp.body["reason"] ==
- "Attachment name '_foo.txt' starts with prohibited character '_'"
-
- resp =
- Couch.post(
- "/#{db_name}",
- body: @leading_underscores_att
- )
-
- assert resp.status_code == 400
-
- assert resp.body["reason"] ==
- "Attachment name '_foo.txt' starts with prohibited character '_'"
-
- resp = Couch.get("/#{db_name}/bin_doc2/_foo.txt")
-
- assert resp.status_code == 404
- end
-end
diff --git a/test/elixir/test/attachment_paths_test.exs b/test/elixir/test/attachment_paths_test.exs
deleted file mode 100644
index b776feabf..000000000
--- a/test/elixir/test/attachment_paths_test.exs
+++ /dev/null
@@ -1,177 +0,0 @@
-defmodule AttachmentPathsTest do
- use CouchTestCase
-
- @moduletag :attachments
-
- @bin_att_doc """
- {
- "_id": "bin_doc",
- "_attachments": {
- "foo/bar.txt": {
- "content_type": "text/plain",
- "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- },
- "foo%2Fbaz.txt": {
- "content_type": "text/plain",
- "data": "V2UgbGlrZSBwZXJjZW50IHR3byBGLg=="
- }
- }
- }
- """
-
- @design_att_doc """
- {
- "_id": "_design/bin_doc",
- "_attachments": {
- "foo/bar.txt": {
- "content_type": "text/plain",
- "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- },
- "foo%2Fbaz.txt": {
- "content_type": "text/plain",
- "data": "V2UgbGlrZSBwZXJjZW50IHR3byBGLg=="
- }
- }
- }
- """
-
- @moduledoc """
- Test CouchDB attachment paths
- This is a port of the attachment_paths.js suite
- """
-
- @tag :with_db_name
- test "manages attachment paths successfully", context do
- db_name =
- URI.encode(
- "#{context[:db_name]}/with_slashes",
- &URI.char_unreserved?(&1)
- )
-
- create_db(db_name)
-
- resp = Couch.post("/#{db_name}", body: @bin_att_doc)
- msg = "Should return 201-Created"
-
- assert resp.status_code in [201, 202], msg
-
- rev = resp.body["rev"]
-
- resp = Couch.get("/#{db_name}/bin_doc/foo/bar.txt")
- assert resp.status_code == 200
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "text/plain"
-
- resp = Couch.get("/#{db_name}/bin_doc/foo%2Fbar.txt")
- assert resp.status_code == 200
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "text/plain"
-
- resp = Couch.get("/#{db_name}/bin_doc/foo/baz.txt")
- assert resp.status_code == 404
-
- resp = Couch.get("/#{db_name}/bin_doc/foo%252Fbaz.txt")
- assert resp.status_code == 200
- assert resp.body == "We like percent two F."
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc/foo/attachment.txt",
- body: "Just some text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert resp.status_code == 409
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc/foo/bar2.txt",
- query: %{rev: rev},
- body: "This is no base64 encoded text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/bin_doc")
- assert resp.status_code == 200
-
- att_doc = resp.body
-
- assert att_doc["_attachments"]["foo/bar.txt"]
- assert att_doc["_attachments"]["foo%2Fbaz.txt"]
- assert att_doc["_attachments"]["foo/bar2.txt"]
-
- ctype = att_doc["_attachments"]["foo/bar2.txt"]["content_type"]
- assert ctype == "text/plain;charset=utf-8"
-
- assert att_doc["_attachments"]["foo/bar2.txt"]["length"] == 30
- delete_db(db_name)
- end
-
- @tag :with_db_name
- test "manages attachment paths successfully - design docs", context do
- db_name =
- URI.encode(
- "#{context[:db_name]}/with_slashes",
- &URI.char_unreserved?(&1)
- )
-
- create_db(db_name)
- resp = Couch.post("/#{db_name}", body: @design_att_doc)
- assert resp.status_code in [201, 202]
-
- rev = resp.body["rev"]
-
- resp = Couch.get("/#{db_name}/_design/bin_doc/foo/bar.txt")
- assert resp.status_code == 200
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "text/plain"
-
- resp = Couch.get("/#{db_name}/_design/bin_doc/foo%2Fbar.txt")
- assert resp.status_code == 200
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "text/plain"
-
- resp = Couch.get("/#{db_name}/_design/bin_doc/foo/baz.txt")
- assert resp.status_code == 404
-
- resp = Couch.get("/#{db_name}/_design/bin_doc/foo%252Fbaz.txt")
- assert resp.status_code == 200
- assert resp.body == "We like percent two F."
-
- resp =
- Couch.put(
- "/#{db_name}/_design/bin_doc/foo/attachment.txt",
- body: "Just some text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert resp.status_code == 409
-
- resp =
- Couch.put(
- "/#{db_name}/_design/bin_doc/foo/bar2.txt",
- query: %{rev: rev},
- body: "This is no base64 encoded text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/_design/bin_doc")
- assert resp.status_code == 200
-
- att_doc = resp.body
-
- assert att_doc["_attachments"]["foo/bar.txt"]
- assert att_doc["_attachments"]["foo%2Fbaz.txt"]
- assert att_doc["_attachments"]["foo/bar2.txt"]
-
- ctype = att_doc["_attachments"]["foo/bar2.txt"]["content_type"]
- assert ctype == "text/plain;charset=utf-8"
-
- assert att_doc["_attachments"]["foo/bar2.txt"]["length"] == 30
- delete_db(db_name)
- end
-end
diff --git a/test/elixir/test/attachment_ranges_test.exs b/test/elixir/test/attachment_ranges_test.exs
deleted file mode 100644
index 01c1239bc..000000000
--- a/test/elixir/test/attachment_ranges_test.exs
+++ /dev/null
@@ -1,143 +0,0 @@
-defmodule AttachmentRangesTest do
- use CouchTestCase
-
- @moduletag :attachments
-
- @moduledoc """
- Test CouchDB attachment range requests
- This is a port of the attachment_ranges.js suite
- """
-
- @tag :with_db
- test "manages attachment range requests successfully", context do
- db_name = context[:db_name]
-
- bin_att_doc = %{
- _id: "bin_doc",
- _attachments: %{
- "foo.txt": %{
- content_type: "application/octet-stream",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
-
- create_doc(db_name, bin_att_doc)
- # Fetching the whole entity is a 206
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=0-28"]
- )
-
- assert(resp.status_code == 206)
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Range"] == "bytes 0-28/29"
- assert resp.headers["Content-Length"] == "29"
-
- # Fetch the whole entity without an end offset is a 200
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=0-"]
- )
-
- assert(resp.status_code == 200)
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Range"] == nil
- assert resp.headers["Content-Length"] == "29"
-
- # Even if you ask multiple times.
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=0-,0-,0-"]
- )
-
- assert(resp.status_code == 200)
-
- # Badly formed range header is a 200
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes:0-"]
- )
-
- assert(resp.status_code == 200)
-
- # Fetch the end of an entity without an end offset is a 206
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=2-"]
- )
-
- assert(resp.status_code == 206)
- assert resp.body == "is is a base64 encoded text"
- assert resp.headers["Content-Range"] == "bytes 2-28/29"
- assert resp.headers["Content-Length"] == "27"
-
- # Fetch first part of entity is a 206
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=0-3"]
- )
-
- assert(resp.status_code == 206)
- assert resp.body == "This"
- assert resp.headers["Content-Range"] == "bytes 0-3/29"
- assert resp.headers["Content-Length"] == "4"
-
- # Fetch middle of entity is also a 206
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=10-15"]
- )
-
- assert(resp.status_code == 206)
- assert resp.body == "base64"
- assert resp.headers["Content-Range"] == "bytes 10-15/29"
- assert resp.headers["Content-Length"] == "6"
-
- # Fetch end of entity is also a 206
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=-3"]
- )
-
- assert(resp.status_code == 206)
- assert resp.body == "ext"
- assert resp.headers["Content-Range"] == "bytes 26-28/29"
- assert resp.headers["Content-Length"] == "3"
-
- # backward range is 416
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=5-3"]
- )
-
- assert(resp.status_code == 416)
-
- # range completely outside of entity is 416
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=300-310"]
- )
-
- assert(resp.status_code == 416)
-
- # We ignore a Range header with too many ranges
- resp =
- Couch.get(
- "/#{db_name}/bin_doc/foo.txt",
- headers: [Range: "bytes=0-1,0-1,0-1,0-1,0-1,0-1,0-1,0-1,0-1,0-1"]
- )
-
- assert(resp.status_code == 200)
- end
-end
diff --git a/test/elixir/test/attachment_views_test.exs b/test/elixir/test/attachment_views_test.exs
deleted file mode 100644
index 3da62f042..000000000
--- a/test/elixir/test/attachment_views_test.exs
+++ /dev/null
@@ -1,142 +0,0 @@
-defmodule AttachmentViewTest do
- use CouchTestCase
-
- @moduletag :attachments
-
- @moduledoc """
- Test CouchDB attachment views requests
- This is a port of the attachment_views.js suite
- """
-
- @tag :with_db
- test "manages attachments in views successfully", context do
- db_name = context[:db_name]
- attachment_data = "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
-
- attachment_template_1 = %{
- "_attachments" => %{
- "foo.txt" => %{
- "content_type" => "text/plain",
- "data" => attachment_data
- }
- }
- }
-
- attachment_template_2 = %{
- "_attachments" => %{
- "foo.txt" => %{
- "content_type" => "text/plain",
- "data" => attachment_data
- },
- "bar.txt" => %{
- "content_type" => "text/plain",
- "data" => attachment_data
- }
- }
- }
-
- attachment_template_3 = %{
- "_attachments" => %{
- "foo.txt" => %{
- "content_type" => "text/plain",
- "data" => attachment_data
- },
- "bar.txt" => %{
- "content_type" => "text/plain",
- "data" => attachment_data
- },
- "baz.txt" => %{
- "content_type" => "text/plain",
- "data" => attachment_data
- }
- }
- }
-
- bulk_save(db_name, make_docs(0..9))
- bulk_save(db_name, make_docs(10..19, attachment_template_1))
- bulk_save(db_name, make_docs(20..29, attachment_template_2))
- bulk_save(db_name, make_docs(30..39, attachment_template_3))
-
- map_function = """
- function(doc) {
- var count = 0;
-
- for(var idx in doc._attachments) {
- count = count + 1;
- }
-
- emit(parseInt(doc._id), count);
- }
- """
-
- reduce_function = """
- function(key, values) {
- return sum(values);
- }
- """
-
- result = query(db_name, map_function, reduce_function)
- assert length(result["rows"]) == 1
- assert Enum.at(result["rows"], 0)["value"] == 60
-
- result =
- query(db_name, map_function, reduce_function, %{
- startkey: 10,
- endkey: 19
- })
-
- assert length(result["rows"]) == 1
- assert Enum.at(result["rows"], 0)["value"] == 10
-
- result = query(db_name, map_function, reduce_function, %{startkey: 20, endkey: 29})
- assert length(result["rows"]) == 1
- assert Enum.at(result["rows"], 0)["value"] == 20
-
- result =
- query(db_name, map_function, nil, %{
- startkey: 30,
- endkey: 39,
- include_docs: true
- })
-
- assert length(result["rows"]) == 10
- assert Enum.at(result["rows"], 0)["value"] == 3
- attachment = Enum.at(result["rows"], 0)["doc"]["_attachments"]["baz.txt"]
- assert attachment["stub"] == true
- assert Map.has_key?(attachment, "data") == false
- assert Map.has_key?(attachment, "encoding") == false
- assert Map.has_key?(attachment, "encoded_length") == false
-
- result =
- query(db_name, map_function, nil, %{
- startkey: 30,
- endkey: 39,
- include_docs: true,
- attachments: true
- })
-
- assert length(result["rows"]) == 10
- assert Enum.at(result["rows"], 0)["value"] == 3
- attachment = Enum.at(result["rows"], 0)["doc"]["_attachments"]["baz.txt"]
- assert attachment["data"] == attachment_data
- assert Map.has_key?(attachment, "stub") == false
- assert Map.has_key?(attachment, "encoding") == false
- assert Map.has_key?(attachment, "encoded_length") == false
-
- result =
- query(db_name, map_function, nil, %{
- startkey: 30,
- endkey: 39,
- include_docs: true,
- att_encoding_info: true
- })
-
- assert length(result["rows"]) == 10
- assert Enum.at(result["rows"], 0)["value"] == 3
- attachment = Enum.at(result["rows"], 0)["doc"]["_attachments"]["baz.txt"]
- assert attachment["stub"] == true
- assert attachment["encoding"] == "gzip"
- assert attachment["encoded_length"] == 47
- assert Map.has_key?(attachment, "data") == false
- end
-end
diff --git a/test/elixir/test/attachments_multipart_test.exs b/test/elixir/test/attachments_multipart_test.exs
deleted file mode 100644
index 2cedef513..000000000
--- a/test/elixir/test/attachments_multipart_test.exs
+++ /dev/null
@@ -1,476 +0,0 @@
-defmodule AttachmentMultipartTest do
- use CouchTestCase
-
- @moduletag :attachments
-
- @moduledoc """
- Test CouchDB attachment multipart requests
- This is a port of the attachments_multipart.js suite
- """
-
- @tag :with_db
- test "manages attachments multipart requests successfully", context do
- db_name = context[:db_name]
-
- document = """
- {
- "body": "This is a body.",
- "_attachments": {
- "foo.txt": {
- "follows": true,
- "content_type": "application/test",
- "length": 21
- },
- "bar.txt": {
- "follows": true,
- "content_type": "application/test",
- "length": 20
- },
- "baz.txt": {
- "follows": true,
- "content_type": "text/plain",
- "length": 19
- }
- }
- }
- """
-
- multipart_data =
- "--abc123\r\n" <>
- "content-type: application/json\r\n" <>
- "\r\n" <>
- document <>
- "\r\n--abc123\r\n" <>
- "\r\n" <>
- "this is 21 chars long" <>
- "\r\n--abc123\r\n" <>
- "\r\n" <>
- "this is 20 chars lon" <>
- "\r\n--abc123\r\n" <> "\r\n" <> "this is 19 chars lo" <> "\r\n--abc123--epilogue"
-
- resp =
- Couch.put(
- "/#{db_name}/multipart",
- body: multipart_data,
- headers: ["Content-Type": "multipart/related;boundary=\"abc123\""]
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"] == true
-
- resp = Couch.get("/#{db_name}/multipart/foo.txt")
-
- assert resp.body == "this is 21 chars long"
-
- resp = Couch.get("/#{db_name}/multipart/bar.txt")
-
- assert resp.body == "this is 20 chars lon"
-
- resp = Couch.get("/#{db_name}/multipart/baz.txt")
-
- assert resp.body == "this is 19 chars lo"
-
- doc = Couch.get("/#{db_name}/multipart", query: %{att_encoding_info: true})
- first_rev = doc.body["_rev"]
-
- assert doc.body["_attachments"]["foo.txt"]["stub"] == true
- assert doc.body["_attachments"]["bar.txt"]["stub"] == true
- assert doc.body["_attachments"]["baz.txt"]["stub"] == true
-
- assert Map.has_key?(doc.body["_attachments"]["foo.txt"], "encoding") == false
- assert Map.has_key?(doc.body["_attachments"]["bar.txt"], "encoding") == false
- assert doc.body["_attachments"]["baz.txt"]["encoding"] == "gzip"
-
- document_updated = """
- {
- "_rev": "#{first_rev}",
- "body": "This is a body.",
- "_attachments": {
- "foo.txt": {
- "stub": true,
- "content_type": "application/test"
- },
- "bar.txt": {
- "follows": true,
- "content_type": "application/test",
- "length": 18
- }
- }
- }
- """
-
- multipart_data_updated =
- "--abc123\r\n" <>
- "content-type: application/json\r\n" <>
- "\r\n" <>
- document_updated <>
- "\r\n--abc123\r\n" <> "\r\n" <> "this is 18 chars l" <> "\r\n--abc123--"
-
- resp =
- Couch.put(
- "/#{db_name}/multipart",
- body: multipart_data_updated,
- headers: ["Content-Type": "multipart/related;boundary=\"abc123\""]
- )
-
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/multipart/bar.txt")
-
- assert resp.body == "this is 18 chars l"
-
- resp = Couch.get("/#{db_name}/multipart/baz.txt")
-
- assert resp.status_code == 404
-
- resp =
- Couch.get(
- "/#{db_name}/multipart",
- query: %{:attachments => true},
- headers: [accept: "multipart/related,*/*;"]
- )
-
- assert resp.status_code == 200
- assert resp.headers["Content-length"] == "790"
- # parse out the multipart
- sections = parse_multipart(resp)
-
- assert length(sections) == 3
- # The first section is the json doc. Check it's content-type.
- # Each part carries their own meta data.
-
- assert Enum.at(sections, 0).headers["Content-Type"] == "application/json"
- assert Enum.at(sections, 1).headers["Content-Type"] == "application/test"
- assert Enum.at(sections, 2).headers["Content-Type"] == "application/test"
-
- assert Enum.at(sections, 1).headers["Content-Length"] == "21"
- assert Enum.at(sections, 2).headers["Content-Length"] == "18"
-
- assert Enum.at(sections, 1).headers["Content-Disposition"] ==
- ~s(attachment; filename="foo.txt")
-
- assert Enum.at(sections, 2).headers["Content-Disposition"] ==
- ~s(attachment; filename="bar.txt")
-
- doc = :jiffy.decode(Enum.at(sections, 0).body, [:return_maps])
-
- assert doc["_attachments"]["foo.txt"]["follows"] == true
- assert doc["_attachments"]["bar.txt"]["follows"] == true
-
- assert Enum.at(sections, 1).body == "this is 21 chars long"
- assert Enum.at(sections, 2).body == "this is 18 chars l"
-
- # now get attachments incrementally (only the attachments changed since
- # a certain rev).
-
- resp =
- Couch.get(
- "/#{db_name}/multipart",
- query: %{:atts_since => ~s(["#{first_rev}"])},
- headers: [accept: "multipart/related,*/*;"]
- )
-
- assert resp.status_code == 200
-
- sections = parse_multipart(resp)
- assert length(sections) == 2
-
- doc = :jiffy.decode(Enum.at(sections, 0).body, [:return_maps])
-
- assert doc["_attachments"]["foo.txt"]["stub"] == true
- assert doc["_attachments"]["bar.txt"]["follows"] == true
- assert Enum.at(sections, 1).body == "this is 18 chars l"
-
- # try the atts_since parameter together with the open_revs parameter
- resp =
- Couch.get(
- "/#{db_name}/multipart",
- query: %{
- :open_revs => ~s(["#{doc["_rev"]}"]),
- :atts_since => ~s(["#{first_rev}"])
- },
- headers: [accept: "multipart/related,*/*;"]
- )
-
- assert resp.status_code == 200
- sections = parse_multipart(resp)
-
- # 1 section, with a multipart/related Content-Type
- assert length(sections) == 1
-
- ctype_value = Enum.at(sections, 0).headers["Content-Type"]
- assert String.starts_with?(ctype_value, "multipart/related;") == true
-
- inner_sections = parse_multipart(Enum.at(sections, 0))
- # 3 inner sections: a document body section plus 2 attachment data sections
- assert length(inner_sections) == 3
- assert Enum.at(inner_sections, 0).headers["Content-Type"] == "application/json"
-
- doc = :jiffy.decode(Enum.at(inner_sections, 0).body, [:return_maps])
- assert doc["_attachments"]["foo.txt"]["follows"] == true
- assert doc["_attachments"]["bar.txt"]["follows"] == true
-
- assert Enum.at(inner_sections, 1).body == "this is 21 chars long"
- assert Enum.at(inner_sections, 2).body == "this is 18 chars l"
-
- # try it with a rev that doesn't exist (should get all attachments)
-
- resp =
- Couch.get(
- "/#{db_name}/multipart",
- query: %{
- :atts_since => ~s(["1-2897589","#{first_rev}"])
- },
- headers: [accept: "multipart/related,*/*;"]
- )
-
- assert resp.status_code == 200
- sections = parse_multipart(resp)
-
- assert length(sections) == 2
-
- doc = :jiffy.decode(Enum.at(sections, 0).body, [:return_maps])
- assert doc["_attachments"]["foo.txt"]["stub"] == true
- assert doc["_attachments"]["bar.txt"]["follows"] == true
- assert Enum.at(sections, 1).body == "this is 18 chars l"
- end
-
- @tag :with_db
- test "manages compressed attachments successfully", context do
- db_name = context[:db_name]
-
- # check that with the document multipart/mixed API it's possible to receive
- # attachments in compressed form (if they're stored in compressed form)
- server_config = [
- %{
- :section => "attachments",
- :key => "compression_level",
- :value => "8"
- },
- %{
- :section => "attachments",
- :key => "compressible_types",
- :value => "text/plain"
- }
- ]
-
- run_on_modified_server(
- server_config,
- fn -> test_multipart_att_compression(db_name) end
- )
- end
-
- @tag :with_db
- test "multipart attachments with new_edits=false", context do
- db_name = context[:db_name]
-
- att_data = String.duplicate("x", 100_000)
- att_len = byte_size(att_data)
- document = """
- {
- "body": "This is a body.",
- "_attachments": {
- "foo.txt": {
- "follows": true,
- "content_type": "application/test",
- "length": #{att_len}
- }
- }
- }
- """
-
- multipart_data =
- "--abc123\r\n" <>
- "content-type: application/json\r\n" <>
- "\r\n" <>
- document <>
- "\r\n--abc123\r\n" <>
- "\r\n" <>
- att_data <>
- "\r\n--abc123--epilogue"
-
- resp =
- Couch.put(
- "/#{db_name}/multipart_replicated_changes",
- body: multipart_data,
- headers: ["Content-Type": "multipart/related;boundary=\"abc123\""]
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"] == true
-
- rev = resp.body["rev"]
-
- resp = Couch.get("/#{db_name}/multipart_replicated_changes/foo.txt")
-
- assert resp.body == att_data
-
- # https://github.com/apache/couchdb/issues/3939
- # Repeating the request should not hang
- Enum.each(0..10, fn _ ->
- put_multipart_new_edits_false(db_name, rev, multipart_data)
- end)
- end
-
- defp put_multipart_new_edits_false(db_name, rev, multipart_data) do
- # Help ensure we're re-using client connections
- ibrowse_opts = [{:max_sessions, 1}, {:max_pipeline_size, 1}]
- resp =
- Couch.put(
- "/#{db_name}/multipart_replicated_changes?new_edits=false&rev=#{rev}",
- body: multipart_data,
- headers: ["Content-Type": "multipart/related;boundary=\"abc123\""],
- ibrowse: ibrowse_opts
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"] == true
- end
-
- defp test_multipart_att_compression(dbname) do
- doc = %{
- "_id" => "foobar"
- }
-
- lorem = Couch.get("/_utils/script/test/lorem.txt").body
- hello_data = "hello world"
- {_, resp} = create_doc(dbname, doc)
- first_rev = resp.body["rev"]
-
- resp =
- Couch.put(
- "/#{dbname}/#{doc["_id"]}/data.bin",
- query: %{:rev => first_rev},
- body: hello_data,
- headers: ["Content-Type": "application/binary"]
- )
-
- assert resp.status_code in [201, 202]
- second_rev = resp.body["rev"]
-
- resp =
- Couch.put(
- "/#{dbname}/#{doc["_id"]}/lorem.txt",
- query: %{:rev => second_rev},
- body: lorem,
- headers: ["Content-Type": "text/plain"]
- )
-
- assert resp.status_code in [201, 202]
- third_rev = resp.body["rev"]
-
- resp =
- Couch.get(
- "/#{dbname}/#{doc["_id"]}",
- query: %{:open_revs => ~s(["#{third_rev}"])},
- headers: [Accept: "multipart/mixed", "X-CouchDB-Send-Encoded-Atts": "true"]
- )
-
- assert resp.status_code == 200
- sections = parse_multipart(resp)
- # 1 section, with a multipart/related Content-Type
- assert length(sections) == 1
- ctype_value = Enum.at(sections, 0).headers["Content-Type"]
- assert String.starts_with?(ctype_value, "multipart/related;") == true
-
- inner_sections = parse_multipart(Enum.at(sections, 0))
- # 3 inner sections: a document body section plus 2 attachment data sections
- assert length(inner_sections) == 3
- assert Enum.at(inner_sections, 0).headers["Content-Type"] == "application/json"
-
- doc = :jiffy.decode(Enum.at(inner_sections, 0).body, [:return_maps])
- assert doc["_attachments"]["lorem.txt"]["follows"] == true
- assert doc["_attachments"]["lorem.txt"]["encoding"] == "gzip"
- assert doc["_attachments"]["data.bin"]["follows"] == true
- assert doc["_attachments"]["data.bin"]["encoding"] != "gzip"
-
- if Enum.at(inner_sections, 1).body == hello_data do
- assert Enum.at(inner_sections, 2).body != lorem
- else
- if Enum.at(inner_sections, 2).body == hello_data do
- assert Enum.at(inner_sections, 1).body != lorem
- else
- assert false, "Could not found data.bin attachment data"
- end
- end
-
- # now test that it works together with the atts_since parameter
-
- resp =
- Couch.get(
- "/#{dbname}/#{doc["_id"]}",
- query: %{:open_revs => ~s(["#{third_rev}"]), :atts_since => ~s(["#{second_rev}"])},
- headers: [Accept: "multipart/mixed", "X-CouchDB-Send-Encoded-Atts": "true"]
- )
-
- assert resp.status_code == 200
- sections = parse_multipart(resp)
- # 1 section, with a multipart/related Content-Type
-
- assert length(sections) == 1
- ctype_value = Enum.at(sections, 0).headers["Content-Type"]
- assert String.starts_with?(ctype_value, "multipart/related;") == true
-
- inner_sections = parse_multipart(Enum.at(sections, 0))
- # 3 inner sections: a document body section plus 2 attachment data sections
- assert length(inner_sections) == 3
- assert Enum.at(inner_sections, 0).headers["Content-Type"] == "application/json"
- doc = :jiffy.decode(Enum.at(inner_sections, 0).body, [:return_maps])
- assert doc["_attachments"]["lorem.txt"]["follows"] == true
- assert doc["_attachments"]["lorem.txt"]["encoding"] == "gzip"
- assert Enum.at(inner_sections, 1).body != lorem
- end
-
- def get_boundary(response) do
- ctype = response.headers["Content-Type"]
- ctype_args = String.split(ctype, "; ")
- ctype_args = Enum.slice(ctype_args, 1, length(ctype_args))
-
- boundary_arg =
- Enum.find(
- ctype_args,
- fn arg -> String.starts_with?(arg, "boundary=") end
- )
-
- boundary = Enum.at(String.split(boundary_arg, "="), 1)
-
- if String.starts_with?(boundary, ~s(")) do
- :jiffy.decode(boundary)
- else
- boundary
- end
- end
-
- def parse_multipart(response) do
- boundary = get_boundary(response)
-
- leading = "--#{boundary}\r\n"
- last = "\r\n--#{boundary}--"
- body = response.body
- mimetext = Enum.at(String.split(body, leading, parts: 2), 1)
- mimetext = Enum.at(String.split(mimetext, last, parts: 2), 0)
-
- sections = String.split(mimetext, ~s(\r\n--#{boundary}))
-
- Enum.map(sections, fn section ->
- section_parts = String.split(section, "\r\n\r\n", parts: 2)
- raw_headers = String.split(Enum.at(section_parts, 0), "\r\n")
- body = Enum.at(section_parts, 1)
-
- headers =
- Enum.reduce(raw_headers, %{}, fn raw_header, acc ->
- if raw_header != "" do
- header_parts = String.split(raw_header, ": ")
- Map.put(acc, Enum.at(header_parts, 0), Enum.at(header_parts, 1))
- else
- acc
- end
- end)
-
- %{
- :headers => headers,
- :body => body
- }
- end)
- end
-end
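For orientation, a hedged sketch of what `parse_multipart/1` above returns for a small hand-built response; the boundary and payloads are made up, and the snippet assumes this test module (and :jiffy) is loaded.

```elixir
# Illustrative only: feed a fake response through parse_multipart/1.
body =
  "--abc123\r\n" <>
    "Content-Type: application/json\r\n" <>
    "\r\n" <>
    ~s({"ok": true}) <>
    "\r\n--abc123\r\n" <>
    "Content-Type: text/plain\r\n" <>
    "\r\n" <>
    "hello" <>
    "\r\n--abc123--"

fake_resp = %{
  headers: %{"Content-Type" => ~s(multipart/related; boundary="abc123")},
  body: body
}

# Two sections come back, each a map with :headers and :body keys.
[json_part, text_part] = AttachmentMultipartTest.parse_multipart(fake_resp)
"application/json" = json_part.headers["Content-Type"]
"hello" = text_part.body
```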
diff --git a/test/elixir/test/attachments_test.exs b/test/elixir/test/attachments_test.exs
deleted file mode 100644
index f1dd3ef61..000000000
--- a/test/elixir/test/attachments_test.exs
+++ /dev/null
@@ -1,506 +0,0 @@
-defmodule AttachmentsTest do
- use CouchTestCase
-
- @moduletag :attachments
-
- # MD5 Digests of compressible attachments and therefore Etags
- # will vary depending on platform gzip implementation.
- # These MIME types are defined in [attachments] compressible_types
- @bin_att_doc %{
- _id: "bin_doc",
- _attachments: %{
- "foo.txt": %{
- content_type: "application/octet-stream",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
-
- @moduledoc """
- Test CouchDB attachments
- This is a port of the attachments.js suite
- """
-
- @tag :with_db
- test "saves attachment successfully", context do
- db_name = context[:db_name]
-
- resp = Couch.put("/#{db_name}/bin_doc", body: @bin_att_doc, query: %{w: 3})
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
- end
-
- @tag :with_db
- test "errors for bad attachment", context do
- db_name = context[:db_name]
-
- bad_att_doc = %{
- _id: "bad_doc",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: "notBase64Encoded="
- }
- }
- }
-
- resp = Couch.put("/#{db_name}/bad_doc", body: bad_att_doc, query: %{w: 3})
- assert resp.status_code == 400
- end
-
- @tag :with_db
- test "reads attachment successfully", context do
- db_name = context[:db_name]
-
- resp = Couch.put("/#{db_name}/bin_doc", body: @bin_att_doc, query: %{w: 3})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/bin_doc/foo.txt", body: @bin_att_doc)
-
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "application/octet-stream"
- assert resp.headers["Etag"] == "\"aEI7pOYCRBLTRQvvqYrrJQ==\""
- end
-
- @tag :with_db
- test "update attachment", context do
- db_name = context[:db_name]
-
- bin_att_doc2 = %{
- _id: "bin_doc2",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: ""
- }
- }
- }
-
- resp = Couch.put("/#{db_name}/bin_doc2", body: bin_att_doc2, query: %{w: 3})
- assert resp.status_code in [201, 202]
- rev = resp.body["rev"]
-
- resp = Couch.get("/#{db_name}/bin_doc2/foo.txt")
-
- assert resp.headers["Content-Type"] == "text/plain"
- assert resp.body == ""
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc2/foo2.txt",
- query: %{rev: rev, w: 3},
- body: "This is no base64 encoded text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert resp.status_code in [201, 202]
- assert Regex.match?(~r/bin_doc2\/foo2.txt/, resp.headers["location"])
- end
-
- @tag :with_db
- test "delete attachment", context do
- db_name = context[:db_name]
-
- resp = Couch.put("/#{db_name}/bin_doc", body: @bin_att_doc, query: %{w: 3})
- assert resp.status_code in [201, 202]
- rev = resp.body["rev"]
-
- resp = Couch.delete("/#{db_name}/bin_doc/foo.txt", query: %{w: 3})
- assert resp.status_code == 409
-
- resp = Couch.delete("/#{db_name}/bin_doc/foo.txt", query: %{w: 3, rev: "4-garbage"})
- assert resp.status_code == 409
- assert resp.body["error"] == "not_found"
- assert resp.body["reason"] == "missing_rev"
-
- resp = Couch.delete("/#{db_name}/bin_doc/notexisting.txt", query: %{w: 3, rev: rev})
- assert resp.status_code == 404
- assert resp.body["error"] == "not_found"
- assert resp.body["reason"] == "Document is missing attachment"
-
- resp = Couch.delete("/#{db_name}/bin_doc/foo.txt", query: %{w: 3, rev: rev})
- assert resp.status_code == 200
- assert resp.headers["location"] == nil
- end
-
- @tag :with_db
- test "delete attachment request with a payload should not block following requests", context do
- db_name = context[:db_name]
-
- resp = Couch.put("/#{db_name}/bin_doc", body: @bin_att_doc, query: %{w: 3})
- assert resp.status_code in [201, 202]
- rev = resp.body["rev"]
-
- resp = Couch.delete("/#{db_name}/bin_doc/foo.txt", body: 'some payload', query: %{w: 3, rev: rev}, ibrowse: [{:max_sessions, 1}, {:max_pipeline_size, 1}])
- assert resp.status_code == 200
-
- resp = Couch.get("/", timeout: 1000, ibrowse: [{:max_sessions, 1}, {:max_pipeline_size, 1}])
- assert resp.status_code == 200
- end
-
- @tag :with_db
- test "saves binary", context do
- db_name = context[:db_name]
-
- bin_data = "JHAPDO*AU£PN ){(3u[d 93DQ9¡€])} ææøo'∂ƒæ≤çæππ•¥∫¶®#†π¶®¥π€ª®˙π8np"
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc3/attachment.txt",
- body: bin_data,
- headers: ["Content-Type": "text/plain;charset=utf-8"],
- query: %{w: 3}
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- rev = resp.body["rev"]
-
- resp = Couch.get("/#{db_name}/bin_doc3/attachment.txt")
- assert resp.body == bin_data
-
- resp =
- Couch.put("/#{db_name}/bin_doc3/attachment.txt", body: bin_data, query: %{w: 3})
-
- assert resp.status_code == 409
-
- # non-existent rev
- resp =
- Couch.put(
- "/#{db_name}/bin_doc3/attachment.txt",
- query: %{rev: "1-adae8575ecea588919bd08eb020c708e", w: 3},
- headers: ["Content-Type": "text/plain;charset=utf-8"],
- body: bin_data
- )
-
- assert resp.status_code == 409
-
- # current rev
- resp =
- Couch.put(
- "/#{db_name}/bin_doc3/attachment.txt",
- query: %{rev: rev, w: 3},
- headers: ["Content-Type": "text/plain;charset=utf-8"],
- body: bin_data
- )
-
- assert resp.status_code in [201, 202]
-
- rev = resp.body["rev"]
-
- resp = Couch.get("/#{db_name}/bin_doc3/attachment.txt")
- assert String.downcase(resp.headers["Content-Type"]) == "text/plain;charset=utf-8"
- assert resp.body == bin_data
-
- resp = Couch.get("/#{db_name}/bin_doc3/attachment.txt", query: %{rev: rev})
- assert String.downcase(resp.headers["Content-Type"]) == "text/plain;charset=utf-8"
- assert resp.body == bin_data
-
- resp = Couch.delete("/#{db_name}/bin_doc3/attachment.txt", query: %{rev: rev, w: 3})
- assert resp.status_code == 200
-
- resp = Couch.get("/#{db_name}/bin_doc3/attachment.txt")
- assert resp.status_code == 404
-
- resp = Couch.get("/#{db_name}/bin_doc3/attachment.txt", query: %{rev: rev})
- assert String.downcase(resp.headers["Content-Type"]) == "text/plain;charset=utf-8"
- assert resp.body == bin_data
- end
-
- @tag :with_db
- test "empty attachments", context do
- db_name = context[:db_name]
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc4/attachment.txt",
- body: "",
- headers: ["Content-Type": "text/plain;charset=utf-8"],
- query: %{w: 3}
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- rev = resp.body["rev"]
-
- resp = Couch.get("/#{db_name}/bin_doc4/attachment.txt")
- assert resp.status_code == 200
- assert resp.body == ""
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc4/attachment.txt",
- query: %{rev: rev, w: 3},
- headers: ["Content-Type": "text/plain;charset=utf-8"],
- body: "This is a string"
- )
-
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/bin_doc4/attachment.txt")
- assert resp.status_code == 200
- assert resp.body == "This is a string"
- end
-
- @tag :with_db
- test "large attachments COUCHDB-366", context do
- db_name = context[:db_name]
-
- lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
- range = 1..10
-
- large_att = Enum.reduce(range, lorem, fn _, acc -> lorem <> acc end)
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc5/attachment.txt",
- body: large_att,
- query: %{w: 3},
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- resp = Couch.get("/#{db_name}/bin_doc5/attachment.txt")
- assert String.downcase(resp.headers["Content-Type"]) == "text/plain;charset=utf-8"
- assert resp.body == large_att
-
- lorem_b64 =
- "TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4g"
-
- range = 1..10
-
- large_b64_att = Enum.reduce(range, lorem_b64, fn _, acc -> lorem_b64 <> acc end)
-
- resp =
- Couch.get(
- "/#{db_name}/bin_doc5",
- query: %{attachments: true},
- headers: [Accept: "application/json"]
- )
-
- assert large_b64_att == resp.body["_attachments"]["attachment.txt"]["data"]
- end
-
- @tag :with_db
- test "etags for attachments", context do
- db_name = context[:db_name]
-
- lorem_att = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc6/attachment.txt",
- body: lorem_att,
- headers: ["Content-Type": "text/plain;charset=utf-8"],
- query: %{w: 3}
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- resp = Couch.get("/#{db_name}/bin_doc6/attachment.txt")
- assert resp.status_code == 200
- etag = resp.headers["etag"]
-
- resp =
- Couch.get("/#{db_name}/bin_doc6/attachment.txt", headers: ["if-none-match": etag])
-
- assert resp.status_code == 304
- end
-
- @tag :with_db
- test "COUCHDB-497 - empty attachments", context do
- db_name = context[:db_name]
-
- lorem_att = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc7/attachment.txt",
- body: lorem_att,
- headers: ["Content-Type": "text/plain;charset=utf-8"],
- query: %{w: 3}
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- rev = resp.body["rev"]
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc7/empty.txt",
- query: %{rev: rev, w: 3},
- body: "",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert resp.status_code in [201, 202]
- rev = resp.body["rev"]
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc7/empty.txt",
- query: %{rev: rev, w: 3},
- body: "",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- assert resp.status_code in [201, 202]
- end
-
- @tag :with_db
- test "implicit doc creation allows creating docs with a reserved id. COUCHDB-565",
- context do
- db_name = context[:db_name]
-
- resp =
- Couch.put(
- "/#{db_name}/_nonexistant/attachment.txt",
- body: "ATTACHMENT INFO",
- headers: ["Content-Type": "text/plain;charset=utf-8"],
- query: %{w: 3}
- )
-
- assert resp.status_code == 400
- end
-
- @tag :with_db
- test "COUCHDB-809 - stubs should only require the 'stub' field", context do
- db_name = context[:db_name]
-
- stub_doc = %{
- _id: "stub_doc",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
-
- resp =
- Couch.put(
- "/#{db_name}/stub_doc",
- body: stub_doc,
- query: %{w: 3}
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- rev = resp.body["rev"]
-
- stub_doc =
- Map.merge(stub_doc, %{
- _rev: rev,
- _attachments: %{"foo.txt": %{stub: true}}
- })
-
- resp =
- Couch.put(
- "/#{db_name}/stub_doc",
- query: %{rev: rev, w: 3},
- body: stub_doc
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- rev = resp.body["rev"]
-
- stub_doc =
- Map.merge(stub_doc, %{
- _rev: rev,
- _attachments: %{"foo.txt": %{stub: true, revpos: 10}}
- })
-
- resp =
- Couch.put(
- "/#{db_name}/stub_doc",
- query: %{rev: rev},
- body: stub_doc
- )
-
- assert resp.status_code == 412
- assert resp.body["error"] == "missing_stub"
- end
-
- @tag :with_db
- test "md5 header for attachments", context do
- db_name = context[:db_name]
- md5 = "MntvB0NYESObxH4VRDUycw=="
-
- bin_data = "foo bar"
-
- resp =
- Couch.put(
- "/#{db_name}/bin_doc8/attachment.txt",
- body: bin_data,
- headers: ["Content-Type": "application/octet-stream", "Content-MD5": md5],
- query: %{w: 3}
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- resp = Couch.get("/#{db_name}/bin_doc8/attachment.txt")
- assert resp.status_code == 200
- assert md5 == resp.headers["Content-MD5"]
- end
-
- @tag :with_db
- test "attachment via multipart/form-data", context do
- db_name = context[:db_name]
-
- form_data_doc = %{
- _id: "form-data-doc"
- }
-
- resp =
- Couch.put(
- "/#{db_name}/form_data_doc",
- body: form_data_doc,
- query: %{w: 3}
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
- rev = resp.body["rev"]
-
- body =
- "------TF\r\n" <>
- "Content-Disposition: form-data; name=\"_rev\"\r\n\r\n" <>
- rev <>
- "\r\n" <>
- "------TF\r\n" <>
- "Content-Disposition: form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n" <>
- "Content-Type: text/plain\r\n\r\n" <>
- "contents of file.txt\r\n\r\n" <> "------TF--"
-
- resp =
- Couch.post(
- "/#{db_name}/form_data_doc",
- body: body,
- query: %{w: 3},
- headers: [
- Referer: "http://127.0.0.1:15984",
- "Content-Type": "multipart/form-data; boundary=----TF",
- "Content-Length": byte_size(body)
- ]
- )
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- resp = Couch.get("/#{db_name}/form_data_doc")
- assert resp.status_code == 200
-
- doc = resp.body
- assert doc["_attachments"]["file.txt"]["length"] == 22
- end
-end
diff --git a/test/elixir/test/auth_cache_test.exs b/test/elixir/test/auth_cache_test.exs
deleted file mode 100644
index 8b7c29c71..000000000
--- a/test/elixir/test/auth_cache_test.exs
+++ /dev/null
@@ -1,197 +0,0 @@
-defmodule AuthCacheTest do
- use CouchTestCase
-
- @moduletag :authentication
-
- @tag :pending
- @tag :with_db
- test "auth cache management", context do
- db_name = context[:db_name]
-
- server_config = [
- %{
- :section => "chttpd_auth",
- :key => "authentication_db",
- :value => db_name
- },
- %{
- :section => "chttpd_auth",
- :key => "auth_cache_size",
- :value => "3"
- },
- %{
- :section => "httpd",
- :key => "authentication_handlers",
- :value => "{couch_httpd_auth, default_authentication_handler}"
- },
- %{
- :section => "chttpd_auth",
- :key => "secret",
- :value => generate_secret(64)
- }
- ]
-
- run_on_modified_server(server_config, fn -> test_fun(db_name) end)
- end
-
- defp generate_secret(len) do
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
- |> String.splitter("", trim: true)
- |> Enum.take_random(len)
- |> Enum.join("")
- end
-
- defp hits() do
- hits = request_stats(["couchdb", "auth_cache_hits"], true)
- hits["value"] || 0
- end
-
- defp misses() do
- misses = request_stats(["couchdb", "auth_cache_misses"], true)
- misses["value"] || 0
- end
-
- defp logout(session) do
- assert Couch.Session.logout(session).body["ok"]
- end
-
- defp login_fail(user, password) do
- resp = Couch.login(user, password, :fail)
- assert resp.error, "Login error is expected."
- end
-
- defp login(user, password) do
- sess = Couch.login(user, password)
- assert sess.cookie, "Successful login is expected"
- sess
- end
-
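- # Log in as the given user (or expect the login to fail) and verify, via the
- # auth_cache stats counters, whether the attempt produced a cache hit or miss.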
- defp assert_cache(event, user, password, expect \\ :expect_login_success) do
- hits_before = hits()
- misses_before = misses()
-
- session =
- case expect do
- :expect_login_success -> login(user, password)
- :expect_login_fail -> login_fail(user, password)
- _ -> assert false
- end
-
- hits_after = hits()
- misses_after = misses()
-
- if expect == :expect_login_success do
- logout(session)
- end
-
- case event do
- :expect_miss ->
- assert misses_after == misses_before + 1,
- "Cache miss is expected for #{user} after login"
-
- assert hits_after == hits_before,
- "No cache hit is expected for #{user} after login"
-
- :expect_hit ->
- assert misses_after == misses_before,
- "No cache miss is expected for #{user} after login"
-
- assert hits_after == hits_before + 1,
- "Cache hit is expected for #{user} after login"
-
- _ ->
- assert false
- end
- end
-
- def save_doc(db_name, body) do
- resp = Couch.put("/#{db_name}/#{body["_id"]}", body: body)
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
- Map.put(body, "_rev", resp.body["rev"])
- end
-
- def delete_doc(db_name, body) do
- resp = Couch.delete("/#{db_name}/#{body["_id"]}", query: [rev: body["_rev"]])
- assert resp.status_code in [200, 202]
- assert resp.body["ok"]
- {:ok, resp}
- end
-
- defp test_fun(db_name) do
- fdmanana =
- prepare_user_doc([
- {:name, "fdmanana"},
- {:password, "qwerty"},
- {:roles, ["dev"]}
- ])
-
- {:ok, resp} = create_doc(db_name, fdmanana)
- fdmanana = Map.put(fdmanana, "_rev", resp.body["rev"])
-
- chris =
- prepare_user_doc([
- {:name, "chris"},
- {:password, "the_god_father"},
- {:roles, ["dev", "mafia", "white_costume"]}
- ])
-
- create_doc(db_name, chris)
-
- joe =
- prepare_user_doc([
- {:name, "joe"},
- {:password, "functional"},
- {:roles, ["erlnager"]}
- ])
-
- create_doc(db_name, joe)
-
- johndoe =
- prepare_user_doc([
- {:name, "johndoe"},
- {:password, "123456"},
- {:roles, ["user"]}
- ])
-
- create_doc(db_name, johndoe)
-
- assert_cache(:expect_miss, "fdmanana", "qwerty")
- assert_cache(:expect_hit, "fdmanana", "qwerty")
- assert_cache(:expect_miss, "chris", "the_god_father")
- assert_cache(:expect_miss, "joe", "functional")
- assert_cache(:expect_miss, "johndoe", "123456")
-
- # It's an MRU cache, so joe was removed from the cache to make room for johndoe
- # BUGGED assert_cache(:expect_miss, "joe", "functional")
-
- assert_cache(:expect_hit, "fdmanana", "qwerty")
-
- fdmanana = Map.replace!(fdmanana, "password", "foobar")
- fdmanana = save_doc(db_name, fdmanana)
-
- # Cache was refreshed
- # BUGGED
- # assert_cache(:expect_hit, "fdmanana", "qwerty", :expect_login_fail)
- # assert_cache(:expect_hit, "fdmanana", "foobar")
-
- # and yet another update
- fdmanana = Map.replace!(fdmanana, "password", "javascript")
- fdmanana = save_doc(db_name, fdmanana)
-
- # Cache was refreshed
- # BUGGED
- # assert_cache(:expect_hit, "fdmanana", "foobar", :expect_login_fail)
- # assert_cache(:expect_hit, "fdmanana", "javascript")
-
- delete_doc(db_name, fdmanana)
-
- assert_cache(:expect_hit, "fdmanana", "javascript", :expect_login_fail)
-
- # login, compact authentication DB, login again and verify that
- # there was a cache hit
- assert_cache(:expect_hit, "johndoe", "123456")
- compact(db_name)
- assert_cache(:expect_hit, "johndoe", "123456")
- end
-end
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
deleted file mode 100644
index abc66ca40..000000000
--- a/test/elixir/test/basics_test.exs
+++ /dev/null
@@ -1,384 +0,0 @@
-defmodule BasicsTest do
- use CouchTestCase
-
- @moduletag :basics
-
- @moduledoc """
- Test CouchDB basics.
- This is a port of the basics.js suite
- """
-
- test "Session contains adm context" do
- user_ctx = Couch.get("/_session").body["userCtx"]
- assert user_ctx["name"] == "adm", "Should have adm user context"
- assert user_ctx["roles"] == ["_admin"], "Should have _admin role"
- end
-
- test "Welcome endpoint" do
- assert Couch.get("/").body["couchdb"] == "Welcome", "Should say welcome"
- end
-
- @tag :with_db
- test "PUT on existing DB should return 412 instead of 500", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}")
- assert resp.status_code == 412
- refute resp.body["ok"]
- end
-
- @tag :with_db_name
- test "Creating a new DB should return location header", context do
- db_name = context[:db_name]
- {:ok, resp} = create_db(db_name)
- msg = "Should return Location header for new db"
- assert String.ends_with?(resp.headers["location"], db_name), msg
- {:ok, _} = delete_db(db_name)
- end
-
- @tag :with_db_name
- test "Creating a new DB with slashes should return Location header (COUCHDB-411)",
- context do
- db_name = context[:db_name] <> "%2Fwith_slashes"
- {:ok, resp} = create_db(db_name)
- msg = "Should return Location header for new db"
- assert String.ends_with?(resp.headers["location"], db_name), msg
- {:ok, _} = delete_db(db_name)
- end
-
- @tag :with_db
- test "Created database has appropriate db info name", context do
- db_name = context[:db_name]
-
- assert Couch.get("/#{db_name}").body["db_name"] == db_name,
- "Get correct database name"
- end
-
- @tag :with_db
- test "Database should be in _all_dbs", context do
- assert context[:db_name] in Couch.get("/_all_dbs").body, "Db name in _all_dbs"
- end
-
- @tag :with_db
- test "Limit and skip should work in _all_dbs", context do
- db = context[:db_name]
- db_count = length(Couch.get("/_all_dbs").body)
- assert db_count > 0
- assert Couch.get("/_all_dbs?limit=0").body == []
- assert length(Couch.get("/_all_dbs?limit=1").body) >= 1
- assert length(Couch.get("/_all_dbs?skip=1").body) == (db_count - 1)
- assert [db] == Couch.get("/_all_dbs?start_key=\"#{db}\"&limit=1").body
- end
-
- test "Database name with '+' should encode to '+'", _context do
- set_config({"chttpd", "decode_plus_to_space", "false"})
-
- random_number = :rand.uniform(16_000_000)
- db_name = "random+test+db+#{random_number}"
- resp = Couch.put("/#{db_name}")
-
- assert resp.status_code == 201
- assert resp.body["ok"] == true
-
- resp = Couch.get("/#{db_name}")
-
- assert resp.status_code == 200
- assert resp.body["db_name"] == db_name
- end
-
- test "Database name with '%2B' should encode to '+'", _context do
- set_config({"chttpd", "decode_plus_to_space", "true"})
-
- random_number = :rand.uniform(16_000_000)
- db_name = "random%2Btest%2Bdb2%2B#{random_number}"
- resp = Couch.put("/#{db_name}")
-
- assert resp.status_code == 201
- assert resp.body["ok"] == true
-
- resp = Couch.get("/#{db_name}")
-
- assert resp.status_code == 200
- assert resp.body["db_name"] == "random+test+db2+#{random_number}"
- end
-
- @tag :with_db
- test "'+' in document name should encode to '+'", context do
- set_config({"chttpd", "decode_plus_to_space", "false"})
-
- db_name = context[:db_name]
- doc_id = "test+doc"
- resp = Couch.put("/#{db_name}/#{doc_id}", body: %{})
-
- assert resp.status_code == 201
- assert resp.body["id"] == "test+doc"
- end
-
- @tag :with_db
- test "'+' in document name should encode to space", context do
- set_config({"chttpd", "decode_plus_to_space", "true"})
-
- db_name = context[:db_name]
- doc_id = "test+doc+2"
- resp = Couch.put("/#{db_name}/#{doc_id}", body: %{})
-
- assert resp.status_code == 201
- assert resp.body["id"] == "test doc 2"
- end
-
- @tag :with_db
- test "Empty database should have zero docs", context do
- assert Couch.get("/#{context[:db_name]}").body["doc_count"] == 0,
- "Empty doc count in empty db"
- end
-
- @tag :with_db
- test "Create a document and save it to the database", context do
- resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0", :a => 1, :b => 1})
- assert resp.status_code in [201, 202], "Should be 201 created"
- assert resp.body["id"], "Id should be present"
- assert resp.body["rev"], "Rev should be present"
-
- resp2 = Couch.get("/#{context[:db_name]}/#{resp.body["id"]}")
- assert resp2.body["_id"] == resp.body["id"], "Ids should match"
- assert resp2.body["_rev"] == resp.body["rev"], "Revs should match"
- end
-
- @tag :with_db
- test "Revs info status is good", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, sample_doc_foo())
- resp = Couch.get("/#{db_name}/foo", query: %{:revs_info => true})
- assert hd(resp.body["_revs_info"])["status"] == "available", "Revs info is available"
- end
-
- @tag :with_db
- test "A document read with etag works", context do
- db_name = context[:db_name]
- {:ok, resp} = create_doc(db_name, sample_doc_foo())
- etag = ~s("#{resp.body["rev"]}")
- resp = Couch.get("/#{db_name}/foo", headers: ["If-None-Match": etag])
- assert resp.status_code == 304, "Should be 304 Not Modified"
- assert resp.headers[:"Content-Length"] == "0", "Should have zero content length"
- assert resp.body == "", "Should have an empty body"
- end
-
- @tag :with_db
- test "Make sure you can do a seq=true option", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, sample_doc_foo())
- resp = Couch.get("/#{db_name}/foo", query: %{:local_seq => true})
- assert resp.body["_local_seq"] == 1, "Local seq value == 1"
- end
-
- @tag :with_db
- test "Can create several documents", context do
- db_name = context[:db_name]
- assert Couch.post("/#{db_name}", body: %{:_id => "1", :a => 2, :b => 4}).body["ok"]
- assert Couch.post("/#{db_name}", body: %{:_id => "2", :a => 3, :b => 9}).body["ok"]
- assert Couch.post("/#{db_name}", body: %{:_id => "3", :a => 4, :b => 16}).body["ok"]
-
- retry_until(fn ->
- Couch.get("/#{db_name}").body["doc_count"] == 3
- end)
- end
-
- @tag :pending
- @tag :with_db
- test "Regression test for COUCHDB-954", context do
- db_name = context[:db_name]
- doc = %{:_id => "COUCHDB-954", :a => 1}
-
- resp1 = Couch.post("/#{db_name}", body: doc)
- assert resp1.body["ok"]
- old_rev = resp1.body["rev"]
-
- doc = Map.put(doc, :_rev, old_rev)
- resp2 = Couch.post("/#{db_name}", body: doc)
- assert resp2.body["ok"]
- _new_rev = resp2.body["rev"]
-
- # TODO: enable chunked encoding
- # resp3 = Couch.get("/#{db_name}/COUCHDB-954", [query: %{:open_revs => "[#{old_rev}, #{new_rev}]"}])
- # assert length(resp3.body) == 2, "Should get two revisions back"
- # resp3 = Couch.get("/#{db_name}/COUCHDB-954", [query: %{:open_revs => "[#{old_rev}]", :latest => true}])
- # assert resp3.body["_rev"] == new_rev
- end
-
- @tag :with_db
- test "Simple map functions", context do
- db_name = context[:db_name]
- map_fun = "function(doc) { if (doc.a==4) { emit(null, doc.b); } }"
- red_fun = "function(keys, values) { return sum(values); }"
- map_doc = %{:views => %{:baz => %{:map => map_fun}}}
- red_doc = %{:views => %{:baz => %{:map => map_fun, :reduce => red_fun}}}
-
- # Bootstrap database and ddoc
- assert Couch.post("/#{db_name}", body: %{:_id => "0", :a => 1, :b => 1}).body["ok"]
- assert Couch.post("/#{db_name}", body: %{:_id => "1", :a => 2, :b => 4}).body["ok"]
- assert Couch.post("/#{db_name}", body: %{:_id => "2", :a => 3, :b => 9}).body["ok"]
- assert Couch.post("/#{db_name}", body: %{:_id => "3", :a => 4, :b => 16}).body["ok"]
- assert Couch.put("/#{db_name}/_design/foo", body: map_doc).body["ok"]
- assert Couch.put("/#{db_name}/_design/bar", body: red_doc, query: [w: 3]).body["ok"]
- assert Couch.get("/#{db_name}").body["doc_count"] == 6
-
- # Initial view query test
- resp = Couch.get("/#{db_name}/_design/foo/_view/baz")
- assert resp.body["total_rows"] == 1
- assert hd(resp.body["rows"])["value"] == 16
-
- # Modified doc and test for updated view results
- doc0 = Couch.get("/#{db_name}/0").body
- doc0 = Map.put(doc0, :a, 4)
- assert Couch.put("/#{db_name}/0", body: doc0).body["ok"]
-
- retry_until(fn ->
- Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 2
- end)
-
- # Write 2 more docs and test for updated view results
- assert Couch.post("/#{db_name}", body: %{:a => 3, :b => 9}).body["ok"]
- assert Couch.post("/#{db_name}", body: %{:a => 4, :b => 16}).body["ok"]
-
- retry_until(fn ->
- Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 3
- end)
-
- assert Couch.get("/#{db_name}").body["doc_count"] == 8
-
- # Test reduce function
- resp = Couch.get("/#{db_name}/_design/bar/_view/baz")
- assert hd(resp.body["rows"])["value"] == 33
-
- # Delete doc and test for updated view results
- doc0 = Couch.get("/#{db_name}/0").body
- assert Couch.delete("/#{db_name}/0?rev=#{doc0["_rev"]}").body["ok"]
-
- retry_until(fn ->
- Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 2
- end)
-
- assert Couch.get("/#{db_name}").body["doc_count"] == 7
- assert Couch.get("/#{db_name}/0").status_code == 404
- refute Couch.get("/#{db_name}/0?rev=#{doc0["_rev"]}").status_code == 404
- end
-
- @tag :with_db
- test "POST doc response has a Location header", context do
- db_name = context[:db_name]
- resp = Couch.post("/#{db_name}", body: %{:foo => :bar})
- assert resp.body["ok"]
- loc = resp.headers["Location"]
- assert loc, "should have a Location header"
- locs = Enum.reverse(String.split(loc, "/"))
- assert hd(locs) == resp.body["id"]
- assert hd(tl(locs)) == db_name
- end
-
- @tag :with_db
- test "POST doc with an _id field isn't overwritten by uuid", context do
- db_name = context[:db_name]
- resp = Couch.post("/#{db_name}", body: %{:_id => "oppossum", :yar => "matey"})
- assert resp.body["ok"]
- assert resp.body["id"] == "oppossum"
- assert Couch.get("/#{db_name}/oppossum").body["yar"] == "matey"
- end
-
- @tag :pending
- @tag :with_db
- test "PUT doc has a Location header", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/newdoc", body: %{:a => 1})
- assert String.ends_with?(resp.headers["location"], "/#{db_name}/newdoc")
- # TODO: make protocol check use defined protocol value
- assert String.starts_with?(resp.headers["location"], "http")
- end
-
- @tag :with_db
- test "DELETE'ing a non-existent doc should 404", context do
- db_name = context[:db_name]
- assert Couch.delete("/#{db_name}/doc-does-not-exist").status_code == 404
- end
-
- @tag :with_db
- test "Check for invalid document members", context do
- db_name = context[:db_name]
-
- bad_docs = [
- {:goldfish, %{:_zing => 4}},
- {:zebrafish, %{:_zoom => "hello"}},
- {:mudfish, %{:zane => "goldfish", :_fan => "something smells delicious"}},
- {:tastyfish, %{:_bing => %{"wha?" => "soda can"}}}
- ]
-
- Enum.each(bad_docs, fn {id, doc} ->
- resp = Couch.put("/#{db_name}/#{id}", body: doc)
- assert resp.status_code == 400
- assert resp.body["error"] == "doc_validation"
-
- resp = Couch.post("/#{db_name}", body: doc)
- assert resp.status_code == 400
- assert resp.body["error"] == "doc_validation"
- end)
- end
-
- @tag :with_db
- test "PUT error when body not an object", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/bar", body: "[]")
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == "Document must be a JSON object"
- end
-
- @tag :with_db
- test "_bulk_docs POST error when body not an object", context do
- db_name = context[:db_name]
- resp = Couch.post("/#{db_name}/_bulk_docs", body: "[]")
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == "Request body must be a JSON object"
- end
-
- @tag :with_db
- test "_all_docs POST error when multi-get is not a {'key': [...]} structure", context do
- db_name = context[:db_name]
- resp = Couch.post("/#{db_name}/_all_docs", body: "[]")
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == "Request body must be a JSON object"
-
- resp = Couch.post("/#{db_name}/_all_docs", body: %{:keys => 1})
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == "`keys` body member must be an array."
- end
-
- @tag :with_db
- test "oops, the doc id got lost in code nirwana", context do
- db_name = context[:db_name]
- resp = Couch.delete("/#{db_name}/?rev=foobarbaz")
- assert resp.status_code == 400, "should return a bad request"
- assert resp.body["error"] == "bad_request"
-
- assert resp.body["reason"] ==
- "You tried to DELETE a database with a ?=rev parameter. Did you mean to DELETE a document instead?"
- end
-
- @tag :pending
- @tag :with_db
- test "On restart, a request for creating an already existing db can not override",
- _context do
- # TODO
- assert true
- end
-
- @tag :with_db
- test "Default headers are returned for doc with open_revs=all", context do
- db_name = context[:db_name]
- post_response = Couch.post("/#{db_name}", body: %{:foo => :bar})
- id = post_response.body["id"]
- head_response = Couch.head("/#{db_name}/#{id}?open_revs=all")
- assert head_response.headers["X-Couch-Request-ID"]
- assert head_response.headers["X-CouchDB-Body-Time"]
- end
-end
diff --git a/test/elixir/test/batch_save_test.exs b/test/elixir/test/batch_save_test.exs
deleted file mode 100644
index 030fcdfba..000000000
--- a/test/elixir/test/batch_save_test.exs
+++ /dev/null
@@ -1,42 +0,0 @@
-defmodule BatchSaveTest do
- use CouchTestCase
-
- @moduletag :batch_save
-
- @moduledoc """
- Test CouchDB batch save
- This is a port of batch_save.js
- """
-
- @doc_count 100
-
- @tag :with_db
- test "batch put", context do
- path_fun = &"/#{&1}/#{&2}"
- run(&Couch.put/2, path_fun, context[:db_name], @doc_count)
- end
-
- @tag :with_db
- test "batch post", context do
- path_fun = fn db_name, _ -> "/#{db_name}" end
- run(&Couch.post/2, path_fun, context[:db_name], @doc_count)
- end
-
- @tag :with_db
- test "batch put with identical doc ids", context do
- path_fun = fn db_name, _ -> "/#{db_name}/foo" end
- run(&Couch.put/2, path_fun, context[:db_name], 1)
- end
-
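- # With batch=ok each write returns 202 immediately and is committed to disk
- # asynchronously, so poll the doc count until the whole batch is visible.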
- defp run(req_fun, path_fun, db_name, expected_doc_count) do
- for i <- 1..@doc_count do
- opts = [body: %{a: i, b: i}, query: %{batch: "ok"}]
- resp = req_fun.(path_fun.(db_name, i), opts)
- assert resp.body["ok"] and resp.status_code == 202
- end
-
- retry_until(fn ->
- Couch.get("/#{db_name}").body["doc_count"] == expected_doc_count
- end)
- end
-end
diff --git a/test/elixir/test/bulk_docs_test.exs b/test/elixir/test/bulk_docs_test.exs
deleted file mode 100644
index 1a7c11045..000000000
--- a/test/elixir/test/bulk_docs_test.exs
+++ /dev/null
@@ -1,154 +0,0 @@
-defmodule BulkDocsTest do
- use CouchTestCase
-
- @moduletag :bulk_docs
-
- @moduledoc """
- Test CouchDB bulk docs
- This is a port of bulk_docs.js
- """
-
- @doc_range 1..5
-
- @tag :with_db
- test "bulk docs can create, update, & delete many docs per request", ctx do
- db = ctx[:db_name]
- docs = create_docs(@doc_range)
- resp = bulk_post(docs, db)
- assert revs_start_with(resp.body, "1-")
- docs = rev(docs, resp.body)
- # Modify each doc's `string` field and re-post
- docs =
- Enum.map(docs, fn doc = %{string: string} ->
- %{doc | string: string <> ".00"}
- end)
-
- resp = bulk_post(docs, db)
- assert revs_start_with(resp.body, "2-")
- docs = rev(docs, resp.body)
- # Confirm changes were applied for each doc
- assert Enum.all?(docs, fn doc ->
- String.ends_with?(Couch.get("/#{db}/#{doc._id}").body["string"], ".00")
- end)
-
- docs = Enum.map(docs, &Map.put(&1, :_deleted, true))
- resp = bulk_post(docs, db)
- assert revs_start_with(resp.body, "3-")
- # Confirm docs were deleted
- assert Enum.all?(docs, fn doc ->
- resp = Couch.get("/#{db}/#{doc._id}")
- assert resp.status_code == 404
- assert resp.body["error"] == "not_found"
- assert resp.body["reason"] == "deleted"
- end)
- end
-
- @tag :with_db
- @tag :skip_on_jenkins
- test "bulk docs can detect conflicts", ctx do
- db = ctx[:db_name]
- docs = create_docs(@doc_range)
- resp = bulk_post(docs, db)
- assert revs_start_with(resp.body, "1-")
- docs = rev(docs, resp.body)
- # Update just the first doc to create a conflict in subsequent bulk update
- doc = hd(docs)
- resp = Couch.put("/#{db}/#{doc._id}", body: doc)
- assert resp.status_code in [201, 202]
- # Attempt to delete all docs
- docs = Enum.map(docs, fn doc -> Map.put(doc, :_deleted, true) end)
-
- retry_until(fn ->
- resp = bulk_post(docs, db)
- # Confirm first doc not updated, and result has no rev field
- res = hd(resp.body)
- assert res["id"] == "1" and res["error"] == "conflict"
- assert Map.get(res, "rev") == nil
- # Confirm other docs updated normally
- assert revs_start_with(tl(resp.body), "2-")
- end)
- end
-
- @tag :with_db
- test "bulk docs supplies `id` if not provided in doc", ctx do
- docs = [%{foo: "bar"}]
- res = hd(bulk_post(docs, ctx[:db_name]).body)
- assert res["id"]
- assert res["rev"]
- end
-
- @tag :with_db
- test "bulk docs raises error for `all_or_nothing` option", ctx do
- opts = [body: %{docs: create_docs(@doc_range), all_or_nothing: true}]
- resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", opts)
- assert resp.status_code == 417
- assert Enum.all?(resp.body, &(Map.get(&1, "error") == "not_implemented"))
- expected_reason = "all_or_nothing is not supported"
- assert Enum.all?(resp.body, &(Map.get(&1, "reason") == expected_reason))
- end
-
- @tag :with_db
- test "bulk docs raises conflict error for combined update & delete", ctx do
- db = ctx[:db_name]
- doc = %{_id: "id", val: "val"}
- resp = Couch.put("/#{db}/#{doc._id}", body: doc)
- doc = rev(doc, resp.body)
- update = %{doc | val: "newval"}
- delete = Map.put(doc, :_deleted, true)
- body = bulk_post([update, delete], db).body
- assert Enum.count(body, &(Map.get(&1, "error") == "conflict")) == 1
- assert Enum.count(body, &Map.get(&1, "rev")) == 1
- end
-
- @tag :with_db
- test "bulk docs raises error for missing `docs` parameter", ctx do
- docs = [%{foo: "bar"}]
- resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", body: %{doc: docs})
- assert_bad_request(resp, "POST body must include `docs` parameter.")
- end
-
- @tag :with_db
- test "bulk docs raises error for invlaid `docs` parameter", ctx do
- resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", body: %{docs: "foo"})
- assert_bad_request(resp, "`docs` parameter must be an array.")
- end
-
- @tag :with_db
- test "bulk docs raises error for invlaid `new_edits` parameter", ctx do
- opts = [body: %{docs: [], new_edits: 0}]
- resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", opts)
- assert_bad_request(resp, "`new_edits` parameter must be a boolean.")
- end
-
- @tag :with_db
- test "bulk docs emits conflict error for duplicate doc `_id`s", ctx do
- docs = [%{_id: "0", a: 0}, %{_id: "1", a: 1}, %{_id: "1", a: 2}, %{_id: "3", a: 3}]
- rows = bulk_post(docs, ctx[:db_name]).body
- assert Enum.at(rows, 1)["id"] == "1"
- assert Enum.at(rows, 1)["ok"]
- assert Enum.at(rows, 2)["error"] == "conflict"
- end
-
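- # POST the docs to _bulk_docs and retry until the response contains one
- # result row per submitted doc.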
- defp bulk_post(docs, db) do
- retry_until(fn ->
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs})
-
- assert resp.status_code in [201, 202] and length(resp.body) == length(docs), """
- Expected 201 or 202 and the same number of response rows as in the request, but got
- #{pretty_inspect(resp)}
- """
-
- resp
- end)
- end
-
- defp revs_start_with(rows, prefix) do
- Enum.all?(rows, fn %{"rev" => rev} -> String.starts_with?(rev, prefix) end)
- end
-
- defp assert_bad_request(resp, reason) do
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == reason
- end
-end
diff --git a/test/elixir/test/changes_async_test.exs b/test/elixir/test/changes_async_test.exs
deleted file mode 100644
index 6c833d433..000000000
--- a/test/elixir/test/changes_async_test.exs
+++ /dev/null
@@ -1,442 +0,0 @@
-defmodule ChangesAsyncTest do
- use CouchTestCase
-
- @moduletag :changes
-
- @moduledoc """
- Test CouchDB /{db}/_changes
- """
-
- @tag :with_db
- test "live changes", context do
- db_name = context[:db_name]
- test_changes(db_name, "live")
- end
-
- @tag :with_db
- test "continuous changes", context do
- db_name = context[:db_name]
- test_changes(db_name, "continuous")
- end
-
- @tag :with_db
- test "longpoll changes", context do
- db_name = context[:db_name]
-
- check_empty_db(db_name)
-
- create_doc(db_name, sample_doc_foo())
-
- req_id =
- Couch.get("/#{db_name}/_changes?feed=longpoll",
- stream_to: self()
- )
-
- changes = process_response(req_id.id, &parse_chunk/1)
- {changes_length, last_seq_prefix} = parse_changes_response(changes)
- assert changes_length == 1, "db should not be empty"
- assert last_seq_prefix == "1-", "seq must start with 1-"
-
- last_seq = changes["last_seq"]
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
-
- req_id =
- Couch.get("/#{db_name}/_changes?feed=longpoll&since=#{last_seq}",
- stream_to: self(),
- direct: worker_pid
- )
-
- :ok = wait_for_headers(req_id.id, 200)
-
- create_doc_bar(db_name, "bar")
-
- {changes_length, last_seq_prefix} =
- req_id.id
- |> process_response(&parse_chunk/1)
- |> parse_changes_response()
-
- assert changes_length == 1, "should return one change"
- assert last_seq_prefix == "2-", "seq must start with 2-"
-
- req_id =
- Couch.get("/#{db_name}/_changes?feed=longpoll&since=now",
- stream_to: self(),
- direct: worker_pid
- )
-
- :ok = wait_for_headers(req_id.id, 200)
-
- create_doc_bar(db_name, "barzzzz")
-
- changes = process_response(req_id.id, &parse_chunk/1)
- {changes_length, last_seq_prefix} = parse_changes_response(changes)
- assert changes_length == 1, "should return one change"
- assert Enum.at(changes["results"], 0)["id"] == "barzzzz"
- assert last_seq_prefix == "3-", "seq must start with 3-"
- end
-
- @tag :with_db
- test "eventsource changes", context do
- db_name = context[:db_name]
-
- check_empty_db(db_name)
-
- create_doc(db_name, sample_doc_foo())
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
-
- req_id =
- Rawresp.get("/#{db_name}/_changes?feed=eventsource&timeout=500",
- stream_to: self(),
- direct: worker_pid
- )
-
- :ok = wait_for_headers(req_id.id, 200)
-
- create_doc_bar(db_name, "bar")
-
- changes = process_response(req_id.id, &parse_event/1)
-
- assert length(changes) == 2
- assert Enum.at(changes, 0)["id"] == "foo"
- assert Enum.at(changes, 1)["id"] == "bar"
-
- HTTPotion.stop_worker_process(worker_pid)
- end
-
- @tag :with_db
- test "eventsource heartbeat", context do
- db_name = context[:db_name]
-
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
-
- req_id =
- Rawresp.get("/#{db_name}/_changes?feed=eventsource&heartbeat=10",
- stream_to: {self(), :once},
- direct: worker_pid
- )
-
- :ok = wait_for_headers(req_id.id, 200)
- beats = wait_for_heartbeats(req_id.id, 0, 3)
- assert beats == 3
- HTTPotion.stop_worker_process(worker_pid)
- end
-
- @tag :with_db
- test "longpoll filtered changes", context do
- db_name = context[:db_name]
- create_filters_view(db_name)
-
- create_doc(db_name, %{bop: "foom"})
- create_doc(db_name, %{bop: false})
-
- req_id =
- Couch.get("/#{db_name}/_changes?feed=longpoll&filter=changes_filter/bop",
- stream_to: self()
- )
-
- changes = process_response(req_id.id, &parse_chunk/1)
- {changes_length, last_seq_prefix} = parse_changes_response(changes)
- assert changes_length == 1, "db should not be empty"
- assert last_seq_prefix == "3-", "seq must start with 3-"
-
- last_seq = changes["last_seq"]
- # longpoll waits until a matching change before returning
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
-
- req_id =
- Couch.get(
- "/#{db_name}/_changes?feed=longpoll&filter=changes_filter/bop&since=#{last_seq}",
- stream_to: self(),
- direct: worker_pid
- )
-
- :ok = wait_for_headers(req_id.id, 200)
- create_doc(db_name, %{_id: "falsy", bop: ""})
- # Doc doesn't match the filter
- changes = process_response(req_id.id, &parse_chunk/1)
- assert changes == :timeout
-
- # Doc matches the filter
- create_doc(db_name, %{_id: "bingo", bop: "bingo"})
- changes = process_response(req_id.id, &parse_chunk/1)
- {changes_length, last_seq_prefix} = parse_changes_response(changes)
- assert changes_length == 1, "db should not be empty"
- assert last_seq_prefix == "5-", "seq must start with 5-"
- assert Enum.at(changes["results"], 0)["id"] == "bingo"
- end
-
- @tag :with_db
- test "continuous filtered changes", context do
- db_name = context[:db_name]
- create_filters_view(db_name)
-
- create_doc(db_name, %{bop: false})
- create_doc(db_name, %{_id: "bingo", bop: "bingo"})
-
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
-
- req_id =
- Rawresp.get(
- "/#{db_name}/_changes?feed=continuous&filter=changes_filter/bop&timeout=500",
- stream_to: self(),
- direct: worker_pid
- )
-
- :ok = wait_for_headers(req_id.id, 200)
- create_doc(db_name, %{_id: "rusty", bop: "plankton"})
-
- changes = process_response(req_id.id, &parse_changes_line_chunk/1)
-
- changes_ids =
- changes
- |> Enum.filter(fn p -> Map.has_key?(p, "id") end)
- |> Enum.map(fn p -> p["id"] end)
-
- assert Enum.member?(changes_ids, "bingo")
- assert Enum.member?(changes_ids, "rusty")
- assert length(changes_ids) == 2
- end
-
- @tag :with_db
- test "continuous filtered changes with doc ids", context do
- db_name = context[:db_name]
- doc_ids = %{doc_ids: ["doc1", "doc3", "doc4"]}
-
- create_doc(db_name, %{_id: "doc1", value: 1})
- create_doc(db_name, %{_id: "doc2", value: 2})
-
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
-
- req_id =
- Rawresp.post(
- "/#{db_name}/_changes?feed=continuous&timeout=500&filter=_doc_ids",
- body: doc_ids,
- headers: ["Content-Type": "application/json"],
- stream_to: self(),
- direct: worker_pid
- )
-
- :ok = wait_for_headers(req_id.id, 200)
- create_doc(db_name, %{_id: "doc3", value: 3})
-
- changes = process_response(req_id.id, &parse_changes_line_chunk/1)
-
- changes_ids =
- changes
- |> Enum.filter(fn p -> Map.has_key?(p, "id") end)
- |> Enum.map(fn p -> p["id"] end)
-
- assert Enum.member?(changes_ids, "doc1")
- assert Enum.member?(changes_ids, "doc3")
- assert length(changes_ids) == 2
- end
-
- @tag :with_db
- test "COUCHDB-1852", context do
- db_name = context[:db_name]
-
- create_doc(db_name, %{bop: "foom"})
- create_doc(db_name, %{bop: "foom"})
- create_doc(db_name, %{bop: "foom"})
- create_doc(db_name, %{bop: "foom"})
-
- resp = Couch.get("/#{db_name}/_changes")
- assert length(resp.body["results"]) == 4
- seq = Enum.at(resp.body["results"], 1)["seq"]
-
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
-
- # simulate an EventSource request with a Last-Event-ID header
- req_id =
- Rawresp.get(
- "/#{db_name}/_changes?feed=eventsource&timeout=100&since=0",
- headers: [Accept: "text/event-stream", "Last-Event-ID": seq],
- stream_to: self(),
- direct: worker_pid
- )
-
- changes = process_response(req_id.id, &parse_event/1)
- assert length(changes) == 2
- end
-
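- # Stream response chunks one at a time, counting heartbeat events until the
- # expected number has been seen or the stream times out.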
- defp wait_for_heartbeats(id, beats, expected_beats) do
- if beats < expected_beats do
- :ibrowse.stream_next(id)
- is_heartbeat = process_response(id, &parse_heartbeat/1)
-
- case is_heartbeat do
- :heartbeat -> wait_for_heartbeats(id, beats + 1, expected_beats)
- :timeout -> beats
- _ -> wait_for_heartbeats(id, beats, expected_beats)
- end
- else
- beats
- end
- end
-
- defp wait_for_headers(id, status, timeout \\ 1000) do
- receive do
- %HTTPotion.AsyncHeaders{id: ^id, status_code: ^status} ->
- :ok
-
- _ ->
- wait_for_headers(id, status, timeout)
- after
- timeout -> :timeout
- end
- end
-
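- # Wait for the next async chunk belonging to this request id and parse it
- # with the given parser, returning :timeout if nothing arrives in time.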
- defp process_response(id, chunk_parser, timeout \\ 1000) do
- receive do
- %HTTPotion.AsyncChunk{id: ^id} = msg ->
- chunk_parser.(msg)
-
- _ ->
- process_response(id, chunk_parser, timeout)
- after
- timeout -> :timeout
- end
- end
-
- defp parse_chunk(msg) do
- msg.chunk |> IO.iodata_to_binary() |> :jiffy.decode([:return_maps])
- end
-
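- # Parse an eventsource chunk: each "data: ..." line carries a JSON-encoded
- # change row.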
- defp parse_event(msg) do
- captures = Regex.scan(~r/data: (.*)/, msg.chunk)
-
- captures
- |> Enum.map(fn p -> Enum.at(p, 1) end)
- |> Enum.filter(fn p -> String.trim(p) != "" end)
- |> Enum.map(fn p ->
- p
- |> IO.iodata_to_binary()
- |> :jiffy.decode([:return_maps])
- end)
- end
-
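- # A chunk containing "event: heartbeat" marks a heartbeat; anything else is data.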
- defp parse_heartbeat(msg) do
- is_heartbeat = Regex.match?(~r/event: heartbeat/, msg.chunk)
-
- if is_heartbeat do
- :heartbeat
- else
- :other
- end
- end
-
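- # Return the number of change rows and the two-character prefix of last_seq.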
- defp parse_changes_response(changes) do
- {length(changes["results"]), String.slice(changes["last_seq"], 0..1)}
- end
-
- defp check_empty_db(db_name) do
- resp = Couch.get("/#{db_name}/_changes")
- assert resp.body["results"] == [], "db must be empty"
- assert String.at(resp.body["last_seq"], 0) == "0", "seq must start with 0"
- end
-
- defp test_changes(db_name, feed) do
- check_empty_db(db_name)
- {_, resp} = create_doc(db_name, sample_doc_foo())
- rev = resp.body["rev"]
-
- # TODO: retry_part
- resp = Couch.get("/#{db_name}/_changes")
- assert length(resp.body["results"]) == 1, "db must not be empty"
- assert String.at(resp.body["last_seq"], 0) == "1", "seq must start with 1"
-
- # increase the timeout to 100 so there is enough time to assemble the response
- # (timeouts that are too short kill the feed before it returns)
- resp = Rawresp.get("/#{db_name}/_changes?feed=#{feed}&timeout=100")
- changes = parse_changes_line(resp.body)
-
- change = Enum.at(changes, 0)
- assert Enum.at(change["changes"], 0)["rev"] == rev
-
- # the sequence is not fully ordered and is now a complex structure
- change = Enum.at(changes, 1)
- assert String.at(change["last_seq"], 0) == "1"
-
- # create_doc_bar(db_name,"bar")
- {:ok, worker_pid} = HTTPotion.spawn_worker_process(Couch.process_url(""))
-
- %HTTPotion.AsyncResponse{id: req_id} =
- Rawresp.get("/#{db_name}/_changes?feed=#{feed}&timeout=500",
- stream_to: self(),
- direct: worker_pid
- )
-
- :ok = wait_for_headers(req_id, 200)
- create_doc_bar(db_name, "bar")
-
- changes = process_response(req_id, &parse_changes_line_chunk/1)
- assert length(changes) == 3
-
- HTTPotion.stop_worker_process(worker_pid)
- end
-
- def create_doc_bar(db_name, id) do
- create_doc(db_name, %{:_id => id, :bar => 1})
- end
-
- defp parse_changes_line_chunk(msg) do
- parse_changes_line(msg.chunk)
- end
-
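- # Continuous and longpoll feed bodies are newline-delimited JSON; decode each
- # non-empty line.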
- defp parse_changes_line(body) do
- body_lines = String.split(body, "\n")
-
- body_lines
- |> Enum.filter(fn line -> line != "" end)
- |> Enum.map(fn line ->
- line |> IO.iodata_to_binary() |> :jiffy.decode([:return_maps])
- end)
- end
-
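- # Create the _design/changes_filter design doc with the filter functions and
- # views used by the filtered _changes tests.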
- defp create_filters_view(db_name) do
- dynamic_fun = """
- function(doc, req) {
- var field = req.query.field;
- return doc[field];
- }
- """
-
- userctx_fun = """
- function(doc, req) {
- var field = req.query.field;
- return doc[field];
- }
- """
-
- blah_fun = """
- function(doc) {
- if (doc._id == "blah") {
- emit(null, null);
- }
- }
- """
-
- ddoc = %{
- _id: "_design/changes_filter",
- filters: %{
- bop: "function(doc, req) { return (doc.bop);}",
- dynamic: dynamic_fun,
- userCtx: userctx_fun,
- conflicted: "function(doc, req) { return (doc._conflicts);}"
- },
- options: %{
- local_seq: true
- },
- views: %{
- local_seq: %{
- map: "function(doc) {emit(doc._local_seq, null)}"
- },
- blah: %{
- map: blah_fun
- }
- }
- }
-
- create_doc(db_name, ddoc)
- end
-end
diff --git a/test/elixir/test/changes_test.exs b/test/elixir/test/changes_test.exs
deleted file mode 100644
index e3e8ba784..000000000
--- a/test/elixir/test/changes_test.exs
+++ /dev/null
@@ -1,509 +0,0 @@
-defmodule ChangesTest do
- use CouchTestCase
-
- @moduletag :changes
-
- @moduledoc """
- Test CouchDB /{db}/_changes
- """
-
- @tag :with_db
- test "Changes feed negative heartbeat", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get(
- "/#{db_name}/_changes",
- query: %{
- :feed => "continuous",
- :heartbeat => -1000
- }
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
-
- assert resp.body["reason"] ==
- "The heartbeat value should be a positive integer (in milliseconds)."
- end
-
- @tag :with_db
- test "Changes feed non-integer heartbeat", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get(
- "/#{db_name}/_changes",
- query: %{
- :feed => "continuous",
- :heartbeat => "a1000"
- }
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
-
- assert resp.body["reason"] ==
- "Invalid heartbeat value. Expecting a positive integer value (in milliseconds)."
- end
-
- @tag :with_db
- test "function filtered changes", context do
- db_name = context[:db_name]
- create_filters_view(db_name)
-
- resp = Couch.get("/#{db_name}/_changes?filter=changes_filter/bop")
- assert Enum.empty?(resp.body["results"]), "db must be empty"
-
- {:ok, doc_resp} = create_doc(db_name, %{bop: "foom"})
- rev = doc_resp.body["rev"]
- id = doc_resp.body["id"]
- create_doc(db_name, %{bop: false})
-
- resp = Couch.get("/#{db_name}/_changes?filter=changes_filter/bop")
- assert length(resp.body["results"]) == 1
- change_rev = get_change_rev_at(resp.body["results"], 0)
- assert change_rev == rev
-
- doc = open_doc(db_name, id)
- doc = Map.put(doc, "newattr", "a")
-
- doc = save_doc(db_name, doc)
-
- resp = Couch.get("/#{db_name}/_changes?filter=changes_filter/bop")
- assert length(resp.body["results"]) == 1
- new_change_rev = get_change_rev_at(resp.body["results"], 0)
- assert new_change_rev == doc["_rev"]
- assert new_change_rev != change_rev
-
- resp = Couch.get("/#{db_name}/_changes?filter=changes_filter/dynamic&field=woox")
- assert Enum.empty?(resp.body["results"]), "db must be empty"
-
- resp = Couch.get("/#{db_name}/_changes?filter=changes_filter/dynamic&field=bop")
- assert length(resp.body["results"]) == 1, "db must have one change"
- new_change_rev = get_change_rev_at(resp.body["results"], 0)
- assert new_change_rev == doc["_rev"]
- end
-
- @tag :with_db
- test "non-existing desing doc for filtered changes", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_changes?filter=nothingtosee/bop")
- assert resp.status_code == 404
- end
-
- @tag :with_db
- test "non-existing function for filtered changes", context do
- db_name = context[:db_name]
- create_filters_view(db_name)
- resp = Couch.get("/#{db_name}/_changes?filter=changes_filter/movealong")
- assert resp.status_code == 404
- end
-
- @tag :with_db
- test "non-existing desing doc and funcion for filtered changes", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_changes?filter=nothingtosee/movealong")
- assert resp.status_code == 404
- end
-
- @tag :with_db
- test "map function filtered changes", context do
- db_name = context[:db_name]
- create_filters_view(db_name)
- create_doc(db_name, %{_id: "blah", bop: "plankton"})
- resp = Couch.get("/#{db_name}/_changes?filter=_view&view=changes_filter/blah")
- assert length(resp.body["results"]) == 1
- assert Enum.at(resp.body["results"], 0)["id"] == "blah"
- end
-
- @tag :with_db
- test "changes limit", context do
- db_name = context[:db_name]
-
- create_doc(db_name, %{_id: "blah", bop: "plankton"})
- create_doc(db_name, %{_id: "blah2", bop: "plankton"})
- create_doc(db_name, %{_id: "blah3", bop: "plankton"})
-
- resp = Couch.get("/#{db_name}/_changes?limit=1")
- assert length(resp.body["results"]) == 1
-
- resp = Couch.get("/#{db_name}/_changes?limit=2")
- assert length(resp.body["results"]) == 2
- end
-
- @tag :with_db
- test "erlang function filtered changes", context do
- db_name = context[:db_name]
- create_erlang_filters_view(db_name)
-
- resp = Couch.get("/#{db_name}/_changes?filter=erlang/foo")
- assert Enum.empty?(resp.body["results"])
-
- create_doc(db_name, %{_id: "doc1", value: 1})
- create_doc(db_name, %{_id: "doc2", value: 2})
- create_doc(db_name, %{_id: "doc3", value: 3})
- create_doc(db_name, %{_id: "doc4", value: 4})
-
- resp = Couch.get("/#{db_name}/_changes?filter=erlang/foo")
-
- changes_ids =
- resp.body["results"]
- |> Enum.map(fn p -> p["id"] end)
-
- assert Enum.member?(changes_ids, "doc2")
- assert Enum.member?(changes_ids, "doc4")
- assert length(resp.body["results"]) == 2
- end
-
- @tag :with_db
- test "changes filtering on docids", context do
- db_name = context[:db_name]
- doc_ids = %{doc_ids: ["doc1", "doc3", "doc4"]}
-
- resp =
- Couch.post("/#{db_name}/_changes?filter=_doc_ids",
- body: doc_ids,
- headers: ["Content-Type": "application/json"]
- )
-
- assert Enum.empty?(resp.body["results"])
-
- create_doc(db_name, %{_id: "doc1", value: 1})
- create_doc(db_name, %{_id: "doc2", value: 2})
-
- resp =
- Couch.post("/#{db_name}/_changes?filter=_doc_ids",
- body: doc_ids,
- headers: ["Content-Type": "application/json"]
- )
-
- assert length(resp.body["results"]) == 1
- assert Enum.at(resp.body["results"], 0)["id"] == "doc1"
-
- create_doc(db_name, %{_id: "doc3", value: 3})
-
- resp =
- Couch.post("/#{db_name}/_changes?filter=_doc_ids",
- body: doc_ids,
- headers: ["Content-Type": "application/json"]
- )
-
- assert length(resp.body["results"]) == 2
-
- changes_ids =
- resp.body["results"]
- |> Enum.map(fn p -> p["id"] end)
-
- assert Enum.member?(changes_ids, "doc1")
- assert Enum.member?(changes_ids, "doc3")
-
- encoded_doc_ids = doc_ids.doc_ids |> :jiffy.encode()
-
- resp =
- Couch.get("/#{db_name}/_changes",
- query: %{filter: "_doc_ids", doc_ids: encoded_doc_ids}
- )
-
- assert length(resp.body["results"]) == 2
-
- changes_ids =
- resp.body["results"]
- |> Enum.map(fn p -> p["id"] end)
-
- assert Enum.member?(changes_ids, "doc1")
- assert Enum.member?(changes_ids, "doc3")
- end
-
- @tag :with_db
- test "changes filtering on design docs", context do
- db_name = context[:db_name]
-
- create_erlang_filters_view(db_name)
- create_doc(db_name, %{_id: "doc1", value: 1})
-
- resp = Couch.get("/#{db_name}/_changes?filter=_design")
- assert length(resp.body["results"]) == 1
- assert Enum.at(resp.body["results"], 0)["id"] == "_design/erlang"
- end
-
- @tag :with_db
- test "changes filtering on custom filter", context do
- db_name = context[:db_name]
- create_filters_view(db_name)
-
- resp = Couch.post("/#{db_name}/_changes?filter=changes_filter/bop")
- assert Enum.empty?(resp.body["results"]), "db must be empty"
-
- {:ok, doc_resp} = create_doc(db_name, %{bop: "foom"})
- rev = doc_resp.body["rev"]
- create_doc(db_name, %{bop: false})
-
- resp = Couch.post("/#{db_name}/_changes?filter=changes_filter/bop")
- assert length(resp.body["results"]) == 1
- change_rev = get_change_rev_at(resp.body["results"], 0)
- assert change_rev == rev
-
- resp = Couch.post("/#{db_name}/_changes?filter=changes_filter/bop",
- body: %{doc_ids: ["doc1", "doc3", "doc4"]},
- headers: ["Content-Type": "application/json"]
- )
- assert length(resp.body["results"]) == 1
- change_rev = get_change_rev_at(resp.body["results"], 0)
- assert change_rev == rev
- end
-
- @tag :with_db
- test "changes fail on invalid payload", context do
- db_name = context[:db_name]
- create_filters_view(db_name)
-
- resp = Couch.post("/#{db_name}/_changes?filter=changes_filter/bop",
- body: "[\"doc1\"]",
- headers: ["Content-Type": "application/json"]
- )
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == "Request body must be a JSON object"
-
- resp = Couch.post("/#{db_name}/_changes?filter=changes_filter/bop",
- body: "{\"doc_ids\": [\"doc1\",",
- headers: ["Content-Type": "application/json"]
- )
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == "invalid UTF-8 JSON"
-
- set_config({"chttpd", "max_http_request_size", "16"})
-
- resp = Couch.post("/#{db_name}/_changes?filter=changes_filter/bop",
- body: %{doc_ids: ["doc1", "doc3", "doc4"]},
- headers: ["Content-Type": "application/json"]
- )
- assert resp.status_code == 413
- assert resp.body["error"] == "too_large"
- assert resp.body["reason"] == "the request entity is too large"
- end
-
- @tag :with_db
- test "COUCHDB-1037-empty result for ?limit=1&filter=foo/bar in some cases",
- context do
- db_name = context[:db_name]
-
- filter_fun = """
- function(doc, req) {
- return (typeof doc.integer === "number");
- }
- """
-
- ddoc = %{
- _id: "_design/testdocs",
- language: "javascript",
- filters: %{
- testdocsonly: filter_fun
- }
- }
-
- create_doc(db_name, ddoc)
-
- ddoc = %{
- _id: "_design/foobar",
- foo: "bar"
- }
-
- create_doc(db_name, ddoc)
- bulk_save(db_name, make_docs(0..4))
-
- resp = Couch.get("/#{db_name}/_changes")
- assert length(resp.body["results"]) == 7
-
- resp = Couch.get("/#{db_name}/_changes?limit=1&filter=testdocs/testdocsonly")
- assert length(resp.body["results"]) == 1
- # we can't guarantee ordering
- assert Regex.match?(~r/[0-4]/, Enum.at(resp.body["results"], 0)["id"])
-
- resp = Couch.get("/#{db_name}/_changes?limit=2&filter=testdocs/testdocsonly")
- assert length(resp.body["results"]) == 2
- # we can't guarantee ordering
- assert Regex.match?(~r/[0-4]/, Enum.at(resp.body["results"], 0)["id"])
- assert Regex.match?(~r/[0-4]/, Enum.at(resp.body["results"], 1)["id"])
- end
-
- @tag :with_db
- test "COUCHDB-1256", context do
- db_name = context[:db_name]
- {:ok, resp} = create_doc(db_name, %{_id: "foo", a: 123})
- create_doc(db_name, %{_id: "bar", a: 456})
- foo_rev = resp.body["rev"]
-
- Couch.put("/#{db_name}/foo?new_edits=false",
- headers: ["Content-Type": "application/json"],
- body: %{_rev: foo_rev, a: 456}
- )
-
- resp = Couch.get("/#{db_name}/_changes?style=all_docs")
- assert length(resp.body["results"]) == 2
-
- resp =
- Couch.get("/#{db_name}/_changes",
- query: %{style: "all_docs", since: Enum.at(resp.body["results"], 0)["seq"]}
- )
-
- assert length(resp.body["results"]) == 1
- end
-
- @tag :with_db
- test "COUCHDB-1923", context do
- db_name = context[:db_name]
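- # base64 encoding of "This is a base64 encoded text"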
- attachment_data = "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
-
- docs =
- make_docs(20..29, %{
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: attachment_data
- },
- "bar.txt": %{
- content_type: "text/plain",
- data: attachment_data
- }
- }
- })
-
- bulk_save(db_name, docs)
-
- resp = Couch.get("/#{db_name}/_changes?include_docs=true")
- assert length(resp.body["results"]) == 10
-
- first_doc = Enum.at(resp.body["results"], 0)["doc"]
-
- assert first_doc["_attachments"]["foo.txt"]["stub"]
- assert not Map.has_key?(first_doc["_attachments"]["foo.txt"], "data")
- assert not Map.has_key?(first_doc["_attachments"]["foo.txt"], "encoding")
- assert not Map.has_key?(first_doc["_attachments"]["foo.txt"], "encoded_length")
- assert first_doc["_attachments"]["bar.txt"]["stub"]
- assert not Map.has_key?(first_doc["_attachments"]["bar.txt"], "data")
- assert not Map.has_key?(first_doc["_attachments"]["bar.txt"], "encoding")
- assert not Map.has_key?(first_doc["_attachments"]["bar.txt"], "encoded_length")
-
- resp = Couch.get("/#{db_name}/_changes?include_docs=true&attachments=true")
- assert length(resp.body["results"]) == 10
-
- first_doc = Enum.at(resp.body["results"], 0)["doc"]
-
- assert not Map.has_key?(first_doc["_attachments"]["foo.txt"], "stub")
- assert first_doc["_attachments"]["foo.txt"]["data"] == attachment_data
- assert not Map.has_key?(first_doc["_attachments"]["foo.txt"], "encoding")
- assert not Map.has_key?(first_doc["_attachments"]["foo.txt"], "encoded_length")
-
- assert not Map.has_key?(first_doc["_attachments"]["bar.txt"], "stub")
- assert first_doc["_attachments"]["bar.txt"]["data"] == attachment_data
- assert not Map.has_key?(first_doc["_attachments"]["bar.txt"], "encoding")
- assert not Map.has_key?(first_doc["_attachments"]["bar.txt"], "encoded_length")
-
- resp = Couch.get("/#{db_name}/_changes?include_docs=true&att_encoding_info=true")
- assert length(resp.body["results"]) == 10
-
- first_doc = Enum.at(resp.body["results"], 0)["doc"]
-
- assert first_doc["_attachments"]["foo.txt"]["stub"]
- assert not Map.has_key?(first_doc["_attachments"]["foo.txt"], "data")
- assert first_doc["_attachments"]["foo.txt"]["encoding"] == "gzip"
- assert first_doc["_attachments"]["foo.txt"]["encoded_length"] == 47
- assert first_doc["_attachments"]["bar.txt"]["stub"]
- assert not Map.has_key?(first_doc["_attachments"]["bar.txt"], "data")
- assert first_doc["_attachments"]["bar.txt"]["encoding"] == "gzip"
- assert first_doc["_attachments"]["bar.txt"]["encoded_length"] == 47
- end
-
- defp create_erlang_filters_view(db_name) do
- erlang_fun = """
- fun({Doc}, Req) ->
- case couch_util:get_value(<<"value">>, Doc) of
- undefined -> false;
- Value -> (Value rem 2) =:= 0;
- _ -> false
- end
- end.
- """
-
- ddoc = %{
- _id: "_design/erlang",
- language: "erlang",
- filters: %{
- foo: erlang_fun
- }
- }
-
- create_doc(db_name, ddoc)
- end
-
- defp create_filters_view(db_name) do
- dynamic_fun = """
- function(doc, req) {
- var field = req.query.field;
- return doc[field];
- }
- """
-
- userctx_fun = """
- function(doc, req) {
- var field = req.query.field;
- return doc[field];
- }
- """
-
- blah_fun = """
- function(doc) {
- if (doc._id == "blah") {
- emit(null, null);
- }
- }
- """
-
- ddoc = %{
- _id: "_design/changes_filter",
- filters: %{
- bop: "function(doc, req) { return (doc.bop);}",
- dynamic: dynamic_fun,
- userCtx: userctx_fun,
- conflicted: "function(doc, req) { return (doc._conflicts);}"
- },
- options: %{
- local_seq: true
- },
- views: %{
- local_seq: %{
- map: "function(doc) {emit(doc._local_seq, null)}"
- },
- blah: %{
- map: blah_fun
- }
- }
- }
-
- create_doc(db_name, ddoc)
- end
-
- defp get_change_rev_at(results, idx) do
- results
- |> Enum.at(idx)
- |> Map.fetch!("changes")
- |> Enum.at(0)
- |> Map.fetch!("rev")
- end
-
- defp open_doc(db_name, id) do
- resp = Couch.get("/#{db_name}/#{id}")
- assert resp.status_code == 200
- resp.body
- end
-
- defp save_doc(db_name, body) do
- resp = Couch.put("/#{db_name}/#{body["_id"]}", body: body)
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
- Map.put(body, "_rev", resp.body["rev"])
- end
-end
diff --git a/test/elixir/test/cluster_with_quorum_test.exs b/test/elixir/test/cluster_with_quorum_test.exs
deleted file mode 100644
index fc3b28a0b..000000000
--- a/test/elixir/test/cluster_with_quorum_test.exs
+++ /dev/null
@@ -1,185 +0,0 @@
-defmodule WithQuorumTest do
- use CouchTestCase
-
- @moduletag :with_quorum_test
-
- @moduledoc """
- Test CouchDB API in a cluster with quorum.
- """
- @tag :with_db_name
- test "Creating/Deleting DB should return 201-Created/202-Acepted", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}")
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
- resp = Couch.delete("/#{db_name}")
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
- end
-
- @tag :with_db_name
- test "Creating-Updating/Deleting doc should return 201-Created/200-OK", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
-
- resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0", :a => 1})
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- resp = Couch.get("/#{context[:db_name]}/0")
- rev = resp.body["_rev"]
-
- resp =
- Couch.put("/#{context[:db_name]}/0", body: %{:_id => "0", :_rev => rev, :a => 2})
-
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- resp = Couch.get("/#{context[:db_name]}/0")
- rev = resp.body["_rev"]
- resp = Couch.delete("/#{context[:db_name]}/0", query: %{:rev => rev})
- msg = "Should return 200-OK"
- assert resp.status_code == 200, msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Creating-Updating/Deleting doc with overriden quorum should return 202-Acepted/200-OK",
- context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
-
- resp =
- Couch.post(
- "/#{context[:db_name]}",
- query: %{:w => 3},
- body: %{:_id => "0", :a => 1}
- )
-
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- resp = Couch.get("/#{context[:db_name]}/0")
- rev = resp.body["_rev"]
-
- resp =
- Couch.put(
- "/#{context[:db_name]}/0",
- query: %{:w => 3},
- body: %{:_id => "0", :_rev => rev, :a => 2}
- )
-
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- resp = Couch.get("/#{context[:db_name]}/0")
- rev = resp.body["_rev"]
- resp = Couch.delete("/#{context[:db_name]}/0", query: %{:w => 1, :rev => rev})
- msg = "Should return 200-Ok"
- assert resp.status_code == 200, msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Copy doc should return 201-Created", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
-
- Couch.post(
- "/#{context[:db_name]}",
- body: %{:_id => "0", :a => 1}
- )
-
- headers = [Destination: "1"]
- resp = Couch.request(:copy, "/#{context[:db_name]}/0", headers: headers)
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
- Couch.delete("/#{db_name}")
- end
-
- @doc_range 1..5
-
- @tag :with_db_name
- test "Bulk docs should return 201-Created", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
- docs = create_docs(@doc_range)
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs})
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Bulk docs overriden quorum should return 202-Acepted", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
- docs = create_docs(@doc_range)
- resp = Couch.post("/#{db_name}/_bulk_docs", query: %{:w => 3}, body: %{docs: docs})
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Attachments should return 201-Created", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
- resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0"})
- rev = resp.body["rev"]
-
- resp =
- Couch.put(
- "/#{context[:db_name]}/0/foo.txt",
- query: %{:rev => rev},
- body: "This is a no bas64 encoded text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- rev = resp.body["rev"]
- resp = Couch.delete("/#{context[:db_name]}/0/foo.txt", query: %{:rev => rev})
- msg = "Should return 200-Ok"
- assert resp.status_code == 200, msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Attachments overriden quorum should return 202-Acepted", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
- resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0"})
- rev = resp.body["rev"]
-
- resp =
- Couch.put(
- "/#{context[:db_name]}/0/foo.txt",
- query: %{:rev => rev, :w => 3},
- body: "This is a no bas64 encoded text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- rev = resp.body["rev"]
-
- resp =
- Couch.delete(
- "/#{context[:db_name]}/0/foo.txt",
- query: %{:rev => rev, :w => 3}
- )
-
- msg = "Should return 200-Ok"
- assert resp.status_code == 200, msg
-
- Couch.delete("/#{db_name}")
- end
-end
diff --git a/test/elixir/test/cluster_without_quorum_test.exs b/test/elixir/test/cluster_without_quorum_test.exs
deleted file mode 100644
index e0095c351..000000000
--- a/test/elixir/test/cluster_without_quorum_test.exs
+++ /dev/null
@@ -1,184 +0,0 @@
-defmodule WithoutQuorumTest do
- use CouchTestCase
-
- @moduletag :without_quorum_test
-
- @moduledoc """
- Test CouchDB API in a cluster without quorum.
- """
- @tag :with_db_name
- test "Creating/Deleting DB should return 202-Acepted", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}")
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
- resp = Couch.delete("/#{db_name}")
- assert resp.status_code == 202, msg
- end
-
- @tag :with_db_name
- test "Creating/Updating/Deleting doc should return 202-Acepted", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
-
- resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0", :a => 1})
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- resp = Couch.get("/#{context[:db_name]}/0")
- rev = resp.body["_rev"]
-
- resp =
- Couch.put("/#{context[:db_name]}/0", body: %{:_id => "0", :_rev => rev, :a => 2})
-
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- resp = Couch.get("/#{context[:db_name]}/0")
- rev = resp.body["_rev"]
- resp = Couch.delete("/#{context[:db_name]}/0", query: %{:rev => rev})
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Creating-Updating/Deleting doc with overriden quorum should return 201-Created/200-OK",
- context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
-
- resp =
- Couch.post(
- "/#{context[:db_name]}",
- query: %{:w => 1},
- body: %{:_id => "0", :a => 1}
- )
-
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- resp = Couch.get("/#{context[:db_name]}/0")
- rev = resp.body["_rev"]
-
- resp =
- Couch.put(
- "/#{context[:db_name]}/0",
- query: %{:w => 1},
- body: %{:_id => "0", :_rev => rev, :a => 2}
- )
-
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- resp = Couch.get("/#{context[:db_name]}/0")
- rev = resp.body["_rev"]
- resp = Couch.delete("/#{context[:db_name]}/0", query: %{:w => 1, :rev => rev})
- msg = "Should return 200-Ok"
- assert resp.status_code == 200, msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Copy doc should return 202-Acepted", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
-
- Couch.post(
- "/#{context[:db_name]}",
- body: %{:_id => "0", :a => 1}
- )
-
- headers = [Destination: "1"]
- resp = Couch.request(:copy, "/#{context[:db_name]}/0", headers: headers)
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
- Couch.delete("/#{db_name}")
- end
-
- @doc_range 1..5
-
- @tag :with_db_name
- test "Bulk docs should return 202-Acepted", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
- docs = create_docs(@doc_range)
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs})
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Bulk docs overriden quorum should return 201-Created", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
- docs = create_docs(@doc_range)
- resp = Couch.post("/#{db_name}/_bulk_docs", query: %{:w => 1}, body: %{docs: docs})
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Attachments should return 202-Acepted", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
- resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0"})
- rev = resp.body["rev"]
-
- resp =
- Couch.put(
- "/#{context[:db_name]}/0/foo.txt",
- query: %{:rev => rev},
- body: "This is a no bas64 encoded text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- msg = "Should return 202-Acepted"
- assert resp.status_code == 202, msg
-
- rev = resp.body["rev"]
- resp = Couch.delete("/#{context[:db_name]}/0/foo.txt", query: %{:rev => rev})
- msg = "Should return 200-Ok"
- assert resp.status_code == 200, msg
-
- Couch.delete("/#{db_name}")
- end
-
- @tag :with_db_name
- test "Attachments overriden quorum should return 201-Created", context do
- db_name = context[:db_name]
- Couch.put("/#{db_name}")
- resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0"})
- rev = resp.body["rev"]
-
- resp =
- Couch.put(
- "/#{context[:db_name]}/0/foo.txt",
- query: %{:rev => rev, :w => 1},
- body: "This is a no bas64 encoded text",
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- msg = "Should return 201-Created"
- assert resp.status_code in [201, 202], msg
-
- rev = resp.body["rev"]
-
- resp =
- Couch.delete(
- "/#{context[:db_name]}/0/foo.txt",
- query: %{:rev => rev, :w => 1}
- )
-
- msg = "Should return 200-Ok"
- assert resp.status_code == 200, msg
-
- Couch.delete("/#{db_name}")
- end
-end
diff --git a/test/elixir/test/coffee_test.exs b/test/elixir/test/coffee_test.exs
deleted file mode 100644
index 3b26f5e59..000000000
--- a/test/elixir/test/coffee_test.exs
+++ /dev/null
@@ -1,73 +0,0 @@
-defmodule CoffeeTest do
- use CouchTestCase
-
- @moduletag :coffee
-
- @moduledoc """
- Test basic coffeescript functionality.
- This is a port of the coffee.js test suite.
- """
-
- @tag :with_db
- test "CoffeeScript basic functionality", context do
- db_name = context[:db_name]
-
- docs = [
- %{:_id => "a", :foo => 100},
- %{:foo => 1},
- %{:foo => 1},
- %{:foo => 2},
- %{:foo => 2},
- %{:bar => 1},
- %{:bar => 1},
- %{:bar => 2},
- %{:bar => 2}
- ]
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs})
-
- design_doc = %{
- :_id => "_design/coffee",
- :language => "coffeescript",
- :views => %{
- :myview => %{
- :map => "(doc) -> if doc.foo\n emit(doc.foo, 1)",
- :reduce =>
- "(keys, values, rereduce) ->\n sum = 0\n for x in values\n sum = sum + x\n sum"
- }
- },
- :shows => %{
- :myshow => "(doc) ->\n \"Foo #\{doc.foo}\""
- },
- :lists => %{
- :mylist =>
- "(head, req) ->\n while row = getRow()\n send(toJSON({\"resp\": \"Foo #\{row.value}\"}))\n return"
- },
- :filters => %{
- :filter => "(doc) ->\n doc.foo"
- }
- }
-
- design_resp = Couch.put("/#{db_name}/_design/coffee", body: design_doc)
- assert design_resp.status_code === 201
-
- assert resp.status_code === 201 and length(resp.body) === length(docs)
-
- retry_until(fn ->
- %{"rows" => values} = Couch.get("/#{db_name}/_design/coffee/_view/myview").body
- assert 5 === hd(values)["value"]
- end)
-
- assert Couch.get("/#{db_name}/_design/coffee/_show/myshow/a").body === "Foo 100"
-
- %{"resp" => list_output} =
- Couch.get("/#{db_name}/_design/coffee/_list/mylist/myview").body
-
- assert list_output === "Foo 5"
-
- %{"results" => changes_results} =
- Couch.get("/#{db_name}/_changes", query: %{"filter" => "coffee/filter"}).body
-
- assert length(changes_results) === 5
- end
-end
diff --git a/test/elixir/test/compact_test.exs b/test/elixir/test/compact_test.exs
deleted file mode 100644
index 461a1d347..000000000
--- a/test/elixir/test/compact_test.exs
+++ /dev/null
@@ -1,88 +0,0 @@
-defmodule CompactTest do
- use CouchTestCase
-
- @moduletag :compact
-
- @moduledoc """
- Test CouchDB compaction
- This is a port of compact.js
- """
-
- @att_doc_id "att_doc"
- @att_name "foo.txt"
- @att_plaintext "This is plain text"
-
- # Need to investigate why compaction is not compacting (or compactor cannot complete)
- # See: https://github.com/apache/couchdb/pull/2127
- @tag :pending
- @tag :skip_on_jenkins
- @tag :with_db
- test "compaction reduces size of deleted docs", context do
- db = context[:db_name]
- docs = populate(db)
- info = get_info(db)
- orig_data_size = info["sizes"]["active"]
- orig_disk_size = info["sizes"]["file"]
- start_time = info["instance_start_time"]
- assert is_integer(orig_data_size) and is_integer(orig_disk_size)
- assert orig_data_size < orig_disk_size
-
- delete(db, docs)
-
- retry_until(fn ->
- deleted_data_size = get_info(db)["data_size"]
- assert deleted_data_size > orig_data_size
- end)
-
- deleted_data_size = get_info(db)["data_size"]
-
- compact(db)
-
- retry_until(fn ->
- assert get_info(db)["instance_start_time"] == start_time
- assert_attachment_available(db)
- info = get_info(db)
- final_data_size = info["sizes"]["active"]
- final_disk_size = info["sizes"]["file"]
- assert final_data_size < final_disk_size
- assert is_integer(final_data_size) and is_integer(final_disk_size)
- assert final_data_size < deleted_data_size
- end)
- end
-
- defp assert_attachment_available(db) do
- resp = Couch.get("/#{db}/#{@att_doc_id}/#{@att_name}")
- assert resp.body == @att_plaintext
- assert resp.headers["content-type"] == "text/plain"
- assert Couch.get("/#{db}").body["doc_count"] == 1
- end
-
- defp populate(db) do
- docs = create_docs(0..19)
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs})
- assert resp.status_code in [201, 202]
- docs = rev(docs, resp.body)
-
- doc = %{
- _id: "#{@att_doc_id}",
- _attachments: %{
- "#{@att_name}": %{content_type: "text/plain", data: Base.encode64(@att_plaintext)}
- }
- }
-
- resp = Couch.put("/#{db}/#{doc._id}", body: doc)
- assert resp.status_code in [201, 202]
- docs
- end
-
- defp delete(db, docs) do
- docs = Enum.map(docs, &Map.put(&1, :_deleted, true))
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs})
- assert resp.status_code in [201, 202]
- assert Couch.post("/#{db}/_ensure_full_commit").body["ok"] == true
- end
-
- defp get_info(db) do
- Couch.get("/#{db}").body
- end
-end
diff --git a/test/elixir/test/config/skip.elixir b/test/elixir/test/config/skip.elixir
deleted file mode 100644
index bb27f13cd..000000000
--- a/test/elixir/test/config/skip.elixir
+++ /dev/null
@@ -1,33 +0,0 @@
-%{
- "CookieAuthTest": [
- "cookie auth"
- ],
- "ProxyAuthTest": [
- "proxy auth with secret",
- "proxy auth without secret"
- ],
- "ReaderACLTest": [
- "unrestricted db can be read"
- ],
- "ReplicationTest": [
- "non-admin user on target - remote-to-remote",
- "non-admin or reader user on source - remote-to-remote",
- "unauthorized replication cancellation"
- ],
- "SecurityValidationTest": [
- "Author presence and user security when replicated"
- ],
- "UsersDbSecurityTest": [
- "user db security"
- ],
- "WithQuorumTest": [
- "Creating/Deleting DB should return 201-Created/202-Acepted"
- ],
- "WithoutQuorumTest": [
- "Attachments should return 202-Acepted",
- "Bulk docs should return 202-Acepted",
- "Copy doc should return 202-Acepted",
- "Creating/Deleting DB should return 202-Acepted",
- "Creating/Updating/Deleting doc should return 202-Acepted"
- ]
-}
diff --git a/test/elixir/test/config/suite.elixir b/test/elixir/test/config/suite.elixir
deleted file mode 100644
index e071da87f..000000000
--- a/test/elixir/test/config/suite.elixir
+++ /dev/null
@@ -1,713 +0,0 @@
-%{
- "AllDocsTest": [
- "All Docs tests",
- "GET with one key",
- "POST boolean",
- "POST edge case with colliding parameters - query takes precedence",
- "POST with empty body",
- "POST with keys and limit",
- "POST with query parameter and JSON body"
- ],
- "AttachmentMultipartTest": [
- "manages attachments multipart requests successfully",
- "manages compressed attachments successfully",
- "multipart attachments with new_edits=false"
- ],
- "AttachmentNamesTest": [
- "saves attachment names successfully"
- ],
- "AttachmentPathsTest": [
- "manages attachment paths successfully",
- "manages attachment paths successfully - design docs"
- ],
- "AttachmentRangesTest": [
- "manages attachment range requests successfully"
- ],
- "AttachmentViewTest": [
- "manages attachments in views successfully"
- ],
- "AttachmentsTest": [
- "COUCHDB-809 - stubs should only require the 'stub' field",
- "attachment via multipart/form-data",
- "delete attachment",
- "delete attachment request with a payload should not block following requests",
- "empty attachments",
- "errors for bad attachment",
- "etags for attachments",
- "implicit doc creation allows creating docs with a reserved id. COUCHDB-565",
- "large attachments COUCHDB-366",
- "md5 header for attachments",
- "reads attachment successfully",
- "saves attachment successfully",
- "saves binary",
- "COUCHDB-497 - empty attachments",
- "update attachment"
- ],
- "AuthCacheTest": [
- "auth cache management"
- ],
- "BasicsTest": [
- "'+' in document name should encode to '+'",
- "'+' in document name should encode to space",
- "A document read with etag works",
- "Can create several documents",
- "Check for invalid document members",
- "Create a document and save it to the database",
- "Created database has appropriate db info name",
- "Creating a new DB should return location header",
- "Creating a new DB with slashes should return Location header (COUCHDB-411)",
- "DELETE'ing a non-existent doc should 404",
- "Database name with '%2B' should encode to '+'",
- "Database name with '+' should encode to '+'",
- "Database should be in _all_dbs",
- "Limit and skip should work in _all_dbs",
- "Default headers are returned for doc with open_revs=all",
- "Empty database should have zero docs",
- "Make sure you can do a seq=true option",
- "On restart, a request for creating an already existing db can not override",
- "POST doc response has a Location header",
- "POST doc with an _id field isn't overwritten by uuid",
- "PUT doc has a Location header",
- "PUT error when body not an object",
- "PUT on existing DB should return 412 instead of 500",
- "Regression test for COUCHDB-954",
- "Revs info status is good",
- "Session contains adm context",
- "Simple map functions",
- "Welcome endpoint",
- "_all_docs POST error when multi-get is not a {'key': [...]} structure",
- "_bulk_docs POST error when body not an object",
- "oops, the doc id got lost in code nirwana"
- ],
- "BatchSaveTest": [
- "batch post",
- "batch put",
- "batch put with identical doc ids"
- ],
- "BulkDocsTest": [
- "bulk docs can create, update, & delete many docs per request",
- "bulk docs can detect conflicts",
- "bulk docs emits conflict error for duplicate doc `_id`s",
- "bulk docs raises conflict error for combined update & delete",
- "bulk docs raises error for `all_or_nothing` option",
- "bulk docs raises error for invlaid `docs` parameter",
- "bulk docs raises error for invlaid `new_edits` parameter",
- "bulk docs raises error for missing `docs` parameter",
- "bulk docs supplies `id` if not provided in doc"
- ],
- "ChangesAsyncTest": [
- "COUCHDB-1852",
- "continuous changes",
- "continuous filtered changes",
- "continuous filtered changes with doc ids",
- "eventsource changes",
- "eventsource heartbeat",
- "live changes",
- "longpoll changes",
- "longpoll filtered changes"
- ],
- "ChangesTest": [
- "COUCHDB-1037-empty result for ?limit=1&filter=foo/bar in some cases",
- "COUCHDB-1256",
- "COUCHDB-1923",
- "Changes feed negative heartbeat",
- "Changes feed non-integer heartbeat",
- "changes fail on invalid payload",
- "changes filtering on custom filter",
- "changes filtering on design docs",
- "changes filtering on docids",
- "changes limit",
- "erlang function filtered changes",
- "function filtered changes",
- "map function filtered changes",
- "non-existing desing doc and funcion for filtered changes",
- "non-existing desing doc for filtered changes",
- "non-existing function for filtered changes"
- ],
- "CoffeeTest": [
- "CoffeeScript basic functionality"
- ],
- "CompactTest": [
- "compaction reduces size of deleted docs"
- ],
- "ConfigTest": [
- "Atoms, binaries, and strings suffice as whitelist sections and keys.",
- "Blacklist is functional",
- "CouchDB respects configured protocols",
- "Keys not in the whitelist may not be modified",
- "Non-2-tuples in the whitelist are ignored",
- "Non-list whitelist values allow further modification of the whitelist",
- "Non-term whitelist values allow further modification of the whitelist",
- "PORT `BUGGED` ?raw tests from config.js",
- "Reload config",
- "Server-side password hashing, and raw updates disabling that",
- "Settings can be altered with undefined whitelist allowing any change",
- "Standard config options are present"
- ],
- "CookieAuthTest": [
- "cookie auth"
- ],
- "CopyDocTest": [
- "Copy doc tests"
- ],
- "DesignDocsQueryTest": [
- "POST edge case with colliding parameters - query takes precedence",
- "POST with empty body",
- "POST with keys and limit",
- "POST with query parameter and JSON body",
- "query _design_docs (GET with no parameters)",
- "query _design_docs descending=false",
- "query _design_docs descending=true",
- "query _design_docs end_key",
- "query _design_docs end_key inclusive_end=false",
- "query _design_docs end_key inclusive_end=false descending",
- "query _design_docs end_key inclusive_end=true",
- "query _design_docs end_key limit",
- "query _design_docs end_key skip",
- "query _design_docs endkey",
- "query _design_docs post with keys",
- "query _design_docs start_key",
- "query _design_docs startkey",
- "query _design_docs update_seq",
- "query _design_docs with multiple key",
- "query _design_docs with single key"
- ],
- "DesignDocsTest": [
- "_all_docs view returns correctly with keys",
- "all_docs_twice",
- "circular commonjs dependencies",
- "commonjs in map functions",
- "commonjs require",
- "consistent _rev for design docs",
- "design doc deletion",
- "language not specified, Javascript is implied",
- "module id values are as expected",
- "startkey and endkey",
- "that we get correct design doc info back",
- "validate doc update"
- ],
- "DesignOptionsTest": [
- "design doc options - include_design default value",
- "design doc options - include_desing=false",
- "design doc options - include_desing=true",
- "design doc options - local_seq=true"
- ],
- "DesignPathTest": [
- "design doc path",
- "design doc path with slash in db name"
- ],
- "ErlangViewsTest": [
- "Erlang map function",
- "Erlang reduce function",
- "Erlang reduce function larger dataset"
- ],
- "EtagsHeadTest": [
- "etag header on creation",
- "etag header on head",
- "etag header on retrieval",
- "etags head"
- ],
- "FormSubmitTest": [
- "form submission gives back invalid content-type"
- ],
- "HelperTest": [
- "retry_until handles assertions",
- "retry_until handles boolean conditions",
- "retry_until times out"
- ],
- "HttpTest": [
- "COUCHDB-708: newlines document names",
- "location header",
- "location header should include X-Forwarded-Host",
- "location header should include custom header"
- ],
- "InvalidDocIDsTest": [
- "_local-prefixed ids are illegal",
- "a PUT request with absent _id is forbidden",
- "accidental POST to form handling code",
- "explicit _bulk_docks policy",
- "invalid _prefix",
- "using a non-string id is forbidden"
- ],
- "JsonpTest": [
- "jsonp chunked callbacks",
- "jsonp not configured callbacks",
- "jsonp unchunked callbacks"
- ],
- "JwtAuthTest": [
- "jwt auth with EC secret",
- "jwt auth with HMAC secret",
- "jwt auth with RSA secret",
- "jwt auth with required iss claim",
- "jwt auth without secret"
- ],
- "ListViewsTest": [
- "COUCHDB-1113",
- "HTTP header response set after getRow() called in _list function",
- "abort iteration with reduce",
- "empty list",
- "extra qs params",
- "get with query params",
- "handling _all_docs by _list functions. the result should be equal",
- "multi-key fetch with GET",
- "multi-key fetch with POST",
- "multiple languages in design docs",
- "no multi-key fetch allowed when group=false",
- "reduce with 0 rows",
- "secObj is available",
- "standard GET",
- "standard OPTIONS",
- "stop iteration",
- "the richness of the arguments",
- "too many Get Rows",
- "we can run lists and views from separate docs",
- "we do multi-key requests on lists and views in separate docs",
- "when there is a reduce present, and used",
- "when there is a reduce present, but not used",
- "with 0 rows",
- "with accept headers for HTML",
- "with include_docs and a reference to the doc"
- ],
- "LocalDocsTest": [
- "GET with multiple keys",
- "GET with no parameters",
- "POST edge case with colliding parameters - query takes precedence",
- "POST with empty body",
- "POST with keys and limit",
- "POST with query parameter and JSON body"
- ],
- "LotsOfDocsTest": [
- "lots of docs with _all_docs",
- "lots of docs with a regular view"
- ],
- "MethodOverrideTest": [
- "Method Override is ignored when original Method isn't POST",
- "method override DELETE",
- "method override PUT"
- ],
- "MultipleRowsTest": [
- "multiple rows"
- ],
- "PartitionAllDocsTest": [
- "all_docs with partitioned:true returns partitioned fields",
- "partition _all_docs with descending",
- "partition _all_docs with key",
- "partition _all_docs with skip",
- "partition _all_docs with timeout",
- "partition _all_docs works with limit",
- "partition all docs can set query limits",
- "partition all_docs errors with incorrect partition supplied",
- "partitioned _all_docs works with keys",
- "partitioned _all_docs works with startkey, endkey range"
- ],
- "PartitionCrudTest": [
- "GET to partition returns 400",
- "POST and GET document",
- "POST and _bulk_get document",
- "POST fails if a partition key is not supplied",
- "PUT and GET document",
- "PUT fails for bad partitions",
- "PUT fails for partitions with _",
- "PUT fails if a partition key is not supplied",
- "Sets partition in db info",
- "_bulk_docs errors with bad doc key",
- "_bulk_docs errors with bad partition key",
- "_bulk_docs errors with missing partition key",
- "_bulk_docs saves docs with partition key",
- "_bulk_get bad partitioned document",
- "can create unpartitioned system db",
- "can purge partitioned db docs",
- "cannot create partitioned system db",
- "create database with bad `partitioned` value",
- "purge rejects unpartitioned docid",
- "saves attachment with partitioned doc"
- ],
- "PartitionDDocTest": [
- "DELETE /dbname/_design/foo",
- "GET /dbname/_all_docs?key=$ddoc_id",
- "GET /dbname/_bulk_get",
- "GET /dbname/_bulk_get with rev",
- "GET /dbname/_design/foo",
- "GET /dbname/_design/foo?rev=$rev",
- "GET /dbname/_design_docs",
- "POST /dbname with design doc",
- "POST /dbname/_bulk_docs with design doc",
- "PUT /dbname/_design/foo",
- "PUT /dbname/_design/foo to update",
- "PUT /dbname/_design/foo/readme.txt"
- ],
- "PartitionDesignDocsTest": [
- "/_partition/:pk/_design/doc 404"
- ],
- "PartitionMangoTest": [
- "explain works with non partitioned db",
- "explain works with partitions",
- "global query does not use partition index",
- "global query uses global index",
- "non-partitioned query using _all_docs and $eq",
- "partitioned _find and _explain with missing partition returns 400",
- "partitioned query does not use global index",
- "partitioned query sends correct errors for sort errors",
- "partitioned query using _all_docs",
- "partitioned query using _all_docs and range scan",
- "partitioned query using _all_docs with $eq",
- "partitioned query using bookmarks",
- "partitioned query using index and range scan",
- "partitioned query with query server config set",
- "query using _id and partition works",
- "query using _id works for global and local query",
- "query with partitioned:true using index and $eq"
- ],
- "PartitionSizeLimitTest": [
- "compacting a full partition works",
- "decreasing partition size disables more writes",
- "fill partition manually",
- "full partition does not affect design documents",
- "full partition does not affect other partitions",
- "full partitions are still readable",
- "full partitions can accept deletes",
- "full partitions can accept updates that reduce size",
- "full partitions reject POST /dbname",
- "full partitions reject POST /dbname/_bulk_docs",
- "full partitions reject PUT /dbname/docid",
- "full partitions with mixed POST /dbname/_bulk_docs",
- "increasing partition size allows more writes",
- "indexing a full partition works",
- "purging docs allows writes",
- "replication into a full partition works"
- ],
- "PartitionSizeTest": [
- "adding docs increases partition sizes",
- "attachments don't affect other partitions",
- "deleting a doc affects partition sizes",
- "design docs do not affect partition sizes",
- "get all partition sizes",
- "get empty partition",
- "get partition size with attachment",
- "partition activity not affect other partition sizes",
- "purging docs decreases partition size",
- "simple partition size",
- "unknown partition return's zero",
- "updating docs affects partition sizes"
- ],
- "PartitionViewUpdateTest": [
- "purge removes view rows",
- "purged conflict changes view rows",
- "query with update=false works",
- "view updates properly remove old keys"
- ],
- "ProxyAuthTest": [
- "proxy auth with secret",
- "proxy auth without secret"
- ],
- "PurgeTest": [
- "COUCHDB-1065",
- "purge documents"
- ],
- "ReaderACLTest": [
- "can't set non string reader names or roles",
- "members can query views",
- "restricted db can be read by authorized users",
- "unrestricted db can be read",
- "works with readers (backwards compat with 1.0)"
- ],
- "RecreateDocTest": [
- "COUCHDB-1265 - changes feed after we try and break the update_seq tree",
- "COUCHDB-292 - recreate a deleted document",
- "Recreate a deleted document with non-exsistant rev",
- "recreate document"
- ],
- "ReduceBuiltinTest": [
- "Builtin count and sum reduce for key as array",
- "Builtin reduce functions",
- "Builtin reduce functions with trailings"
- ],
- "ReduceFalseTest": [
- "Basic reduce functions"
- ],
- "ReduceTest": [
- "Basic reduce functions",
- "More complex array key view row testing",
- "More complex reductions that need to use the combine option",
- "Reduce pagination"
- ],
- "ReplicationTest": [
- "compressed attachment replication - remote-to-remote",
- "continuous replication - remote-to-remote",
- "create_target filter option - remote-to-remote",
- "default headers returned for _scheduler/docs ",
- "default headers returned for _scheduler/jobs",
- "filtered replications - remote-to-remote",
- "non-admin or reader user on source - remote-to-remote",
- "non-admin user on target - remote-to-remote",
- "replicate with since_seq - remote-to-remote",
- "replicating attachment without conflict - COUCHDB-885",
- "replication by doc ids - remote-to-remote",
- "replication cancellation",
- "replication restarts after filter change - COUCHDB-892 - remote-to-remote",
- "simple remote-to-remote replication - remote-to-remote",
- "source database not found with host",
- "unauthorized replication cancellation",
- "validate_doc_update failure replications - remote-to-remote"
- ],
- "ReshardAllDocsTest": [
- "all_docs after splitting all shards on node1",
- "all_docs after splitting the same range on all nodes"
- ],
- "ReshardBasicTest": [
- "basic api querying, no jobs present",
- "check validation of invalid parameters",
- "split q=1 db shards on node1 (1 job)",
- "split q=2 shards on node1 (2 jobs)",
- "toggle global state"
- ],
- "RevStemmingTest": [
- "revs limit is kept after compaction",
- "revs limit produces replication conflict ",
- "revs limit update"
- ],
- "RevisionTest": [
- "`new_edits: false` prevents bulk updates (COUCHDB-1178)",
- "mismatched rev in body and etag returns error",
- "mismatched rev in body and query string returns error",
- "multiple updates with same _rev raise conflict errors"
- ],
- "RewriteJSTest": [
- "Test basic js rewrites on test_rewrite_suite_db",
- "Test basic js rewrites on test_rewrite_suite_db%2Fwith_slashes",
- "early response on test_rewrite_suite_db",
- "early response on test_rewrite_suite_db%2Fwith_slashes",
- "loop on test_rewrite_suite_db",
- "loop on test_rewrite_suite_db%2Fwith_slashes",
- "path relative to server on test_rewrite_suite_db",
- "path relative to server on test_rewrite_suite_db%2Fwith_slashes",
- "requests with body preserve the query string rewrite on test_rewrite_suite_db",
- "requests with body preserve the query string rewrite on test_rewrite_suite_db%2Fwith_slashes"
- ],
- "RewriteTest": [
- "Test basic rewrites on test_rewrite_suite_db",
- "Test basic rewrites on test_rewrite_suite_db%2Fwith_slashes",
- "loop detection on test_rewrite_suite_db",
- "loop detection on test_rewrite_suite_db%2Fwith_slashes",
- "path relative to server on test_rewrite_suite_db",
- "path relative to server on test_rewrite_suite_db%2Fwith_slashes",
- "serial execution is not spuriously counted as loop on test_rewrite_suite_db",
- "serial execution is not spuriously counted as loop on test_rewrite_suite_db%2Fwith_slashes"
- ],
- "SecurityValidationTest": [
- "Author presence and user security",
- "Author presence and user security when replicated",
- "Ddoc writes with admin and replication contexts",
- "Force basic login",
- "Jerry can save a document normally",
- "Non-admin user cannot save a ddoc",
- "Saving document using the wrong credentials",
- "_session API",
- "try to set a wrong value for _security"
- ],
- "ShowDocumentsTest": [
- "JS can't set etag",
- "accept header switching - different mime has different etag",
- "deleted docs",
- "id with slash",
- "list() compatible API",
- "list() compatible API with provides function",
- "missing design doc",
- "registering types works",
- "security object",
- "should keep next result order: chunks + return value + provided chunks + provided return value",
- "show error",
- "show fail with non-existing docid",
- "show query parameters",
- "show with doc",
- "show with doc - etags",
- "show with existing doc",
- "show with missing doc",
- "show with non-existing docid",
- "show without docid",
- "the provides mime matcher",
- "the provides mime matcher without a match"
- ],
- "UTF8Test": [
- "UTF8 support"
- ],
- "UUIDsTest": [
- "Bad Request error when exceeding max UUID count",
- "Method Not Allowed error on POST",
- "cache busting headers are set",
- "can return single uuid",
- "no duplicates in 1,000 UUIDs",
- "sequential uuids are sequential",
- "utc_id uuids are correct",
- "utc_random uuids are roughly random"
- ],
- "UpdateDocumentsTest": [
- "COUCHDB-1229 - allow slashes in doc ids for update handlers",
- "COUCHDB-648 - the code in the JSON response should be honored",
- "GET is not allowed",
- "Insert doc with empty id",
- "Server provides UUID when POSTing without an ID in the URL",
- "base64 response",
- "bump counter",
- "doc can be created",
- "form update via application/x-www-form-urlencoded",
- "in place update",
- "update document",
- "update error invalid path"
- ],
- "UsersDbSecurityTest": [
- "user db security"
- ],
- "UsersDbTest": [
- "users db",
- "users password requirements"
- ],
- "ViewCollationRawTest": [
- "ascending collation order",
- "descending collation order",
- "inclusive_end=false",
- "inclusive_end=true",
- "key query option",
- "raw semantics in key ranges"
- ],
- "ViewCollationTest": [
- "ascending collation order",
- "descending collation order",
- "inclusive_end=false",
- "inclusive_end=true",
- "key query option"
- ],
- "ViewCompactionTest": [
- "view compaction"
- ],
- "ViewConflictsTest": [
- "view conflict"
- ],
- "ViewErrorsTest": [
- "emit undefined key results as null",
- "emit undefined value results as null",
- "error responses for invalid multi-get bodies",
- "exception in map function",
- "infinite loop",
- "query parse error",
- "query view with invalid params",
- "reduce overflow error",
- "temporary view should give error message"
- ],
- "ViewIncludeDocsTest": [
- "COUCHDB-549 - include_docs=true with conflicts=true",
- "Not an error with include_docs=false&reduce=true",
- "Reduce support when reduce=false",
- "emitted _rev controls things",
- "include docs in all_docs",
- "include docs in view",
- "link to another doc from a value",
- "no reduce support"
- ],
- "ViewMultiKeyAllDocsTest": [
- "GET - get invalid rows when the key doesn't exist",
- "POST - get invalid rows when the key doesn't exist",
- "empty keys",
- "keys in GET parameters",
- "keys in GET parameters (descending)",
- "keys in GET parameters (descending, skip, limit)",
- "keys in GET parameters (limit)",
- "keys in GET parameters (skip)",
- "keys in POST body",
- "keys in POST body (descending)",
- "keys in POST body (descending, skip, limit)",
- "keys in POST body (limit)",
- "keys in POST body (skip)"
- ],
- "ViewMultiKeyDesignTest": [
- "GET - invalid parameter combinations get rejected ",
- "POST - invalid parameter combinations get rejected ",
- "argument combinations",
- "dir works",
- "empty keys",
- "keys in GET body (group)",
- "keys in GET parameters",
- "keys in POST body",
- "keys in POST body (group)",
- "limit works",
- "offset works",
- "that a map & reduce containing func support keys when reduce=false",
- "that limiting by startkey_docid and endkey_docid get applied",
- "that missing keys work too"
- ],
- "ViewOffsetTest": [
- "basic view offsets",
- "repeated view offsets"
- ],
- "ViewPaginationTest": [
- "aliases start_key and start_key_doc_id should work",
- "basic view pagination",
- "descending view pagination",
- "descending=false parameter should just be ignored",
- "endkey document id",
- "endkey document id, but with end_key_doc_id alias"
- ],
- "ViewPartitionTest": [
- "conflicting partitions in path and query string rejected",
- "default view query returns partitioned fields",
- "global query works with keys",
- "include_design works correctly",
- "partition query can set query limits",
- "partition query errors with incorrect partition supplied",
- "partition query with descending",
- "partition query with key",
- "partition query with skip",
- "partition query with startkey_docid and endkey_docid",
- "partition query works with limit",
- "partitioned ddoc cannot be used in global query",
- "partitioned query cannot be used with global ddoc",
- "partitioned query works with keys",
- "partitioned query works with startkey, endkey range",
- "query will return zero results for wrong inputs",
- "query with partitioned:true returns partitioned fields",
- "query with reduce works",
- "view query returns all docs for global query"
- ],
- "ViewSandboxingTest": [
- "COUCHDB-925 - altering 'doc' variable in map function affects other map functions",
- "attempting to change the document has no effect",
- "runtime code evaluation can be prevented",
- "view cannot access the map_funs and map_results array",
- "view cannot invoke interpreter internals"
- ],
- "ViewTest": [
- "GET with multiple keys",
- "GET with no parameters",
- "GET with one key",
- "POST edge case with colliding parameters - query takes precedence",
- "POST with boolean parameter",
- "POST with empty body",
- "POST with keys and limit",
- "POST with query parameter and JSON body"
- ],
- "ViewUpdateSeqTest": [
- "_all_docs update seq",
- "db info update seq",
- "view update seq"
- ],
- "WithQuorumTest": [
- "Attachments overriden quorum should return 202-Acepted",
- "Attachments should return 201-Created",
- "Bulk docs overriden quorum should return 202-Acepted",
- "Bulk docs should return 201-Created",
- "Copy doc should return 201-Created",
- "Creating-Updating/Deleting doc should return 201-Created/200-OK",
- "Creating-Updating/Deleting doc with overriden quorum should return 202-Acepted/200-OK",
- "Creating/Deleting DB should return 201-Created/202-Acepted"
- ],
- "WithoutQuorumTest": [
- "Attachments overriden quorum should return 201-Created",
- "Attachments should return 202-Acepted",
- "Bulk docs overriden quorum should return 201-Created",
- "Bulk docs should return 202-Acepted",
- "Copy doc should return 202-Acepted",
- "Creating-Updating/Deleting doc with overriden quorum should return 201-Created/200-OK",
- "Creating/Deleting DB should return 202-Acepted",
- "Creating/Updating/Deleting doc should return 202-Acepted"
- ]
-}
diff --git a/test/elixir/test/config/test-config.ini b/test/elixir/test/config/test-config.ini
deleted file mode 100644
index 1980139d1..000000000
--- a/test/elixir/test/config/test-config.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[chttpd]
-authentication_handlers = {chttpd_auth, jwt_authentication_handler}, {chttpd_auth, proxy_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
diff --git a/test/elixir/test/config_test.exs b/test/elixir/test/config_test.exs
deleted file mode 100644
index 1ad70a8a7..000000000
--- a/test/elixir/test/config_test.exs
+++ /dev/null
@@ -1,184 +0,0 @@
-defmodule ConfigTest do
- use CouchTestCase
-
- @moduletag :config
-
- @moduledoc """
- Test CouchDB config API
- This is a port of the config.js suite
- """
-
- setup do
- # TODO: switch this to _local when that's landed
- config_url = "/_node/node1@127.0.0.1/_config"
- resp = Couch.get(config_url)
- assert resp.status_code == 200
- {:ok, config: resp.body, config_url: config_url}
- end
-
- def set_config(context, section, key, val) do
- set_config(context, section, key, val, 200)
- end
-
- def set_config(context, section, key, val, status_assert) do
- url = "#{context[:config_url]}/#{section}/#{key}"
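- # X-Couch-Persist: false applies the change in memory only and does not write it to the ini file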
- headers = ["X-Couch-Persist": "false"]
- resp = Couch.put(url, headers: headers, body: :jiffy.encode(val))
-
- if status_assert do
- assert resp.status_code == status_assert
- end
-
- resp.body
- end
-
- def get_config(context, section) do
- get_config(context, section, nil, 200)
- end
-
- def get_config(context, section, key) do
- get_config(context, section, key, 200)
- end
-
- def get_config(context, section, key, status_assert) do
- url =
- if key do
- "#{context[:config_url]}/#{section}/#{key}"
- else
- "#{context[:config_url]}/#{section}"
- end
-
- resp = Couch.get(url)
-
- if status_assert do
- assert resp.status_code == status_assert
- end
-
- resp.body
- end
-
- def delete_config(context, section, key) do
- delete_config(context, section, key, 200)
- end
-
- def delete_config(context, section, key, status_assert) do
- url = "#{context[:config_url]}/#{section}/#{key}"
- resp = Couch.delete(url)
-
- if status_assert do
- assert resp.status_code == status_assert
- end
- end
-
- # TODO: port server_port tests from config.js
- @tag :pending
- test "CouchDB respects configured protocols"
-
- test "Standard config options are present", context do
- assert context[:config]["couchdb"]["database_dir"]
- assert context[:config]["chttpd"]["port"]
- end
-
- test "Settings can be altered with undefined whitelist allowing any change", context do
- refute context["config"]["chttpd"]["config_whitelist"], "Default whitelist is empty"
- set_config(context, "test", "foo", "bar")
- assert get_config(context, "test")["foo"] == "bar"
- assert get_config(context, "test", "foo") == "bar"
- end
-
- test "Server-side password hashing, and raw updates disabling that", context do
- plain_pass = "s3cret"
- set_config(context, "admins", "administrator", plain_pass)
- assert Couch.login("administrator", plain_pass)
- hash_pass = get_config(context, "admins", "administrator")
-
- assert Regex.match?(~r/^-pbkdf2-/, hash_pass) or
- Regex.match?(~r/^-hashed-/, hash_pass)
-
- delete_config(context, "admins", "administrator")
- assert Couch.delete("/_session").body["ok"]
- end
-
- @tag :pending
- test "PORT `BUGGED` ?raw tests from config.js"
-
- test "Non-term whitelist values allow further modification of the whitelist", context do
- val = "!This is an invalid Erlang term!"
- set_config(context, "chttpd", "config_whitelist", val)
- assert val == get_config(context, "chttpd", "config_whitelist")
- delete_config(context, "chttpd", "config_whitelist")
- end
-
- test "Non-list whitelist values allow further modification of the whitelist", context do
- val = "{[yes, a_valid_erlang_term, but_unfortunately, not_a_list]}"
- set_config(context, "chttpd", "config_whitelist", val)
- assert val == get_config(context, "chttpd", "config_whitelist")
- delete_config(context, "chttpd", "config_whitelist")
- end
-
- test "Keys not in the whitelist may not be modified", context do
- val = "[{chttpd,config_whitelist}, {test,foo}]"
- set_config(context, "chttpd", "config_whitelist", val)
- assert val == get_config(context, "chttpd", "config_whitelist")
- set_config(context, "test", "foo", "PUT to whitelisted config variable")
- delete_config(context, "test", "foo")
- end
-
- test "Non-2-tuples in the whitelist are ignored", context do
- val =
- "[{chttpd,config_whitelist}, these, {are}, {nOt, 2, tuples}, [so], [they, will], [all, become, noops], {test,foo}]"
-
- set_config(context, "chttpd", "config_whitelist", val)
- assert val == get_config(context, "chttpd", "config_whitelist")
- set_config(context, "test", "foo", "PUT to whitelisted config variable")
- delete_config(context, "test", "foo")
- end
-
- test "Atoms, binaries, and strings suffice as whitelist sections and keys.", context do
- vals = ["{test,foo}", "{\"test\",\"foo\"}", "{<<\"test\">>,<<\"foo\">>}"]
-
- Enum.each(vals, fn pair ->
- set_config(
- context,
- "chttpd",
- "config_whitelist",
- "[{chttpd,config_whitelist}, #{pair}"
- )
-
- pair_format =
- case String.at(pair, 1) do
- "t" -> "tuple"
- "\"" -> "string"
- "<" -> "binary"
- end
-
- set_config(context, "test", "foo", "PUT with #{pair_format}")
- delete_config(context, "test", "foo")
- end)
-
- delete_config(context, "chttpd", "config_whitelist")
- end
-
- test "Blacklist is functional", context do
- sections = [
- "daemons",
- "external",
- "httpd_design_handlers",
- "httpd_db_handlers",
- "native_query_servers",
- "os_daemons",
- "query_servers"
- ]
-
- Enum.each(sections, fn section ->
- set_config(context, section, "wohali", "rules", 403)
- end)
- end
-
- test "Reload config", context do
- url = "#{context[:config_url]}/_reload"
- resp = Couch.post(url)
-
- assert resp.status_code == 200
- end
-end
diff --git a/test/elixir/test/conflicts_test.exs b/test/elixir/test/conflicts_test.exs
deleted file mode 100644
index a45f5c4ed..000000000
--- a/test/elixir/test/conflicts_test.exs
+++ /dev/null
@@ -1,110 +0,0 @@
-defmodule RevisionTest do
- use CouchTestCase
-
- @moduletag :conflicts
-
- @moduledoc """
- Test CouchDB conflicts
- This is a port of conflicts.js
- (but is arguably more focused on revisions than conflicts)
- """
-
- setup context do
- # Generate a doc with _rev field for each test
- doc = %{_id: "doc-1", a: 1, b: 1}
- doc = rev(doc, put(context[:db_name], doc))
- %{doc: doc}
- end
-
- @tag :with_db
- test "multiple updates with same _rev raise conflict errors", context do
- db = context[:db_name]
- doc = context[:doc]
- # doc and doc2 have same _rev
- doc2 = %{doc | a: 2, b: 2}
- # doc updated with new _rev
- _doc = rev(doc, put(db, doc))
-
- retry_until(fn ->
- assert_conflict(Couch.put("/#{db}/#{doc2._id}", body: doc2))
-
- resp = Couch.get("/#{db}/_changes")
- assert length(resp.body["results"]) == 1
-
- doc2 = Map.delete(doc2, :_rev)
- assert_conflict(Couch.put("/#{db}/#{doc2._id}", body: doc2))
- end)
- end
-
- @tag :with_db
- test "mismatched rev in body and query string returns error", context do
- db = context[:db_name]
- doc = context[:doc]
- resp = Couch.put("/#{db}/#{doc._id}?rev=1-foobar", body: doc)
-
- expected_reason =
- "Document rev from request body and query string " <> "have different values"
-
- assert_bad_request(resp, expected_reason)
- end
-
- @tag :with_db
- test "mismatched rev in body and etag returns error", context do
- opts = [body: context[:doc], headers: [{:"If-Match", "1-foobar"}]]
- resp = Couch.put("/#{context[:db_name]}/foobar", opts)
- expected_reason = "Document rev and etag have different values"
- assert_bad_request(resp, expected_reason)
- end
-
- @tag :with_db
- test "`new_edits: false` prevents bulk updates (COUCHDB-1178)", context do
- db = context[:db_name]
-
- ddoc = %{_id: "_design/couchdb-1178", validate_doc_update: "function(){}"}
- assert put(db, ddoc)["ok"] == true
-
- r0 = %{_id: "doc", val: "r0"}
- r1 = %{_id: "doc", val: "r1", _rev: "1-47f3268e7546965196b57572099f4372"}
- r2 = %{_id: "doc", val: "r2", _rev: "2-1d8171ab3a91475cfece749291e6f897"}
- r3 = %{_id: "doc", val: "r3", _rev: "3-3fb0a342d2ce092fdcc77856dbe8a2ef"}
- assert put(db, r0)["ok"] == true
- assert put(db, r1)["ok"] == true
- assert put(db, r2)["ok"] == true
- # N.b. that we *do not* put r3
-
- expected = %{
- "_id" => "doc",
- "_rev" => r3._rev,
- "_revisions" => %{
- "ids" => for(r <- [r3._rev, r2._rev, r1._rev], do: suffix(r)),
- "start" => 3
- },
- "val" => r2.val
- }
-
- assert Couch.get("/#{db}/doc?revs=true").body == expected
-
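- # With new_edits: false, _bulk_docs stores the supplied revisions as-is and returns an empty list when none are rejected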
- opts = [body: %{docs: [r3, r2, r1], new_edits: false}]
- assert Couch.post("/#{db}/_bulk_docs", opts).body == []
- end
-
- defp put(db, doc) do
- Couch.put("/#{db}/#{doc._id}", body: doc).body
- end
-
- defp suffix(rev) do
- hd(tl(String.split(rev, "-")))
- end
-
- defp assert_conflict(resp) do
- assert resp.status_code == 409
- assert resp.body["error"] == "conflict"
- assert resp.body["reason"] == "Document update conflict."
- end
-
- defp assert_bad_request(resp, reason) do
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == reason
- end
-end
diff --git a/test/elixir/test/cookie_auth_test.exs b/test/elixir/test/cookie_auth_test.exs
deleted file mode 100644
index d7971868a..000000000
--- a/test/elixir/test/cookie_auth_test.exs
+++ /dev/null
@@ -1,395 +0,0 @@
-defmodule CookieAuthTest do
- use CouchTestCase
-
- @moduletag :authentication
-
- @users_db "_users"
-
- @moduletag config: [
- {
- "chttpd_auth",
- "authentication_db",
- @users_db
- },
- {
- "couch_httpd_auth",
- "authentication_db",
- @users_db
- },
- {
- "chttpd_auth",
- "iterations",
- "1"
- },
- {
- "admins",
- "jan",
- "apple"
- }
- ]
-
- @password "3.141592653589"
-
- setup do
- # Create the db if it does not already exist
- Couch.put("/#{@users_db}")
-
- retry_until(fn ->
- resp =
- Couch.get(
- "/#{@users_db}/_changes",
- query: [feed: "longpoll", timeout: 5000, filter: "_design"]
- )
- length(resp.body["results"]) > 0
- end)
-
- on_exit(&tear_down/0)
-
- :ok
- end
-
- defp tear_down do
- # delete users
- user = URI.encode("org.couchdb.user:jchris")
- user_doc = Couch.get("/#{@users_db}/#{URI.encode(user)}").body
- Couch.delete("/#{@users_db}/#{user}", query: [rev: user_doc["_rev"]])
-
- user = URI.encode("org.couchdb.user:Jason Davies")
- user_doc = Couch.get("/#{@users_db}/#{user}").body
- Couch.delete("/#{@users_db}/#{user}", query: [rev: user_doc["_rev"]])
- end
-
- defp login(user, password) do
- sess = Couch.login(user, password)
- assert sess.cookie, "expected login to succeed"
- sess
- end
-
- defp logout(session) do
- assert Couch.Session.logout(session).body["ok"]
- end
-
- defp login_as(user) do
- pws = %{
- "jan" => "apple",
- "Jason Davies" => @password,
- "jchris" => "funnybone"
- }
-
- user1 = Regex.replace(~r/[0-9]$/, user, "")
- login(user1, pws[user])
- end
-
- defp create_doc_expect_error(db_name, doc, status_code, msg) do
- resp = Couch.post("/#{db_name}", body: doc)
- assert resp.status_code == status_code
- assert resp.body["error"] == msg
- resp
- end
-
- defp open_as(db_name, doc_id, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- expect_response = Keyword.get(options, :expect_response, 200)
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user)
-
- resp =
- Couch.Session.get(
- session,
- "/#{db_name}/#{URI.encode(doc_id)}"
- )
-
- if use_session == nil do
- logout(session)
- end
-
- assert resp.status_code == expect_response
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp.body
- end
-
- defp save_as(db_name, doc, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- expect_response = Keyword.get(options, :expect_response, [201, 202])
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user)
-
- resp =
- Couch.Session.put(
- session,
- "/#{db_name}/#{URI.encode(doc["_id"])}",
- body: doc
- )
-
- if use_session == nil do
- logout(session)
- end
-
- if is_list(expect_response) do
- assert resp.status_code in expect_response
- else
- assert resp.status_code == expect_response
- end
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp
- end
-
- defp delete_as(db_name, doc, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- expect_response = Keyword.get(options, :expect_response, [200, 202])
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user)
-
- resp =
- Couch.Session.delete(
- session,
- "/#{db_name}/#{URI.encode(doc["_id"])}"
- )
-
- if use_session == nil do
- logout(session)
- end
-
- if is_list(expect_response) do
- assert resp.status_code in expect_response
- else
- assert resp.status_code == expect_response
- end
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp
- end
-
- defp test_change_admin_fun do
- sess = login("jchris", "funnybone")
- info = Couch.Session.info(sess)
- assert info["userCtx"]["name"] == "jchris"
- assert Enum.member?(info["userCtx"]["roles"], "_admin")
- assert Enum.member?(info["userCtx"]["roles"], "foo")
-
- jchris_user_doc =
- open_as(
- @users_db,
- "org.couchdb.user:jchris",
- use_session: sess
- )
-
- jchris_user_doc = Map.drop(jchris_user_doc, [:salt, :password_sha])
- save_as(@users_db, jchris_user_doc, use_session: sess)
- logout(sess)
- sess = login("jchris", "funnybone")
- info = Couch.Session.info(sess)
- assert info["userCtx"]["name"] == "jchris"
- assert Enum.member?(info["userCtx"]["roles"], "_admin")
- assert info["info"]["authenticated"] == "cookie"
- assert info["info"]["authentication_db"] == @users_db
- assert Enum.member?(info["userCtx"]["roles"], "foo")
- logout(sess)
- end
-
- test "cookie auth" do
- # test that the users db is created with the auth ddoc
- ddoc = open_as(@users_db, "_design/_auth", user: "jan")
- assert ddoc["validate_doc_update"] != nil
-
- jason_user_doc =
- prepare_user_doc([
- {:name, "Jason Davies"},
- {:password, @password}
- ])
-
- create_doc(@users_db, jason_user_doc)
- jason_check_doc = open_as(@users_db, jason_user_doc["_id"], user: "jan")
- assert jason_check_doc["name"] == "Jason Davies"
-
- jchris_user_doc =
- prepare_user_doc([
- {:name, "jchris"},
- {:password, "funnybone"}
- ])
-
- {:ok, resp} = create_doc(@users_db, jchris_user_doc)
- jchris_rev = resp.body["rev"]
-
- duplicate_jchris_user_doc =
- prepare_user_doc([
- {:name, "jchris"},
- {:password, "eh, Boo-Boo?"}
- ])
-
- # make sure we can't create duplicate users
- create_doc_expect_error(@users_db, duplicate_jchris_user_doc, 409, "conflict")
-
- # we can't create user names that start with an underscore
- underscore_user_doc =
- prepare_user_doc([
- {:name, "_why"},
- {:password, "copperfield"}
- ])
-
- create_doc_expect_error(@users_db, underscore_user_doc, 403, "forbidden")
-
- # we can't create malformed ids
- bad_id_user_doc =
- prepare_user_doc([
- {:id, "org.apache.couchdb:w00x"},
- {:name, "w00x"},
- {:password, "bar"}
- ])
-
- create_doc_expect_error(@users_db, bad_id_user_doc, 403, "forbidden")
-
- # login works
- session = login_as("Jason Davies")
- info = Couch.Session.info(session)
- assert info["userCtx"]["name"] == "Jason Davies"
- assert not Enum.member?(info["userCtx"]["roles"], "_admin")
-
- # update one's own credentials document
- jason_user_doc =
- jason_user_doc
- |> Map.put("_rev", jason_check_doc["_rev"])
- |> Map.put("foo", 2)
-
- resp = save_as(@users_db, jason_user_doc, use_session: session)
- jason_user_doc_rev = resp.body["rev"]
-
- # can't delete another user's doc unless you are an admin
-
- jchris_user_doc = Map.put(jchris_user_doc, "_rev", jchris_rev)
-
- delete_as(
- @users_db,
- jchris_user_doc,
- use_session: session,
- expect_response: 404,
- error_message: "not_found"
- )
-
- logout(session)
-
- # test redirect on success
- resp =
- Couch.post(
- "/_session",
- query: [next: "/_up"],
- body: %{
- :username => "Jason Davies",
- :password => @password
- }
- )
-
- assert resp.status_code == 302
- assert resp.body["ok"]
- assert String.ends_with?(resp.headers["location"], "/_up")
-
- # test redirect on fail
- resp =
- Couch.post(
- "/_session",
- query: [fail: "/_up"],
- body: %{
- :username => "Jason Davies",
- :password => "foobar"
- }
- )
-
- assert resp.status_code == 302
- assert resp.body["error"] == "unauthorized"
- assert String.ends_with?(resp.headers["location"], "/_up")
-
- session = login("jchris", "funnybone")
- info = Couch.Session.info(session)
- assert info["userCtx"]["name"] == "jchris"
- assert Enum.empty?(info["userCtx"]["roles"])
-
- jason_user_doc =
- jason_user_doc
- |> Map.put("_rev", jason_user_doc_rev)
- |> Map.put("foo", 3)
-
- save_as(
- @users_db,
- jason_user_doc,
- use_session: session,
- expect_response: 404,
- error_message: "not_found"
- )
-
- jchris_user_doc = Map.put(jchris_user_doc, "roles", ["foo"])
-
- save_as(
- @users_db,
- jchris_user_doc,
- use_session: session,
- expect_response: 403,
- error_message: "forbidden"
- )
-
- logout(session)
-
- jchris_user_doc = Map.put(jchris_user_doc, "foo", ["foo"])
-
- resp =
- save_as(
- @users_db,
- jchris_user_doc,
- user: "jan"
- )
-
- # test that you can't save system (underscore) roles even if you are an admin
- jchris_user_doc =
- jchris_user_doc
- |> Map.put("roles", ["_bar"])
- |> Map.put("_rev", resp.body["rev"])
-
- save_as(
- @users_db,
- jchris_user_doc,
- user: "jan",
- expect_response: 403,
- error_message: "forbidden"
- )
-
- session = login("jchris", "funnybone")
- info = Couch.Session.info(session)
-
- assert not Enum.member?(info["userCtx"]["roles"], "_admin")
- assert(Enum.member?(info["userCtx"]["roles"], "foo"))
-
- logout(session)
-
- login("jan", "apple")
-
- run_on_modified_server(
- [
- %{
- :section => "admins",
- :key => "jchris",
- :value => "funnybone"
- }
- ],
- &test_change_admin_fun/0
- )
-
- # log in one last time so run_on_modified_server can clean up the admin account
- login("jan", "apple")
- end
-end
diff --git a/test/elixir/test/copy_doc_test.exs b/test/elixir/test/copy_doc_test.exs
deleted file mode 100644
index 4641ff6ea..000000000
--- a/test/elixir/test/copy_doc_test.exs
+++ /dev/null
@@ -1,71 +0,0 @@
-defmodule CopyDocTest do
- use CouchTestCase
-
- @moduletag :copy_doc
-
- @moduledoc """
- Test CouchDB Copy Doc
- This is a port of the copy_doc.js suite
- """
- @tag :with_db
- test "Copy doc tests", context do
- db_name = context[:db_name]
- create_doc(db_name, %{_id: "doc_to_be_copied", v: 1})
-
- resp =
- Couch.request(
- :copy,
- "/#{db_name}/doc_to_be_copied",
- headers: [Destination: "doc_that_was_copied"]
- )
-
- assert resp.body["ok"]
- assert resp.status_code in [201, 202]
-
- assert Couch.get("/#{db_name}/doc_that_was_copied").body["v"] == 1
-
- create_doc(db_name, %{_id: "doc_to_be_copied2", v: 1})
- {_, resp} = create_doc(db_name, %{_id: "doc_to_be_overwritten", v: 2})
- rev = resp.body["rev"]
-
- resp =
- Couch.request(
- :copy,
- "/#{db_name}/doc_to_be_copied2",
- headers: [Destination: "doc_to_be_overwritten"]
- )
-
- assert resp.status_code == 409
-
- resp =
- Couch.request(
- :copy,
- "/#{db_name}/doc_to_be_copied2"
- )
-
- assert resp.status_code == 400
- assert resp.body["reason"] == "Destination header is mandatory for COPY."
-
- resp =
- Couch.request(
- :copy,
- "/#{db_name}/doc_to_be_copied2",
- headers: [Destination: "http://localhost:5984/#{db_name}/doc_to_be_written"]
- )
-
- assert resp.status_code == 400
- assert resp.body["reason"] == "Destination URL must be relative."
-
- resp =
- Couch.request(
- :copy,
- "/#{db_name}/doc_to_be_copied2",
- headers: [Destination: "doc_to_be_overwritten?rev=#{rev}"]
- )
-
- assert resp.status_code in [201, 202]
- resp = Couch.get("/#{db_name}/doc_to_be_overwritten")
- assert resp.body["_rev"] != rev
- assert resp.body["v"] == 1
- end
-end
diff --git a/test/elixir/test/data/lorem.txt b/test/elixir/test/data/lorem.txt
deleted file mode 100644
index 0ef85bab8..000000000
--- a/test/elixir/test/data/lorem.txt
+++ /dev/null
@@ -1,103 +0,0 @@
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus nunc sapien, porta id pellentesque at, elementum et felis. Curabitur condimentum ante in metus iaculis quis congue diam commodo. Donec eleifend ante sed nulla dapibus convallis. Ut cursus aliquam neque, vel porttitor tellus interdum ut. Sed pharetra lacinia adipiscing. In tristique tristique felis non tincidunt. Nulla auctor mauris a velit cursus ultricies. In at libero quis justo consectetur laoreet. Nullam id ultrices nunc. Donec non turpis nulla, eu lacinia ante. Nunc eu orci et turpis pretium venenatis. Nam molestie, lacus at dignissim elementum, ante libero consectetur libero, ut lacinia lacus urna et purus. Nullam lorem ipsum, dapibus vel ullamcorper a, malesuada a metus. Sed porta adipiscing magna, quis pulvinar purus mattis fringilla. Integer pellentesque sapien in neque tristique ac iaculis libero ultricies. Ut eget pharetra purus.
-
-Nulla in convallis tellus. Proin tincidunt suscipit vulputate. Suspendisse potenti. Nullam tristique justo mi, a tristique ligula. Duis convallis aliquam iaculis. Nulla dictum fringilla congue. Suspendisse ac leo lectus, ac aliquam justo. Ut porttitor commodo mi sed luctus. Nulla at enim lorem. Nunc eu justo sapien, a blandit odio. Curabitur faucibus sollicitudin dolor, id lacinia sem auctor in. Donec varius nunc at lectus sagittis nec luctus arcu pharetra. Nunc sed metus justo. Cras vel mauris diam. Ut feugiat felis eget neque pharetra vestibulum consectetur massa facilisis. Quisque consectetur luctus nisi quis tincidunt. Vivamus cursus cursus quam non blandit. Pellentesque et velit lacus. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
-
-In et dolor vitae orci adipiscing congue. Aliquam gravida nibh at nisl gravida molestie. Curabitur a bibendum sapien. Aliquam tincidunt, nulla nec pretium lobortis, odio augue tincidunt arcu, a lobortis odio sem ut purus. Donec accumsan mattis nunc vitae lacinia. Suspendisse potenti. Integer commodo nisl quis nibh interdum non fringilla dui sodales. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. In hac habitasse platea dictumst. Etiam ullamcorper, mi id feugiat bibendum, purus neque cursus mauris, id sodales quam nisi id velit. Sed lectus leo, tincidunt vel rhoncus imperdiet, blandit in leo. Integer quis magna nulla. Donec vel nisl magna, ut rhoncus dui. Aliquam gravida, nulla nec eleifend luctus, neque nibh pharetra ante, quis egestas elit metus a mi. Nunc nec augue quam. Morbi tincidunt tristique varius. Suspendisse iaculis elit feugiat magna pellentesque ultricies. Vestibulum aliquam tortor non ante ullamcorper fringilla. Donec iaculis mi quis mauris ornare vestibulum.
-
-In a magna nisi, a ultricies massa. Donec elit neque, viverra non tempor quis, fringilla in metus. Integer odio odio, euismod vitae mollis sed, sodales eget libero. Donec nec massa in felis ornare pharetra at nec tellus. Nunc lorem dolor, pretium vel auctor in, volutpat vitae felis. Maecenas rhoncus, orci vel blandit euismod, turpis erat tincidunt ante, elementum adipiscing nisl urna in nisi. Phasellus sagittis, enim sed accumsan consequat, urna augue lobortis erat, non malesuada quam metus sollicitudin ante. In leo purus, dignissim quis varius vel, pellentesque et nibh. In sed tortor iaculis libero mollis pellentesque id vitae lectus. In hac habitasse platea dictumst. Phasellus mauris enim, posuere eget luctus ac, iaculis et quam. Vivamus et nibh diam, elementum egestas tellus. Aenean vulputate malesuada est. Sed posuere porta diam a sodales. Proin eu sem non velit facilisis venenatis sed a turpis.
-
-Pellentesque sed risus a ante vulputate lobortis sit amet eu nisl. Suspendisse ut eros mi, a rhoncus lacus. Curabitur fermentum vehicula tellus, a ornare mi condimentum vel. Integer molestie volutpat viverra. Integer posuere euismod venenatis. Proin ac mauris sed nulla pharetra porttitor. Duis vel dui in risus sodales auctor sit amet non enim. Maecenas mollis lacus at ligula faucibus sodales. Cras vel neque arcu. Sed tincidunt tortor pretium nisi interdum quis dictum arcu laoreet. Morbi pretium ultrices feugiat. Maecenas convallis augue nec felis malesuada malesuada scelerisque mauris placerat. Sed at magna enim, at fringilla dolor. Quisque ut mattis dui. Praesent consectetur ante viverra nisi blandit pharetra. Quisque metus elit, dignissim vitae fermentum sit amet, fringilla imperdiet odio. Cras eget purus eget tellus feugiat luctus a ac purus. Cras vitae nisl vel augue rhoncus porttitor sit amet quis lorem. Donec interdum pellentesque adipiscing. Phasellus neque libero, aliquam in mattis vitae, consectetur adipiscing nibh.
-
-Donec nec nulla urna, ac sagittis lectus. Suspendisse non elit sed mi auctor facilisis vitae et lectus. Fusce ac vulputate mauris. Morbi condimentum ultrices metus, et accumsan purus malesuada at. Maecenas lobortis ante sed massa dictum vitae venenatis elit commodo. Proin tellus eros, adipiscing sed dignissim vitae, tempor eget ante. Aenean id tellus nec magna cursus pharetra vitae vel enim. Morbi vestibulum pharetra est in vulputate. Aliquam vitae metus arcu, id aliquet nulla. Phasellus ligula est, hendrerit nec iaculis ut, volutpat vel eros. Suspendisse vitae urna turpis, placerat adipiscing diam. Phasellus feugiat vestibulum neque eu dapibus. Nulla facilisi. Duis tortor felis, euismod sit amet aliquet in, volutpat nec turpis. Mauris rhoncus ipsum ut purus eleifend ut lobortis lectus dapibus. Quisque non erat lorem. Vivamus posuere imperdiet iaculis. Ut ligula lacus, eleifend at tempor id, auctor eu leo.
-
-Donec mi enim, laoreet pulvinar mollis eu, malesuada viverra nunc. In vitae metus vitae neque tempor dapibus. Maecenas tincidunt purus a felis aliquam placerat. Nulla facilisi. Suspendisse placerat pharetra mattis. Integer tempor malesuada justo at tempus. Maecenas vehicula lorem a sapien bibendum vel iaculis risus feugiat. Pellentesque diam erat, dapibus et pellentesque quis, molestie ut massa. Vivamus iaculis interdum massa id bibendum. Quisque ut mauris dui, sit amet varius elit. Vestibulum elit lorem, rutrum non consectetur ut, laoreet nec nunc. Donec nec mauris ante. Curabitur ut est sed odio pharetra laoreet. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur purus risus, laoreet sed porta id, sagittis vel ipsum. Maecenas nibh diam, cursus et varius sit amet, fringilla sed magna. Nullam id neque eu leo faucibus mollis. Duis nec adipiscing mauris. Suspendisse sollicitudin, enim eu pulvinar commodo, erat augue ultrices mi, a tristique magna sem non libero.
-
-Sed in metus nulla. Praesent nec adipiscing sapien. Donec laoreet, velit non rutrum vestibulum, ligula neque adipiscing turpis, at auctor sapien elit ut massa. Nullam aliquam, enim vel posuere rutrum, justo erat laoreet est, vel fringilla lacus nisi non lectus. Etiam lectus nunc, laoreet et placerat at, venenatis quis libero. Praesent in placerat elit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Pellentesque fringilla augue eu nibh placerat dictum. Nunc porttitor tristique diam, eu aliquam enim aliquet vel. Aliquam lacinia interdum ipsum, in posuere metus luctus vel. Vivamus et nisl a eros semper elementum. Donec venenatis orci at diam tristique sollicitudin. In eu eros sed odio rutrum luctus non nec tellus.
-
-Nulla nec felis elit. Nullam in ipsum in ipsum consequat fringilla quis vel tortor. Phasellus non massa nisi, sit amet aliquam urna. Sed fermentum nibh vitae lacus tincidunt nec tincidunt massa bibendum. Etiam elit dui, facilisis sit amet vehicula nec, iaculis at sapien. Ut at massa id dui ultrices volutpat ut ac libero. Fusce ipsum mi, bibendum a lacinia et, pulvinar eget mauris. Proin faucibus urna ut lorem elementum vulputate. Duis quam leo, malesuada non euismod ut, blandit facilisis mauris. Suspendisse sit amet magna id velit tincidunt aliquet nec eu dolor. Curabitur bibendum lorem vel felis tempus dapibus. Aliquam erat volutpat. Aenean cursus tortor nec dui aliquet porta. Aenean commodo iaculis suscipit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Quisque sit amet ornare elit. Nam ligula risus, vestibulum nec mattis in, condimentum ac ante. Donec fringilla, justo et ultrices faucibus, tellus est volutpat massa, vitae commodo sapien diam non risus. Vivamus at arcu gravida purus mollis feugiat.
-
-Nulla a turpis quis sapien commodo dignissim eu quis justo. Maecenas eu lorem odio, ut hendrerit velit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin facilisis porttitor ullamcorper. Praesent mollis dignissim massa, laoreet aliquet velit pellentesque non. Nunc facilisis convallis tristique. Mauris porttitor ante at tellus convallis placerat. Morbi aliquet nisi ac nisl pulvinar id dictum nisl mollis. Sed ornare sem et risus placerat lobortis id eget elit. Integer consequat, magna id suscipit pharetra, nulla velit suscipit orci, ut interdum augue augue quis quam. Fusce pretium aliquet vulputate. Mauris blandit dictum molestie. Proin nulla nibh, bibendum eu placerat at, tincidunt ac nisl. Nullam vulputate metus ut libero rutrum ultricies. Nunc sit amet dui mauris. Suspendisse adipiscing lacus in augue eleifend mollis.
-
-Duis pretium ultrices mattis. Nam euismod risus a erat lacinia bibendum. Morbi massa tortor, consectetur id eleifend id, pellentesque vel tortor. Praesent urna lorem, porttitor at condimentum vitae, luctus eget elit. Maecenas fringilla quam convallis est hendrerit viverra. Etiam vehicula, sapien non pulvinar adipiscing, nisi massa vestibulum est, id interdum mauris velit eu est. Vestibulum est arcu, facilisis at ultricies non, vulputate id sapien. Vestibulum ipsum metus, pharetra nec pellentesque id, facilisis id sapien. Donec rutrum odio et lacus ultricies ullamcorper. Integer sed est ut mi posuere tincidunt quis non leo. Morbi tellus justo, ultricies sit amet ultrices quis, facilisis vitae magna. Donec ligula metus, pellentesque non tristique ac, vestibulum sed erat. Aliquam erat volutpat.
-
-Nam dignissim, nisl eget consequat euismod, sem lectus auctor orci, ut porttitor lacus dui ac neque. In hac habitasse platea dictumst. Fusce egestas porta facilisis. In hac habitasse platea dictumst. Mauris cursus rhoncus risus ac euismod. Quisque vitae risus a tellus venenatis convallis. Curabitur laoreet sapien eu quam luctus lobortis. Vivamus sollicitudin sodales dolor vitae sodales. Suspendisse pharetra laoreet aliquet. Maecenas ullamcorper orci vel tortor luctus iaculis ut vitae metus. Vestibulum ut arcu ac tellus mattis eleifend eget vehicula elit.
-
-In sed feugiat eros. Donec bibendum ullamcorper diam, eu faucibus mauris dictum sed. Duis tincidunt justo in neque accumsan dictum. Maecenas in rutrum sapien. Ut id feugiat lacus. Nulla facilisi. Nunc ac lorem id quam varius cursus a et elit. Aenean posuere libero eu tortor vehicula ut ullamcorper odio consequat. Sed in dignissim dui. Curabitur iaculis tempor quam nec placerat. Aliquam venenatis nibh et justo iaculis lacinia. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque tempus magna sed mi aliquet eget varius odio congue.
-
-Integer sem sem, semper in vestibulum vitae, lobortis quis erat. Duis ante lectus, fermentum sed tempor sit amet, placerat sit amet sem. Mauris congue tincidunt ipsum. Ut viverra, lacus vel varius pharetra, purus enim pulvinar ipsum, non pellentesque enim justo non erat. Fusce ipsum orci, ultrices sed pellentesque at, hendrerit laoreet enim. Nunc blandit mollis pretium. Ut mollis, nulla aliquam sodales vestibulum, libero lorem tempus tortor, a pellentesque nibh elit a ipsum. Phasellus fermentum ligula at neque adipiscing sollicitudin. Suspendisse id ipsum arcu. Sed tincidunt placerat viverra. Donec libero augue, porttitor sit amet varius eget, rutrum nec lacus. Proin blandit orci sit amet diam dictum id porttitor risus iaculis. Integer lacinia feugiat leo, vitae auctor turpis eleifend vel. Suspendisse lorem quam, pretium id bibendum sed, viverra vitae tortor. Nullam ultricies libero eu risus convallis eget ullamcorper nisi elementum. Mauris nulla elit, bibendum id vulputate vitae, imperdiet rutrum lorem. Curabitur eget dignissim orci. Sed semper tellus ipsum, at blandit dui. Integer dapibus facilisis sodales. Vivamus sollicitudin varius est, quis ornare justo cursus id.
-
-Nunc vel ullamcorper mi. Suspendisse potenti. Nunc et urna a augue scelerisque ultrices non quis mi. In quis porttitor elit. Aenean quis erat nulla, a venenatis tellus. Fusce vestibulum nisi sed leo adipiscing dignissim. Nunc interdum, lorem et lacinia vestibulum, quam est mattis magna, sit amet volutpat elit augue at libero. Cras gravida dui quis velit lobortis condimentum et eleifend ligula. Phasellus ac metus quam, id venenatis mi. Aliquam ut turpis ac tellus dapibus dapibus eu in mi. Quisque eget nibh eros. Fusce consectetur leo velit.
-
-Vestibulum semper egestas mauris. Morbi vestibulum sem sem. Aliquam venenatis, felis sed eleifend porta, mauris diam semper arcu, sit amet ultricies est sapien sit amet libero. Vestibulum dui orci, ornare condimentum mollis nec, molestie ac eros. Proin vitae mollis velit. Praesent eget felis mi. Maecenas eu vulputate nisi. Vestibulum varius, arcu in ultricies vestibulum, nibh leo sagittis odio, ut bibendum nisl mi nec diam. Integer at enim feugiat nulla semper bibendum ut a velit. Proin at nisi ut lorem aliquam varius eget quis elit. Nullam nec odio vel lectus congue consequat adipiscing ac mi. Fusce vitae laoreet libero. Curabitur sit amet sem neque, nec posuere enim. Curabitur at massa a sem gravida iaculis nec et nibh. Sed vitae dui vitae leo tincidunt pretium a aliquam erat. Suspendisse ultricies odio at metus tempor in pellentesque arcu ultricies.
-
-Sed aliquam mattis quam, in vulputate sapien ultrices in. Pellentesque quis velit sed dui hendrerit cursus. Pellentesque non nunc lacus, a semper metus. Fusce euismod velit quis diam suscipit consequat. Praesent commodo accumsan neque. Proin viverra, ipsum non tristique ultrices, velit velit facilisis lorem, vel rutrum neque eros ac nisi. Suspendisse felis massa, faucibus in volutpat ac, dapibus et odio. Pellentesque id tellus sit amet risus ultricies ullamcorper non nec sapien. Nam placerat viverra ullamcorper. Nam placerat porttitor sapien nec pulvinar. Curabitur vel odio sit amet odio accumsan aliquet vitae a lectus. Pellentesque lobortis viverra consequat. Mauris elementum cursus nulla, sit amet hendrerit justo dictum sed. Maecenas diam odio, fringilla ac congue quis, adipiscing ut elit.
-
-Aliquam lorem eros, pharetra nec egestas vitae, mattis nec risus. Mauris arcu massa, sodales eget gravida sed, viverra vitae turpis. Ut ligula urna, euismod ac tincidunt eu, faucibus sed felis. Praesent mollis, ipsum quis rhoncus dignissim, odio sem venenatis nulla, at consequat felis augue vel erat. Nam fermentum feugiat volutpat. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Etiam vitae dui in nisi adipiscing ultricies non eu justo. Donec tristique ultricies adipiscing. Nulla sodales, nunc a tristique elementum, erat neque egestas nisl, at hendrerit orci sapien sed libero. Vivamus a mauris turpis, quis laoreet ipsum. Nunc nec mi et nisl pellentesque scelerisque. Vivamus volutpat, justo tristique lacinia condimentum, erat justo ultrices urna, elementum viverra eros augue non libero. Sed mollis mollis arcu, at fermentum diam suscipit quis.
-
-Etiam sit amet nibh justo, posuere volutpat nunc. Morbi pellentesque neque in orci volutpat eu scelerisque lorem dictum. Mauris mollis iaculis est, nec sagittis sapien consequat id. Nunc nec malesuada odio. Duis quis suscipit odio. Mauris purus dui, sodales id mattis sit amet, posuere in arcu. Phasellus porta elementum convallis. Maecenas at orci et mi vulputate sollicitudin in in turpis. Pellentesque cursus adipiscing neque sit amet commodo. Fusce ut mi eu lectus porttitor volutpat et nec felis.
-
-Curabitur scelerisque eros quis nisl viverra vel ultrices velit vestibulum. Sed lobortis pulvinar sapien ac venenatis. Sed ante nibh, rhoncus eget dictum in, mollis ut nisi. Phasellus facilisis mi non lorem tristique non eleifend sem fringilla. Integer ut augue est. In venenatis tincidunt scelerisque. Etiam ante dui, posuere quis malesuada vitae, malesuada a arcu. Aenean faucibus venenatis sapien, ut facilisis nisi blandit vel. Aenean ac lorem eu sem fermentum placerat. Proin neque purus, aliquet ut tincidunt ut, convallis sit amet eros. Phasellus vehicula ullamcorper enim non vehicula. Etiam porta odio ut ipsum adipiscing egestas id a odio. Pellentesque blandit, sapien ut pulvinar interdum, mi nulla hendrerit elit, in tempor diam enim a urna. In tellus odio, ornare sed condimentum a, mattis eu augue.
-
-Fusce hendrerit porttitor euismod. Donec malesuada egestas turpis, et ultricies felis elementum vitae. Nullam in sem nibh. Nullam ultricies hendrerit justo sit amet lobortis. Sed tincidunt, mauris at ornare laoreet, sapien purus elementum elit, nec porttitor nisl purus et erat. Donec felis nisi, rutrum ullamcorper gravida ac, tincidunt sit amet urna. Proin vel justo vitae eros sagittis bibendum a ut nibh. Phasellus sodales laoreet tincidunt. Maecenas odio massa, condimentum id aliquet ut, rhoncus vel lectus. Duis pharetra consectetur sapien. Phasellus posuere ultricies massa, non rhoncus risus aliquam tempus.
-
-Praesent venenatis magna id sem dictum eu vehicula ipsum vulputate. Sed a convallis sapien. Sed justo dolor, rhoncus vel rutrum mattis, sollicitudin ut risus. Nullam sit amet convallis est. Etiam non tincidunt ligula. Fusce suscipit pretium elit at ullamcorper. Quisque sollicitudin, diam id interdum porta, metus ipsum volutpat libero, id venenatis felis orci non velit. Suspendisse potenti. Mauris rutrum, tortor sit amet pellentesque tincidunt, erat quam ultricies odio, id aliquam elit leo nec leo. Pellentesque justo eros, rutrum at feugiat nec, porta et tellus. Aenean eget metus lectus.
-
-Praesent euismod, turpis quis laoreet consequat, neque ante imperdiet quam, ac semper tortor nibh in nulla. Integer scelerisque eros vehicula urna lacinia ac facilisis mauris accumsan. Phasellus at mauris nibh. Curabitur enim ante, rutrum sed adipiscing hendrerit, pellentesque non augue. In hac habitasse platea dictumst. Nam tempus euismod massa a dictum. Donec sit amet justo ac diam ultricies ultricies. Sed tincidunt erat quis quam tempus vel interdum erat rhoncus. In hac habitasse platea dictumst. Vestibulum vehicula varius sem eget interdum. Cras bibendum leo nec felis venenatis sed pharetra sem feugiat. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed quam orci, mollis eget sagittis accumsan, vulputate sit amet dui. Praesent eu elementum arcu.
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum nisl metus, hendrerit ut laoreet sed, consectetur at purus. Duis interdum congue lobortis. Nullam sed massa porta felis eleifend consequat sit amet nec metus. Aliquam placerat dictum erat at eleifend. Vestibulum libero ante, ullamcorper a porttitor suscipit, accumsan vel nisi. Donec et magna neque. Nam elementum ultrices justo, eget sollicitudin sapien imperdiet eget. Nullam auctor dictum nunc, at feugiat odio vestibulum a. Sed erat nulla, viverra hendrerit commodo id, ullamcorper ac orci. Phasellus pellentesque feugiat suscipit. Etiam egestas fermentum enim. Etiam gravida interdum tellus ac laoreet. Morbi mattis aliquet eros, non tempor erat ullamcorper in. Etiam pulvinar interdum turpis ac vehicula. Sed quam justo, accumsan id consectetur a, aliquet sed leo. Aenean vitae blandit mauris.
-
-In sed eros augue, non rutrum odio. Etiam vitae dui neque, in tristique massa. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Maecenas dictum elit at lectus tempor non pharetra nisl hendrerit. Sed sed quam eu lectus ultrices malesuada tincidunt a est. Nam vel eros risus. Maecenas eros elit, blandit fermentum tempor eget, lobortis id diam. Vestibulum lacinia lacus vitae magna volutpat eu dignissim eros convallis. Vivamus ac velit tellus, a congue neque. Integer mi nulla, varius non luctus in, dictum sit amet sem. Ut laoreet, sapien sit amet scelerisque porta, purus sapien vestibulum nibh, sed luctus libero massa ac elit. Donec iaculis odio eget odio sagittis nec venenatis lorem blandit.
-
-Aliquam imperdiet tellus posuere justo vehicula sed vestibulum ante tristique. Fusce feugiat faucibus purus nec molestie. Nulla tempor neque id magna iaculis quis sollicitudin eros semper. Praesent viverra sagittis luctus. Morbi sit amet magna sed odio gravida varius. Ut nisi libero, vulputate feugiat pretium tempus, egestas sit amet justo. Pellentesque consequat tempor nisi in lobortis. Sed fermentum convallis dui ac sollicitudin. Integer auctor augue eget tellus tempus fringilla. Proin nec dolor sapien, nec tristique nibh. Aliquam a velit at mi mattis aliquet.
-
-Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Aliquam ultrices erat non turpis auctor id ornare mauris sagittis. Quisque porttitor, tellus ut convallis sagittis, mi libero feugiat tellus, rhoncus placerat ipsum tortor id risus. Donec tincidunt feugiat leo. Cras id mi neque, eu malesuada eros. Ut molestie magna quis libero placerat malesuada. Aliquam erat volutpat. Aliquam non mauris lorem, in adipiscing metus. Donec eget ipsum in elit commodo ornare bibendum a nibh. Vivamus odio erat, placerat ac vestibulum eget, malesuada ut nisi. Etiam suscipit sollicitudin leo semper sollicitudin. Sed rhoncus risus sit amet sem eleifend dictum pretium sapien egestas. Nulla at urna nunc, vel aliquet leo. Praesent ultricies, mi eu pretium lobortis, erat nibh euismod leo, sit amet gravida sapien eros et turpis. Donec lacinia venenatis lectus, non lacinia mi hendrerit sit amet. Integer sed felis vel orci aliquam pulvinar. Phasellus et risus id erat euismod tincidunt. Sed luctus tempor nisi, nec tempor ipsum elementum eget. Integer nisl tortor, viverra in dapibus at, mattis ac erat. Curabitur nec dui lectus.
-
-Phasellus suscipit, tortor eu varius fringilla, sapien magna egestas risus, ut suscipit dui mauris quis velit. Cras a sapien quis sapien hendrerit tristique a sit amet elit. Pellentesque dui arcu, malesuada et sodales sit amet, dapibus vel quam. Sed non adipiscing ligula. Ut vulputate purus at nisl posuere sodales. Maecenas diam velit, tincidunt id mattis eu, aliquam ac nisi. Maecenas pretium, augue a sagittis suscipit, leo ligula eleifend dolor, mollis feugiat odio augue non eros. Pellentesque scelerisque orci pretium quam mollis at lobortis dui facilisis. Morbi congue metus id tortor porta fringilla. Sed lorem mi, molestie fermentum sagittis at, gravida a nisi. Donec eu vestibulum velit. In viverra, enim eu elementum sodales, enim odio dapibus urna, eget commodo nisl mauris ut odio. Curabitur nec enim nulla. In nec elit ipsum. Nunc in massa suscipit magna elementum faucibus in nec ipsum. Nullam suscipit malesuada elementum. Etiam sed mi in nibh ultricies venenatis nec pharetra magna. In purus ante, rhoncus vel placerat sed, fermentum sit amet dui. Sed at sodales velit.
-
-Duis suscipit pellentesque pellentesque. Praesent porta lobortis cursus. Quisque sagittis velit non tellus bibendum at sollicitudin lacus aliquet. Sed nibh risus, blandit a aliquet eget, vehicula et est. Suspendisse facilisis bibendum aliquam. Fusce consectetur convallis erat, eget mollis diam fermentum sollicitudin. Quisque tincidunt porttitor pretium. Nullam id nisl et urna vulputate dapibus. Donec quis lorem urna. Quisque id justo nec nunc blandit convallis. Nunc volutpat, massa sollicitudin adipiscing vestibulum, massa urna congue lectus, sit amet ultricies augue orci convallis turpis. Nulla at lorem elit. Nunc tristique, quam facilisis commodo porttitor, lacus ligula accumsan nisi, et laoreet justo ante vitae eros. Curabitur sed augue arcu. Phasellus porttitor vestibulum felis, ut consectetur arcu tempor non. In justo risus, semper et suscipit id, ullamcorper at urna. Quisque tincidunt, urna nec aliquam tristique, nibh odio faucibus augue, in ornare enim turpis accumsan dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse sodales varius turpis eu fermentum.
-
-Morbi ultricies diam eget massa posuere lobortis. Aliquam volutpat pellentesque enim eu porttitor. Donec lacus felis, consectetur a pretium vitae, bibendum non enim. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Etiam ut nibh a quam pellentesque auctor ut id velit. Duis lacinia justo eget mi placerat bibendum. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec velit tortor, tempus nec tristique id, aliquet sit amet turpis. Praesent et neque nec magna porta fringilla. Morbi id egestas eros. Donec semper tincidunt ullamcorper. Phasellus tempus lacinia hendrerit. Quisque faucibus pretium neque non convallis. Nunc malesuada accumsan rhoncus. Cras lobortis, sem sed fringilla convallis, augue velit semper nisl, commodo varius nisi diam ac leo.
-
-Quisque interdum tellus ac ante posuere ut cursus lorem egestas. Nulla facilisi. Aenean sed massa nec nisi scelerisque vulputate. Etiam convallis consectetur iaculis. Maecenas ac purus ut ante dignissim auctor ac quis lorem. Pellentesque suscipit tincidunt orci. Fusce aliquam dapibus orci, at bibendum ipsum adipiscing eget. Morbi pellentesque hendrerit quam, nec placerat urna vulputate sed. Quisque vel diam lorem. Praesent id diam quis enim elementum rhoncus sagittis eget purus. Quisque fringilla bibendum leo in laoreet. Vestibulum id nibh risus, non elementum metus. Ut a felis diam, non mollis nisl. Cras elit ante, ullamcorper quis iaculis eu, sodales vel est. Curabitur quis lobortis dolor. Aliquam mattis gravida metus pellentesque vulputate.
-
-Ut id augue id dolor luctus euismod et quis velit. Maecenas enim dolor, tempus sit amet hendrerit eu, faucibus vitae neque. Proin sit amet varius elit. Proin varius felis ullamcorper purus dignissim consequat. Cras cursus tempus eros. Nunc ultrices venenatis ullamcorper. Aliquam et feugiat tellus. Phasellus sit amet vestibulum elit. Phasellus ac purus lacus, et accumsan eros. Morbi ultrices, purus a porta sodales, odio metus posuere neque, nec elementum risus turpis sit amet magna. Sed est quam, ultricies at congue adipiscing, lobortis in justo. Proin iaculis dictum nunc, eu laoreet quam varius vitae. Donec sit amet feugiat turpis. Mauris sit amet magna quam, ac consectetur dui. Curabitur eget magna tellus, eu pharetra felis. Donec sit amet tortor nisl. Aliquam et tortor facilisis lacus tincidunt commodo. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Curabitur nunc magna, ultricies id convallis at, ullamcorper vitae massa.
-
-Phasellus viverra iaculis placerat. Nulla consequat dolor sit amet erat dignissim posuere. Nulla lacinia augue vitae mi tempor gravida. Phasellus non tempor tellus. Quisque non enim semper tortor sagittis facilisis. Aliquam urna felis, egestas at posuere nec, aliquet eu nibh. Praesent sed vestibulum enim. Mauris iaculis velit dui, et fringilla enim. Nulla nec nisi orci. Sed volutpat, justo eget fringilla adipiscing, nisl nulla condimentum libero, sed sodales est est et odio. Cras ipsum dui, varius eu elementum consequat, faucibus in leo. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
-
-Ut malesuada molestie eleifend. Curabitur id enim dui, eu tincidunt nibh. Mauris sit amet ante leo. Duis turpis ipsum, bibendum sed mattis sit amet, accumsan quis dolor. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Aenean a imperdiet metus. Quisque sollicitudin felis id neque tempor scelerisque. Donec at orci felis. Vivamus tempus convallis auctor. Donec interdum euismod lobortis. Sed at lacus nec odio dignissim mollis. Sed sapien orci, porttitor tempus accumsan vel, tincidunt nec ante. Nunc rhoncus egestas dapibus. Suspendisse fermentum dictum fringilla. Nullam nisi justo, eleifend a consectetur convallis, porttitor et tortor. Proin vitae lorem non dolor suscipit lacinia eu eget nulla.
-
-Suspendisse egestas, sapien sit amet blandit scelerisque, nulla arcu tristique dui, a porta justo quam vitae arcu. In metus libero, bibendum non volutpat ut, laoreet vel turpis. Nunc faucibus velit eu ipsum commodo nec iaculis eros volutpat. Vivamus congue auctor elit sed suscipit. Duis commodo, libero eu vestibulum feugiat, leo mi dapibus tellus, in placerat nisl dui at est. Vestibulum viverra tristique lorem, ornare egestas erat rutrum a. Nullam at augue massa, ut consectetur ipsum. Pellentesque malesuada, velit ut lobortis sagittis, nisi massa semper odio, malesuada semper purus nisl vel lectus. Nunc dui sem, mattis vitae laoreet vitae, sollicitudin ac leo. Nulla vel fermentum est.
-
-Vivamus in odio a nisi dignissim rhoncus in in lacus. Donec et nisl tortor. Donec sagittis consequat mi, vel placerat tellus convallis id. Aliquam facilisis rutrum nisl sed pretium. Donec et lacinia nisl. Aliquam erat volutpat. Curabitur ac pulvinar tellus. Nullam varius lobortis porta. Cras dapibus, ligula ut porta ultricies, leo lacus viverra purus, quis mollis urna risus eu leo. Nunc malesuada consectetur purus, vel auctor lectus scelerisque posuere. Maecenas dui massa, vestibulum bibendum blandit non, interdum eget mauris. Phasellus est ante, pulvinar at imperdiet quis, imperdiet vel urna. Quisque eget volutpat orci. Quisque et arcu purus, ut faucibus velit.
-
-Praesent sed ipsum urna. Praesent sagittis varius magna, id commodo dolor malesuada ac. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque sit amet nunc eu sem ornare tempor. Mauris id dolor nec erat convallis porta in lobortis nisi. Curabitur hendrerit rhoncus tortor eu hendrerit. Pellentesque eu ante vel elit luctus eleifend quis viverra nulla. Suspendisse odio diam, euismod eu porttitor molestie, sollicitudin sit amet nulla. Sed ante urna, dictum bibendum rhoncus et, blandit nec ante. Suspendisse tortor augue, accumsan quis suscipit id, accumsan sit amet erat. Donec pharetra varius lobortis. Maecenas ipsum diam, faucibus eu tempus id, convallis nec enim. Duis arcu turpis, fringilla nec egestas ut, dignissim tristique nulla. Curabitur suscipit dui non justo ultrices pharetra. Aliquam erat volutpat. Nulla facilisi. Quisque id felis eu sem aliquam fringilla.
-
-Etiam quis augue in tellus consequat eleifend. Aenean dignissim congue felis id elementum. Duis fringilla varius ipsum, nec suscipit leo semper vel. Ut sollicitudin, orci a tincidunt accumsan, diam lectus laoreet lacus, vel fermentum quam est vel eros. Aliquam fringilla sapien ac sapien faucibus convallis. Aliquam id nunc eu justo consequat tincidunt. Quisque nec nisl dui. Phasellus augue lectus, varius vitae auctor vel, rutrum at risus. Vivamus lacinia leo quis neque ultrices nec elementum felis fringilla. Proin vel porttitor lectus.
-
-Curabitur sapien lorem, mollis ut accumsan non, ultricies et metus. Curabitur vel lorem quis sapien fringilla laoreet. Morbi id urna ac orci elementum blandit eget volutpat neque. Pellentesque sem odio, iaculis eu pharetra vitae, cursus in quam. Nulla molestie ligula id massa luctus et pulvinar nisi pulvinar. Nunc fermentum augue a lacus fringilla rhoncus porttitor erat dictum. Nunc sit amet tellus et dui viverra auctor euismod at nisl. In sed congue magna. Proin et tortor ut augue placerat dignissim a eu justo. Morbi porttitor porta lobortis. Pellentesque nibh lacus, adipiscing ut tristique quis, consequat vitae velit. Maecenas ut luctus libero. Vivamus auctor odio et erat semper sagittis. Vivamus interdum velit in risus mattis quis dictum ante rhoncus. In sagittis porttitor eros, at lobortis metus ultrices vel. Curabitur non aliquam nisl. Vestibulum luctus feugiat suscipit. Etiam non lacus vel nulla egestas iaculis id quis risus.
-
-Etiam in auctor urna. Fusce ultricies molestie convallis. In hac habitasse platea dictumst. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Mauris iaculis lorem faucibus purus gravida at convallis turpis sollicitudin. Suspendisse at velit lorem, a fermentum ipsum. Etiam condimentum, dui vel condimentum elementum, sapien sem blandit sapien, et pharetra leo neque et lectus. Nunc viverra urna iaculis augue ultrices ac porttitor lacus dignissim. Aliquam ut turpis dui. Sed eget aliquet felis. In bibendum nibh sit amet sapien accumsan accumsan pharetra magna molestie.
-
-Mauris aliquet urna eget lectus adipiscing at congue turpis consequat. Vivamus tincidunt fermentum risus et feugiat. Nulla molestie ullamcorper nibh sed facilisis. Phasellus et cursus purus. Nam cursus, dui dictum ultrices viverra, erat risus varius elit, eu molestie dui eros quis quam. Aliquam et ante neque, ac consectetur dui. Donec condimentum erat id elit dictum sed accumsan leo sagittis. Proin consequat congue risus, vel tincidunt leo imperdiet eu. Vestibulum malesuada turpis eu metus imperdiet pretium. Aliquam condimentum ultrices nibh, eu semper enim eleifend a. Etiam condimentum nisl quam.
-
-Pellentesque id molestie nisl. Maecenas et lectus at justo molestie viverra sit amet sit amet ligula. Nullam non porttitor magna. Quisque elementum arcu cursus tortor rutrum lobortis. Morbi sit amet lectus vitae enim euismod dignissim eget at neque. Vivamus consequat vehicula dui, vitae auctor augue dignissim in. In tempus sem quis justo tincidunt sit amet auctor turpis lobortis. Pellentesque non est nunc. Vestibulum mollis fringilla interdum. Maecenas ipsum dolor, pharetra id tristique mattis, luctus vitae urna. Ut ullamcorper arcu eget elit convallis mollis. Pellentesque condimentum, massa ac hendrerit tempor, mauris purus blandit justo, et pharetra leo justo a est. Duis arcu augue, facilisis vel dignissim sed, aliquam quis magna. Quisque non consequat dolor. Suspendisse a ultrices leo.
-
-Donec vitae pretium nibh. Maecenas bibendum bibendum diam in placerat. Ut accumsan, mi vitae vestibulum euismod, nunc justo vulputate nisi, non placerat mi urna et diam. Maecenas malesuada lorem ut arcu mattis mollis. Nulla facilisi. Donec est leo, bibendum eu pulvinar in, cursus vel metus. Aliquam erat volutpat. Nullam feugiat porttitor neque in vulputate. Quisque nec mi eu magna consequat cursus non at arcu. Etiam risus metus, sollicitudin et ultrices at, tincidunt sed nunc. Sed eget scelerisque augue. Ut fringilla venenatis sem non eleifend. Nunc mattis, risus sit amet vulputate varius, risus justo egestas mauris, id interdum odio ipsum et nisl. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi id erat odio, nec pulvinar enim.
-
-Curabitur ac fermentum quam. Morbi eu eros sapien, vitae tempus dolor. Mauris vestibulum blandit enim ut venenatis. Aliquam egestas, eros at consectetur tincidunt, lorem augue iaculis est, nec mollis felis arcu in nunc. Sed in odio sed libero pellentesque volutpat vitae a ante. Morbi commodo volutpat tellus, ut viverra purus placerat fermentum. Integer iaculis facilisis arcu, at gravida lorem bibendum at. Aenean id eros eget est sagittis convallis sed et dui. Donec eu pulvinar tellus. Nunc dignissim rhoncus tellus, at pellentesque metus luctus at. Sed ornare aliquam diam, a porttitor leo sollicitudin sed. Nam vitae lectus lacus. Integer adipiscing quam neque, blandit posuere libero. Sed libero nunc, egestas sodales tempus sed, cursus blandit tellus. Vestibulum mi purus, ultricies quis placerat vel, molestie at dui.
-
-Nulla commodo odio justo. Pellentesque non ornare diam. In consectetur sapien ac nunc sagittis malesuada. Morbi ullamcorper tempor erat nec rutrum. Duis ut commodo justo. Cras est orci, consectetur sed interdum sed, scelerisque sit amet nulla. Vestibulum justo nulla, pellentesque a tempus et, dapibus et arcu. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique, eros nec congue adipiscing, ligula sem rhoncus felis, at ornare tellus mauris ac risus. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin mauris dui, tempor fermentum dictum et, cursus a leo. Maecenas nec nisl a tellus pellentesque rhoncus. Nullam ultrices euismod dui eu congue.
-
-In nec tempor risus. In faucibus nisi eget diam dignissim consequat. Donec pulvinar ante nec enim mattis rutrum. Vestibulum leo augue, molestie nec dapibus in, dictum at enim. Integer aliquam, lorem eu vulputate lacinia, mi orci tempor enim, eget mattis ligula magna a magna. Praesent sed erat ut tortor interdum viverra. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla facilisi. Maecenas sit amet lectus lacus. Nunc vitae purus id ligula laoreet condimentum. Duis auctor tortor vel dui pulvinar a facilisis arcu dignissim. In hac habitasse platea dictumst. Donec sollicitudin pellentesque egestas. Sed sed sem justo. Maecenas laoreet hendrerit mauris, ut porttitor lorem iaculis ac. Quisque molestie sem quis lorem tempor rutrum. Phasellus nibh mauris, rhoncus in consectetur non, aliquet eu massa.
-
-Curabitur velit arcu, pretium porta placerat quis, varius ut metus. Vestibulum vulputate tincidunt justo, vitae porttitor lectus imperdiet sit amet. Vivamus enim dolor, sollicitudin ut semper non, ornare ornare dui. Aliquam tempor fermentum sapien eget condimentum. Curabitur laoreet bibendum ante, in euismod lacus lacinia eu. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse potenti. Sed at libero eu tortor tempus scelerisque. Nulla facilisi. Nullam vitae neque id justo viverra rhoncus pretium at libero. Etiam est urna, aliquam vel pulvinar non, ornare vel purus.
-
-Nulla varius, nisi eget condimentum semper, metus est dictum odio, vel mattis risus est sed velit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nunc non est nec tellus ultricies mattis ut eget velit. Integer condimentum ante id lorem blandit lacinia. Donec vel tortor augue, in condimentum nisi. Pellentesque pellentesque nulla ut nulla porttitor quis sodales enim rutrum. Sed augue risus, euismod a aliquet at, vulputate non libero. Nullam nibh odio, dignissim fermentum pulvinar ac, congue eu mi. Duis tincidunt, nibh id venenatis placerat, diam turpis gravida leo, sit amet mollis massa dolor quis mauris. Vivamus scelerisque sodales arcu et dapibus. Suspendisse potenti. Cras quis tellus arcu, quis laoreet sem. Fusce porttitor, sapien vel tristique sodales, velit leo porta arcu, quis pellentesque nunc metus non odio. Nam arcu libero, ullamcorper ut pharetra non, dignissim et velit. Quisque dolor lorem, vehicula sit amet scelerisque in, varius at nulla. Pellentesque vitae sem eget tortor iaculis pulvinar. Sed nunc justo, euismod gravida pulvinar eget, gravida eget turpis. Cras vel dictum nisi. Nullam nulla libero, gravida sit amet aliquam quis, commodo vitae odio. Cras vitae nibh nec dui placerat semper.
-
-Vivamus at fringilla eros. Vivamus at nisl id massa commodo feugiat quis non massa. Morbi tellus urna, auctor sit amet elementum sed, rutrum non lectus. Nulla feugiat dui in sapien ornare et imperdiet est ornare. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum semper rutrum tempor. Sed in felis nibh, sed aliquam enim. Curabitur ut quam scelerisque velit placerat dictum. Donec eleifend vehicula purus, eu vestibulum sapien rutrum eu. Vivamus in odio vel est vulputate iaculis. Nunc rutrum feugiat pretium.
-
-Maecenas ipsum neque, auctor quis lacinia vitae, euismod ac orci. Donec molestie massa consequat est porta ac porta purus tincidunt. Nam bibendum leo nec lacus mollis non condimentum dolor rhoncus. Nulla ac volutpat lorem. Nullam erat purus, convallis eget commodo id, varius quis augue. Nullam aliquam egestas mi, vel suscipit nisl mattis consequat. Quisque vel egestas sapien. Nunc lorem velit, convallis nec laoreet et, aliquet eget massa. Nam et nibh ac dui vehicula aliquam quis eu augue. Cras vel magna ut elit rhoncus interdum iaculis volutpat nisl. Suspendisse arcu lorem, varius rhoncus tempor id, pulvinar sed tortor. Pellentesque ultricies laoreet odio ac dignissim. Aliquam diam arcu, placerat quis egestas eget, facilisis eu nunc. Mauris vulputate, nisl sit amet mollis interdum, risus tortor ornare orci, sed egestas orci eros non diam. Vestibulum hendrerit, metus quis placerat pellentesque, enim purus faucibus dui, sit amet ultricies lectus ipsum id lorem. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Praesent eget diam odio, eu bibendum elit. In vestibulum orci eu erat tincidunt tristique.
-
-Cras consectetur ante eu turpis placerat sollicitudin. Mauris et lacus tortor, eget pharetra velit. Donec accumsan ultrices tempor. Donec at nibh a elit condimentum dapibus. Integer sit amet vulputate ante. Suspendisse potenti. In sodales laoreet massa vitae lacinia. Morbi vel lacus feugiat arcu vulputate molestie. Aliquam massa magna, ullamcorper accumsan gravida quis, rhoncus pulvinar nulla. Praesent sit amet ipsum diam, sit amet lacinia neque. In et sapien augue. Etiam enim elit, ultrices vel rutrum id, scelerisque non enim.
-
-Proin et egestas neque. Praesent et ipsum dolor. Nunc non varius nisl. Fusce in tortor nisi. Maecenas convallis neque in ligula blandit quis vehicula leo mollis. Pellentesque sagittis blandit leo, dapibus pellentesque leo ultrices ac. Curabitur ac egestas libero. Donec pretium pharetra pretium. Fusce imperdiet, turpis eu aliquam porta, ante elit eleifend risus, luctus auctor arcu ante ut nunc. Vivamus in leo felis, vitae eleifend lacus. Donec tempus aliquam purus porttitor tristique. Suspendisse diam neque, suscipit feugiat fringilla non, eleifend sit nullam.
diff --git a/test/elixir/test/data/lorem_b64.txt b/test/elixir/test/data/lorem_b64.txt
deleted file mode 100644
index 8a21d79e6..000000000
--- a/test/elixir/test/data/lorem_b64.txt
+++ /dev/null
@@ -1 +0,0 @@
-TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGhhc2VsbHVzIG51bmMgc2FwaWVuLCBwb3J0YSBpZCBwZWxsZW50ZXNxdWUgYXQsIGVsZW1lbnR1bSBldCBmZWxpcy4gQ3VyYWJpdHVyIGNvbmRpbWVudHVtIGFudGUgaW4gbWV0dXMgaWFjdWxpcyBxdWlzIGNvbmd1ZSBkaWFtIGNvbW1vZG8uIERvbmVjIGVsZWlmZW5kIGFudGUgc2VkIG51bGxhIGRhcGlidXMgY29udmFsbGlzLiBVdCBjdXJzdXMgYWxpcXVhbSBuZXF1ZSwgdmVsIHBvcnR0aXRvciB0ZWxsdXMgaW50ZXJkdW0gdXQuIFNlZCBwaGFyZXRyYSBsYWNpbmlhIGFkaXBpc2NpbmcuIEluIHRyaXN0aXF1ZSB0cmlzdGlxdWUgZmVsaXMgbm9uIHRpbmNpZHVudC4gTnVsbGEgYXVjdG9yIG1hdXJpcyBhIHZlbGl0IGN1cnN1cyB1bHRyaWNpZXMuIEluIGF0IGxpYmVybyBxdWlzIGp1c3RvIGNvbnNlY3RldHVyIGxhb3JlZXQuIE51bGxhbSBpZCB1bHRyaWNlcyBudW5jLiBEb25lYyBub24gdHVycGlzIG51bGxhLCBldSBsYWNpbmlhIGFudGUuIE51bmMgZXUgb3JjaSBldCB0dXJwaXMgcHJldGl1bSB2ZW5lbmF0aXMuIE5hbSBtb2xlc3RpZSwgbGFjdXMgYXQgZGlnbmlzc2ltIGVsZW1lbnR1bSwgYW50ZSBsaWJlcm8gY29uc2VjdGV0dXIgbGliZXJvLCB1dCBsYWNpbmlhIGxhY3VzIHVybmEgZXQgcHVydXMuIE51bGxhbSBsb3JlbSBpcHN1bSwgZGFwaWJ1cyB2ZWwgdWxsYW1jb3JwZXIgYSwgbWFsZXN1YWRhIGEgbWV0dXMuIFNlZCBwb3J0YSBhZGlwaXNjaW5nIG1hZ25hLCBxdWlzIHB1bHZpbmFyIHB1cnVzIG1hdHRpcyBmcmluZ2lsbGEuIEludGVnZXIgcGVsbGVudGVzcXVlIHNhcGllbiBpbiBuZXF1ZSB0cmlzdGlxdWUgYWMgaWFjdWxpcyBsaWJlcm8gdWx0cmljaWVzLiBVdCBlZ2V0IHBoYXJldHJhIHB1cnVzLgoKTnVsbGEgaW4gY29udmFsbGlzIHRlbGx1cy4gUHJvaW4gdGluY2lkdW50IHN1c2NpcGl0IHZ1bHB1dGF0ZS4gU3VzcGVuZGlzc2UgcG90ZW50aS4gTnVsbGFtIHRyaXN0aXF1ZSBqdXN0byBtaSwgYSB0cmlzdGlxdWUgbGlndWxhLiBEdWlzIGNvbnZhbGxpcyBhbGlxdWFtIGlhY3VsaXMuIE51bGxhIGRpY3R1bSBmcmluZ2lsbGEgY29uZ3VlLiBTdXNwZW5kaXNzZSBhYyBsZW8gbGVjdHVzLCBhYyBhbGlxdWFtIGp1c3RvLiBVdCBwb3J0dGl0b3IgY29tbW9kbyBtaSBzZWQgbHVjdHVzLiBOdWxsYSBhdCBlbmltIGxvcmVtLiBOdW5jIGV1IGp1c3RvIHNhcGllbiwgYSBibGFuZGl0IG9kaW8uIEN1cmFiaXR1ciBmYXVjaWJ1cyBzb2xsaWNpdHVkaW4gZG9sb3IsIGlkIGxhY2luaWEgc2VtIGF1Y3RvciBpbi4gRG9uZWMgdmFyaXVzIG51bmMgYXQgbGVjdHVzIHNhZ2l0dGlzIG5lYyBsdWN0dXMgYXJjdSBwaGFyZXRyYS4gTnVuYyBzZWQgbWV0dXMganVzdG8uIENyYXMgdmVsIG1hdXJpcyBkaWFtLiBVdCBmZXVnaWF0IGZlbGlzIGVnZXQgbmVxdWUgcGhhcmV0cmEgdmVzdGlidWx1bSBjb25zZWN0ZXR1ciBtYXNzYSBmYWNpbGlzaXMuIFF1aXNxdWUgY29uc2VjdGV0dXIgbHVjdHVzIG5pc2kgcXVpcyB0aW5jaWR1bnQuIFZpdmFtdXMgY3Vyc3VzIGN1cnN1cyBxdWFtIG5vbiBibGFuZGl0LiBQZWxsZW50ZXNxdWUgZXQgdmVsaXQgbGFjdXMuIFBlbGxlbnRlc3F1ZSBoYWJpdGFudCBtb3JiaSB0cmlzdGlxdWUgc2VuZWN0dXMgZXQgbmV0dXMgZXQgbWFsZXN1YWRhIGZhbWVzIGFjIHR1cnBpcyBlZ2VzdGFzLgoKSW4gZXQgZG9sb3Igdml0YWUgb3JjaSBhZGlwaXNjaW5nIGNvbmd1ZS4gQWxpcXVhbSBncmF2aWRhIG5pYmggYXQgbmlzbCBncmF2aWRhIG1vbGVzdGllLiBDdXJhYml0dXIgYSBiaWJlbmR1bSBzYXBpZW4uIEFsaXF1YW0gdGluY2lkdW50LCBudWxsYSBuZWMgcHJldGl1bSBsb2JvcnRpcywgb2RpbyBhdWd1ZSB0aW5jaWR1bnQgYXJjdSwgYSBsb2JvcnRpcyBvZGlvIHNlbSB1dCBwdXJ1cy4gRG9uZWMgYWNjdW1zYW4gbWF0dGlzIG51bmMgdml0YWUgbGFjaW5pYS4gU3VzcGVuZGlzc2UgcG90ZW50aS4gSW50ZWdlciBjb21tb2RvIG5pc2wgcXVpcyBuaWJoIGludGVyZHVtIG5vbiBmcmluZ2lsbGEgZHVpIHNvZGFsZXMuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEgZGljdHVtc3QuIEV0aWFtIHVsbGFtY29ycGVyLCBtaSBpZCBmZXVnaWF0IGJpYmVuZHVtLCBwdXJ1cyBuZXF1ZSBjdXJzdXMgbWF1cmlzLCBpZCBzb2RhbGVzIHF1YW0gbmlzaSBpZCB2ZWxpdC4gU2VkIGxlY3R1cyBsZW8sIHRpbmNpZHVudCB2ZWwgcmhvbmN1cyBpbXBlcmRpZXQsIGJsYW5kaXQgaW4gbGVvLiBJbnRlZ2VyIHF1aXMgbWFnbmEgbnVsbGEuIERvbmVjIHZlbCBuaXNsIG1hZ25hLCB1dCByaG9uY3VzIGR1aS4gQWxpcXVhbSBncmF2aWRhLCBudWxsYSBuZWMgZWxlaWZlbmQgbHVjdHVzLCBuZXF1ZSBuaWJoIHBoYXJldHJhIGFudGUsIHF1aXMgZWdlc3RhcyBlbGl0IG1ldHVzIGEgbWkuIE51bmMgbmVjIGF1Z3VlIHF1YW0uIE1vcmJpIHRpbmNpZHVudCB0cmlzdGlxdWUgdmFyaXVzLiBTdXNwZW5kaXNzZSBpYWN1bGlzIGVsaXQgZmV1Z2lhdCBtYWduYSBwZWxsZW50ZXNxdWUgdWx0cmljaWVzLiBWZXN0aWJ1bHVtIGFsaXF1YW0gdG9ydG
9yIG5vbiBhbnRlIHVsbGFtY29ycGVyIGZyaW5naWxsYS4gRG9uZWMgaWFjdWxpcyBtaSBxdWlzIG1hdXJpcyBvcm5hcmUgdmVzdGlidWx1bS4KCkluIGEgbWFnbmEgbmlzaSwgYSB1bHRyaWNpZXMgbWFzc2EuIERvbmVjIGVsaXQgbmVxdWUsIHZpdmVycmEgbm9uIHRlbXBvciBxdWlzLCBmcmluZ2lsbGEgaW4gbWV0dXMuIEludGVnZXIgb2RpbyBvZGlvLCBldWlzbW9kIHZpdGFlIG1vbGxpcyBzZWQsIHNvZGFsZXMgZWdldCBsaWJlcm8uIERvbmVjIG5lYyBtYXNzYSBpbiBmZWxpcyBvcm5hcmUgcGhhcmV0cmEgYXQgbmVjIHRlbGx1cy4gTnVuYyBsb3JlbSBkb2xvciwgcHJldGl1bSB2ZWwgYXVjdG9yIGluLCB2b2x1dHBhdCB2aXRhZSBmZWxpcy4gTWFlY2VuYXMgcmhvbmN1cywgb3JjaSB2ZWwgYmxhbmRpdCBldWlzbW9kLCB0dXJwaXMgZXJhdCB0aW5jaWR1bnQgYW50ZSwgZWxlbWVudHVtIGFkaXBpc2NpbmcgbmlzbCB1cm5hIGluIG5pc2kuIFBoYXNlbGx1cyBzYWdpdHRpcywgZW5pbSBzZWQgYWNjdW1zYW4gY29uc2VxdWF0LCB1cm5hIGF1Z3VlIGxvYm9ydGlzIGVyYXQsIG5vbiBtYWxlc3VhZGEgcXVhbSBtZXR1cyBzb2xsaWNpdHVkaW4gYW50ZS4gSW4gbGVvIHB1cnVzLCBkaWduaXNzaW0gcXVpcyB2YXJpdXMgdmVsLCBwZWxsZW50ZXNxdWUgZXQgbmliaC4gSW4gc2VkIHRvcnRvciBpYWN1bGlzIGxpYmVybyBtb2xsaXMgcGVsbGVudGVzcXVlIGlkIHZpdGFlIGxlY3R1cy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEgZGljdHVtc3QuIFBoYXNlbGx1cyBtYXVyaXMgZW5pbSwgcG9zdWVyZSBlZ2V0IGx1Y3R1cyBhYywgaWFjdWxpcyBldCBxdWFtLiBWaXZhbXVzIGV0IG5pYmggZGlhbSwgZWxlbWVudHVtIGVnZXN0YXMgdGVsbHVzLiBBZW5lYW4gdnVscHV0YXRlIG1hbGVzdWFkYSBlc3QuIFNlZCBwb3N1ZXJlIHBvcnRhIGRpYW0gYSBzb2RhbGVzLiBQcm9pbiBldSBzZW0gbm9uIHZlbGl0IGZhY2lsaXNpcyB2ZW5lbmF0aXMgc2VkIGEgdHVycGlzLgoKUGVsbGVudGVzcXVlIHNlZCByaXN1cyBhIGFudGUgdnVscHV0YXRlIGxvYm9ydGlzIHNpdCBhbWV0IGV1IG5pc2wuIFN1c3BlbmRpc3NlIHV0IGVyb3MgbWksIGEgcmhvbmN1cyBsYWN1cy4gQ3VyYWJpdHVyIGZlcm1lbnR1bSB2ZWhpY3VsYSB0ZWxsdXMsIGEgb3JuYXJlIG1pIGNvbmRpbWVudHVtIHZlbC4gSW50ZWdlciBtb2xlc3RpZSB2b2x1dHBhdCB2aXZlcnJhLiBJbnRlZ2VyIHBvc3VlcmUgZXVpc21vZCB2ZW5lbmF0aXMuIFByb2luIGFjIG1hdXJpcyBzZWQgbnVsbGEgcGhhcmV0cmEgcG9ydHRpdG9yLiBEdWlzIHZlbCBkdWkgaW4gcmlzdXMgc29kYWxlcyBhdWN0b3Igc2l0IGFtZXQgbm9uIGVuaW0uIE1hZWNlbmFzIG1vbGxpcyBsYWN1cyBhdCBsaWd1bGEgZmF1Y2lidXMgc29kYWxlcy4gQ3JhcyB2ZWwgbmVxdWUgYXJjdS4gU2VkIHRpbmNpZHVudCB0b3J0b3IgcHJldGl1bSBuaXNpIGludGVyZHVtIHF1aXMgZGljdHVtIGFyY3UgbGFvcmVldC4gTW9yYmkgcHJldGl1bSB1bHRyaWNlcyBmZXVnaWF0LiBNYWVjZW5hcyBjb252YWxsaXMgYXVndWUgbmVjIGZlbGlzIG1hbGVzdWFkYSBtYWxlc3VhZGEgc2NlbGVyaXNxdWUgbWF1cmlzIHBsYWNlcmF0LiBTZWQgYXQgbWFnbmEgZW5pbSwgYXQgZnJpbmdpbGxhIGRvbG9yLiBRdWlzcXVlIHV0IG1hdHRpcyBkdWkuIFByYWVzZW50IGNvbnNlY3RldHVyIGFudGUgdml2ZXJyYSBuaXNpIGJsYW5kaXQgcGhhcmV0cmEuIFF1aXNxdWUgbWV0dXMgZWxpdCwgZGlnbmlzc2ltIHZpdGFlIGZlcm1lbnR1bSBzaXQgYW1ldCwgZnJpbmdpbGxhIGltcGVyZGlldCBvZGlvLiBDcmFzIGVnZXQgcHVydXMgZWdldCB0ZWxsdXMgZmV1Z2lhdCBsdWN0dXMgYSBhYyBwdXJ1cy4gQ3JhcyB2aXRhZSBuaXNsIHZlbCBhdWd1ZSByaG9uY3VzIHBvcnR0aXRvciBzaXQgYW1ldCBxdWlzIGxvcmVtLiBEb25lYyBpbnRlcmR1bSBwZWxsZW50ZXNxdWUgYWRpcGlzY2luZy4gUGhhc2VsbHVzIG5lcXVlIGxpYmVybywgYWxpcXVhbSBpbiBtYXR0aXMgdml0YWUsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgbmliaC4KCkRvbmVjIG5lYyBudWxsYSB1cm5hLCBhYyBzYWdpdHRpcyBsZWN0dXMuIFN1c3BlbmRpc3NlIG5vbiBlbGl0IHNlZCBtaSBhdWN0b3IgZmFjaWxpc2lzIHZpdGFlIGV0IGxlY3R1cy4gRnVzY2UgYWMgdnVscHV0YXRlIG1hdXJpcy4gTW9yYmkgY29uZGltZW50dW0gdWx0cmljZXMgbWV0dXMsIGV0IGFjY3Vtc2FuIHB1cnVzIG1hbGVzdWFkYSBhdC4gTWFlY2VuYXMgbG9ib3J0aXMgYW50ZSBzZWQgbWFzc2EgZGljdHVtIHZpdGFlIHZlbmVuYXRpcyBlbGl0IGNvbW1vZG8uIFByb2luIHRlbGx1cyBlcm9zLCBhZGlwaXNjaW5nIHNlZCBkaWduaXNzaW0gdml0YWUsIHRlbXBvciBlZ2V0IGFudGUuIEFlbmVhbiBpZCB0ZWxsdXMgbmVjIG1hZ25hIGN1cnN1cyBwaGFyZXRyYSB2aXRhZSB2ZWwgZW5pbS4gTW9yYmkgdmVzdGlidWx1bSBwaGFyZXRyYSBlc3QgaW4gdnVscHV0YXRlLiBBbGlxdWFtIHZpdGFlIG1ldHVzIGFyY3UsIGlkIGFsaXF1ZXQgbnVsbGEuIFBoYXNlbGx1cyBsaWd1bGEgZXN0LCBoZW5kcmVyaXQgbmVjIGlhY3VsaXMgdXQsIHZvbHV0cGF0IHZlbCBlcm9zLiBTdXNwZW5kaXNzZSB2aXRhZSB1cm5hIHR1cnBpcywgcGxhY2VyYXQgYWRpcGlzY2luZyBkaWFtLiBQaGFzZWxsdXMgZmV1Z2lhdCB2Z
XN0aWJ1bHVtIG5lcXVlIGV1IGRhcGlidXMuIE51bGxhIGZhY2lsaXNpLiBEdWlzIHRvcnRvciBmZWxpcywgZXVpc21vZCBzaXQgYW1ldCBhbGlxdWV0IGluLCB2b2x1dHBhdCBuZWMgdHVycGlzLiBNYXVyaXMgcmhvbmN1cyBpcHN1bSB1dCBwdXJ1cyBlbGVpZmVuZCB1dCBsb2JvcnRpcyBsZWN0dXMgZGFwaWJ1cy4gUXVpc3F1ZSBub24gZXJhdCBsb3JlbS4gVml2YW11cyBwb3N1ZXJlIGltcGVyZGlldCBpYWN1bGlzLiBVdCBsaWd1bGEgbGFjdXMsIGVsZWlmZW5kIGF0IHRlbXBvciBpZCwgYXVjdG9yIGV1IGxlby4KCkRvbmVjIG1pIGVuaW0sIGxhb3JlZXQgcHVsdmluYXIgbW9sbGlzIGV1LCBtYWxlc3VhZGEgdml2ZXJyYSBudW5jLiBJbiB2aXRhZSBtZXR1cyB2aXRhZSBuZXF1ZSB0ZW1wb3IgZGFwaWJ1cy4gTWFlY2VuYXMgdGluY2lkdW50IHB1cnVzIGEgZmVsaXMgYWxpcXVhbSBwbGFjZXJhdC4gTnVsbGEgZmFjaWxpc2kuIFN1c3BlbmRpc3NlIHBsYWNlcmF0IHBoYXJldHJhIG1hdHRpcy4gSW50ZWdlciB0ZW1wb3IgbWFsZXN1YWRhIGp1c3RvIGF0IHRlbXB1cy4gTWFlY2VuYXMgdmVoaWN1bGEgbG9yZW0gYSBzYXBpZW4gYmliZW5kdW0gdmVsIGlhY3VsaXMgcmlzdXMgZmV1Z2lhdC4gUGVsbGVudGVzcXVlIGRpYW0gZXJhdCwgZGFwaWJ1cyBldCBwZWxsZW50ZXNxdWUgcXVpcywgbW9sZXN0aWUgdXQgbWFzc2EuIFZpdmFtdXMgaWFjdWxpcyBpbnRlcmR1bSBtYXNzYSBpZCBiaWJlbmR1bS4gUXVpc3F1ZSB1dCBtYXVyaXMgZHVpLCBzaXQgYW1ldCB2YXJpdXMgZWxpdC4gVmVzdGlidWx1bSBlbGl0IGxvcmVtLCBydXRydW0gbm9uIGNvbnNlY3RldHVyIHV0LCBsYW9yZWV0IG5lYyBudW5jLiBEb25lYyBuZWMgbWF1cmlzIGFudGUuIEN1cmFiaXR1ciB1dCBlc3Qgc2VkIG9kaW8gcGhhcmV0cmEgbGFvcmVldC4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3VyYWJpdHVyIHB1cnVzIHJpc3VzLCBsYW9yZWV0IHNlZCBwb3J0YSBpZCwgc2FnaXR0aXMgdmVsIGlwc3VtLiBNYWVjZW5hcyBuaWJoIGRpYW0sIGN1cnN1cyBldCB2YXJpdXMgc2l0IGFtZXQsIGZyaW5naWxsYSBzZWQgbWFnbmEuIE51bGxhbSBpZCBuZXF1ZSBldSBsZW8gZmF1Y2lidXMgbW9sbGlzLiBEdWlzIG5lYyBhZGlwaXNjaW5nIG1hdXJpcy4gU3VzcGVuZGlzc2Ugc29sbGljaXR1ZGluLCBlbmltIGV1IHB1bHZpbmFyIGNvbW1vZG8sIGVyYXQgYXVndWUgdWx0cmljZXMgbWksIGEgdHJpc3RpcXVlIG1hZ25hIHNlbSBub24gbGliZXJvLgoKU2VkIGluIG1ldHVzIG51bGxhLiBQcmFlc2VudCBuZWMgYWRpcGlzY2luZyBzYXBpZW4uIERvbmVjIGxhb3JlZXQsIHZlbGl0IG5vbiBydXRydW0gdmVzdGlidWx1bSwgbGlndWxhIG5lcXVlIGFkaXBpc2NpbmcgdHVycGlzLCBhdCBhdWN0b3Igc2FwaWVuIGVsaXQgdXQgbWFzc2EuIE51bGxhbSBhbGlxdWFtLCBlbmltIHZlbCBwb3N1ZXJlIHJ1dHJ1bSwganVzdG8gZXJhdCBsYW9yZWV0IGVzdCwgdmVsIGZyaW5naWxsYSBsYWN1cyBuaXNpIG5vbiBsZWN0dXMuIEV0aWFtIGxlY3R1cyBudW5jLCBsYW9yZWV0IGV0IHBsYWNlcmF0IGF0LCB2ZW5lbmF0aXMgcXVpcyBsaWJlcm8uIFByYWVzZW50IGluIHBsYWNlcmF0IGVsaXQuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gUGVsbGVudGVzcXVlIGZyaW5naWxsYSBhdWd1ZSBldSBuaWJoIHBsYWNlcmF0IGRpY3R1bS4gTnVuYyBwb3J0dGl0b3IgdHJpc3RpcXVlIGRpYW0sIGV1IGFsaXF1YW0gZW5pbSBhbGlxdWV0IHZlbC4gQWxpcXVhbSBsYWNpbmlhIGludGVyZHVtIGlwc3VtLCBpbiBwb3N1ZXJlIG1ldHVzIGx1Y3R1cyB2ZWwuIFZpdmFtdXMgZXQgbmlzbCBhIGVyb3Mgc2VtcGVyIGVsZW1lbnR1bS4gRG9uZWMgdmVuZW5hdGlzIG9yY2kgYXQgZGlhbSB0cmlzdGlxdWUgc29sbGljaXR1ZGluLiBJbiBldSBlcm9zIHNlZCBvZGlvIHJ1dHJ1bSBsdWN0dXMgbm9uIG5lYyB0ZWxsdXMuCgpOdWxsYSBuZWMgZmVsaXMgZWxpdC4gTnVsbGFtIGluIGlwc3VtIGluIGlwc3VtIGNvbnNlcXVhdCBmcmluZ2lsbGEgcXVpcyB2ZWwgdG9ydG9yLiBQaGFzZWxsdXMgbm9uIG1hc3NhIG5pc2ksIHNpdCBhbWV0IGFsaXF1YW0gdXJuYS4gU2VkIGZlcm1lbnR1bSBuaWJoIHZpdGFlIGxhY3VzIHRpbmNpZHVudCBuZWMgdGluY2lkdW50IG1hc3NhIGJpYmVuZHVtLiBFdGlhbSBlbGl0IGR1aSwgZmFjaWxpc2lzIHNpdCBhbWV0IHZlaGljdWxhIG5lYywgaWFjdWxpcyBhdCBzYXBpZW4uIFV0IGF0IG1hc3NhIGlkIGR1aSB1bHRyaWNlcyB2b2x1dHBhdCB1dCBhYyBsaWJlcm8uIEZ1c2NlIGlwc3VtIG1pLCBiaWJlbmR1bSBhIGxhY2luaWEgZXQsIHB1bHZpbmFyIGVnZXQgbWF1cmlzLiBQcm9pbiBmYXVjaWJ1cyB1cm5hIHV0IGxvcmVtIGVsZW1lbnR1bSB2dWxwdXRhdGUuIER1aXMgcXVhbSBsZW8sIG1hbGVzdWFkYSBub24gZXVpc21vZCB1dCwgYmxhbmRpdCBmYWNpbGlzaXMgbWF1cmlzLiBTdXNwZW5kaXNzZSBzaXQgYW1ldCBtYWduYSBpZCB2ZWxpdCB0aW5jaWR1bnQgYWxpcXVldCBuZWMgZXUgZG9sb3IuIEN1cmFiaXR1ciBiaWJlbmR1bSBsb3JlbSB2ZWwgZmVsaXMgdGVtcHVzIGRh
cGlidXMuIEFsaXF1YW0gZXJhdCB2b2x1dHBhdC4gQWVuZWFuIGN1cnN1cyB0b3J0b3IgbmVjIGR1aSBhbGlxdWV0IHBvcnRhLiBBZW5lYW4gY29tbW9kbyBpYWN1bGlzIHN1c2NpcGl0LiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgUXVpc3F1ZSBzaXQgYW1ldCBvcm5hcmUgZWxpdC4gTmFtIGxpZ3VsYSByaXN1cywgdmVzdGlidWx1bSBuZWMgbWF0dGlzIGluLCBjb25kaW1lbnR1bSBhYyBhbnRlLiBEb25lYyBmcmluZ2lsbGEsIGp1c3RvIGV0IHVsdHJpY2VzIGZhdWNpYnVzLCB0ZWxsdXMgZXN0IHZvbHV0cGF0IG1hc3NhLCB2aXRhZSBjb21tb2RvIHNhcGllbiBkaWFtIG5vbiByaXN1cy4gVml2YW11cyBhdCBhcmN1IGdyYXZpZGEgcHVydXMgbW9sbGlzIGZldWdpYXQuCgpOdWxsYSBhIHR1cnBpcyBxdWlzIHNhcGllbiBjb21tb2RvIGRpZ25pc3NpbSBldSBxdWlzIGp1c3RvLiBNYWVjZW5hcyBldSBsb3JlbSBvZGlvLCB1dCBoZW5kcmVyaXQgdmVsaXQuIEN1bSBzb2NpaXMgbmF0b3F1ZSBwZW5hdGlidXMgZXQgbWFnbmlzIGRpcyBwYXJ0dXJpZW50IG1vbnRlcywgbmFzY2V0dXIgcmlkaWN1bHVzIG11cy4gUHJvaW4gZmFjaWxpc2lzIHBvcnR0aXRvciB1bGxhbWNvcnBlci4gUHJhZXNlbnQgbW9sbGlzIGRpZ25pc3NpbSBtYXNzYSwgbGFvcmVldCBhbGlxdWV0IHZlbGl0IHBlbGxlbnRlc3F1ZSBub24uIE51bmMgZmFjaWxpc2lzIGNvbnZhbGxpcyB0cmlzdGlxdWUuIE1hdXJpcyBwb3J0dGl0b3IgYW50ZSBhdCB0ZWxsdXMgY29udmFsbGlzIHBsYWNlcmF0LiBNb3JiaSBhbGlxdWV0IG5pc2kgYWMgbmlzbCBwdWx2aW5hciBpZCBkaWN0dW0gbmlzbCBtb2xsaXMuIFNlZCBvcm5hcmUgc2VtIGV0IHJpc3VzIHBsYWNlcmF0IGxvYm9ydGlzIGlkIGVnZXQgZWxpdC4gSW50ZWdlciBjb25zZXF1YXQsIG1hZ25hIGlkIHN1c2NpcGl0IHBoYXJldHJhLCBudWxsYSB2ZWxpdCBzdXNjaXBpdCBvcmNpLCB1dCBpbnRlcmR1bSBhdWd1ZSBhdWd1ZSBxdWlzIHF1YW0uIEZ1c2NlIHByZXRpdW0gYWxpcXVldCB2dWxwdXRhdGUuIE1hdXJpcyBibGFuZGl0IGRpY3R1bSBtb2xlc3RpZS4gUHJvaW4gbnVsbGEgbmliaCwgYmliZW5kdW0gZXUgcGxhY2VyYXQgYXQsIHRpbmNpZHVudCBhYyBuaXNsLiBOdWxsYW0gdnVscHV0YXRlIG1ldHVzIHV0IGxpYmVybyBydXRydW0gdWx0cmljaWVzLiBOdW5jIHNpdCBhbWV0IGR1aSBtYXVyaXMuIFN1c3BlbmRpc3NlIGFkaXBpc2NpbmcgbGFjdXMgaW4gYXVndWUgZWxlaWZlbmQgbW9sbGlzLgoKRHVpcyBwcmV0aXVtIHVsdHJpY2VzIG1hdHRpcy4gTmFtIGV1aXNtb2QgcmlzdXMgYSBlcmF0IGxhY2luaWEgYmliZW5kdW0uIE1vcmJpIG1hc3NhIHRvcnRvciwgY29uc2VjdGV0dXIgaWQgZWxlaWZlbmQgaWQsIHBlbGxlbnRlc3F1ZSB2ZWwgdG9ydG9yLiBQcmFlc2VudCB1cm5hIGxvcmVtLCBwb3J0dGl0b3IgYXQgY29uZGltZW50dW0gdml0YWUsIGx1Y3R1cyBlZ2V0IGVsaXQuIE1hZWNlbmFzIGZyaW5naWxsYSBxdWFtIGNvbnZhbGxpcyBlc3QgaGVuZHJlcml0IHZpdmVycmEuIEV0aWFtIHZlaGljdWxhLCBzYXBpZW4gbm9uIHB1bHZpbmFyIGFkaXBpc2NpbmcsIG5pc2kgbWFzc2EgdmVzdGlidWx1bSBlc3QsIGlkIGludGVyZHVtIG1hdXJpcyB2ZWxpdCBldSBlc3QuIFZlc3RpYnVsdW0gZXN0IGFyY3UsIGZhY2lsaXNpcyBhdCB1bHRyaWNpZXMgbm9uLCB2dWxwdXRhdGUgaWQgc2FwaWVuLiBWZXN0aWJ1bHVtIGlwc3VtIG1ldHVzLCBwaGFyZXRyYSBuZWMgcGVsbGVudGVzcXVlIGlkLCBmYWNpbGlzaXMgaWQgc2FwaWVuLiBEb25lYyBydXRydW0gb2RpbyBldCBsYWN1cyB1bHRyaWNpZXMgdWxsYW1jb3JwZXIuIEludGVnZXIgc2VkIGVzdCB1dCBtaSBwb3N1ZXJlIHRpbmNpZHVudCBxdWlzIG5vbiBsZW8uIE1vcmJpIHRlbGx1cyBqdXN0bywgdWx0cmljaWVzIHNpdCBhbWV0IHVsdHJpY2VzIHF1aXMsIGZhY2lsaXNpcyB2aXRhZSBtYWduYS4gRG9uZWMgbGlndWxhIG1ldHVzLCBwZWxsZW50ZXNxdWUgbm9uIHRyaXN0aXF1ZSBhYywgdmVzdGlidWx1bSBzZWQgZXJhdC4gQWxpcXVhbSBlcmF0IHZvbHV0cGF0LgoKTmFtIGRpZ25pc3NpbSwgbmlzbCBlZ2V0IGNvbnNlcXVhdCBldWlzbW9kLCBzZW0gbGVjdHVzIGF1Y3RvciBvcmNpLCB1dCBwb3J0dGl0b3IgbGFjdXMgZHVpIGFjIG5lcXVlLiBJbiBoYWMgaGFiaXRhc3NlIHBsYXRlYSBkaWN0dW1zdC4gRnVzY2UgZWdlc3RhcyBwb3J0YSBmYWNpbGlzaXMuIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBNYXVyaXMgY3Vyc3VzIHJob25jdXMgcmlzdXMgYWMgZXVpc21vZC4gUXVpc3F1ZSB2aXRhZSByaXN1cyBhIHRlbGx1cyB2ZW5lbmF0aXMgY29udmFsbGlzLiBDdXJhYml0dXIgbGFvcmVldCBzYXBpZW4gZXUgcXVhbSBsdWN0dXMgbG9ib3J0aXMuIFZpdmFtdXMgc29sbGljaXR1ZGluIHNvZGFsZXMgZG9sb3Igdml0YWUgc29kYWxlcy4gU3VzcGVuZGlzc2UgcGhhcmV0cmEgbGFvcmVldCBhbGlxdWV0LiBNYWVjZW5hcyB1bGxhbWNvcnBlciBvcmNpIHZlbCB0b3J0b3IgbHVjdHVzIGlhY3VsaXMgdXQgdml0YWUgbWV0dXMuIFZlc3RpYnVsdW0gdXQgYXJjdSBhYyB0ZWxsdXMgbWF0dGlzIGVsZWlmZW5kIGV
nZXQgdmVoaWN1bGEgZWxpdC4KCkluIHNlZCBmZXVnaWF0IGVyb3MuIERvbmVjIGJpYmVuZHVtIHVsbGFtY29ycGVyIGRpYW0sIGV1IGZhdWNpYnVzIG1hdXJpcyBkaWN0dW0gc2VkLiBEdWlzIHRpbmNpZHVudCBqdXN0byBpbiBuZXF1ZSBhY2N1bXNhbiBkaWN0dW0uIE1hZWNlbmFzIGluIHJ1dHJ1bSBzYXBpZW4uIFV0IGlkIGZldWdpYXQgbGFjdXMuIE51bGxhIGZhY2lsaXNpLiBOdW5jIGFjIGxvcmVtIGlkIHF1YW0gdmFyaXVzIGN1cnN1cyBhIGV0IGVsaXQuIEFlbmVhbiBwb3N1ZXJlIGxpYmVybyBldSB0b3J0b3IgdmVoaWN1bGEgdXQgdWxsYW1jb3JwZXIgb2RpbyBjb25zZXF1YXQuIFNlZCBpbiBkaWduaXNzaW0gZHVpLiBDdXJhYml0dXIgaWFjdWxpcyB0ZW1wb3IgcXVhbSBuZWMgcGxhY2VyYXQuIEFsaXF1YW0gdmVuZW5hdGlzIG5pYmggZXQganVzdG8gaWFjdWxpcyBsYWNpbmlhLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGVsbGVudGVzcXVlIHRlbXB1cyBtYWduYSBzZWQgbWkgYWxpcXVldCBlZ2V0IHZhcml1cyBvZGlvIGNvbmd1ZS4KCkludGVnZXIgc2VtIHNlbSwgc2VtcGVyIGluIHZlc3RpYnVsdW0gdml0YWUsIGxvYm9ydGlzIHF1aXMgZXJhdC4gRHVpcyBhbnRlIGxlY3R1cywgZmVybWVudHVtIHNlZCB0ZW1wb3Igc2l0IGFtZXQsIHBsYWNlcmF0IHNpdCBhbWV0IHNlbS4gTWF1cmlzIGNvbmd1ZSB0aW5jaWR1bnQgaXBzdW0uIFV0IHZpdmVycmEsIGxhY3VzIHZlbCB2YXJpdXMgcGhhcmV0cmEsIHB1cnVzIGVuaW0gcHVsdmluYXIgaXBzdW0sIG5vbiBwZWxsZW50ZXNxdWUgZW5pbSBqdXN0byBub24gZXJhdC4gRnVzY2UgaXBzdW0gb3JjaSwgdWx0cmljZXMgc2VkIHBlbGxlbnRlc3F1ZSBhdCwgaGVuZHJlcml0IGxhb3JlZXQgZW5pbS4gTnVuYyBibGFuZGl0IG1vbGxpcyBwcmV0aXVtLiBVdCBtb2xsaXMsIG51bGxhIGFsaXF1YW0gc29kYWxlcyB2ZXN0aWJ1bHVtLCBsaWJlcm8gbG9yZW0gdGVtcHVzIHRvcnRvciwgYSBwZWxsZW50ZXNxdWUgbmliaCBlbGl0IGEgaXBzdW0uIFBoYXNlbGx1cyBmZXJtZW50dW0gbGlndWxhIGF0IG5lcXVlIGFkaXBpc2Npbmcgc29sbGljaXR1ZGluLiBTdXNwZW5kaXNzZSBpZCBpcHN1bSBhcmN1LiBTZWQgdGluY2lkdW50IHBsYWNlcmF0IHZpdmVycmEuIERvbmVjIGxpYmVybyBhdWd1ZSwgcG9ydHRpdG9yIHNpdCBhbWV0IHZhcml1cyBlZ2V0LCBydXRydW0gbmVjIGxhY3VzLiBQcm9pbiBibGFuZGl0IG9yY2kgc2l0IGFtZXQgZGlhbSBkaWN0dW0gaWQgcG9ydHRpdG9yIHJpc3VzIGlhY3VsaXMuIEludGVnZXIgbGFjaW5pYSBmZXVnaWF0IGxlbywgdml0YWUgYXVjdG9yIHR1cnBpcyBlbGVpZmVuZCB2ZWwuIFN1c3BlbmRpc3NlIGxvcmVtIHF1YW0sIHByZXRpdW0gaWQgYmliZW5kdW0gc2VkLCB2aXZlcnJhIHZpdGFlIHRvcnRvci4gTnVsbGFtIHVsdHJpY2llcyBsaWJlcm8gZXUgcmlzdXMgY29udmFsbGlzIGVnZXQgdWxsYW1jb3JwZXIgbmlzaSBlbGVtZW50dW0uIE1hdXJpcyBudWxsYSBlbGl0LCBiaWJlbmR1bSBpZCB2dWxwdXRhdGUgdml0YWUsIGltcGVyZGlldCBydXRydW0gbG9yZW0uIEN1cmFiaXR1ciBlZ2V0IGRpZ25pc3NpbSBvcmNpLiBTZWQgc2VtcGVyIHRlbGx1cyBpcHN1bSwgYXQgYmxhbmRpdCBkdWkuIEludGVnZXIgZGFwaWJ1cyBmYWNpbGlzaXMgc29kYWxlcy4gVml2YW11cyBzb2xsaWNpdHVkaW4gdmFyaXVzIGVzdCwgcXVpcyBvcm5hcmUganVzdG8gY3Vyc3VzIGlkLgoKTnVuYyB2ZWwgdWxsYW1jb3JwZXIgbWkuIFN1c3BlbmRpc3NlIHBvdGVudGkuIE51bmMgZXQgdXJuYSBhIGF1Z3VlIHNjZWxlcmlzcXVlIHVsdHJpY2VzIG5vbiBxdWlzIG1pLiBJbiBxdWlzIHBvcnR0aXRvciBlbGl0LiBBZW5lYW4gcXVpcyBlcmF0IG51bGxhLCBhIHZlbmVuYXRpcyB0ZWxsdXMuIEZ1c2NlIHZlc3RpYnVsdW0gbmlzaSBzZWQgbGVvIGFkaXBpc2NpbmcgZGlnbmlzc2ltLiBOdW5jIGludGVyZHVtLCBsb3JlbSBldCBsYWNpbmlhIHZlc3RpYnVsdW0sIHF1YW0gZXN0IG1hdHRpcyBtYWduYSwgc2l0IGFtZXQgdm9sdXRwYXQgZWxpdCBhdWd1ZSBhdCBsaWJlcm8uIENyYXMgZ3JhdmlkYSBkdWkgcXVpcyB2ZWxpdCBsb2JvcnRpcyBjb25kaW1lbnR1bSBldCBlbGVpZmVuZCBsaWd1bGEuIFBoYXNlbGx1cyBhYyBtZXR1cyBxdWFtLCBpZCB2ZW5lbmF0aXMgbWkuIEFsaXF1YW0gdXQgdHVycGlzIGFjIHRlbGx1cyBkYXBpYnVzIGRhcGlidXMgZXUgaW4gbWkuIFF1aXNxdWUgZWdldCBuaWJoIGVyb3MuIEZ1c2NlIGNvbnNlY3RldHVyIGxlbyB2ZWxpdC4KClZlc3RpYnVsdW0gc2VtcGVyIGVnZXN0YXMgbWF1cmlzLiBNb3JiaSB2ZXN0aWJ1bHVtIHNlbSBzZW0uIEFsaXF1YW0gdmVuZW5hdGlzLCBmZWxpcyBzZWQgZWxlaWZlbmQgcG9ydGEsIG1hdXJpcyBkaWFtIHNlbXBlciBhcmN1LCBzaXQgYW1ldCB1bHRyaWNpZXMgZXN0IHNhcGllbiBzaXQgYW1ldCBsaWJlcm8uIFZlc3RpYnVsdW0gZHVpIG9yY2ksIG9ybmFyZSBjb25kaW1lbnR1bSBtb2xsaXMgbmVjLCBtb2xlc3RpZSBhYyBlcm9zLiBQcm9pbiB2aXRhZSBtb2
xsaXMgdmVsaXQuIFByYWVzZW50IGVnZXQgZmVsaXMgbWkuIE1hZWNlbmFzIGV1IHZ1bHB1dGF0ZSBuaXNpLiBWZXN0aWJ1bHVtIHZhcml1cywgYXJjdSBpbiB1bHRyaWNpZXMgdmVzdGlidWx1bSwgbmliaCBsZW8gc2FnaXR0aXMgb2RpbywgdXQgYmliZW5kdW0gbmlzbCBtaSBuZWMgZGlhbS4gSW50ZWdlciBhdCBlbmltIGZldWdpYXQgbnVsbGEgc2VtcGVyIGJpYmVuZHVtIHV0IGEgdmVsaXQuIFByb2luIGF0IG5pc2kgdXQgbG9yZW0gYWxpcXVhbSB2YXJpdXMgZWdldCBxdWlzIGVsaXQuIE51bGxhbSBuZWMgb2RpbyB2ZWwgbGVjdHVzIGNvbmd1ZSBjb25zZXF1YXQgYWRpcGlzY2luZyBhYyBtaS4gRnVzY2Ugdml0YWUgbGFvcmVldCBsaWJlcm8uIEN1cmFiaXR1ciBzaXQgYW1ldCBzZW0gbmVxdWUsIG5lYyBwb3N1ZXJlIGVuaW0uIEN1cmFiaXR1ciBhdCBtYXNzYSBhIHNlbSBncmF2aWRhIGlhY3VsaXMgbmVjIGV0IG5pYmguIFNlZCB2aXRhZSBkdWkgdml0YWUgbGVvIHRpbmNpZHVudCBwcmV0aXVtIGEgYWxpcXVhbSBlcmF0LiBTdXNwZW5kaXNzZSB1bHRyaWNpZXMgb2RpbyBhdCBtZXR1cyB0ZW1wb3IgaW4gcGVsbGVudGVzcXVlIGFyY3UgdWx0cmljaWVzLgoKU2VkIGFsaXF1YW0gbWF0dGlzIHF1YW0sIGluIHZ1bHB1dGF0ZSBzYXBpZW4gdWx0cmljZXMgaW4uIFBlbGxlbnRlc3F1ZSBxdWlzIHZlbGl0IHNlZCBkdWkgaGVuZHJlcml0IGN1cnN1cy4gUGVsbGVudGVzcXVlIG5vbiBudW5jIGxhY3VzLCBhIHNlbXBlciBtZXR1cy4gRnVzY2UgZXVpc21vZCB2ZWxpdCBxdWlzIGRpYW0gc3VzY2lwaXQgY29uc2VxdWF0LiBQcmFlc2VudCBjb21tb2RvIGFjY3Vtc2FuIG5lcXVlLiBQcm9pbiB2aXZlcnJhLCBpcHN1bSBub24gdHJpc3RpcXVlIHVsdHJpY2VzLCB2ZWxpdCB2ZWxpdCBmYWNpbGlzaXMgbG9yZW0sIHZlbCBydXRydW0gbmVxdWUgZXJvcyBhYyBuaXNpLiBTdXNwZW5kaXNzZSBmZWxpcyBtYXNzYSwgZmF1Y2lidXMgaW4gdm9sdXRwYXQgYWMsIGRhcGlidXMgZXQgb2Rpby4gUGVsbGVudGVzcXVlIGlkIHRlbGx1cyBzaXQgYW1ldCByaXN1cyB1bHRyaWNpZXMgdWxsYW1jb3JwZXIgbm9uIG5lYyBzYXBpZW4uIE5hbSBwbGFjZXJhdCB2aXZlcnJhIHVsbGFtY29ycGVyLiBOYW0gcGxhY2VyYXQgcG9ydHRpdG9yIHNhcGllbiBuZWMgcHVsdmluYXIuIEN1cmFiaXR1ciB2ZWwgb2RpbyBzaXQgYW1ldCBvZGlvIGFjY3Vtc2FuIGFsaXF1ZXQgdml0YWUgYSBsZWN0dXMuIFBlbGxlbnRlc3F1ZSBsb2JvcnRpcyB2aXZlcnJhIGNvbnNlcXVhdC4gTWF1cmlzIGVsZW1lbnR1bSBjdXJzdXMgbnVsbGEsIHNpdCBhbWV0IGhlbmRyZXJpdCBqdXN0byBkaWN0dW0gc2VkLiBNYWVjZW5hcyBkaWFtIG9kaW8sIGZyaW5naWxsYSBhYyBjb25ndWUgcXVpcywgYWRpcGlzY2luZyB1dCBlbGl0LgoKQWxpcXVhbSBsb3JlbSBlcm9zLCBwaGFyZXRyYSBuZWMgZWdlc3RhcyB2aXRhZSwgbWF0dGlzIG5lYyByaXN1cy4gTWF1cmlzIGFyY3UgbWFzc2EsIHNvZGFsZXMgZWdldCBncmF2aWRhIHNlZCwgdml2ZXJyYSB2aXRhZSB0dXJwaXMuIFV0IGxpZ3VsYSB1cm5hLCBldWlzbW9kIGFjIHRpbmNpZHVudCBldSwgZmF1Y2lidXMgc2VkIGZlbGlzLiBQcmFlc2VudCBtb2xsaXMsIGlwc3VtIHF1aXMgcmhvbmN1cyBkaWduaXNzaW0sIG9kaW8gc2VtIHZlbmVuYXRpcyBudWxsYSwgYXQgY29uc2VxdWF0IGZlbGlzIGF1Z3VlIHZlbCBlcmF0LiBOYW0gZmVybWVudHVtIGZldWdpYXQgdm9sdXRwYXQuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gRXRpYW0gdml0YWUgZHVpIGluIG5pc2kgYWRpcGlzY2luZyB1bHRyaWNpZXMgbm9uIGV1IGp1c3RvLiBEb25lYyB0cmlzdGlxdWUgdWx0cmljaWVzIGFkaXBpc2NpbmcuIE51bGxhIHNvZGFsZXMsIG51bmMgYSB0cmlzdGlxdWUgZWxlbWVudHVtLCBlcmF0IG5lcXVlIGVnZXN0YXMgbmlzbCwgYXQgaGVuZHJlcml0IG9yY2kgc2FwaWVuIHNlZCBsaWJlcm8uIFZpdmFtdXMgYSBtYXVyaXMgdHVycGlzLCBxdWlzIGxhb3JlZXQgaXBzdW0uIE51bmMgbmVjIG1pIGV0IG5pc2wgcGVsbGVudGVzcXVlIHNjZWxlcmlzcXVlLiBWaXZhbXVzIHZvbHV0cGF0LCBqdXN0byB0cmlzdGlxdWUgbGFjaW5pYSBjb25kaW1lbnR1bSwgZXJhdCBqdXN0byB1bHRyaWNlcyB1cm5hLCBlbGVtZW50dW0gdml2ZXJyYSBlcm9zIGF1Z3VlIG5vbiBsaWJlcm8uIFNlZCBtb2xsaXMgbW9sbGlzIGFyY3UsIGF0IGZlcm1lbnR1bSBkaWFtIHN1c2NpcGl0IHF1aXMuCgpFdGlhbSBzaXQgYW1ldCBuaWJoIGp1c3RvLCBwb3N1ZXJlIHZvbHV0cGF0IG51bmMuIE1vcmJpIHBlbGxlbnRlc3F1ZSBuZXF1ZSBpbiBvcmNpIHZvbHV0cGF0IGV1IHNjZWxlcmlzcXVlIGxvcmVtIGRpY3R1bS4gTWF1cmlzIG1vbGxpcyBpYWN1bGlzIGVzdCwgbmVjIHNhZ2l0dGlzIHNhcGllbiBjb25zZXF1YXQgaWQuIE51bmMgbmVjIG1hbGVzdWFkYSBvZGlvLiBEdWlzIHF1aXMgc3VzY2lwaXQgb2Rpby4gTWF1cmlzIHB1cnVzIGR1aSwgc29kYWxlcyBpZCBtYXR0aXMgc2l0IGFtZXQsIHBvc3VlcmUgaW4gYXJjdS4gUGhhc2VsbHVzIHBvcnRhIGVsZW1lbnR1bSBjb252YWxsaXMuIE1hZWNlbmFzIGF0IG9yY2kgZXQgbWkgdnVsc
HV0YXRlIHNvbGxpY2l0dWRpbiBpbiBpbiB0dXJwaXMuIFBlbGxlbnRlc3F1ZSBjdXJzdXMgYWRpcGlzY2luZyBuZXF1ZSBzaXQgYW1ldCBjb21tb2RvLiBGdXNjZSB1dCBtaSBldSBsZWN0dXMgcG9ydHRpdG9yIHZvbHV0cGF0IGV0IG5lYyBmZWxpcy4KCkN1cmFiaXR1ciBzY2VsZXJpc3F1ZSBlcm9zIHF1aXMgbmlzbCB2aXZlcnJhIHZlbCB1bHRyaWNlcyB2ZWxpdCB2ZXN0aWJ1bHVtLiBTZWQgbG9ib3J0aXMgcHVsdmluYXIgc2FwaWVuIGFjIHZlbmVuYXRpcy4gU2VkIGFudGUgbmliaCwgcmhvbmN1cyBlZ2V0IGRpY3R1bSBpbiwgbW9sbGlzIHV0IG5pc2kuIFBoYXNlbGx1cyBmYWNpbGlzaXMgbWkgbm9uIGxvcmVtIHRyaXN0aXF1ZSBub24gZWxlaWZlbmQgc2VtIGZyaW5naWxsYS4gSW50ZWdlciB1dCBhdWd1ZSBlc3QuIEluIHZlbmVuYXRpcyB0aW5jaWR1bnQgc2NlbGVyaXNxdWUuIEV0aWFtIGFudGUgZHVpLCBwb3N1ZXJlIHF1aXMgbWFsZXN1YWRhIHZpdGFlLCBtYWxlc3VhZGEgYSBhcmN1LiBBZW5lYW4gZmF1Y2lidXMgdmVuZW5hdGlzIHNhcGllbiwgdXQgZmFjaWxpc2lzIG5pc2kgYmxhbmRpdCB2ZWwuIEFlbmVhbiBhYyBsb3JlbSBldSBzZW0gZmVybWVudHVtIHBsYWNlcmF0LiBQcm9pbiBuZXF1ZSBwdXJ1cywgYWxpcXVldCB1dCB0aW5jaWR1bnQgdXQsIGNvbnZhbGxpcyBzaXQgYW1ldCBlcm9zLiBQaGFzZWxsdXMgdmVoaWN1bGEgdWxsYW1jb3JwZXIgZW5pbSBub24gdmVoaWN1bGEuIEV0aWFtIHBvcnRhIG9kaW8gdXQgaXBzdW0gYWRpcGlzY2luZyBlZ2VzdGFzIGlkIGEgb2Rpby4gUGVsbGVudGVzcXVlIGJsYW5kaXQsIHNhcGllbiB1dCBwdWx2aW5hciBpbnRlcmR1bSwgbWkgbnVsbGEgaGVuZHJlcml0IGVsaXQsIGluIHRlbXBvciBkaWFtIGVuaW0gYSB1cm5hLiBJbiB0ZWxsdXMgb2Rpbywgb3JuYXJlIHNlZCBjb25kaW1lbnR1bSBhLCBtYXR0aXMgZXUgYXVndWUuCgpGdXNjZSBoZW5kcmVyaXQgcG9ydHRpdG9yIGV1aXNtb2QuIERvbmVjIG1hbGVzdWFkYSBlZ2VzdGFzIHR1cnBpcywgZXQgdWx0cmljaWVzIGZlbGlzIGVsZW1lbnR1bSB2aXRhZS4gTnVsbGFtIGluIHNlbSBuaWJoLiBOdWxsYW0gdWx0cmljaWVzIGhlbmRyZXJpdCBqdXN0byBzaXQgYW1ldCBsb2JvcnRpcy4gU2VkIHRpbmNpZHVudCwgbWF1cmlzIGF0IG9ybmFyZSBsYW9yZWV0LCBzYXBpZW4gcHVydXMgZWxlbWVudHVtIGVsaXQsIG5lYyBwb3J0dGl0b3IgbmlzbCBwdXJ1cyBldCBlcmF0LiBEb25lYyBmZWxpcyBuaXNpLCBydXRydW0gdWxsYW1jb3JwZXIgZ3JhdmlkYSBhYywgdGluY2lkdW50IHNpdCBhbWV0IHVybmEuIFByb2luIHZlbCBqdXN0byB2aXRhZSBlcm9zIHNhZ2l0dGlzIGJpYmVuZHVtIGEgdXQgbmliaC4gUGhhc2VsbHVzIHNvZGFsZXMgbGFvcmVldCB0aW5jaWR1bnQuIE1hZWNlbmFzIG9kaW8gbWFzc2EsIGNvbmRpbWVudHVtIGlkIGFsaXF1ZXQgdXQsIHJob25jdXMgdmVsIGxlY3R1cy4gRHVpcyBwaGFyZXRyYSBjb25zZWN0ZXR1ciBzYXBpZW4uIFBoYXNlbGx1cyBwb3N1ZXJlIHVsdHJpY2llcyBtYXNzYSwgbm9uIHJob25jdXMgcmlzdXMgYWxpcXVhbSB0ZW1wdXMuCgpQcmFlc2VudCB2ZW5lbmF0aXMgbWFnbmEgaWQgc2VtIGRpY3R1bSBldSB2ZWhpY3VsYSBpcHN1bSB2dWxwdXRhdGUuIFNlZCBhIGNvbnZhbGxpcyBzYXBpZW4uIFNlZCBqdXN0byBkb2xvciwgcmhvbmN1cyB2ZWwgcnV0cnVtIG1hdHRpcywgc29sbGljaXR1ZGluIHV0IHJpc3VzLiBOdWxsYW0gc2l0IGFtZXQgY29udmFsbGlzIGVzdC4gRXRpYW0gbm9uIHRpbmNpZHVudCBsaWd1bGEuIEZ1c2NlIHN1c2NpcGl0IHByZXRpdW0gZWxpdCBhdCB1bGxhbWNvcnBlci4gUXVpc3F1ZSBzb2xsaWNpdHVkaW4sIGRpYW0gaWQgaW50ZXJkdW0gcG9ydGEsIG1ldHVzIGlwc3VtIHZvbHV0cGF0IGxpYmVybywgaWQgdmVuZW5hdGlzIGZlbGlzIG9yY2kgbm9uIHZlbGl0LiBTdXNwZW5kaXNzZSBwb3RlbnRpLiBNYXVyaXMgcnV0cnVtLCB0b3J0b3Igc2l0IGFtZXQgcGVsbGVudGVzcXVlIHRpbmNpZHVudCwgZXJhdCBxdWFtIHVsdHJpY2llcyBvZGlvLCBpZCBhbGlxdWFtIGVsaXQgbGVvIG5lYyBsZW8uIFBlbGxlbnRlc3F1ZSBqdXN0byBlcm9zLCBydXRydW0gYXQgZmV1Z2lhdCBuZWMsIHBvcnRhIGV0IHRlbGx1cy4gQWVuZWFuIGVnZXQgbWV0dXMgbGVjdHVzLgoKUHJhZXNlbnQgZXVpc21vZCwgdHVycGlzIHF1aXMgbGFvcmVldCBjb25zZXF1YXQsIG5lcXVlIGFudGUgaW1wZXJkaWV0IHF1YW0sIGFjIHNlbXBlciB0b3J0b3IgbmliaCBpbiBudWxsYS4gSW50ZWdlciBzY2VsZXJpc3F1ZSBlcm9zIHZlaGljdWxhIHVybmEgbGFjaW5pYSBhYyBmYWNpbGlzaXMgbWF1cmlzIGFjY3Vtc2FuLiBQaGFzZWxsdXMgYXQgbWF1cmlzIG5pYmguIEN1cmFiaXR1ciBlbmltIGFudGUsIHJ1dHJ1bSBzZWQgYWRpcGlzY2luZyBoZW5kcmVyaXQsIHBlbGxlbnRlc3F1ZSBub24gYXVndWUuIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBOYW0gdGVtcHVzIGV1aXNtb2QgbWFzc2EgYSBkaWN0dW0uIERvbmVjIHNpdCBhbWV0IGp1c3RvIGFjIGRpYW0gdWx0cmljaWVzIHVsdHJpY2llcy4gU2VkIHRpbmNpZHVudCBlcmF0IHF1aXMgcXVhbSB0ZW1wdXMgdmVsIGludGVyZHVtIGVyYXQgcmhvbmN1cy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEg
ZGljdHVtc3QuIFZlc3RpYnVsdW0gdmVoaWN1bGEgdmFyaXVzIHNlbSBlZ2V0IGludGVyZHVtLiBDcmFzIGJpYmVuZHVtIGxlbyBuZWMgZmVsaXMgdmVuZW5hdGlzIHNlZCBwaGFyZXRyYSBzZW0gZmV1Z2lhdC4gQ3VtIHNvY2lpcyBuYXRvcXVlIHBlbmF0aWJ1cyBldCBtYWduaXMgZGlzIHBhcnR1cmllbnQgbW9udGVzLCBuYXNjZXR1ciByaWRpY3VsdXMgbXVzLiBTZWQgcXVhbSBvcmNpLCBtb2xsaXMgZWdldCBzYWdpdHRpcyBhY2N1bXNhbiwgdnVscHV0YXRlIHNpdCBhbWV0IGR1aS4gUHJhZXNlbnQgZXUgZWxlbWVudHVtIGFyY3UuCgpMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBWZXN0aWJ1bHVtIG5pc2wgbWV0dXMsIGhlbmRyZXJpdCB1dCBsYW9yZWV0IHNlZCwgY29uc2VjdGV0dXIgYXQgcHVydXMuIER1aXMgaW50ZXJkdW0gY29uZ3VlIGxvYm9ydGlzLiBOdWxsYW0gc2VkIG1hc3NhIHBvcnRhIGZlbGlzIGVsZWlmZW5kIGNvbnNlcXVhdCBzaXQgYW1ldCBuZWMgbWV0dXMuIEFsaXF1YW0gcGxhY2VyYXQgZGljdHVtIGVyYXQgYXQgZWxlaWZlbmQuIFZlc3RpYnVsdW0gbGliZXJvIGFudGUsIHVsbGFtY29ycGVyIGEgcG9ydHRpdG9yIHN1c2NpcGl0LCBhY2N1bXNhbiB2ZWwgbmlzaS4gRG9uZWMgZXQgbWFnbmEgbmVxdWUuIE5hbSBlbGVtZW50dW0gdWx0cmljZXMganVzdG8sIGVnZXQgc29sbGljaXR1ZGluIHNhcGllbiBpbXBlcmRpZXQgZWdldC4gTnVsbGFtIGF1Y3RvciBkaWN0dW0gbnVuYywgYXQgZmV1Z2lhdCBvZGlvIHZlc3RpYnVsdW0gYS4gU2VkIGVyYXQgbnVsbGEsIHZpdmVycmEgaGVuZHJlcml0IGNvbW1vZG8gaWQsIHVsbGFtY29ycGVyIGFjIG9yY2kuIFBoYXNlbGx1cyBwZWxsZW50ZXNxdWUgZmV1Z2lhdCBzdXNjaXBpdC4gRXRpYW0gZWdlc3RhcyBmZXJtZW50dW0gZW5pbS4gRXRpYW0gZ3JhdmlkYSBpbnRlcmR1bSB0ZWxsdXMgYWMgbGFvcmVldC4gTW9yYmkgbWF0dGlzIGFsaXF1ZXQgZXJvcywgbm9uIHRlbXBvciBlcmF0IHVsbGFtY29ycGVyIGluLiBFdGlhbSBwdWx2aW5hciBpbnRlcmR1bSB0dXJwaXMgYWMgdmVoaWN1bGEuIFNlZCBxdWFtIGp1c3RvLCBhY2N1bXNhbiBpZCBjb25zZWN0ZXR1ciBhLCBhbGlxdWV0IHNlZCBsZW8uIEFlbmVhbiB2aXRhZSBibGFuZGl0IG1hdXJpcy4KCkluIHNlZCBlcm9zIGF1Z3VlLCBub24gcnV0cnVtIG9kaW8uIEV0aWFtIHZpdGFlIGR1aSBuZXF1ZSwgaW4gdHJpc3RpcXVlIG1hc3NhLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgTWFlY2VuYXMgZGljdHVtIGVsaXQgYXQgbGVjdHVzIHRlbXBvciBub24gcGhhcmV0cmEgbmlzbCBoZW5kcmVyaXQuIFNlZCBzZWQgcXVhbSBldSBsZWN0dXMgdWx0cmljZXMgbWFsZXN1YWRhIHRpbmNpZHVudCBhIGVzdC4gTmFtIHZlbCBlcm9zIHJpc3VzLiBNYWVjZW5hcyBlcm9zIGVsaXQsIGJsYW5kaXQgZmVybWVudHVtIHRlbXBvciBlZ2V0LCBsb2JvcnRpcyBpZCBkaWFtLiBWZXN0aWJ1bHVtIGxhY2luaWEgbGFjdXMgdml0YWUgbWFnbmEgdm9sdXRwYXQgZXUgZGlnbmlzc2ltIGVyb3MgY29udmFsbGlzLiBWaXZhbXVzIGFjIHZlbGl0IHRlbGx1cywgYSBjb25ndWUgbmVxdWUuIEludGVnZXIgbWkgbnVsbGEsIHZhcml1cyBub24gbHVjdHVzIGluLCBkaWN0dW0gc2l0IGFtZXQgc2VtLiBVdCBsYW9yZWV0LCBzYXBpZW4gc2l0IGFtZXQgc2NlbGVyaXNxdWUgcG9ydGEsIHB1cnVzIHNhcGllbiB2ZXN0aWJ1bHVtIG5pYmgsIHNlZCBsdWN0dXMgbGliZXJvIG1hc3NhIGFjIGVsaXQuIERvbmVjIGlhY3VsaXMgb2RpbyBlZ2V0IG9kaW8gc2FnaXR0aXMgbmVjIHZlbmVuYXRpcyBsb3JlbSBibGFuZGl0LgoKQWxpcXVhbSBpbXBlcmRpZXQgdGVsbHVzIHBvc3VlcmUganVzdG8gdmVoaWN1bGEgc2VkIHZlc3RpYnVsdW0gYW50ZSB0cmlzdGlxdWUuIEZ1c2NlIGZldWdpYXQgZmF1Y2lidXMgcHVydXMgbmVjIG1vbGVzdGllLiBOdWxsYSB0ZW1wb3IgbmVxdWUgaWQgbWFnbmEgaWFjdWxpcyBxdWlzIHNvbGxpY2l0dWRpbiBlcm9zIHNlbXBlci4gUHJhZXNlbnQgdml2ZXJyYSBzYWdpdHRpcyBsdWN0dXMuIE1vcmJpIHNpdCBhbWV0IG1hZ25hIHNlZCBvZGlvIGdyYXZpZGEgdmFyaXVzLiBVdCBuaXNpIGxpYmVybywgdnVscHV0YXRlIGZldWdpYXQgcHJldGl1bSB0ZW1wdXMsIGVnZXN0YXMgc2l0IGFtZXQganVzdG8uIFBlbGxlbnRlc3F1ZSBjb25zZXF1YXQgdGVtcG9yIG5pc2kgaW4gbG9ib3J0aXMuIFNlZCBmZXJtZW50dW0gY29udmFsbGlzIGR1aSBhYyBzb2xsaWNpdHVkaW4uIEludGVnZXIgYXVjdG9yIGF1Z3VlIGVnZXQgdGVsbHVzIHRlbXB1cyBmcmluZ2lsbGEuIFByb2luIG5lYyBkb2xvciBzYXBpZW4sIG5lYyB0cmlzdGlxdWUgbmliaC4gQWxpcXVhbSBhIHZlbGl0IGF0IG1pIG1hdHRpcyBhbGlxdWV0LgoKUGVsbGVudGVzcXVlIGhhYml0YW50IG1vcmJpIHRyaXN0aXF1ZSBzZW5lY3R1cyBldCBuZXR1cyBldCBtYWxlc3VhZGEgZmFtZXMgYWMgdHVycGlzIGVnZXN0YXMuIEFsaXF1YW0gdWx0cmljZXMgZXJhdCBub24gdHVycGlzIGF1Y3RvciBpZCBvcm5hcmUgbWF1cmlzIHNhZ2l0dGlzLiBRdWlzcXVlIHBvcnR0aXRvciwgdGVsbHV
zIHV0IGNvbnZhbGxpcyBzYWdpdHRpcywgbWkgbGliZXJvIGZldWdpYXQgdGVsbHVzLCByaG9uY3VzIHBsYWNlcmF0IGlwc3VtIHRvcnRvciBpZCByaXN1cy4gRG9uZWMgdGluY2lkdW50IGZldWdpYXQgbGVvLiBDcmFzIGlkIG1pIG5lcXVlLCBldSBtYWxlc3VhZGEgZXJvcy4gVXQgbW9sZXN0aWUgbWFnbmEgcXVpcyBsaWJlcm8gcGxhY2VyYXQgbWFsZXN1YWRhLiBBbGlxdWFtIGVyYXQgdm9sdXRwYXQuIEFsaXF1YW0gbm9uIG1hdXJpcyBsb3JlbSwgaW4gYWRpcGlzY2luZyBtZXR1cy4gRG9uZWMgZWdldCBpcHN1bSBpbiBlbGl0IGNvbW1vZG8gb3JuYXJlIGJpYmVuZHVtIGEgbmliaC4gVml2YW11cyBvZGlvIGVyYXQsIHBsYWNlcmF0IGFjIHZlc3RpYnVsdW0gZWdldCwgbWFsZXN1YWRhIHV0IG5pc2kuIEV0aWFtIHN1c2NpcGl0IHNvbGxpY2l0dWRpbiBsZW8gc2VtcGVyIHNvbGxpY2l0dWRpbi4gU2VkIHJob25jdXMgcmlzdXMgc2l0IGFtZXQgc2VtIGVsZWlmZW5kIGRpY3R1bSBwcmV0aXVtIHNhcGllbiBlZ2VzdGFzLiBOdWxsYSBhdCB1cm5hIG51bmMsIHZlbCBhbGlxdWV0IGxlby4gUHJhZXNlbnQgdWx0cmljaWVzLCBtaSBldSBwcmV0aXVtIGxvYm9ydGlzLCBlcmF0IG5pYmggZXVpc21vZCBsZW8sIHNpdCBhbWV0IGdyYXZpZGEgc2FwaWVuIGVyb3MgZXQgdHVycGlzLiBEb25lYyBsYWNpbmlhIHZlbmVuYXRpcyBsZWN0dXMsIG5vbiBsYWNpbmlhIG1pIGhlbmRyZXJpdCBzaXQgYW1ldC4gSW50ZWdlciBzZWQgZmVsaXMgdmVsIG9yY2kgYWxpcXVhbSBwdWx2aW5hci4gUGhhc2VsbHVzIGV0IHJpc3VzIGlkIGVyYXQgZXVpc21vZCB0aW5jaWR1bnQuIFNlZCBsdWN0dXMgdGVtcG9yIG5pc2ksIG5lYyB0ZW1wb3IgaXBzdW0gZWxlbWVudHVtIGVnZXQuIEludGVnZXIgbmlzbCB0b3J0b3IsIHZpdmVycmEgaW4gZGFwaWJ1cyBhdCwgbWF0dGlzIGFjIGVyYXQuIEN1cmFiaXR1ciBuZWMgZHVpIGxlY3R1cy4KClBoYXNlbGx1cyBzdXNjaXBpdCwgdG9ydG9yIGV1IHZhcml1cyBmcmluZ2lsbGEsIHNhcGllbiBtYWduYSBlZ2VzdGFzIHJpc3VzLCB1dCBzdXNjaXBpdCBkdWkgbWF1cmlzIHF1aXMgdmVsaXQuIENyYXMgYSBzYXBpZW4gcXVpcyBzYXBpZW4gaGVuZHJlcml0IHRyaXN0aXF1ZSBhIHNpdCBhbWV0IGVsaXQuIFBlbGxlbnRlc3F1ZSBkdWkgYXJjdSwgbWFsZXN1YWRhIGV0IHNvZGFsZXMgc2l0IGFtZXQsIGRhcGlidXMgdmVsIHF1YW0uIFNlZCBub24gYWRpcGlzY2luZyBsaWd1bGEuIFV0IHZ1bHB1dGF0ZSBwdXJ1cyBhdCBuaXNsIHBvc3VlcmUgc29kYWxlcy4gTWFlY2VuYXMgZGlhbSB2ZWxpdCwgdGluY2lkdW50IGlkIG1hdHRpcyBldSwgYWxpcXVhbSBhYyBuaXNpLiBNYWVjZW5hcyBwcmV0aXVtLCBhdWd1ZSBhIHNhZ2l0dGlzIHN1c2NpcGl0LCBsZW8gbGlndWxhIGVsZWlmZW5kIGRvbG9yLCBtb2xsaXMgZmV1Z2lhdCBvZGlvIGF1Z3VlIG5vbiBlcm9zLiBQZWxsZW50ZXNxdWUgc2NlbGVyaXNxdWUgb3JjaSBwcmV0aXVtIHF1YW0gbW9sbGlzIGF0IGxvYm9ydGlzIGR1aSBmYWNpbGlzaXMuIE1vcmJpIGNvbmd1ZSBtZXR1cyBpZCB0b3J0b3IgcG9ydGEgZnJpbmdpbGxhLiBTZWQgbG9yZW0gbWksIG1vbGVzdGllIGZlcm1lbnR1bSBzYWdpdHRpcyBhdCwgZ3JhdmlkYSBhIG5pc2kuIERvbmVjIGV1IHZlc3RpYnVsdW0gdmVsaXQuIEluIHZpdmVycmEsIGVuaW0gZXUgZWxlbWVudHVtIHNvZGFsZXMsIGVuaW0gb2RpbyBkYXBpYnVzIHVybmEsIGVnZXQgY29tbW9kbyBuaXNsIG1hdXJpcyB1dCBvZGlvLiBDdXJhYml0dXIgbmVjIGVuaW0gbnVsbGEuIEluIG5lYyBlbGl0IGlwc3VtLiBOdW5jIGluIG1hc3NhIHN1c2NpcGl0IG1hZ25hIGVsZW1lbnR1bSBmYXVjaWJ1cyBpbiBuZWMgaXBzdW0uIE51bGxhbSBzdXNjaXBpdCBtYWxlc3VhZGEgZWxlbWVudHVtLiBFdGlhbSBzZWQgbWkgaW4gbmliaCB1bHRyaWNpZXMgdmVuZW5hdGlzIG5lYyBwaGFyZXRyYSBtYWduYS4gSW4gcHVydXMgYW50ZSwgcmhvbmN1cyB2ZWwgcGxhY2VyYXQgc2VkLCBmZXJtZW50dW0gc2l0IGFtZXQgZHVpLiBTZWQgYXQgc29kYWxlcyB2ZWxpdC4KCkR1aXMgc3VzY2lwaXQgcGVsbGVudGVzcXVlIHBlbGxlbnRlc3F1ZS4gUHJhZXNlbnQgcG9ydGEgbG9ib3J0aXMgY3Vyc3VzLiBRdWlzcXVlIHNhZ2l0dGlzIHZlbGl0IG5vbiB0ZWxsdXMgYmliZW5kdW0gYXQgc29sbGljaXR1ZGluIGxhY3VzIGFsaXF1ZXQuIFNlZCBuaWJoIHJpc3VzLCBibGFuZGl0IGEgYWxpcXVldCBlZ2V0LCB2ZWhpY3VsYSBldCBlc3QuIFN1c3BlbmRpc3NlIGZhY2lsaXNpcyBiaWJlbmR1bSBhbGlxdWFtLiBGdXNjZSBjb25zZWN0ZXR1ciBjb252YWxsaXMgZXJhdCwgZWdldCBtb2xsaXMgZGlhbSBmZXJtZW50dW0gc29sbGljaXR1ZGluLiBRdWlzcXVlIHRpbmNpZHVudCBwb3J0dGl0b3IgcHJldGl1bS4gTnVsbGFtIGlkIG5pc2wgZXQgdXJuYSB2dWxwdXRhdGUgZGFwaWJ1cy4gRG9uZWMgcXVpcyBsb3JlbSB1cm5hLiBRdWlzcXVlIGlkIGp1c3RvIG5lYyBudW5jIGJsYW5kaXQgY29udmFsbGlzLiBOdW5jIHZvbHV0cGF0LCBtYXNzYSBzb2xsaWNpdHVkaW4gYWRpcGlzY2luZyB2ZXN0aWJ1bHVtLCBtYXNzYSB1cm5hIGNvbmd1ZSBsZWN0dXMsIHNpdCBhbWV0IHVsdHJpY2llcyBhdWd1ZSBvcmNpIGNvbnZhbGxpcyB0dXJwaXMuIE51bGxhIGF0IG
xvcmVtIGVsaXQuIE51bmMgdHJpc3RpcXVlLCBxdWFtIGZhY2lsaXNpcyBjb21tb2RvIHBvcnR0aXRvciwgbGFjdXMgbGlndWxhIGFjY3Vtc2FuIG5pc2ksIGV0IGxhb3JlZXQganVzdG8gYW50ZSB2aXRhZSBlcm9zLiBDdXJhYml0dXIgc2VkIGF1Z3VlIGFyY3UuIFBoYXNlbGx1cyBwb3J0dGl0b3IgdmVzdGlidWx1bSBmZWxpcywgdXQgY29uc2VjdGV0dXIgYXJjdSB0ZW1wb3Igbm9uLiBJbiBqdXN0byByaXN1cywgc2VtcGVyIGV0IHN1c2NpcGl0IGlkLCB1bGxhbWNvcnBlciBhdCB1cm5hLiBRdWlzcXVlIHRpbmNpZHVudCwgdXJuYSBuZWMgYWxpcXVhbSB0cmlzdGlxdWUsIG5pYmggb2RpbyBmYXVjaWJ1cyBhdWd1ZSwgaW4gb3JuYXJlIGVuaW0gdHVycGlzIGFjY3Vtc2FuIGRvbG9yLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gU3VzcGVuZGlzc2Ugc29kYWxlcyB2YXJpdXMgdHVycGlzIGV1IGZlcm1lbnR1bS4KCk1vcmJpIHVsdHJpY2llcyBkaWFtIGVnZXQgbWFzc2EgcG9zdWVyZSBsb2JvcnRpcy4gQWxpcXVhbSB2b2x1dHBhdCBwZWxsZW50ZXNxdWUgZW5pbSBldSBwb3J0dGl0b3IuIERvbmVjIGxhY3VzIGZlbGlzLCBjb25zZWN0ZXR1ciBhIHByZXRpdW0gdml0YWUsIGJpYmVuZHVtIG5vbiBlbmltLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gRXRpYW0gdXQgbmliaCBhIHF1YW0gcGVsbGVudGVzcXVlIGF1Y3RvciB1dCBpZCB2ZWxpdC4gRHVpcyBsYWNpbmlhIGp1c3RvIGVnZXQgbWkgcGxhY2VyYXQgYmliZW5kdW0uIEN1bSBzb2NpaXMgbmF0b3F1ZSBwZW5hdGlidXMgZXQgbWFnbmlzIGRpcyBwYXJ0dXJpZW50IG1vbnRlcywgbmFzY2V0dXIgcmlkaWN1bHVzIG11cy4gRG9uZWMgdmVsaXQgdG9ydG9yLCB0ZW1wdXMgbmVjIHRyaXN0aXF1ZSBpZCwgYWxpcXVldCBzaXQgYW1ldCB0dXJwaXMuIFByYWVzZW50IGV0IG5lcXVlIG5lYyBtYWduYSBwb3J0YSBmcmluZ2lsbGEuIE1vcmJpIGlkIGVnZXN0YXMgZXJvcy4gRG9uZWMgc2VtcGVyIHRpbmNpZHVudCB1bGxhbWNvcnBlci4gUGhhc2VsbHVzIHRlbXB1cyBsYWNpbmlhIGhlbmRyZXJpdC4gUXVpc3F1ZSBmYXVjaWJ1cyBwcmV0aXVtIG5lcXVlIG5vbiBjb252YWxsaXMuIE51bmMgbWFsZXN1YWRhIGFjY3Vtc2FuIHJob25jdXMuIENyYXMgbG9ib3J0aXMsIHNlbSBzZWQgZnJpbmdpbGxhIGNvbnZhbGxpcywgYXVndWUgdmVsaXQgc2VtcGVyIG5pc2wsIGNvbW1vZG8gdmFyaXVzIG5pc2kgZGlhbSBhYyBsZW8uCgpRdWlzcXVlIGludGVyZHVtIHRlbGx1cyBhYyBhbnRlIHBvc3VlcmUgdXQgY3Vyc3VzIGxvcmVtIGVnZXN0YXMuIE51bGxhIGZhY2lsaXNpLiBBZW5lYW4gc2VkIG1hc3NhIG5lYyBuaXNpIHNjZWxlcmlzcXVlIHZ1bHB1dGF0ZS4gRXRpYW0gY29udmFsbGlzIGNvbnNlY3RldHVyIGlhY3VsaXMuIE1hZWNlbmFzIGFjIHB1cnVzIHV0IGFudGUgZGlnbmlzc2ltIGF1Y3RvciBhYyBxdWlzIGxvcmVtLiBQZWxsZW50ZXNxdWUgc3VzY2lwaXQgdGluY2lkdW50IG9yY2kuIEZ1c2NlIGFsaXF1YW0gZGFwaWJ1cyBvcmNpLCBhdCBiaWJlbmR1bSBpcHN1bSBhZGlwaXNjaW5nIGVnZXQuIE1vcmJpIHBlbGxlbnRlc3F1ZSBoZW5kcmVyaXQgcXVhbSwgbmVjIHBsYWNlcmF0IHVybmEgdnVscHV0YXRlIHNlZC4gUXVpc3F1ZSB2ZWwgZGlhbSBsb3JlbS4gUHJhZXNlbnQgaWQgZGlhbSBxdWlzIGVuaW0gZWxlbWVudHVtIHJob25jdXMgc2FnaXR0aXMgZWdldCBwdXJ1cy4gUXVpc3F1ZSBmcmluZ2lsbGEgYmliZW5kdW0gbGVvIGluIGxhb3JlZXQuIFZlc3RpYnVsdW0gaWQgbmliaCByaXN1cywgbm9uIGVsZW1lbnR1bSBtZXR1cy4gVXQgYSBmZWxpcyBkaWFtLCBub24gbW9sbGlzIG5pc2wuIENyYXMgZWxpdCBhbnRlLCB1bGxhbWNvcnBlciBxdWlzIGlhY3VsaXMgZXUsIHNvZGFsZXMgdmVsIGVzdC4gQ3VyYWJpdHVyIHF1aXMgbG9ib3J0aXMgZG9sb3IuIEFsaXF1YW0gbWF0dGlzIGdyYXZpZGEgbWV0dXMgcGVsbGVudGVzcXVlIHZ1bHB1dGF0ZS4KClV0IGlkIGF1Z3VlIGlkIGRvbG9yIGx1Y3R1cyBldWlzbW9kIGV0IHF1aXMgdmVsaXQuIE1hZWNlbmFzIGVuaW0gZG9sb3IsIHRlbXB1cyBzaXQgYW1ldCBoZW5kcmVyaXQgZXUsIGZhdWNpYnVzIHZpdGFlIG5lcXVlLiBQcm9pbiBzaXQgYW1ldCB2YXJpdXMgZWxpdC4gUHJvaW4gdmFyaXVzIGZlbGlzIHVsbGFtY29ycGVyIHB1cnVzIGRpZ25pc3NpbSBjb25zZXF1YXQuIENyYXMgY3Vyc3VzIHRlbXB1cyBlcm9zLiBOdW5jIHVsdHJpY2VzIHZlbmVuYXRpcyB1bGxhbWNvcnBlci4gQWxpcXVhbSBldCBmZXVnaWF0IHRlbGx1cy4gUGhhc2VsbHVzIHNpdCBhbWV0IHZlc3RpYnVsdW0gZWxpdC4gUGhhc2VsbHVzIGFjIHB1cnVzIGxhY3VzLCBldCBhY2N1bXNhbiBlcm9zLiBNb3JiaSB1bHRyaWNlcywgcHVydXMgYSBwb3J0YSBzb2RhbGVzLCBvZGlvIG1ldHVzIHBvc3VlcmUgbmVxdWUsIG5lYyBlbGVtZW50dW0gcmlzdXMgdHVycGlzIHNpdCBhbWV0IG1hZ25hLiBTZWQgZXN0IHF1YW0sIHVsdHJpY2llcyBhdCBjb25ndWUgYWRpcGlzY2luZywgbG9ib3J0a
XMgaW4ganVzdG8uIFByb2luIGlhY3VsaXMgZGljdHVtIG51bmMsIGV1IGxhb3JlZXQgcXVhbSB2YXJpdXMgdml0YWUuIERvbmVjIHNpdCBhbWV0IGZldWdpYXQgdHVycGlzLiBNYXVyaXMgc2l0IGFtZXQgbWFnbmEgcXVhbSwgYWMgY29uc2VjdGV0dXIgZHVpLiBDdXJhYml0dXIgZWdldCBtYWduYSB0ZWxsdXMsIGV1IHBoYXJldHJhIGZlbGlzLiBEb25lYyBzaXQgYW1ldCB0b3J0b3IgbmlzbC4gQWxpcXVhbSBldCB0b3J0b3IgZmFjaWxpc2lzIGxhY3VzIHRpbmNpZHVudCBjb21tb2RvLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gQ3VyYWJpdHVyIG51bmMgbWFnbmEsIHVsdHJpY2llcyBpZCBjb252YWxsaXMgYXQsIHVsbGFtY29ycGVyIHZpdGFlIG1hc3NhLgoKUGhhc2VsbHVzIHZpdmVycmEgaWFjdWxpcyBwbGFjZXJhdC4gTnVsbGEgY29uc2VxdWF0IGRvbG9yIHNpdCBhbWV0IGVyYXQgZGlnbmlzc2ltIHBvc3VlcmUuIE51bGxhIGxhY2luaWEgYXVndWUgdml0YWUgbWkgdGVtcG9yIGdyYXZpZGEuIFBoYXNlbGx1cyBub24gdGVtcG9yIHRlbGx1cy4gUXVpc3F1ZSBub24gZW5pbSBzZW1wZXIgdG9ydG9yIHNhZ2l0dGlzIGZhY2lsaXNpcy4gQWxpcXVhbSB1cm5hIGZlbGlzLCBlZ2VzdGFzIGF0IHBvc3VlcmUgbmVjLCBhbGlxdWV0IGV1IG5pYmguIFByYWVzZW50IHNlZCB2ZXN0aWJ1bHVtIGVuaW0uIE1hdXJpcyBpYWN1bGlzIHZlbGl0IGR1aSwgZXQgZnJpbmdpbGxhIGVuaW0uIE51bGxhIG5lYyBuaXNpIG9yY2kuIFNlZCB2b2x1dHBhdCwganVzdG8gZWdldCBmcmluZ2lsbGEgYWRpcGlzY2luZywgbmlzbCBudWxsYSBjb25kaW1lbnR1bSBsaWJlcm8sIHNlZCBzb2RhbGVzIGVzdCBlc3QgZXQgb2Rpby4gQ3JhcyBpcHN1bSBkdWksIHZhcml1cyBldSBlbGVtZW50dW0gY29uc2VxdWF0LCBmYXVjaWJ1cyBpbiBsZW8uIFBlbGxlbnRlc3F1ZSBoYWJpdGFudCBtb3JiaSB0cmlzdGlxdWUgc2VuZWN0dXMgZXQgbmV0dXMgZXQgbWFsZXN1YWRhIGZhbWVzIGFjIHR1cnBpcyBlZ2VzdGFzLgoKVXQgbWFsZXN1YWRhIG1vbGVzdGllIGVsZWlmZW5kLiBDdXJhYml0dXIgaWQgZW5pbSBkdWksIGV1IHRpbmNpZHVudCBuaWJoLiBNYXVyaXMgc2l0IGFtZXQgYW50ZSBsZW8uIER1aXMgdHVycGlzIGlwc3VtLCBiaWJlbmR1bSBzZWQgbWF0dGlzIHNpdCBhbWV0LCBhY2N1bXNhbiBxdWlzIGRvbG9yLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgQWVuZWFuIGEgaW1wZXJkaWV0IG1ldHVzLiBRdWlzcXVlIHNvbGxpY2l0dWRpbiBmZWxpcyBpZCBuZXF1ZSB0ZW1wb3Igc2NlbGVyaXNxdWUuIERvbmVjIGF0IG9yY2kgZmVsaXMuIFZpdmFtdXMgdGVtcHVzIGNvbnZhbGxpcyBhdWN0b3IuIERvbmVjIGludGVyZHVtIGV1aXNtb2QgbG9ib3J0aXMuIFNlZCBhdCBsYWN1cyBuZWMgb2RpbyBkaWduaXNzaW0gbW9sbGlzLiBTZWQgc2FwaWVuIG9yY2ksIHBvcnR0aXRvciB0ZW1wdXMgYWNjdW1zYW4gdmVsLCB0aW5jaWR1bnQgbmVjIGFudGUuIE51bmMgcmhvbmN1cyBlZ2VzdGFzIGRhcGlidXMuIFN1c3BlbmRpc3NlIGZlcm1lbnR1bSBkaWN0dW0gZnJpbmdpbGxhLiBOdWxsYW0gbmlzaSBqdXN0bywgZWxlaWZlbmQgYSBjb25zZWN0ZXR1ciBjb252YWxsaXMsIHBvcnR0aXRvciBldCB0b3J0b3IuIFByb2luIHZpdGFlIGxvcmVtIG5vbiBkb2xvciBzdXNjaXBpdCBsYWNpbmlhIGV1IGVnZXQgbnVsbGEuCgpTdXNwZW5kaXNzZSBlZ2VzdGFzLCBzYXBpZW4gc2l0IGFtZXQgYmxhbmRpdCBzY2VsZXJpc3F1ZSwgbnVsbGEgYXJjdSB0cmlzdGlxdWUgZHVpLCBhIHBvcnRhIGp1c3RvIHF1YW0gdml0YWUgYXJjdS4gSW4gbWV0dXMgbGliZXJvLCBiaWJlbmR1bSBub24gdm9sdXRwYXQgdXQsIGxhb3JlZXQgdmVsIHR1cnBpcy4gTnVuYyBmYXVjaWJ1cyB2ZWxpdCBldSBpcHN1bSBjb21tb2RvIG5lYyBpYWN1bGlzIGVyb3Mgdm9sdXRwYXQuIFZpdmFtdXMgY29uZ3VlIGF1Y3RvciBlbGl0IHNlZCBzdXNjaXBpdC4gRHVpcyBjb21tb2RvLCBsaWJlcm8gZXUgdmVzdGlidWx1bSBmZXVnaWF0LCBsZW8gbWkgZGFwaWJ1cyB0ZWxsdXMsIGluIHBsYWNlcmF0IG5pc2wgZHVpIGF0IGVzdC4gVmVzdGlidWx1bSB2aXZlcnJhIHRyaXN0aXF1ZSBsb3JlbSwgb3JuYXJlIGVnZXN0YXMgZXJhdCBydXRydW0gYS4gTnVsbGFtIGF0IGF1Z3VlIG1hc3NhLCB1dCBjb25zZWN0ZXR1ciBpcHN1bS4gUGVsbGVudGVzcXVlIG1hbGVzdWFkYSwgdmVsaXQgdXQgbG9ib3J0aXMgc2FnaXR0aXMsIG5pc2kgbWFzc2Egc2VtcGVyIG9kaW8sIG1hbGVzdWFkYSBzZW1wZXIgcHVydXMgbmlzbCB2ZWwgbGVjdHVzLiBOdW5jIGR1aSBzZW0sIG1hdHRpcyB2aXRhZSBsYW9yZWV0IHZpdGFlLCBzb2xsaWNpdHVkaW4gYWMgbGVvLiBOdWxsYSB2ZWwgZmVybWVudHVtIGVzdC4KClZpdmFtdXMgaW4gb2RpbyBhIG5pc2kgZGlnbmlzc2ltIHJob25jdXMgaW4gaW4gbGFjdXMuIERvbmVjIGV0IG5pc2wgdG9ydG9yLiBEb25lYyBzYWdpdHRpcyBjb25zZXF1YXQgbWksIHZlbCBwbGFjZXJhdCB0ZWxsdXMgY29udmFsbGlzIGlkLiBBbGlxdWFt
IGZhY2lsaXNpcyBydXRydW0gbmlzbCBzZWQgcHJldGl1bS4gRG9uZWMgZXQgbGFjaW5pYSBuaXNsLiBBbGlxdWFtIGVyYXQgdm9sdXRwYXQuIEN1cmFiaXR1ciBhYyBwdWx2aW5hciB0ZWxsdXMuIE51bGxhbSB2YXJpdXMgbG9ib3J0aXMgcG9ydGEuIENyYXMgZGFwaWJ1cywgbGlndWxhIHV0IHBvcnRhIHVsdHJpY2llcywgbGVvIGxhY3VzIHZpdmVycmEgcHVydXMsIHF1aXMgbW9sbGlzIHVybmEgcmlzdXMgZXUgbGVvLiBOdW5jIG1hbGVzdWFkYSBjb25zZWN0ZXR1ciBwdXJ1cywgdmVsIGF1Y3RvciBsZWN0dXMgc2NlbGVyaXNxdWUgcG9zdWVyZS4gTWFlY2VuYXMgZHVpIG1hc3NhLCB2ZXN0aWJ1bHVtIGJpYmVuZHVtIGJsYW5kaXQgbm9uLCBpbnRlcmR1bSBlZ2V0IG1hdXJpcy4gUGhhc2VsbHVzIGVzdCBhbnRlLCBwdWx2aW5hciBhdCBpbXBlcmRpZXQgcXVpcywgaW1wZXJkaWV0IHZlbCB1cm5hLiBRdWlzcXVlIGVnZXQgdm9sdXRwYXQgb3JjaS4gUXVpc3F1ZSBldCBhcmN1IHB1cnVzLCB1dCBmYXVjaWJ1cyB2ZWxpdC4KClByYWVzZW50IHNlZCBpcHN1bSB1cm5hLiBQcmFlc2VudCBzYWdpdHRpcyB2YXJpdXMgbWFnbmEsIGlkIGNvbW1vZG8gZG9sb3IgbWFsZXN1YWRhIGFjLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gUXVpc3F1ZSBzaXQgYW1ldCBudW5jIGV1IHNlbSBvcm5hcmUgdGVtcG9yLiBNYXVyaXMgaWQgZG9sb3IgbmVjIGVyYXQgY29udmFsbGlzIHBvcnRhIGluIGxvYm9ydGlzIG5pc2kuIEN1cmFiaXR1ciBoZW5kcmVyaXQgcmhvbmN1cyB0b3J0b3IgZXUgaGVuZHJlcml0LiBQZWxsZW50ZXNxdWUgZXUgYW50ZSB2ZWwgZWxpdCBsdWN0dXMgZWxlaWZlbmQgcXVpcyB2aXZlcnJhIG51bGxhLiBTdXNwZW5kaXNzZSBvZGlvIGRpYW0sIGV1aXNtb2QgZXUgcG9ydHRpdG9yIG1vbGVzdGllLCBzb2xsaWNpdHVkaW4gc2l0IGFtZXQgbnVsbGEuIFNlZCBhbnRlIHVybmEsIGRpY3R1bSBiaWJlbmR1bSByaG9uY3VzIGV0LCBibGFuZGl0IG5lYyBhbnRlLiBTdXNwZW5kaXNzZSB0b3J0b3IgYXVndWUsIGFjY3Vtc2FuIHF1aXMgc3VzY2lwaXQgaWQsIGFjY3Vtc2FuIHNpdCBhbWV0IGVyYXQuIERvbmVjIHBoYXJldHJhIHZhcml1cyBsb2JvcnRpcy4gTWFlY2VuYXMgaXBzdW0gZGlhbSwgZmF1Y2lidXMgZXUgdGVtcHVzIGlkLCBjb252YWxsaXMgbmVjIGVuaW0uIER1aXMgYXJjdSB0dXJwaXMsIGZyaW5naWxsYSBuZWMgZWdlc3RhcyB1dCwgZGlnbmlzc2ltIHRyaXN0aXF1ZSBudWxsYS4gQ3VyYWJpdHVyIHN1c2NpcGl0IGR1aSBub24ganVzdG8gdWx0cmljZXMgcGhhcmV0cmEuIEFsaXF1YW0gZXJhdCB2b2x1dHBhdC4gTnVsbGEgZmFjaWxpc2kuIFF1aXNxdWUgaWQgZmVsaXMgZXUgc2VtIGFsaXF1YW0gZnJpbmdpbGxhLgoKRXRpYW0gcXVpcyBhdWd1ZSBpbiB0ZWxsdXMgY29uc2VxdWF0IGVsZWlmZW5kLiBBZW5lYW4gZGlnbmlzc2ltIGNvbmd1ZSBmZWxpcyBpZCBlbGVtZW50dW0uIER1aXMgZnJpbmdpbGxhIHZhcml1cyBpcHN1bSwgbmVjIHN1c2NpcGl0IGxlbyBzZW1wZXIgdmVsLiBVdCBzb2xsaWNpdHVkaW4sIG9yY2kgYSB0aW5jaWR1bnQgYWNjdW1zYW4sIGRpYW0gbGVjdHVzIGxhb3JlZXQgbGFjdXMsIHZlbCBmZXJtZW50dW0gcXVhbSBlc3QgdmVsIGVyb3MuIEFsaXF1YW0gZnJpbmdpbGxhIHNhcGllbiBhYyBzYXBpZW4gZmF1Y2lidXMgY29udmFsbGlzLiBBbGlxdWFtIGlkIG51bmMgZXUganVzdG8gY29uc2VxdWF0IHRpbmNpZHVudC4gUXVpc3F1ZSBuZWMgbmlzbCBkdWkuIFBoYXNlbGx1cyBhdWd1ZSBsZWN0dXMsIHZhcml1cyB2aXRhZSBhdWN0b3IgdmVsLCBydXRydW0gYXQgcmlzdXMuIFZpdmFtdXMgbGFjaW5pYSBsZW8gcXVpcyBuZXF1ZSB1bHRyaWNlcyBuZWMgZWxlbWVudHVtIGZlbGlzIGZyaW5naWxsYS4gUHJvaW4gdmVsIHBvcnR0aXRvciBsZWN0dXMuCgpDdXJhYml0dXIgc2FwaWVuIGxvcmVtLCBtb2xsaXMgdXQgYWNjdW1zYW4gbm9uLCB1bHRyaWNpZXMgZXQgbWV0dXMuIEN1cmFiaXR1ciB2ZWwgbG9yZW0gcXVpcyBzYXBpZW4gZnJpbmdpbGxhIGxhb3JlZXQuIE1vcmJpIGlkIHVybmEgYWMgb3JjaSBlbGVtZW50dW0gYmxhbmRpdCBlZ2V0IHZvbHV0cGF0IG5lcXVlLiBQZWxsZW50ZXNxdWUgc2VtIG9kaW8sIGlhY3VsaXMgZXUgcGhhcmV0cmEgdml0YWUsIGN1cnN1cyBpbiBxdWFtLiBOdWxsYSBtb2xlc3RpZSBsaWd1bGEgaWQgbWFzc2EgbHVjdHVzIGV0IHB1bHZpbmFyIG5pc2kgcHVsdmluYXIuIE51bmMgZmVybWVudHVtIGF1Z3VlIGEgbGFjdXMgZnJpbmdpbGxhIHJob25jdXMgcG9ydHRpdG9yIGVyYXQgZGljdHVtLiBOdW5jIHNpdCBhbWV0IHRlbGx1cyBldCBkdWkgdml2ZXJyYSBhdWN0b3IgZXVpc21vZCBhdCBuaXNsLiBJbiBzZWQgY29uZ3VlIG1hZ25hLiBQcm9pbiBldCB0b3J0b3IgdXQgYXVndWUgcGxhY2VyYXQgZGlnbmlzc2ltIGEgZXUganVzdG8uIE1vcmJpIHBvcnR0aXRvciBwb3J0YSBsb2JvcnRpcy4gUGVsbGVudGVzcXVlIG5pYmggbGFjdXMsIGFkaXBpc2NpbmcgdXQgdHJpc3RpcXVlIHF1aXMsIGNvbnNlcXVhdCB2aXRhZSB2ZWxpdC4gTWFlY2VuYXMgdXQgbHVjdHVzIGxpYmVyby4gVml2YW11cyBhdWN0b3Igb2RpbyB
ldCBlcmF0IHNlbXBlciBzYWdpdHRpcy4gVml2YW11cyBpbnRlcmR1bSB2ZWxpdCBpbiByaXN1cyBtYXR0aXMgcXVpcyBkaWN0dW0gYW50ZSByaG9uY3VzLiBJbiBzYWdpdHRpcyBwb3J0dGl0b3IgZXJvcywgYXQgbG9ib3J0aXMgbWV0dXMgdWx0cmljZXMgdmVsLiBDdXJhYml0dXIgbm9uIGFsaXF1YW0gbmlzbC4gVmVzdGlidWx1bSBsdWN0dXMgZmV1Z2lhdCBzdXNjaXBpdC4gRXRpYW0gbm9uIGxhY3VzIHZlbCBudWxsYSBlZ2VzdGFzIGlhY3VsaXMgaWQgcXVpcyByaXN1cy4KCkV0aWFtIGluIGF1Y3RvciB1cm5hLiBGdXNjZSB1bHRyaWNpZXMgbW9sZXN0aWUgY29udmFsbGlzLiBJbiBoYWMgaGFiaXRhc3NlIHBsYXRlYSBkaWN0dW1zdC4gVmVzdGlidWx1bSBhbnRlIGlwc3VtIHByaW1pcyBpbiBmYXVjaWJ1cyBvcmNpIGx1Y3R1cyBldCB1bHRyaWNlcyBwb3N1ZXJlIGN1YmlsaWEgQ3VyYWU7IE1hdXJpcyBpYWN1bGlzIGxvcmVtIGZhdWNpYnVzIHB1cnVzIGdyYXZpZGEgYXQgY29udmFsbGlzIHR1cnBpcyBzb2xsaWNpdHVkaW4uIFN1c3BlbmRpc3NlIGF0IHZlbGl0IGxvcmVtLCBhIGZlcm1lbnR1bSBpcHN1bS4gRXRpYW0gY29uZGltZW50dW0sIGR1aSB2ZWwgY29uZGltZW50dW0gZWxlbWVudHVtLCBzYXBpZW4gc2VtIGJsYW5kaXQgc2FwaWVuLCBldCBwaGFyZXRyYSBsZW8gbmVxdWUgZXQgbGVjdHVzLiBOdW5jIHZpdmVycmEgdXJuYSBpYWN1bGlzIGF1Z3VlIHVsdHJpY2VzIGFjIHBvcnR0aXRvciBsYWN1cyBkaWduaXNzaW0uIEFsaXF1YW0gdXQgdHVycGlzIGR1aS4gU2VkIGVnZXQgYWxpcXVldCBmZWxpcy4gSW4gYmliZW5kdW0gbmliaCBzaXQgYW1ldCBzYXBpZW4gYWNjdW1zYW4gYWNjdW1zYW4gcGhhcmV0cmEgbWFnbmEgbW9sZXN0aWUuCgpNYXVyaXMgYWxpcXVldCB1cm5hIGVnZXQgbGVjdHVzIGFkaXBpc2NpbmcgYXQgY29uZ3VlIHR1cnBpcyBjb25zZXF1YXQuIFZpdmFtdXMgdGluY2lkdW50IGZlcm1lbnR1bSByaXN1cyBldCBmZXVnaWF0LiBOdWxsYSBtb2xlc3RpZSB1bGxhbWNvcnBlciBuaWJoIHNlZCBmYWNpbGlzaXMuIFBoYXNlbGx1cyBldCBjdXJzdXMgcHVydXMuIE5hbSBjdXJzdXMsIGR1aSBkaWN0dW0gdWx0cmljZXMgdml2ZXJyYSwgZXJhdCByaXN1cyB2YXJpdXMgZWxpdCwgZXUgbW9sZXN0aWUgZHVpIGVyb3MgcXVpcyBxdWFtLiBBbGlxdWFtIGV0IGFudGUgbmVxdWUsIGFjIGNvbnNlY3RldHVyIGR1aS4gRG9uZWMgY29uZGltZW50dW0gZXJhdCBpZCBlbGl0IGRpY3R1bSBzZWQgYWNjdW1zYW4gbGVvIHNhZ2l0dGlzLiBQcm9pbiBjb25zZXF1YXQgY29uZ3VlIHJpc3VzLCB2ZWwgdGluY2lkdW50IGxlbyBpbXBlcmRpZXQgZXUuIFZlc3RpYnVsdW0gbWFsZXN1YWRhIHR1cnBpcyBldSBtZXR1cyBpbXBlcmRpZXQgcHJldGl1bS4gQWxpcXVhbSBjb25kaW1lbnR1bSB1bHRyaWNlcyBuaWJoLCBldSBzZW1wZXIgZW5pbSBlbGVpZmVuZCBhLiBFdGlhbSBjb25kaW1lbnR1bSBuaXNsIHF1YW0uCgpQZWxsZW50ZXNxdWUgaWQgbW9sZXN0aWUgbmlzbC4gTWFlY2VuYXMgZXQgbGVjdHVzIGF0IGp1c3RvIG1vbGVzdGllIHZpdmVycmEgc2l0IGFtZXQgc2l0IGFtZXQgbGlndWxhLiBOdWxsYW0gbm9uIHBvcnR0aXRvciBtYWduYS4gUXVpc3F1ZSBlbGVtZW50dW0gYXJjdSBjdXJzdXMgdG9ydG9yIHJ1dHJ1bSBsb2JvcnRpcy4gTW9yYmkgc2l0IGFtZXQgbGVjdHVzIHZpdGFlIGVuaW0gZXVpc21vZCBkaWduaXNzaW0gZWdldCBhdCBuZXF1ZS4gVml2YW11cyBjb25zZXF1YXQgdmVoaWN1bGEgZHVpLCB2aXRhZSBhdWN0b3IgYXVndWUgZGlnbmlzc2ltIGluLiBJbiB0ZW1wdXMgc2VtIHF1aXMganVzdG8gdGluY2lkdW50IHNpdCBhbWV0IGF1Y3RvciB0dXJwaXMgbG9ib3J0aXMuIFBlbGxlbnRlc3F1ZSBub24gZXN0IG51bmMuIFZlc3RpYnVsdW0gbW9sbGlzIGZyaW5naWxsYSBpbnRlcmR1bS4gTWFlY2VuYXMgaXBzdW0gZG9sb3IsIHBoYXJldHJhIGlkIHRyaXN0aXF1ZSBtYXR0aXMsIGx1Y3R1cyB2aXRhZSB1cm5hLiBVdCB1bGxhbWNvcnBlciBhcmN1IGVnZXQgZWxpdCBjb252YWxsaXMgbW9sbGlzLiBQZWxsZW50ZXNxdWUgY29uZGltZW50dW0sIG1hc3NhIGFjIGhlbmRyZXJpdCB0ZW1wb3IsIG1hdXJpcyBwdXJ1cyBibGFuZGl0IGp1c3RvLCBldCBwaGFyZXRyYSBsZW8ganVzdG8gYSBlc3QuIER1aXMgYXJjdSBhdWd1ZSwgZmFjaWxpc2lzIHZlbCBkaWduaXNzaW0gc2VkLCBhbGlxdWFtIHF1aXMgbWFnbmEuIFF1aXNxdWUgbm9uIGNvbnNlcXVhdCBkb2xvci4gU3VzcGVuZGlzc2UgYSB1bHRyaWNlcyBsZW8uCgpEb25lYyB2aXRhZSBwcmV0aXVtIG5pYmguIE1hZWNlbmFzIGJpYmVuZHVtIGJpYmVuZHVtIGRpYW0gaW4gcGxhY2VyYXQuIFV0IGFjY3Vtc2FuLCBtaSB2aXRhZSB2ZXN0aWJ1bHVtIGV1aXNtb2QsIG51bmMganVzdG8gdnVscHV0YXRlIG5pc2ksIG5vbiBwbGFjZXJhdCBtaSB1cm5hIGV0IGRpYW0uIE1hZWNlbmFzIG1hbGVzdWFkYSBsb3JlbSB1dCBhcmN1IG1hdHRpcyBtb2xsaXMuIE51bGxhIGZhY2lsaXNpLiBEb25lYyBlc3QgbGVvLCBiaWJlbmR1bSBldSBwdWx2aW5hciBpbiwgY3Vyc3VzIHZlbCBtZXR1cy4gQWxpcXVhbSBlcmF0IHZvbHV0cGF0LiBOdWxsYW0gZmV1Z2lhdCBwb3J0dGl0b3IgbmVxdWUgaW4gdnVscHV0YXRlLiBRdWlzcXVlIG5lYyBtaSBldS
BtYWduYSBjb25zZXF1YXQgY3Vyc3VzIG5vbiBhdCBhcmN1LiBFdGlhbSByaXN1cyBtZXR1cywgc29sbGljaXR1ZGluIGV0IHVsdHJpY2VzIGF0LCB0aW5jaWR1bnQgc2VkIG51bmMuIFNlZCBlZ2V0IHNjZWxlcmlzcXVlIGF1Z3VlLiBVdCBmcmluZ2lsbGEgdmVuZW5hdGlzIHNlbSBub24gZWxlaWZlbmQuIE51bmMgbWF0dGlzLCByaXN1cyBzaXQgYW1ldCB2dWxwdXRhdGUgdmFyaXVzLCByaXN1cyBqdXN0byBlZ2VzdGFzIG1hdXJpcywgaWQgaW50ZXJkdW0gb2RpbyBpcHN1bSBldCBuaXNsLiBMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBNb3JiaSBpZCBlcmF0IG9kaW8sIG5lYyBwdWx2aW5hciBlbmltLgoKQ3VyYWJpdHVyIGFjIGZlcm1lbnR1bSBxdWFtLiBNb3JiaSBldSBlcm9zIHNhcGllbiwgdml0YWUgdGVtcHVzIGRvbG9yLiBNYXVyaXMgdmVzdGlidWx1bSBibGFuZGl0IGVuaW0gdXQgdmVuZW5hdGlzLiBBbGlxdWFtIGVnZXN0YXMsIGVyb3MgYXQgY29uc2VjdGV0dXIgdGluY2lkdW50LCBsb3JlbSBhdWd1ZSBpYWN1bGlzIGVzdCwgbmVjIG1vbGxpcyBmZWxpcyBhcmN1IGluIG51bmMuIFNlZCBpbiBvZGlvIHNlZCBsaWJlcm8gcGVsbGVudGVzcXVlIHZvbHV0cGF0IHZpdGFlIGEgYW50ZS4gTW9yYmkgY29tbW9kbyB2b2x1dHBhdCB0ZWxsdXMsIHV0IHZpdmVycmEgcHVydXMgcGxhY2VyYXQgZmVybWVudHVtLiBJbnRlZ2VyIGlhY3VsaXMgZmFjaWxpc2lzIGFyY3UsIGF0IGdyYXZpZGEgbG9yZW0gYmliZW5kdW0gYXQuIEFlbmVhbiBpZCBlcm9zIGVnZXQgZXN0IHNhZ2l0dGlzIGNvbnZhbGxpcyBzZWQgZXQgZHVpLiBEb25lYyBldSBwdWx2aW5hciB0ZWxsdXMuIE51bmMgZGlnbmlzc2ltIHJob25jdXMgdGVsbHVzLCBhdCBwZWxsZW50ZXNxdWUgbWV0dXMgbHVjdHVzIGF0LiBTZWQgb3JuYXJlIGFsaXF1YW0gZGlhbSwgYSBwb3J0dGl0b3IgbGVvIHNvbGxpY2l0dWRpbiBzZWQuIE5hbSB2aXRhZSBsZWN0dXMgbGFjdXMuIEludGVnZXIgYWRpcGlzY2luZyBxdWFtIG5lcXVlLCBibGFuZGl0IHBvc3VlcmUgbGliZXJvLiBTZWQgbGliZXJvIG51bmMsIGVnZXN0YXMgc29kYWxlcyB0ZW1wdXMgc2VkLCBjdXJzdXMgYmxhbmRpdCB0ZWxsdXMuIFZlc3RpYnVsdW0gbWkgcHVydXMsIHVsdHJpY2llcyBxdWlzIHBsYWNlcmF0IHZlbCwgbW9sZXN0aWUgYXQgZHVpLgoKTnVsbGEgY29tbW9kbyBvZGlvIGp1c3RvLiBQZWxsZW50ZXNxdWUgbm9uIG9ybmFyZSBkaWFtLiBJbiBjb25zZWN0ZXR1ciBzYXBpZW4gYWMgbnVuYyBzYWdpdHRpcyBtYWxlc3VhZGEuIE1vcmJpIHVsbGFtY29ycGVyIHRlbXBvciBlcmF0IG5lYyBydXRydW0uIER1aXMgdXQgY29tbW9kbyBqdXN0by4gQ3JhcyBlc3Qgb3JjaSwgY29uc2VjdGV0dXIgc2VkIGludGVyZHVtIHNlZCwgc2NlbGVyaXNxdWUgc2l0IGFtZXQgbnVsbGEuIFZlc3RpYnVsdW0ganVzdG8gbnVsbGEsIHBlbGxlbnRlc3F1ZSBhIHRlbXB1cyBldCwgZGFwaWJ1cyBldCBhcmN1LiBMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBNb3JiaSB0cmlzdGlxdWUsIGVyb3MgbmVjIGNvbmd1ZSBhZGlwaXNjaW5nLCBsaWd1bGEgc2VtIHJob25jdXMgZmVsaXMsIGF0IG9ybmFyZSB0ZWxsdXMgbWF1cmlzIGFjIHJpc3VzLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgUHJvaW4gbWF1cmlzIGR1aSwgdGVtcG9yIGZlcm1lbnR1bSBkaWN0dW0gZXQsIGN1cnN1cyBhIGxlby4gTWFlY2VuYXMgbmVjIG5pc2wgYSB0ZWxsdXMgcGVsbGVudGVzcXVlIHJob25jdXMuIE51bGxhbSB1bHRyaWNlcyBldWlzbW9kIGR1aSBldSBjb25ndWUuCgpJbiBuZWMgdGVtcG9yIHJpc3VzLiBJbiBmYXVjaWJ1cyBuaXNpIGVnZXQgZGlhbSBkaWduaXNzaW0gY29uc2VxdWF0LiBEb25lYyBwdWx2aW5hciBhbnRlIG5lYyBlbmltIG1hdHRpcyBydXRydW0uIFZlc3RpYnVsdW0gbGVvIGF1Z3VlLCBtb2xlc3RpZSBuZWMgZGFwaWJ1cyBpbiwgZGljdHVtIGF0IGVuaW0uIEludGVnZXIgYWxpcXVhbSwgbG9yZW0gZXUgdnVscHV0YXRlIGxhY2luaWEsIG1pIG9yY2kgdGVtcG9yIGVuaW0sIGVnZXQgbWF0dGlzIGxpZ3VsYSBtYWduYSBhIG1hZ25hLiBQcmFlc2VudCBzZWQgZXJhdCB1dCB0b3J0b3IgaW50ZXJkdW0gdml2ZXJyYS4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gTnVsbGEgZmFjaWxpc2kuIE1hZWNlbmFzIHNpdCBhbWV0IGxlY3R1cyBsYWN1cy4gTnVuYyB2aXRhZSBwdXJ1cyBpZCBsaWd1bGEgbGFvcmVldCBjb25kaW1lbnR1bS4gRHVpcyBhdWN0b3IgdG9ydG9yIHZlbCBkdWkgcHVsdmluYXIgYSBmYWNpbGlzaXMgYXJjdSBkaWduaXNzaW0uIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBEb25lYyBzb2xsaWNpdHVkaW4gcGVsbGVudGVzcXVlIGVnZXN0YXMuIFNlZCBzZWQgc2VtIGp1c3RvLiBNYWVjZW5hcyBsYW9yZWV0IGhlbmRyZXJpdCBtYXVyaXMsIHV0IHBvcnR0aXRvciBsb3JlbSBpYWN1bGlzIGFjLiBRdWlzcXVlIG1vbGVzdGllIHNlbSBxdWlzIGxvcmVtIHRlbXBvciBydXRydW0uIFBoYXNlb
Gx1cyBuaWJoIG1hdXJpcywgcmhvbmN1cyBpbiBjb25zZWN0ZXR1ciBub24sIGFsaXF1ZXQgZXUgbWFzc2EuCgpDdXJhYml0dXIgdmVsaXQgYXJjdSwgcHJldGl1bSBwb3J0YSBwbGFjZXJhdCBxdWlzLCB2YXJpdXMgdXQgbWV0dXMuIFZlc3RpYnVsdW0gdnVscHV0YXRlIHRpbmNpZHVudCBqdXN0bywgdml0YWUgcG9ydHRpdG9yIGxlY3R1cyBpbXBlcmRpZXQgc2l0IGFtZXQuIFZpdmFtdXMgZW5pbSBkb2xvciwgc29sbGljaXR1ZGluIHV0IHNlbXBlciBub24sIG9ybmFyZSBvcm5hcmUgZHVpLiBBbGlxdWFtIHRlbXBvciBmZXJtZW50dW0gc2FwaWVuIGVnZXQgY29uZGltZW50dW0uIEN1cmFiaXR1ciBsYW9yZWV0IGJpYmVuZHVtIGFudGUsIGluIGV1aXNtb2QgbGFjdXMgbGFjaW5pYSBldS4gUGVsbGVudGVzcXVlIGhhYml0YW50IG1vcmJpIHRyaXN0aXF1ZSBzZW5lY3R1cyBldCBuZXR1cyBldCBtYWxlc3VhZGEgZmFtZXMgYWMgdHVycGlzIGVnZXN0YXMuIFN1c3BlbmRpc3NlIHBvdGVudGkuIFNlZCBhdCBsaWJlcm8gZXUgdG9ydG9yIHRlbXB1cyBzY2VsZXJpc3F1ZS4gTnVsbGEgZmFjaWxpc2kuIE51bGxhbSB2aXRhZSBuZXF1ZSBpZCBqdXN0byB2aXZlcnJhIHJob25jdXMgcHJldGl1bSBhdCBsaWJlcm8uIEV0aWFtIGVzdCB1cm5hLCBhbGlxdWFtIHZlbCBwdWx2aW5hciBub24sIG9ybmFyZSB2ZWwgcHVydXMuCgpOdWxsYSB2YXJpdXMsIG5pc2kgZWdldCBjb25kaW1lbnR1bSBzZW1wZXIsIG1ldHVzIGVzdCBkaWN0dW0gb2RpbywgdmVsIG1hdHRpcyByaXN1cyBlc3Qgc2VkIHZlbGl0LiBDdW0gc29jaWlzIG5hdG9xdWUgcGVuYXRpYnVzIGV0IG1hZ25pcyBkaXMgcGFydHVyaWVudCBtb250ZXMsIG5hc2NldHVyIHJpZGljdWx1cyBtdXMuIE51bmMgbm9uIGVzdCBuZWMgdGVsbHVzIHVsdHJpY2llcyBtYXR0aXMgdXQgZWdldCB2ZWxpdC4gSW50ZWdlciBjb25kaW1lbnR1bSBhbnRlIGlkIGxvcmVtIGJsYW5kaXQgbGFjaW5pYS4gRG9uZWMgdmVsIHRvcnRvciBhdWd1ZSwgaW4gY29uZGltZW50dW0gbmlzaS4gUGVsbGVudGVzcXVlIHBlbGxlbnRlc3F1ZSBudWxsYSB1dCBudWxsYSBwb3J0dGl0b3IgcXVpcyBzb2RhbGVzIGVuaW0gcnV0cnVtLiBTZWQgYXVndWUgcmlzdXMsIGV1aXNtb2QgYSBhbGlxdWV0IGF0LCB2dWxwdXRhdGUgbm9uIGxpYmVyby4gTnVsbGFtIG5pYmggb2RpbywgZGlnbmlzc2ltIGZlcm1lbnR1bSBwdWx2aW5hciBhYywgY29uZ3VlIGV1IG1pLiBEdWlzIHRpbmNpZHVudCwgbmliaCBpZCB2ZW5lbmF0aXMgcGxhY2VyYXQsIGRpYW0gdHVycGlzIGdyYXZpZGEgbGVvLCBzaXQgYW1ldCBtb2xsaXMgbWFzc2EgZG9sb3IgcXVpcyBtYXVyaXMuIFZpdmFtdXMgc2NlbGVyaXNxdWUgc29kYWxlcyBhcmN1IGV0IGRhcGlidXMuIFN1c3BlbmRpc3NlIHBvdGVudGkuIENyYXMgcXVpcyB0ZWxsdXMgYXJjdSwgcXVpcyBsYW9yZWV0IHNlbS4gRnVzY2UgcG9ydHRpdG9yLCBzYXBpZW4gdmVsIHRyaXN0aXF1ZSBzb2RhbGVzLCB2ZWxpdCBsZW8gcG9ydGEgYXJjdSwgcXVpcyBwZWxsZW50ZXNxdWUgbnVuYyBtZXR1cyBub24gb2Rpby4gTmFtIGFyY3UgbGliZXJvLCB1bGxhbWNvcnBlciB1dCBwaGFyZXRyYSBub24sIGRpZ25pc3NpbSBldCB2ZWxpdC4gUXVpc3F1ZSBkb2xvciBsb3JlbSwgdmVoaWN1bGEgc2l0IGFtZXQgc2NlbGVyaXNxdWUgaW4sIHZhcml1cyBhdCBudWxsYS4gUGVsbGVudGVzcXVlIHZpdGFlIHNlbSBlZ2V0IHRvcnRvciBpYWN1bGlzIHB1bHZpbmFyLiBTZWQgbnVuYyBqdXN0bywgZXVpc21vZCBncmF2aWRhIHB1bHZpbmFyIGVnZXQsIGdyYXZpZGEgZWdldCB0dXJwaXMuIENyYXMgdmVsIGRpY3R1bSBuaXNpLiBOdWxsYW0gbnVsbGEgbGliZXJvLCBncmF2aWRhIHNpdCBhbWV0IGFsaXF1YW0gcXVpcywgY29tbW9kbyB2aXRhZSBvZGlvLiBDcmFzIHZpdGFlIG5pYmggbmVjIGR1aSBwbGFjZXJhdCBzZW1wZXIuCgpWaXZhbXVzIGF0IGZyaW5naWxsYSBlcm9zLiBWaXZhbXVzIGF0IG5pc2wgaWQgbWFzc2EgY29tbW9kbyBmZXVnaWF0IHF1aXMgbm9uIG1hc3NhLiBNb3JiaSB0ZWxsdXMgdXJuYSwgYXVjdG9yIHNpdCBhbWV0IGVsZW1lbnR1bSBzZWQsIHJ1dHJ1bSBub24gbGVjdHVzLiBOdWxsYSBmZXVnaWF0IGR1aSBpbiBzYXBpZW4gb3JuYXJlIGV0IGltcGVyZGlldCBlc3Qgb3JuYXJlLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gVmVzdGlidWx1bSBzZW1wZXIgcnV0cnVtIHRlbXBvci4gU2VkIGluIGZlbGlzIG5pYmgsIHNlZCBhbGlxdWFtIGVuaW0uIEN1cmFiaXR1ciB1dCBxdWFtIHNjZWxlcmlzcXVlIHZlbGl0IHBsYWNlcmF0IGRpY3R1bS4gRG9uZWMgZWxlaWZlbmQgdmVoaWN1bGEgcHVydXMsIGV1IHZlc3RpYnVsdW0gc2FwaWVuIHJ1dHJ1bSBldS4gVml2YW11cyBpbiBvZGlvIHZlbCBlc3QgdnVscHV0YXRlIGlhY3VsaXMuIE51bmMgcnV0cnVtIGZldWdpYXQgcHJldGl1bS4KCk1hZWNlbmFzIGlwc3VtIG5lcXVlLCBhdWN0b3IgcXVpcyBsYWNpbmlhIHZpdGFlLCBldWlzbW9kIGFjIG9yY2kuIERvbmVjIG1vbGVzdGllIG1hc3NhIGNvbnNlcXVhdCBlc3QgcG9ydGEgYWMgcG9ydGEgcHVydXMgdGluY2lkdW50LiBOYW0gYmliZW5kdW0gbGVv
IG5lYyBsYWN1cyBtb2xsaXMgbm9uIGNvbmRpbWVudHVtIGRvbG9yIHJob25jdXMuIE51bGxhIGFjIHZvbHV0cGF0IGxvcmVtLiBOdWxsYW0gZXJhdCBwdXJ1cywgY29udmFsbGlzIGVnZXQgY29tbW9kbyBpZCwgdmFyaXVzIHF1aXMgYXVndWUuIE51bGxhbSBhbGlxdWFtIGVnZXN0YXMgbWksIHZlbCBzdXNjaXBpdCBuaXNsIG1hdHRpcyBjb25zZXF1YXQuIFF1aXNxdWUgdmVsIGVnZXN0YXMgc2FwaWVuLiBOdW5jIGxvcmVtIHZlbGl0LCBjb252YWxsaXMgbmVjIGxhb3JlZXQgZXQsIGFsaXF1ZXQgZWdldCBtYXNzYS4gTmFtIGV0IG5pYmggYWMgZHVpIHZlaGljdWxhIGFsaXF1YW0gcXVpcyBldSBhdWd1ZS4gQ3JhcyB2ZWwgbWFnbmEgdXQgZWxpdCByaG9uY3VzIGludGVyZHVtIGlhY3VsaXMgdm9sdXRwYXQgbmlzbC4gU3VzcGVuZGlzc2UgYXJjdSBsb3JlbSwgdmFyaXVzIHJob25jdXMgdGVtcG9yIGlkLCBwdWx2aW5hciBzZWQgdG9ydG9yLiBQZWxsZW50ZXNxdWUgdWx0cmljaWVzIGxhb3JlZXQgb2RpbyBhYyBkaWduaXNzaW0uIEFsaXF1YW0gZGlhbSBhcmN1LCBwbGFjZXJhdCBxdWlzIGVnZXN0YXMgZWdldCwgZmFjaWxpc2lzIGV1IG51bmMuIE1hdXJpcyB2dWxwdXRhdGUsIG5pc2wgc2l0IGFtZXQgbW9sbGlzIGludGVyZHVtLCByaXN1cyB0b3J0b3Igb3JuYXJlIG9yY2ksIHNlZCBlZ2VzdGFzIG9yY2kgZXJvcyBub24gZGlhbS4gVmVzdGlidWx1bSBoZW5kcmVyaXQsIG1ldHVzIHF1aXMgcGxhY2VyYXQgcGVsbGVudGVzcXVlLCBlbmltIHB1cnVzIGZhdWNpYnVzIGR1aSwgc2l0IGFtZXQgdWx0cmljaWVzIGxlY3R1cyBpcHN1bSBpZCBsb3JlbS4gQ2xhc3MgYXB0ZW50IHRhY2l0aSBzb2Npb3NxdSBhZCBsaXRvcmEgdG9ycXVlbnQgcGVyIGNvbnViaWEgbm9zdHJhLCBwZXIgaW5jZXB0b3MgaGltZW5hZW9zLiBQcmFlc2VudCBlZ2V0IGRpYW0gb2RpbywgZXUgYmliZW5kdW0gZWxpdC4gSW4gdmVzdGlidWx1bSBvcmNpIGV1IGVyYXQgdGluY2lkdW50IHRyaXN0aXF1ZS4KCkNyYXMgY29uc2VjdGV0dXIgYW50ZSBldSB0dXJwaXMgcGxhY2VyYXQgc29sbGljaXR1ZGluLiBNYXVyaXMgZXQgbGFjdXMgdG9ydG9yLCBlZ2V0IHBoYXJldHJhIHZlbGl0LiBEb25lYyBhY2N1bXNhbiB1bHRyaWNlcyB0ZW1wb3IuIERvbmVjIGF0IG5pYmggYSBlbGl0IGNvbmRpbWVudHVtIGRhcGlidXMuIEludGVnZXIgc2l0IGFtZXQgdnVscHV0YXRlIGFudGUuIFN1c3BlbmRpc3NlIHBvdGVudGkuIEluIHNvZGFsZXMgbGFvcmVldCBtYXNzYSB2aXRhZSBsYWNpbmlhLiBNb3JiaSB2ZWwgbGFjdXMgZmV1Z2lhdCBhcmN1IHZ1bHB1dGF0ZSBtb2xlc3RpZS4gQWxpcXVhbSBtYXNzYSBtYWduYSwgdWxsYW1jb3JwZXIgYWNjdW1zYW4gZ3JhdmlkYSBxdWlzLCByaG9uY3VzIHB1bHZpbmFyIG51bGxhLiBQcmFlc2VudCBzaXQgYW1ldCBpcHN1bSBkaWFtLCBzaXQgYW1ldCBsYWNpbmlhIG5lcXVlLiBJbiBldCBzYXBpZW4gYXVndWUuIEV0aWFtIGVuaW0gZWxpdCwgdWx0cmljZXMgdmVsIHJ1dHJ1bSBpZCwgc2NlbGVyaXNxdWUgbm9uIGVuaW0uCgpQcm9pbiBldCBlZ2VzdGFzIG5lcXVlLiBQcmFlc2VudCBldCBpcHN1bSBkb2xvci4gTnVuYyBub24gdmFyaXVzIG5pc2wuIEZ1c2NlIGluIHRvcnRvciBuaXNpLiBNYWVjZW5hcyBjb252YWxsaXMgbmVxdWUgaW4gbGlndWxhIGJsYW5kaXQgcXVpcyB2ZWhpY3VsYSBsZW8gbW9sbGlzLiBQZWxsZW50ZXNxdWUgc2FnaXR0aXMgYmxhbmRpdCBsZW8sIGRhcGlidXMgcGVsbGVudGVzcXVlIGxlbyB1bHRyaWNlcyBhYy4gQ3VyYWJpdHVyIGFjIGVnZXN0YXMgbGliZXJvLiBEb25lYyBwcmV0aXVtIHBoYXJldHJhIHByZXRpdW0uIEZ1c2NlIGltcGVyZGlldCwgdHVycGlzIGV1IGFsaXF1YW0gcG9ydGEsIGFudGUgZWxpdCBlbGVpZmVuZCByaXN1cywgbHVjdHVzIGF1Y3RvciBhcmN1IGFudGUgdXQgbnVuYy4gVml2YW11cyBpbiBsZW8gZmVsaXMsIHZpdGFlIGVsZWlmZW5kIGxhY3VzLiBEb25lYyB0ZW1wdXMgYWxpcXVhbSBwdXJ1cyBwb3J0dGl0b3IgdHJpc3RpcXVlLiBTdXNwZW5kaXNzZSBkaWFtIG5lcXVlLCBzdXNjaXBpdCBmZXVnaWF0IGZyaW5naWxsYSBub24sIGVsZWlmZW5kIHNpdCBudWxsYW0uCg== \ No newline at end of file
diff --git a/test/elixir/test/design_docs_query_test.exs b/test/elixir/test/design_docs_query_test.exs
deleted file mode 100644
index b439a2e02..000000000
--- a/test/elixir/test/design_docs_query_test.exs
+++ /dev/null
@@ -1,273 +0,0 @@
-defmodule DesignDocsQueryTest do
- use CouchTestCase
-
- @moduletag :design_docs
-
- @moduledoc """
- Test CouchDB /{db}/_design_docs
- """
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- bulk_save(db_name, make_docs(1..5))
-
- Enum.each(1..5, fn x -> create_ddoc(db_name, x) end)
-
- {:ok, [db_name: db_name]}
- end
-
- defp create_ddoc(db_name, idx) do
- ddoc = %{
- _id: "_design/ddoc0#{idx}",
- views: %{
- testing: %{
- map: "function(){emit(1,1)}"
- }
- }
- }
-
- create_doc(db_name, ddoc)
- end
-
- test "query _design_docs (GET with no parameters)", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design_docs")
- assert resp.status_code == 200, "standard get should be 200"
- assert resp.body["total_rows"] == 5, "total_rows mismatch"
- assert length(resp.body["rows"]) == 5, "amount of rows mismatch"
- end
-
- test "query _design_docs with single key", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design_docs?key=\"_design/ddoc03\"")
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 1, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 0)["key"] == "_design/ddoc03"
- end
-
- test "query _design_docs with multiple key", context do
- resp =
- Couch.get(
- "/#{context[:db_name]}/_design_docs",
- query: %{
- :keys => "[\"_design/ddoc02\", \"_design/ddoc03\"]"
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 2
- end
-
- test "POST with empty body", context do
- resp =
- Couch.post(
- "/#{context[:db_name]}/_design_docs",
- body: %{}
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 5
- end
-
- test "POST with keys and limit", context do
- resp =
- Couch.post(
- "/#{context[:db_name]}/_design_docs",
- body: %{
- :keys => ["_design/ddoc02", "_design/ddoc03"],
- :limit => 1
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- test "POST with query parameter and JSON body", context do
- resp =
- Couch.post(
- "/#{context[:db_name]}/_design_docs",
- query: %{
- :limit => 1
- },
- body: %{
- :keys => ["_design/ddoc02", "_design/ddoc03"]
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- test "POST edge case with colliding parameters - query takes precedence", context do
- resp =
- Couch.post(
- "/#{context[:db_name]}/_design_docs",
- query: %{
- :limit => 0
- },
- body: %{
- :keys => ["_design/ddoc02", "_design/ddoc03"],
- :limit => 2
- }
- )
-
- assert resp.status_code == 200
- assert Enum.empty?(Map.get(resp, :body)["rows"])
- end
-
- test "query _design_docs descending=true", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design_docs?descending=true")
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 5, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 0)["key"] == "_design/ddoc05"
- end
-
- test "query _design_docs descending=false", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design_docs?descending=false")
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 5, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 0)["key"] == "_design/ddoc01"
- end
-
- test "query _design_docs end_key", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design_docs?end_key=\"_design/ddoc03\"")
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 3, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 2)["key"] == "_design/ddoc03"
- end
-
- test "query _design_docs endkey", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design_docs?endkey=\"_design/ddoc03\"")
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 3, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 2)["key"] == "_design/ddoc03"
- end
-
- test "query _design_docs start_key", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design_docs?start_key=\"_design/ddoc03\"")
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 3, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 0)["key"] == "_design/ddoc03"
- end
-
- test "query _design_docs startkey", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design_docs?startkey=\"_design/ddoc03\"")
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 3, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 0)["key"] == "_design/ddoc03"
- end
-
- test "query _design_docs end_key inclusive_end=true", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design_docs",
- query: [end_key: "\"_design/ddoc03\"", inclusive_end: true]
- )
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 3, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 2)["key"] == "_design/ddoc03"
- end
-
- test "query _design_docs end_key inclusive_end=false", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design_docs",
- query: [end_key: "\"_design/ddoc03\"", inclusive_end: false]
- )
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 2, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 1)["key"] == "_design/ddoc02"
- end
-
- test "query _design_docs end_key inclusive_end=false descending", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design_docs",
- query: [end_key: "\"_design/ddoc03\"", inclusive_end: false, descending: true]
- )
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 2, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 1)["key"] == "_design/ddoc04"
- end
-
- test "query _design_docs end_key limit", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design_docs",
- query: [end_key: "\"_design/ddoc05\"", limit: 2]
- )
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 2, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 1)["key"] == "_design/ddoc02"
- end
-
- test "query _design_docs end_key skip", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design_docs",
- query: [end_key: "\"_design/ddoc05\"", skip: 2]
- )
-
- assert resp.status_code == 200, "standard get should be 200"
- assert length(resp.body["rows"]) == 3, "amount of rows mismatch"
- assert Enum.at(resp.body["rows"], 0)["key"] == "_design/ddoc03"
- assert Enum.at(resp.body["rows"], 2)["key"] == "_design/ddoc05"
- end
-
- test "query _design_docs update_seq", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design_docs",
- query: [end_key: "\"_design/ddoc05\"", update_seq: true]
- )
-
- assert resp.status_code == 200, "standard get should be 200"
- assert Map.has_key?(resp.body, "update_seq")
- end
-
- test "query _design_docs post with keys", context do
- db_name = context[:db_name]
-
- resp =
- Couch.post("/#{db_name}/_design_docs",
- headers: ["Content-Type": "application/json"],
- body: %{keys: ["_design/ddoc02", "_design/ddoc03"]}
- )
-
- keys =
- resp.body["rows"]
- |> Enum.map(fn p -> p["key"] end)
-
- assert resp.status_code == 200, "standard post should be 200"
- assert length(resp.body["rows"]) == 2, "amount of rows mismatch"
- assert Enum.member?(keys, "_design/ddoc03")
- assert Enum.member?(keys, "_design/ddoc02")
- end
-end
diff --git a/test/elixir/test/design_docs_test.exs b/test/elixir/test/design_docs_test.exs
deleted file mode 100644
index 58058b0c4..000000000
--- a/test/elixir/test/design_docs_test.exs
+++ /dev/null
@@ -1,488 +0,0 @@
-defmodule DesignDocsTest do
- use CouchTestCase
-
- @moduletag :design_docs
-
- @design_doc %{
- _id: "_design/test",
- language: "javascript",
- autoupdate: false,
- whatever: %{
- stringzone: "exports.string = 'plankton';",
- commonjs: %{
- whynot: """
- exports.test = require('../stringzone');
- exports.foo = require('whatever/stringzone');
- """,
- upper: """
- exports.testing = require('./whynot').test.string.toUpperCase()+
- module.id+require('./whynot').foo.string
- """,
- circular_one: "require('./circular_two'); exports.name = 'One';",
- circular_two: "require('./circular_one'); exports.name = 'Two';"
- },
- # paths relative to parent
- idtest1: %{
- a: %{
- b: %{d: "module.exports = require('../c/e').id;"},
- c: %{e: "exports.id = module.id;"}
- }
- },
- # multiple paths relative to parent
- idtest2: %{
- a: %{
- b: %{d: "module.exports = require('../../a/c/e').id;"},
- c: %{e: "exports.id = module.id;"}
- }
- },
- # paths relative to module
- idtest3: %{
- a: %{
- b: "module.exports = require('./c/d').id;",
- c: %{
- d: "module.exports = require('./e');",
- e: "exports.id = module.id;"
- }
- }
- },
- # paths relative to module and parent
- idtest4: %{
- a: %{
- b: "module.exports = require('../a/./c/d').id;",
- c: %{
- d: "module.exports = require('./e');",
- e: "exports.id = module.id;"
- }
- }
- },
- # paths relative to root
- idtest5: %{
- a: "module.exports = require('whatever/idtest5/b').id;",
- b: "exports.id = module.id;"
- }
- },
- views: %{
- all_docs_twice: %{
- map: """
- function(doc) {
- emit(doc.integer, null);
- emit(doc.integer, null);
- }
- """
- },
- no_docs: %{
- map: """
- function(doc) {}
- """
- },
- single_doc: %{
- map: """
- function(doc) {
- if (doc._id === "1") {
- emit(1, null);
- }
- }
- """
- },
- summate: %{
- map: """
- function(doc) {
- emit(doc.integer, doc.integer);
- }
- """,
- reduce: """
- function(keys, values) {
- return sum(values);
- }
- """
- },
- summate2: %{
- map: """
- function(doc) {
- emit(doc.integer, doc.integer);
- }
- """,
- reduce: """
- function(keys, values) {
- return sum(values);
- }
- """
- },
- huge_src_and_results: %{
- map: """
- function(doc) {
- if (doc._id === "1") {
- emit("#{String.duplicate("a", 16)}", null);
- }
- }
- """,
- reduce: """
- function(keys, values) {
- return "#{String.duplicate("a", 16)}";
- }
- """
- },
- lib: %{
- baz: "exports.baz = 'bam';",
- foo: %{
- foo: "exports.foo = 'bar';",
- boom: "exports.boom = 'ok';",
- zoom: "exports.zoom = 'yeah';"
- }
- },
- commonjs: %{
- map: """
- function(doc) {
- emit(null, require('views/lib/foo/boom').boom);
- }
- """
- }
- },
- shows: %{
- simple: """
- function() {
- return 'ok';
- }
- """,
- requirey: """
- function() {
- var lib = require('whatever/commonjs/upper');
- return lib.testing;
- }
- """,
- circular: """
- function() {
- var lib = require('whatever/commonjs/upper');
- return JSON.stringify(this);
- }
- """,
- circular_require: """
- function() {
- return require('whatever/commonjs/circular_one').name;
- }
- """,
- idtest1: """
- function() {
- return require('whatever/idtest1/a/b/d');
- }
- """,
- idtest2: """
- function() {
- return require('whatever/idtest2/a/b/d');
- }
- """,
- idtest3: """
- function() {
- return require('whatever/idtest3/a/b');
- }
- """,
- idtest4: """
- function() {
- return require('whatever/idtest4/a/b');
- }
- """,
- idtest5: """
- function() {
- return require('whatever/idtest5/a');
- }
- """
- }
- }
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- {:ok, _} = create_doc(db_name, @design_doc)
- {:ok, _} = create_doc(db_name, %{})
- {:ok, [db_name: db_name]}
- end
-
- test "consistent _rev for design docs", context do
- resp = Couch.get("/#{context[:db_name]}/_design/test")
- assert resp.status_code == 200
- first_db_rev = resp.body["_rev"]
-
- second_db_name = random_db_name()
- create_db(second_db_name)
- {:ok, resp2} = create_doc(second_db_name, @design_doc)
- assert first_db_rev == resp2.body["rev"]
- end
-
- test "commonjs require", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design/test/_show/requirey")
- assert resp.status_code == 200
- assert resp.body == "PLANKTONwhatever/commonjs/upperplankton"
-
- resp = Couch.get("/#{db_name}/_design/test/_show/circular")
- assert resp.status_code == 200
-
- result =
- resp.body
- |> IO.iodata_to_binary()
- |> :jiffy.decode([:return_maps])
-
- assert result["language"] == "javascript"
- end
-
- test "circular commonjs dependencies", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design/test/_show/circular_require")
- assert resp.status_code == 200
- assert resp.body == "One"
- end
-
- test "module id values are as expected", context do
- db_name = context[:db_name]
-
- check_id_value(db_name, "idtest1", "whatever/idtest1/a/c/e")
- check_id_value(db_name, "idtest2", "whatever/idtest2/a/c/e")
- check_id_value(db_name, "idtest3", "whatever/idtest3/a/c/e")
- check_id_value(db_name, "idtest4", "whatever/idtest4/a/c/e")
- check_id_value(db_name, "idtest5", "whatever/idtest5/b")
- end
-
- defp check_id_value(db_name, id, expected) do
- resp = Couch.get("/#{db_name}/_design/test/_show/#{id}")
- assert resp.status_code == 200
- assert resp.body == expected
- end
-
- @tag :with_db
- test "that we get correct design doc info back", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @design_doc)
-
- resp = Couch.get("/#{db_name}/_design/test/_info")
- prev_view_sig = resp.body["view_index"]["signature"]
- prev_view_size = resp.body["view_index"]["sizes"]["file"]
-
- num_docs = 500
- bulk_save(db_name, make_docs(1..(num_docs + 1)))
-
- Couch.get("/#{db_name}/_design/test/_view/summate", query: [stale: "ok"])
-
- for _x <- 0..1 do
- resp = Couch.get("/#{db_name}/_design/test/_info")
- assert resp.body["name"] == "test"
- assert is_map(resp.body["view_index"])
- view_index = resp.body["view_index"]
- assert view_index["sizes"]["file"] == prev_view_size
- assert view_index["compact_running"] == false
- assert view_index["signature"] == prev_view_sig
-
- # check collator_versions result
- assert is_list(view_index["collator_versions"])
- collator_versions = view_index["collator_versions"]
- assert length(collator_versions) == 1
- version = hd(collator_versions)
- assert is_binary(version)
- end
- end
-
- test "commonjs in map functions", context do
- db_name = context[:db_name]
-
- resp = Couch.get("/#{db_name}/_design/test/_view/commonjs", query: [limit: 1])
- assert resp.status_code == 200
- assert Enum.at(resp.body["rows"], 0)["value"] == "ok"
- end
-
- test "_all_docs view returns correctly with keys", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_all_docs",
- query: [startkey: :jiffy.encode("_design"), endkey: :jiffy.encode("_design0")]
- )
-
- assert length(resp.body["rows"]) == 1
- end
-
- @tag :with_db
- test "all_docs_twice", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @design_doc)
-
- num_docs = 500
- bulk_save(db_name, make_docs(1..(2 * num_docs)))
-
- for _x <- 0..1 do
- test_all_docs_twice(db_name, num_docs)
- end
- end
-
- defp test_all_docs_twice(db_name, num_docs) do
- resp = Couch.get("/#{db_name}/_design/test/_view/all_docs_twice")
- assert resp.status_code == 200
- rows = resp.body["rows"]
-
- for x <- 0..num_docs do
- assert Map.get(Enum.at(rows, 2 * x), "key") == x + 1
- assert Map.get(Enum.at(rows, 2 * x + 1), "key") == x + 1
- end
-
- resp = Couch.get("/#{db_name}/_design/test/_view/no_docs")
- assert resp.body["total_rows"] == 0
-
- resp = Couch.get("/#{db_name}/_design/test/_view/single_doc")
- assert resp.body["total_rows"] == 1
- end
-
- @tag :with_db
- test "language not specified, Javascript is implied", context do
- db_name = context[:db_name]
- bulk_save(db_name, make_docs(1..2))
-
- design_doc_2 = %{
- _id: "_design/test2",
- views: %{
- single_doc: %{
- map: """
- function(doc) {
- if (doc._id === "1") {
- emit(1, null);
- }
- }
- """
- }
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc_2)
-
- resp = Couch.get("/#{db_name}/_design/test2/_view/single_doc")
- assert resp.status_code == 200
- assert length(resp.body["rows"]) == 1
- end
-
- @tag :with_db
- test "startkey and endkey", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @design_doc)
-
- num_docs = 500
- bulk_save(db_name, make_docs(1..(2 * num_docs)))
-
- resp = Couch.get("/#{db_name}/_design/test/_view/summate")
- assert Enum.at(resp.body["rows"], 0)["value"] == summate(num_docs * 2)
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate",
- query: [startkey: 4, endkey: 4]
- )
-
- assert Enum.at(resp.body["rows"], 0)["value"] == 4
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate",
- query: [startkey: 4, endkey: 5]
- )
-
- assert Enum.at(resp.body["rows"], 0)["value"] == 9
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate",
- query: [startkey: 4, endkey: 6]
- )
-
- assert Enum.at(resp.body["rows"], 0)["value"] == 15
-
- # test start_key and end_key aliases
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate",
- query: [start_key: 4, end_key: 6]
- )
-
- assert Enum.at(resp.body["rows"], 0)["value"] == 15
-
- # Verify that a shared index (view def is an exact copy of "summate")
- # does not confuse the reduce stage
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate2",
- query: [startkey: 4, endkey: 6]
- )
-
- assert Enum.at(resp.body["rows"], 0)["value"] == 15
-
- for x <- 0..Integer.floor_div(num_docs, 60) do
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate",
- query: [startkey: x * 30, endkey: num_docs - x * 30]
- )
-
- assert Enum.at(resp.body["rows"], 0)["value"] ==
- summate(num_docs - x * 30) - summate(x * 30 - 1)
- end
- end
-
- defp summate(n) do
- (n + 1) * (n / 2)
- end
-
- @tag :with_db
- test "design doc deletion", context do
- db_name = context[:db_name]
- {:ok, resp} = create_doc(db_name, @design_doc)
-
- del_resp =
- Couch.delete("/#{db_name}/#{resp.body["id"]}", query: [rev: resp.body["rev"]])
-
- assert del_resp.status_code == 200
-
- resp = Couch.get("/#{db_name}/#{resp.body["id"]}")
- assert resp.status_code == 404
-
- resp = Couch.get("/#{db_name}/_design/test/_view/no_docs")
- assert resp.status_code == 404
- end
-
- @tag :with_db
- test "validate doc update", context do
- db_name = context[:db_name]
-
- # COUCHDB-1227 - if a design document is deleted by adding a "_deleted"
- # field with the boolean value true, its validate_doc_update functions
- # should no longer have any effect.
-
- ddoc = %{
- _id: "_design/test",
- language: "javascript",
- validate_doc_update: """
- function(newDoc, oldDoc, userCtx, secObj) {
- if (newDoc.value % 2 == 0) {
- throw({forbidden: "dont like even numbers"});
- }
- return true;
- }
- """
- }
-
- {:ok, resp_ddoc} = create_doc(db_name, ddoc)
-
- resp =
- Couch.post("/#{db_name}",
- body: %{_id: "doc1", value: 4}
- )
-
- assert resp.status_code == 403
- assert resp.body["reason"] == "dont like even numbers"
-
- ddoc_resp = Couch.get("/#{db_name}/#{resp_ddoc.body["id"]}")
-
- ddoc =
- ddoc_resp.body
- |> Map.put("_deleted", true)
-
- del_resp =
- Couch.post("/#{db_name}",
- body: ddoc
- )
-
- assert del_resp.status_code in [201, 202]
-
- {:ok, _} = create_doc(db_name, %{_id: "doc1", value: 4})
- end
-end
diff --git a/test/elixir/test/design_options_test.exs b/test/elixir/test/design_options_test.exs
deleted file mode 100644
index 95a938e38..000000000
--- a/test/elixir/test/design_options_test.exs
+++ /dev/null
@@ -1,74 +0,0 @@
-defmodule DesignOptionsTest do
- use CouchTestCase
-
- @moduletag :design_docs
-
- @moduledoc """
- Test the CouchDB design document options include_design and local_seq
- """
- @tag :with_db
- test "design doc options - include_desing=true", context do
- db_name = context[:db_name]
-
- create_test_view(db_name, "_design/fu", %{include_design: true})
-
- resp = Couch.get("/#{db_name}/_design/fu/_view/data")
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- assert Enum.at(resp.body["rows"], 0)["value"] == "_design/fu"
- end
-
- @tag :with_db
- test "design doc options - include_desing=false", context do
- db_name = context[:db_name]
-
- create_test_view(db_name, "_design/bingo", %{include_design: false})
-
- resp = Couch.get("/#{db_name}/_design/bingo/_view/data")
- assert resp.status_code == 200
- assert Enum.empty?(Map.get(resp, :body)["rows"])
- end
-
- @tag :with_db
- test "design doc options - include_design default value", context do
- db_name = context[:db_name]
-
- create_test_view(db_name, "_design/bango", %{})
-
- resp = Couch.get("/#{db_name}/_design/bango/_view/data")
- assert resp.status_code == 200
- assert Enum.empty?(Map.get(resp, :body)["rows"])
- end
-
- @tag :with_db
- test "design doc options - local_seq=true", context do
- db_name = context[:db_name]
-
- create_test_view(db_name, "_design/fu", %{include_design: true, local_seq: true})
- create_doc(db_name, %{})
- resp = Couch.get("/#{db_name}/_design/fu/_view/with_seq")
-
- row_with_key =
- resp.body["rows"]
- |> Enum.filter(fn p -> p["key"] != :null end)
-
- assert length(row_with_key) == 2
- end
-
- defp create_test_view(db_name, id, options) do
- map = "function (doc) {emit(null, doc._id);}"
- withseq = "function(doc) {emit(doc._local_seq, null)}"
-
- design_doc = %{
- _id: id,
- language: "javascript",
- options: options,
- views: %{
- data: %{map: map},
- with_seq: %{map: withseq}
- }
- }
-
- create_doc(db_name, design_doc)
- end
-end
diff --git a/test/elixir/test/design_paths_test.exs b/test/elixir/test/design_paths_test.exs
deleted file mode 100644
index b3e10c165..000000000
--- a/test/elixir/test/design_paths_test.exs
+++ /dev/null
@@ -1,76 +0,0 @@
-defmodule DesignPathTest do
- use CouchTestCase
-
- @moduletag :design_docs
-
- @moduledoc """
- Test CouchDB design document paths
- """
- @tag :with_db
- test "design doc path", context do
- db_name = context[:db_name]
- ddoc_path_test(db_name)
- end
-
- @tag :with_db_name
- test "design doc path with slash in db name", context do
- db_name = URI.encode_www_form(context[:db_name] <> "/with_slashes")
- create_db(db_name)
- ddoc_path_test(db_name)
- end
-
- defp ddoc_path_test(db_name) do
- create_test_view(db_name, "_design/test")
-
- resp = Couch.get("/#{db_name}/_design/test")
- assert resp.body["_id"] == "_design/test"
-
- resp =
- Couch.get(Couch.process_url("/#{db_name}/_design%2Ftest"),
- follow_redirects: true
- )
-
- assert resp.body["_id"] == "_design/test"
-
- resp = Couch.get("/#{db_name}/_design/test/_view/testing")
- assert Enum.empty?(Map.get(resp, :body)["rows"])
-
- design_doc2 = %{
- _id: "_design/test2",
- views: %{
- testing: %{
- map: "function(){emit(1,1)}"
- }
- }
- }
-
- resp = Couch.put("/#{db_name}/_design/test2", body: design_doc2)
- assert resp.status_code == 201
-
- resp = Couch.get("/#{db_name}/_design/test2")
- assert resp.body["_id"] == "_design/test2"
-
- resp =
- Couch.get(Couch.process_url("/#{db_name}/_design%2Ftest2"),
- follow_redirects: true
- )
-
- assert resp.body["_id"] == "_design/test2"
-
- resp = Couch.get("/#{db_name}/_design/test2/_view/testing")
- assert Enum.empty?(Map.get(resp, :body)["rows"])
- end
-
- defp create_test_view(db_name, id) do
- design_doc = %{
- _id: id,
- views: %{
- testing: %{
- map: "function(){emit(1,1)}"
- }
- }
- }
-
- create_doc(db_name, design_doc)
- end
-end
diff --git a/test/elixir/test/erlang_views_test.exs b/test/elixir/test/erlang_views_test.exs
deleted file mode 100644
index 3346c2274..000000000
--- a/test/elixir/test/erlang_views_test.exs
+++ /dev/null
@@ -1,117 +0,0 @@
-defmodule ErlangViewsTest do
- use CouchTestCase
-
- @moduletag :erlang_views
-
- @moduledoc """
- Basic 'smoke tests' of Erlang views.
- This is a port of the erlang_views.js test suite.
- """
-
- @doc1 %{:_id => "1", :integer => 1, :string => "str1", :array => [1, 2, 3]}
-
- @erlang_map_fun """
- fun({Doc}) ->
- K = couch_util:get_value(<<"integer">>, Doc, null),
- V = couch_util:get_value(<<"string">>, Doc, null),
- Emit(K, V)
- end.
- """
-
- @erlang_reduce_fun """
- fun (_, Values, false) -> length(Values);
- (_, Values, true) -> lists:sum(Values)
- end.
- """
-
- @erlang_map_fun_2 """
- fun({Doc}) ->
- Words = couch_util:get_value(<<"words">>, Doc),
- lists:foreach(fun({Word}) ->
- WordString = couch_util:get_value(<<"word">>, Word),
- Count = couch_util:get_value(<<"count">>, Word),
- Emit(WordString , Count)
- end, Words)
- end.
- """
-
- @erlang_reduce_fun_2 """
- fun(Keys, Values, RR) -> length(Values) end.
- """
-
- @word_list ["foo", "bar", "abc", "def", "baz", "xxyz"]
-
- @tag :with_db
- test "Erlang map function", context do
- db_name = context[:db_name]
- create_doc(db_name, @doc1)
-
- results =
- query(
- db_name,
- @erlang_map_fun,
- nil,
- nil,
- nil,
- "erlang"
- )
-
- assert results["total_rows"] == 1
- assert List.first(results["rows"])["key"] == 1
- assert List.first(results["rows"])["value"] == "str1"
- end
-
- @tag :with_db
- test "Erlang reduce function", context do
- db_name = context[:db_name]
- create_doc(db_name, @doc1)
- doc2 = @doc1 |> Map.replace!(:_id, "2") |> Map.replace!(:string, "str2")
- create_doc(db_name, doc2)
-
- results =
- query(
- db_name,
- @erlang_map_fun,
- @erlang_reduce_fun,
- nil,
- nil,
- "erlang"
- )
-
- assert List.first(results["rows"])["value"] == 2
- end
-
- @tag :with_db
- test "Erlang reduce function larger dataset", context do
- db_name = context[:db_name]
- bulk_save(db_name, create_large_dataset(250))
-
- results =
- query(
- db_name,
- @erlang_map_fun_2,
- @erlang_reduce_fun_2,
- nil,
- nil,
- "erlang"
- )
-
- assert Map.get(List.first(results["rows"]), "key", :null) == :null
- assert List.first(results["rows"])["value"] > 0
- end
-
- defp create_large_dataset(size) do
- doc_words =
- for j <- 0..100 do
- %{word: get_word(j), count: j}
- end
-
- template_doc = %{words: doc_words}
-
- make_docs(0..size, template_doc)
- end
-
- defp get_word(idx) do
- Enum.at(@word_list, rem(idx, length(@word_list)))
- end
-end
diff --git a/test/elixir/test/etags_head_test.exs b/test/elixir/test/etags_head_test.exs
deleted file mode 100644
index 9b9ff8bb0..000000000
--- a/test/elixir/test/etags_head_test.exs
+++ /dev/null
@@ -1,151 +0,0 @@
-defmodule EtagsHeadTest do
- use CouchTestCase
-
- @moduletag :etags
-
- @tag :with_db
- test "etag header on creation", context do
- db_name = context[:db_name]
-
- resp =
- Couch.put("/#{db_name}/1",
- headers: ["Content-Type": "application/json"],
- body: %{}
- )
-
- assert resp.status_code == 201
- assert Map.has_key?(resp.headers.hdrs, "etag")
- end
-
- @tag :with_db
- test "etag header on retrieval", context do
- db_name = context[:db_name]
-
- resp =
- Couch.put("/#{db_name}/1",
- headers: ["Content-Type": "application/json"],
- body: %{}
- )
-
- etag = resp.headers.hdrs["etag"]
-
- # get the doc and verify the headers match
- resp = Couch.get("/#{db_name}/1")
- assert etag == resp.headers.hdrs["etag"]
-
- # 'head' the doc and verify the headers match
- resp =
- Couch.head("/#{db_name}/1",
- headers: ["if-none-match": "s"]
- )
-
- assert etag == resp.headers.hdrs["etag"]
- end
-
- @tag :with_db
- test "etag header on head", context do
- db_name = context[:db_name]
-
- resp =
- Couch.put("/#{db_name}/1",
- headers: ["Content-Type": "application/json"],
- body: %{}
- )
-
- etag = resp.headers.hdrs["etag"]
-
- # 'head' the doc and verify the headers match
- resp =
- Couch.head("/#{db_name}/1",
- headers: ["if-none-match": "s"]
- )
-
- assert etag == resp.headers.hdrs["etag"]
- end
-
- @tag :with_db
- test "etags head", context do
- db_name = context[:db_name]
-
- resp =
- Couch.put("/#{db_name}/1",
- headers: ["Content-Type": "application/json"],
- body: %{}
- )
-
- assert resp.status_code == 201
- assert Map.has_key?(resp.headers.hdrs, "etag")
-
- etag = resp.headers.hdrs["etag"]
-
- # get the doc and verify the headers match
- resp = Couch.get("/#{db_name}/1")
- assert etag == resp.headers.hdrs["etag"]
-
- # 'head' the doc and verify the headers match
- resp =
- Couch.head("/#{db_name}/1",
- headers: ["if-none-match": "s"]
- )
-
- assert etag == resp.headers.hdrs["etag"]
-
- # replace a doc
- resp =
- Couch.put("/#{db_name}/1",
- headers: ["if-match": etag],
- body: %{}
- )
-
- assert resp.status_code == 201
-
- # extract the new ETag value
- previous_etag = etag
- etag = resp.headers.hdrs["etag"]
-
- # fail to replace a doc
- resp =
- Couch.put("/#{db_name}/1",
- body: %{}
- )
-
- assert resp.status_code == 409
-
- # verify get w/Etag
- resp =
- Couch.get("/#{db_name}/1",
- headers: ["if-none-match": previous_etag]
- )
-
- assert resp.status_code == 200
-
- resp =
- Couch.get("/#{db_name}/1",
- headers: ["if-none-match": etag]
- )
-
- assert resp.status_code == 304
-
- resp =
- Couch.get("/#{db_name}/1",
- headers: ["if-none-match": "W/#{etag}"]
- )
-
- assert resp.status_code == 304
-
- # fail to delete a doc
- resp =
- Couch.delete("/#{db_name}/1",
- headers: ["if-match": previous_etag]
- )
-
- assert resp.status_code == 409
-
- resp =
- Couch.delete("/#{db_name}/1",
- headers: ["if-match": etag]
- )
-
- assert resp.status_code == 200
- end
-end
diff --git a/test/elixir/test/form_submit_test.exs b/test/elixir/test/form_submit_test.exs
deleted file mode 100644
index 1baf947ac..000000000
--- a/test/elixir/test/form_submit_test.exs
+++ /dev/null
@@ -1,29 +0,0 @@
-defmodule FormSubmitTest do
- use CouchTestCase
-
- @moduletag :form_submit
-
- @moduledoc """
- Test that urlencoded form submissions are rejected with an invalid content-type error
- This is a port of form_submit.js
- """
-
- @tag :with_db
- test "form submission gives back invalid content-type", context do
- headers = [
- Referer: "http://127.0.0.1:15984",
- "Content-Type": "application/x-www-form-urlencoded"
- ]
-
- body = %{}
-
- %{:body => response_body, :status_code => status_code} =
- Couch.post("/#{context[:db_name]}/baz", headers: headers, body: body)
-
- %{"error" => error, "reason" => reason} = response_body
-
- assert status_code == 415
- assert error == "bad_content_type"
- assert reason == "Content-Type must be multipart/form-data"
- end
-end
diff --git a/test/elixir/test/helper_test.exs b/test/elixir/test/helper_test.exs
deleted file mode 100644
index 19d70eac8..000000000
--- a/test/elixir/test/helper_test.exs
+++ /dev/null
@@ -1,31 +0,0 @@
-defmodule HelperTest do
- use CouchTestCase
-
- @moduledoc """
- Test helper code
- """
-
- test "retry_until handles boolean conditions", _context do
- retry_until(fn ->
- true
- end)
- end
-
- test "retry_until handles assertions", _context do
- retry_until(fn ->
- assert true
- end)
- end
-
- test "retry_until times out", _context do
- assert_raise RuntimeError, ~r/^timed out after \d+ ms$/, fn ->
- retry_until(
- fn ->
- assert false
- end,
- 1,
- 5
- )
- end
- end
-end
diff --git a/test/elixir/test/http_test.exs b/test/elixir/test/http_test.exs
deleted file mode 100644
index 4f4cf26d6..000000000
--- a/test/elixir/test/http_test.exs
+++ /dev/null
@@ -1,81 +0,0 @@
-defmodule HttpTest do
- use CouchTestCase
-
- @moduletag :http
-
- @tag :with_db
- test "location header", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/test", body: %{})
- db_url = Couch.process_url("/" <> db_name)
- assert resp.headers.hdrs["location"] == db_url <> "/test"
- end
-
- @tag :with_db
- test "location header should include X-Forwarded-Host", context do
- db_name = context[:db_name]
-
- resp =
- Couch.put("/#{db_name}/test2",
- body: %{},
- headers: ["X-Forwarded-Host": "mysite.com"]
- )
-
- assert resp.headers.hdrs["location"] == "http://mysite.com/#{db_name}/test2"
- end
-
- @tag :with_db
- test "location header should include custom header", context do
- db_name = context[:db_name]
-
- server_config = [
- %{
- :section => "chttpd",
- :key => "x_forwarded_host",
- :value => "X-Host"
- }
- ]
-
- run_on_modified_server(server_config, fn ->
- resp =
- Couch.put("/#{db_name}/test3",
- body: %{},
- headers: ["X-Host": "mysite2.com"]
- )
-
- assert resp.headers.hdrs["location"] == "http://mysite2.com/#{db_name}/test3"
- end)
- end
-
- @tag :with_db
- test "COUCHDB-708: newlines document names", context do
- db_name = context[:db_name]
-
- resp =
- Couch.put("/#{db_name}/docid%0A/attachment.txt",
- body: %{},
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- db_url = Couch.process_url("/" <> db_name)
- assert resp.headers.hdrs["location"] == db_url <> "/docid%0A/attachment.txt"
-
- resp =
- Couch.put("/#{db_name}/docidtest%0A",
- body: %{},
- headers: ["Content-Type": "text/plain;charset=utf-8"]
- )
-
- db_url = Couch.process_url("/" <> db_name)
- assert resp.headers.hdrs["location"] == db_url <> "/docidtest%0A"
-
- resp =
- Couch.post("/#{db_name}/",
- body: %{_id: "docidtestpost%0A"},
- headers: ["Content-Type": "application/json"]
- )
-
- db_url = Couch.process_url("/" <> db_name)
- assert resp.headers.hdrs["location"] == db_url <> "/docidtestpost%250A"
- end
-end
diff --git a/test/elixir/test/invalid_docids_test.exs b/test/elixir/test/invalid_docids_test.exs
deleted file mode 100644
index edce5cc65..000000000
--- a/test/elixir/test/invalid_docids_test.exs
+++ /dev/null
@@ -1,85 +0,0 @@
-defmodule InvalidDocIDsTest do
- use CouchTestCase
-
- @moduletag :invalid_doc_ids
-
- @moduledoc """
- Test invalid document ids
- This is a port of the invalid_docids.js suite
- """
-
- @tag :with_db
- test "_local-prefixed ids are illegal", context do
- db_name = context[:db_name]
-
- [
- "/#{db_name}/_local",
- "/#{db_name}/_local/",
- "/#{db_name}/_local%2F",
- "/#{db_name}/_local/foo/bar"
- ]
- |> Enum.each(fn url ->
- %{status_code: status, body: body} = Couch.put(url, body: %{})
- assert status === 400
- assert body["error"] === "bad_request"
- end)
- end
-
- @tag :with_db
- test "using a non-string id is forbidden", context do
- db_name = context[:db_name]
- %{status_code: status, body: body} = Couch.post("/#{db_name}", body: %{:_id => 1})
- assert status === 400
- assert body["error"] === "illegal_docid"
- assert body["reason"] === "Document id must be a string"
- end
-
- @tag :with_db
- test "a PUT request with absent _id is forbidden", context do
- db_name = context[:db_name]
- %{status_code: status, body: body} = Couch.put("/#{db_name}/_other", body: %{})
- assert status === 400
- assert body["error"] === "illegal_docid"
- end
-
- @tag :with_db
- test "accidental POST to form handling code", context do
- db_name = context[:db_name]
- %{status_code: status, body: body} = Couch.put("/#{db_name}/_tmp_view", body: %{})
- assert status === 400
- assert body["error"] === "illegal_docid"
- end
-
- @tag :with_db
- test "invalid _prefix", context do
- db_name = context[:db_name]
-
- %{status_code: status, body: body} =
- Couch.post("/#{db_name}", body: %{:_id => "_invalid"})
-
- assert status === 400
- assert body["error"] === "illegal_docid"
- assert body["reason"] === "Only reserved document ids may start with underscore."
- end
-
- @tag :with_db
- test "explicit _bulk_docks policy", context do
- db_name = context[:db_name]
- docs = [%{:_id => "_design/foo"}, %{:_id => "_local/bar"}]
-
- %{status_code: status} = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs})
-
- assert status in [201, 202]
-
- Enum.each(docs, fn %{:_id => id} ->
- %{:body => %{"_id" => document_id}} = Couch.get("/#{db_name}/#{id}")
- assert document_id === id
- end)
-
- %{status_code: invalid_status, body: invalid_body} =
- Couch.post("/#{db_name}/_bulk_docs", body: %{docs: [%{:_id => "_invalid"}]})
-
- assert invalid_status === 400
- assert invalid_body["error"] === "illegal_docid"
- end
-end
diff --git a/test/elixir/test/jsonp_test.exs b/test/elixir/test/jsonp_test.exs
deleted file mode 100644
index 169f66387..000000000
--- a/test/elixir/test/jsonp_test.exs
+++ /dev/null
@@ -1,116 +0,0 @@
-defmodule JsonpTest do
- use CouchTestCase
-
- @moduletag :jsonp
-
- @tag :with_db
- test "jsonp not configured callbacks", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, %{_id: "0", a: 0, b: 0})
-
- resp = Couch.get("/#{db_name}/0?callback=jsonp_no_chunk")
- assert resp.status_code == 200
- assert resp.headers.hdrs["content-type"] == "application/json"
- end
-
- @tag :with_db
- test "jsonp unchunked callbacks", context do
- db_name = context[:db_name]
-
- server_config = [
- %{
- :section => "chttpd",
- :key => "allow_jsonp",
- :value => "true"
- }
- ]
-
- {:ok, create_resp} = create_doc(db_name, %{_id: "0", a: 0, b: 0})
-
- run_on_modified_server(server_config, fn ->
- resp = Couch.get("/#{db_name}/0?callback=jsonp_no_chunk")
-
- assert resp.status_code == 200
- assert resp.headers.hdrs["content-type"] == "application/javascript"
-
- {callback_fun, callback_param} = parse_callback(resp.body)
-
- assert callback_fun == "jsonp_no_chunk"
- assert create_resp.body["id"] == callback_param["_id"]
- assert create_resp.body["rev"] == callback_param["_rev"]
-
- resp = Couch.get("/#{db_name}/0?callback=jsonp_no_chunk\"")
- assert resp.status_code == 400
- end)
- end
-
- @tag :with_db
- test "jsonp chunked callbacks", context do
- db_name = context[:db_name]
-
- server_config = [
- %{
- :section => "chttpd",
- :key => "allow_jsonp",
- :value => "true"
- }
- ]
-
- design_doc = %{
- _id: "_design/test",
- language: "javascript",
- views: %{
- all_docs: %{map: "function(doc) {if(doc.a) emit(null, doc.a);}"}
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc)
- {:ok, _} = create_doc(db_name, %{_id: "0", a: 0, b: 0})
- {:ok, _} = create_doc(db_name, %{_id: "1", a: 1, b: 1})
-
- run_on_modified_server(server_config, fn ->
- resp = Couch.get("/#{db_name}/_design/test/_view/all_docs?callback=jsonp_chunk")
- assert resp.status_code == 200
- assert resp.headers.hdrs["content-type"] == "application/javascript"
-
- {callback_fun, callback_param} = parse_callback(resp.body)
-
- assert callback_fun == "jsonp_chunk"
- assert callback_param["total_rows"] == 1
-
- resp = Couch.get("/#{db_name}/_design/test/_view/all_docs?callback=jsonp_chunk'")
- assert resp.status_code == 400
-
- resp = Couch.get("/#{db_name}/_changes?callback=jsonp_chunk")
- assert resp.status_code == 200
- assert resp.headers.hdrs["content-type"] == "application/javascript"
-
- {callback_fun, callback_param} = parse_callback(resp.body)
- assert callback_fun == "jsonp_chunk"
- assert length(callback_param["results"]) == 3
-
- end)
- end
-
- defp parse_callback(msg) do
- captures = Regex.scan(~r/\/\* CouchDB \*\/(\w+)\((.*)\)/s, msg)
-
- callback_fun =
- captures
- |> Enum.map(fn p -> Enum.at(p, 1) end)
- |> Enum.at(0)
-
- param =
- captures
- |> Enum.map(fn p -> Enum.at(p, 2) end)
- |> Enum.filter(fn p -> String.trim(p) != "" end)
- |> Enum.map(fn p ->
- p
- |> IO.iodata_to_binary()
- |> :jiffy.decode([:return_maps])
- end)
- |> Enum.at(0)
-
- {callback_fun, param}
- end
-end
diff --git a/test/elixir/test/jwtauth_test.exs b/test/elixir/test/jwtauth_test.exs
deleted file mode 100644
index e4f21f261..000000000
--- a/test/elixir/test/jwtauth_test.exs
+++ /dev/null
@@ -1,217 +0,0 @@
-defmodule JwtAuthTest do
- use CouchTestCase
-
- @moduletag :authentication
-
- test "jwt auth with HMAC secret", _context do
-
- secret = "zxczxc12zxczxc12"
-
- server_config = [
- %{
- :section => "jwt_keys",
- :key => "hmac:_default",
- :value => :base64.encode(secret)
- },
- %{
- :section => "jwt_auth",
- :key => "allowed_algorithms",
- :value => "HS256, HS384, HS512"
- }
- ]
-
- run_on_modified_server(server_config, fn -> test_fun("HS256", secret) end)
- run_on_modified_server(server_config, fn -> test_fun("HS384", secret) end)
- run_on_modified_server(server_config, fn -> test_fun("HS512", secret) end)
- end
-
- defmodule RSA do
- require Record
- Record.defrecord :public, :RSAPublicKey,
- Record.extract(:RSAPublicKey, from_lib: "public_key/include/public_key.hrl")
- Record.defrecord :private, :RSAPrivateKey,
- Record.extract(:RSAPrivateKey, from_lib: "public_key/include/public_key.hrl")
- end
-
- test "jwt auth with RSA secret", _context do
- require JwtAuthTest.RSA
-
- private_key = :public_key.generate_key({:rsa, 2048, 17})
- public_key = RSA.public(
- modulus: RSA.private(private_key, :modulus),
- publicExponent: RSA.private(private_key, :publicExponent))
-
- public_pem = :public_key.pem_encode(
- [:public_key.pem_entry_encode(
- :SubjectPublicKeyInfo, public_key)])
- public_pem = String.replace(public_pem, "\n", "\\n")
-
- server_config = [
- %{
- :section => "jwt_keys",
- :key => "rsa:_default",
- :value => public_pem
- },
- %{
- :section => "jwt_auth",
- :key => "allowed_algorithms",
- :value => "RS256, RS384, RS512"
- }
- ]
-
- run_on_modified_server(server_config, fn -> test_fun("RS256", private_key) end)
- run_on_modified_server(server_config, fn -> test_fun("RS384", private_key) end)
- run_on_modified_server(server_config, fn -> test_fun("RS512", private_key) end)
- end
-
- defmodule EC do
- require Record
- Record.defrecord :point, :ECPoint,
- Record.extract(:ECPoint, from_lib: "public_key/include/public_key.hrl")
- Record.defrecord :private, :ECPrivateKey,
- Record.extract(:ECPrivateKey, from_lib: "public_key/include/public_key.hrl")
- end
-
- test "jwt auth with EC secret", _context do
- require JwtAuthTest.EC
-
- private_key = :public_key.generate_key({:namedCurve, :secp256r1})
- point = EC.point(point: EC.private(private_key, :publicKey))
- public_key = {point, EC.private(private_key, :parameters)}
-
- public_pem = :public_key.pem_encode(
- [:public_key.pem_entry_encode(
- :SubjectPublicKeyInfo, public_key)])
- public_pem = String.replace(public_pem, "\n", "\\n")
-
- server_config = [
- %{
- :section => "jwt_keys",
- :key => "ec:_default",
- :value => public_pem
- },
- %{
- :section => "jwt_auth",
- :key => "allowed_algorithms",
- :value => "ES256, ES384, ES512"
- }
- ]
-
- run_on_modified_server(server_config, fn -> test_fun("ES256", private_key) end)
- run_on_modified_server(server_config, fn -> test_fun("ES384", private_key) end)
- run_on_modified_server(server_config, fn -> test_fun("ES512", private_key) end)
- end
-
- def test_fun(alg, key) do
- now = DateTime.to_unix(DateTime.utc_now())
- {:ok, token} = :jwtf.encode(
- {
- [
- {"alg", alg},
- {"typ", "JWT"}
- ]
- },
- {
- [
- {"nbf", now - 60},
- {"exp", now + 60},
- {"sub", "couch@apache.org"},
- {"_couchdb.roles", ["testing"]
- }
- ]
- }, key)
-
- resp = Couch.get("/_session",
- headers: [authorization: "Bearer #{token}"]
- )
-
- assert resp.status_code == 200
- assert resp.body["userCtx"]["name"] == "couch@apache.org"
- assert resp.body["info"]["authenticated"] == "jwt"
- end
-
- test "jwt auth without secret", _context do
-
- resp = Couch.get("/_session")
-
- assert resp.body["userCtx"]["name"] == "adm"
- assert resp.body["info"]["authenticated"] == "default"
- end
-
- test "jwt auth with required iss claim", _context do
-
- secret = "zxczxc12zxczxc12"
-
- server_config = [
- %{
- :section => "jwt_auth",
- :key => "required_claims",
- :value => "{iss, \"hello\"}"
- },
- %{
- :section => "jwt_keys",
- :key => "hmac:_default",
- :value => :base64.encode(secret)
- },
- %{
- :section => "jwt_auth",
- :key => "allowed_algorithms",
- :value => "HS256, HS384, HS512"
- }
- ]
-
- run_on_modified_server(server_config, fn -> good_iss("HS256", secret) end)
- run_on_modified_server(server_config, fn -> bad_iss("HS256", secret) end)
- end
-
- def good_iss(alg, key) do
- {:ok, token} = :jwtf.encode(
- {
- [
- {"alg", alg},
- {"typ", "JWT"}
- ]
- },
- {
- [
- {"iss", "hello"},
- {"sub", "couch@apache.org"},
- {"_couchdb.roles", ["testing"]
- }
- ]
- }, key)
-
- resp = Couch.get("/_session",
- headers: [authorization: "Bearer #{token}"]
- )
-
- assert resp.body["userCtx"]["name"] == "couch@apache.org"
- assert resp.body["userCtx"]["roles"] == ["testing"]
- assert resp.body["info"]["authenticated"] == "jwt"
- end
-
- def bad_iss(alg, key) do
- {:ok, token} = :jwtf.encode(
- {
- [
- {"alg", alg},
- {"typ", "JWT"}
- ]
- },
- {
- [
- {"iss", "goodbye"},
- {"sub", "couch@apache.org"},
- {"_couchdb.roles", ["testing"]
- }
- ]
- }, key)
-
- resp = Couch.get("/_session",
- headers: [authorization: "Bearer #{token}"]
- )
-
- assert resp.status_code == 400
- end
-
-end
diff --git a/test/elixir/test/large_docs_text.exs b/test/elixir/test/large_docs_text.exs
deleted file mode 100644
index 4d2c5dede..000000000
--- a/test/elixir/test/large_docs_text.exs
+++ /dev/null
@@ -1,40 +0,0 @@
-defmodule LargeDocsTest do
- use CouchTestCase
-
- @moduletag :large_docs
- @long_string "0123456789\n"
-
- @moduledoc """
- Test saving a bunch of large documents.
- This is a port of the large_docs.js suite
- """
-
- @tag :with_db
- test "Large docs", context do
- db_name = context[:db_name]
- long_text = String.duplicate(@long_string, 10)
-
- resp1 = Couch.post("/#{db_name}", body: %{:_id => "0", :longtest => long_text}).body
- resp2 = Couch.post("/#{db_name}", body: %{:_id => "1", :longtest => long_text}).body
- resp3 = Couch.post("/#{db_name}", body: %{:_id => "2", :longtest => long_text}).body
- resp4 = Couch.post("/#{db_name}", body: %{:_id => "3", :longtest => long_text}).body
-
- assert resp1["ok"]
- assert resp2["ok"]
- assert resp3["ok"]
- assert resp4["ok"]
-
- %{"rows" => rows} = query(db_name)
- assert Enum.count(rows) === 4
- Enum.each(rows, fn row -> assert row["value"] === long_text end)
- end
-
- defp query(db_name) do
- map_fun = "function(doc) { emit(null, doc.longtest); }"
- map_doc = %{:views => %{:view => %{:map => map_fun}}}
- %{"rev" => rev} = Couch.put("/#{db_name}/_design/tempddoc", body: map_doc).body
- response = Couch.get("/#{db_name}/_design/tempddoc/_view/view").body
- Couch.delete("/#{db_name}/_design/tempddoc?rev=#{rev}")
- response
- end
-end
diff --git a/test/elixir/test/list_views_test.exs b/test/elixir/test/list_views_test.exs
deleted file mode 100644
index 8e6314dfb..000000000
--- a/test/elixir/test/list_views_test.exs
+++ /dev/null
@@ -1,581 +0,0 @@
-defmodule ListViewsTest do
- use CouchTestCase
-
- @moduletag kind: :single_node
-
- @ddoc %{
- _id: "_design/lists",
- language: "javascript",
- views: %{
- basicView: %{
- map: """
- function(doc) {
- emit(doc.integer, doc.string);
- }
- """
- },
- withReduce: %{
- map: """
- function(doc) {
- emit(doc.integer, doc.string);
- }
- """,
- reduce: """
- function(keys, values, rereduce) {
- if (rereduce) {
- return sum(values);
- } else {
- return values.length;
- }
- }
- """
- }
- },
- lists: %{
- basicBasic: """
- function(head, req) {
- send("head");
- var row;
- while(row = getRow()) {
- send(row.key);
- };
- return "tail";
- }
- """,
- basicJSON: """
- function(head, req) {
- start({"headers":{"Content-Type" : "application/json"}});
- send('{"head":'+toJSON(head)+', ');
- send('"req":'+toJSON(req)+', ');
- send('"rows":[');
- var row, sep = '';
- while (row = getRow()) {
- send(sep + toJSON(row));
- sep = ', ';
- }
- return "]}";
- }
- """,
- simpleForm: """
- function(head, req) {
- send('<ul>');
- var row, row_number = 0, prevKey, firstKey = null;
- while (row = getRow()) {
- row_number += 1;
- if (!firstKey) firstKey = row.key;
- prevKey = row.key;
- send('\\n<li>Key: '+row.key
- +' Value: '+row.value
- +' LineNo: '+row_number+'</li>');
- }
- return '</ul><p>FirstKey: '+ firstKey + ' LastKey: '+ prevKey+'</p>';
- }
- """,
- acceptSwitch: """
- function(head, req) {
- // respondWith takes care of setting the proper headers
- provides("html", function() {
- send("HTML <ul>");
-
- var row, num = 0;
- while (row = getRow()) {
- num ++;
- send('\\n<li>Key: '
- +row.key+' Value: '+row.value
- +' LineNo: '+num+'</li>');
- }
-
- // tail
- return '</ul>';
- });
- }
- """,
- qsParams: """
- function(head, req) {
- return toJSON(req.query) + "\\n";
- }
- """,
- stopIter: """
- function(req) {
- send("head");
- var row, row_number = 0;
- while(row = getRow()) {
- if(row_number > 2) break;
- send(" " + row_number);
- row_number += 1;
- };
- return " tail";
- }
- """,
- stopIter2: """
- function(head, req) {
- provides("html", function() {
- send("head");
- var row, row_number = 0;
- while(row = getRow()) {
- if(row_number > 2) break;
- send(" " + row_number);
- row_number += 1;
- };
- return " tail";
- });
- }
- """,
- tooManyGetRows: """
- function() {
- send("head");
- var row;
- while(row = getRow()) {
- send(row.key);
- };
- getRow();
- getRow();
- getRow();
- row = getRow();
- return "after row: "+toJSON(row);
- }
- """,
- emptyList: """
- function() {
- return " ";
- }
- """,
- rowError: """
- function(head, req) {
- send("head");
- var row = getRow();
- send(fooBarBam); // intentional error
- return "tail";
- }
- """,
- docReference: """
- function(head, req) {
- send("head");
- var row = getRow();
- send(row.doc.integer);
- return "tail";
- }
- """,
- secObj: """
- function(head, req) {
- return toJSON(req.secObj);
- }
- """,
- setHeaderAfterGotRow: """
- function(head, req) {
- getRow();
- start({
- code: 400,
- headers: {
- "X-My-Header": "MyHeader"
- }
- });
- send("bad request");
- }
- """,
- allDocs: """
- function(head, req){
- start({'headers': {'Content-Type': 'application/json'}});
- var resp = head;
- var rows = [];
- while(row=getRow()){
- rows.push(row);
- }
- resp.rows = rows;
- return toJSON(resp);
- }
- """
- }
- }
-
- @view_only_design_doc %{
- _id: "_design/views",
- language: "javascript",
- views: %{
- basicView: %{
- map: """
- function(doc) {
- emit(-doc.integer, doc.string);
- }
- """
- }
- }
- }
-
- @erl_list_doc %{
- _id: "_design/erlang",
- language: "erlang",
- lists: %{
- simple: """
- fun(Head, {Req}) ->
- Send(<<"[">>),
- Fun = fun({Row}, Sep) ->
- Val = couch_util:get_value(<<"key">>, Row, 23),
- Send(list_to_binary(Sep ++ integer_to_list(Val))),
- {ok, ","}
- end,
- {ok, _} = FoldRows(Fun, ""),
- Send(<<"]">>)
- end.
- """
- }
- }
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- {:ok, _} = create_doc(db_name, @ddoc)
- bulk_save(db_name, make_docs(0..9))
-
- # Check setup
- resp = view(db_name, "lists/basicView")
- assert resp.body["total_rows"] == 10
-
- db_name_cross = "#{db_name}_cross"
- {:ok, _} = create_db(db_name_cross)
- on_exit(fn -> delete_db(db_name_cross) end)
-
- {:ok, _} = create_doc(db_name_cross, @ddoc)
- {:ok, _} = create_doc(db_name_cross, @view_only_design_doc)
- bulk_save(db_name_cross, make_docs(0..9))
-
- db_name_erlang = "#{db_name}_erlang"
- {:ok, _} = create_db(db_name_erlang)
- on_exit(fn -> delete_db(db_name_erlang) end)
-
- {:ok, _} = create_doc(db_name_erlang, @erl_list_doc)
- {:ok, _} = create_doc(db_name_erlang, @view_only_design_doc)
- bulk_save(db_name_erlang, make_docs(0..9))
-
- {:ok,
- [db_name: db_name, db_name_cross: db_name_cross, db_name_erlang: db_name_erlang]}
- end
-
- test "standard GET", context do
- db_name = context[:db_name]
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/basicBasic/basicView")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/head0123456789tail/)
- end
-
- test "standard OPTIONS", context do
- db_name = context[:db_name]
- resp = Rawresp.options("/#{db_name}/_design/lists/_list/basicBasic/basicView")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/head0123456789tail/)
- end
-
- test "the richness of the arguments", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design/lists/_list/basicJSON/basicView?update_seq=true")
-
- assert resp.status_code == 200
- assert resp.body["head"]["total_rows"] == 10
- assert resp.body["head"]["offset"] == 0
- assert length(resp.body["rows"]) == 10
- assert Enum.at(resp.body["rows"], 0) == %{"id" => "0", "key" => 0, "value" => "0"}
- assert resp.body["req"]["info"]["db_name"] == db_name
- assert resp.body["req"]["method"] == "GET"
-
- assert resp.body["req"]["path"] == [
- db_name,
- "_design",
- "lists",
- "_list",
- "basicJSON",
- "basicView"
- ]
-
- assert Map.has_key?(resp.body["req"]["headers"], "Host") == true
- assert Map.has_key?(resp.body["req"]["headers"], "User-Agent") == true
- assert Map.has_key?(resp.body["req"], "cookie")
-
- assert resp.body["req"]["raw_path"] ==
- "/#{db_name}/_design/lists/_list/basicJSON/basicView?update_seq=true"
- end
-
- test "get with query params", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/lists/_list/simpleForm/basicView?startkey=3&endkey=8"
- )
-
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: 1/)
- assert String.match?(resp.body, ~r/FirstKey: 3/)
- assert String.match?(resp.body, ~r/LastKey: 8/)
- end
-
- test "with 0 rows", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/simpleForm/basicView?startkey=30")
-
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/<\/ul>/)
- end
-
- test "too many Get Rows", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/tooManyGetRows/basicView")
-
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/9after row: null/)
- end
-
- test "reduce with 0 rows", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get("/#{db_name}/_design/lists/_list/simpleForm/withReduce?startkey=30")
-
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/LastKey: undefined/)
- end
-
- test "when there is a reduce present, but not used", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get("/#{db_name}/_design/lists/_list/simpleForm/withReduce?reduce=false")
-
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Key: 1/)
- end
-
- test "when there is a reduce present, and used", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/simpleForm/withReduce?group=true")
-
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Key: 1/)
- end
-
- test "empty list", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/emptyList/basicView")
- assert String.match?(resp.body, ~r/^ $/)
-
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/emptyList/withReduce?group=true")
- assert String.match?(resp.body, ~r/^ $/)
- end
-
- test "multi-key fetch with POST", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.post("/#{db_name}/_design/lists/_list/simpleForm/basicView",
- body: %{keys: [2, 4, 5, 7]}
- )
-
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: 1/)
- assert String.match?(resp.body, ~r/Key: 2/)
- assert String.match?(resp.body, ~r/FirstKey: 2/)
- assert String.match?(resp.body, ~r/LastKey: 7/)
- end
-
- test "multi-key fetch with GET", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get("/#{db_name}/_design/lists/_list/simpleForm/basicView?keys=[2,4,5,7]")
-
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: 1/)
- assert String.match?(resp.body, ~r/Key: 2/)
- assert String.match?(resp.body, ~r/FirstKey: 2/)
- assert String.match?(resp.body, ~r/LastKey: 7/)
- end
-
- test "no multi-key fetch allowed when group=false", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.post("/#{db_name}/_design/lists/_list/simpleForm/withReduce?group=false",
- body: %{keys: [2, 4, 5, 7]}
- )
-
- assert resp.status_code == 400
- assert String.match?(resp.body, ~r/query_parse_error/)
-
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/rowError/basicView")
- assert String.match?(resp.body, ~r/ReferenceError/)
- end
-
- test "with include_docs and a reference to the doc", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/lists/_list/docReference/basicView?include_docs=true"
- )
-
- assert String.match?(resp.body, ~r/head0tail/)
- end
-
- test "extra qs params", context do
- db_name = context[:db_name]
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/qsParams/basicView?foo=blam")
- assert String.match?(resp.body, ~r/blam/)
- end
-
- test "stop iteration", context do
- db_name = context[:db_name]
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/stopIter/basicView")
- assert String.match?(resp.body, ~r/^head 0 1 2 tail$/)
-
- resp =
- Rawresp.get("/#{db_name}/_design/lists/_list/stopIter2/basicView",
- headers: [Accept: "text/html"]
- )
-
- assert String.match?(resp.body, ~r/^head 0 1 2 tail$/)
- end
-
- test "abort iteration with reduce", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/stopIter/withReduce?group=true")
- assert String.match?(resp.body, ~r/^head 0 1 2 tail$/)
-
- resp =
- Rawresp.get("/#{db_name}/_design/lists/_list/stopIter2/withReduce?group=true",
- headers: [Accept: "text/html"]
- )
-
- assert String.match?(resp.body, ~r/^head 0 1 2 tail$/)
- end
-
- test "with accept headers for HTML", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get("/#{db_name}/_design/lists/_list/acceptSwitch/basicView",
- headers: [Accept: "text/html"]
- )
-
- assert resp.headers["Content-Type"] == "text/html; charset=utf-8"
- assert String.match?(resp.body, ~r/HTML/)
- assert String.match?(resp.body, ~r/Value/)
- end
-
- test "we can run lists and views from separate docs", context do
- db_name = context[:db_name_cross]
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/lists/_list/simpleForm/views/basicView?startkey=-3"
- )
-
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: -4/)
- assert String.match?(resp.body, ~r/FirstKey: -3/)
- assert String.match?(resp.body, ~r/LastKey: 0/)
- end
-
- test "we do multi-key requests on lists and views in separate docs", context do
- db_name = context[:db_name_cross]
-
- resp =
- Rawresp.post(
- "/#{db_name}/_design/lists/_list/simpleForm/views/basicView",
- body: %{keys: [-2, -4, -5, -7]}
- )
-
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: -3/)
- assert String.match?(resp.body, ~r/Key: -7/)
- assert String.match?(resp.body, ~r/FirstKey: -2/)
- assert String.match?(resp.body, ~r/LastKey: -7/)
- end
-
- test "secObj is available", context do
- db_name = context[:db_name]
-
- resp = Couch.get("/#{db_name}/_design/lists/_list/secObj/basicView")
- assert resp.status_code == 200
- assert is_map(resp.body)
- end
-
- test "multiple languages in design docs", context do
- db_name = context[:db_name_erlang]
-
- resp =
- Couch.get("/#{db_name}/_design/erlang/_list/simple/views/basicView?startkey=-3")
-
- assert resp.status_code == 200
- assert length(resp.body) == 4
-
- for i <- 0..3 do
- assert Enum.at(resp.body, i) + 3 == i
- end
- end
-
- @tag :with_db
- test "COUCHDB-1113", context do
- db_name = context[:db_name]
-
- ddoc = %{
- _id: "_design/test",
- views: %{
- me: %{
- map: "function(doc) { emit(null,null)}"
- }
- },
- lists: %{
- you: """
- function(head, req) {
- var row;
- while(row = getRow()) {
- send(row);
- }
- }
- """
- }
- }
-
- {:ok, _} = create_doc(db_name, ddoc)
-
- resp =
- Couch.get("/#{db_name}/_design/test/_list/you/me",
- headers: [
- "Content-Type": "application/x-www-form-urlencoded"
- ]
- )
-
- assert resp.status_code == 200
- end
-
- test "HTTP header response set after getRow() called in _list function", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/lists/_list/setHeaderAfterGotRow/basicView")
- assert resp.status_code == 400
- assert resp.headers["X-My-Header"] == "MyHeader"
- assert String.match?(resp.body, ~r/^bad request$/)
- end
-
- test "handling _all_docs by _list functions. the result should be equal", context do
- db_name = context[:db_name]
-
- resp_list = Couch.get("/#{db_name}/_design/lists/_list/allDocs/_all_docs")
- assert resp_list.status_code == 200
-
- resp_alldocs = Couch.get("/#{db_name}/_all_docs")
-
- assert resp_list.body["total_rows"] == resp_alldocs.body["total_rows"]
- assert resp_list.body["offset"] == resp_alldocs.body["offset"]
- assert length(resp_list.body["rows"]) == length(resp_alldocs.body["rows"])
- assert resp_list.body["rows"] == resp_alldocs.body["rows"]
- end
-end
diff --git a/test/elixir/test/local_docs_test.exs b/test/elixir/test/local_docs_test.exs
deleted file mode 100644
index ff071f3e6..000000000
--- a/test/elixir/test/local_docs_test.exs
+++ /dev/null
@@ -1,110 +0,0 @@
-defmodule LocalDocsTest do
- use CouchTestCase
-
- @moduletag :local_docs
-
- @moduledoc """
- Test CouchDB _local_docs
- """
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- resp1 = Couch.put(
- "/#{db_name}/_local/foo",
- body: %{
- _id: "foo",
- bar: "baz"
- }
- )
- assert resp1.status_code == 201
-
- resp2 = Couch.put(
- "/#{db_name}/_local/foo2",
- body: %{
- _id: "foo",
- bar: "baz2"
- }
- )
- assert resp2.status_code == 201
-
- {:ok, [db_name: db_name]}
- end
-
- test "GET with no parameters", context do
- resp = Couch.get(
- "/#{context[:db_name]}/_local_docs"
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 2
- end
-
- test "GET with multiple keys", context do
- resp = Couch.get(
- "/#{context[:db_name]}/_local_docs",
- query: %{
- :keys => "[\"_local/foo\", \"_local/foo2\"]",
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 2
- end
-
- test "POST with empty body", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_local_docs",
- body: %{}
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 2
- end
-
- test "POST with keys and limit", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_local_docs",
- body: %{
- :keys => ["_local/foo", "_local/foo2"],
- :limit => 1
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- test "POST with query parameter and JSON body", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_local_docs",
- query: %{
- :limit => 1
- },
- body: %{
- :keys => ["_local/foo", "_local/foo2"]
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- test "POST edge case with colliding parameters - query takes precedence", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_local_docs",
- query: %{
- :limit => 0
- },
- body: %{
- :keys => ["_local/foo", "_local/foo2"],
- :limit => 2
- }
- )
-
- assert resp.status_code == 200
- assert Enum.empty?(Map.get(resp, :body)["rows"])
- end
-end
diff --git a/test/elixir/test/lots_of_docs_test.exs b/test/elixir/test/lots_of_docs_test.exs
deleted file mode 100644
index c0cc99198..000000000
--- a/test/elixir/test/lots_of_docs_test.exs
+++ /dev/null
@@ -1,116 +0,0 @@
-defmodule LotsOfDocsTest do
- use CouchTestCase
-
- @moduletag :lots_of_docs
- @docs_range 0..499
-
- @moduledoc """
- Test saving a semi-large quantity of documents and doing some view queries.
- This is a port of the lots_of_docs.js suite
- """
-
- @tag :with_db
- test "lots of docs with _all_docs", context do
- db_name = context[:db_name]
-
- @docs_range
- |> create_docs()
- |> Enum.chunk_every(100)
- |> Enum.each(fn docs -> bulk_post(docs, db_name) end)
-
- %{"rows" => rows, "total_rows" => total_rows} =
- Couch.get("/#{db_name}/_all_docs").body
-
- assert total_rows === Enum.count(@docs_range)
- assert total_rows === Enum.count(rows)
-
- @docs_range
- |> Enum.map(fn i -> Integer.to_string(i) end)
- |> Enum.sort()
- |> Enum.with_index()
- |> Enum.each(fn {value, index} ->
- assert Map.fetch!(Enum.at(rows, index), "key") === value
- end)
-
- retry_until(fn ->
- %{"rows" => desc_rows, "total_rows" => desc_total_rows} =
- Couch.get(
- "/#{db_name}/_all_docs",
- query: %{:descending => true}
- ).body
-
- assert desc_total_rows === Enum.count(@docs_range)
- assert desc_total_rows === Enum.count(desc_rows)
-
- @docs_range
- |> Enum.map(fn i -> Integer.to_string(i) end)
- |> Enum.sort()
- |> Enum.reverse()
- |> Enum.with_index()
- |> Enum.each(fn {value, index} ->
- assert Map.fetch!(Enum.at(desc_rows, index), "key") === value
- end)
- end)
- end
-
- @tag :skip_on_jenkins
- @tag :with_db
- test "lots of docs with a regular view", context do
- db_name = context[:db_name]
-
- @docs_range
- |> create_docs()
- |> Enum.chunk_every(100)
- |> Enum.each(fn docs -> bulk_post(docs, db_name) end)
-
- %{"rows" => rows, "total_rows" => total_rows} = query_view(db_name)
- assert total_rows === Enum.count(rows)
- assert total_rows === Enum.count(@docs_range)
-
- Enum.each(@docs_range, fn i ->
- assert Map.fetch!(Enum.at(rows, i), "key") === i
- end)
-
- retry_until(fn ->
- %{"rows" => desc_rows, "total_rows" => desc_total_rows} =
- query_view(db_name, "descending")
-
- assert desc_total_rows === Enum.count(desc_rows)
- assert desc_total_rows === Enum.count(@docs_range)
-
- @docs_range
- |> Enum.reverse()
- |> Enum.with_index()
- |> Enum.each(fn {value, index} ->
- assert Map.fetch!(Enum.at(desc_rows, index), "key") === value
- end)
- end)
- end
-
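- # Creates a throwaway design doc, queries its view (optionally descending),
- # deletes the design doc again, and returns the response body.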
- defp query_view(db_name, sorting \\ "ascending") do
- descending = sorting === "descending"
- map_fun = "function(doc) { emit(doc.integer, null); }"
- map_doc = %{:views => %{:view => %{:map => map_fun}}}
- %{"rev" => rev} = Couch.put("/#{db_name}/_design/tempddoc", body: map_doc).body
-
- response =
- Couch.get(
- "/#{db_name}/_design/tempddoc/_view/view",
- query: %{:descending => descending}
- ).body
-
- Couch.delete("/#{db_name}/_design/tempddoc?rev=#{rev}")
- response
- end
-
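- # Posts a chunk of docs via _bulk_docs with w=3 and asserts the write
- # succeeded for every doc in the request.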
- defp bulk_post(docs, db) do
- resp = Couch.post("/#{db}/_bulk_docs", query: [w: 3], body: %{docs: docs})
-
- assert resp.status_code in [201, 202] and length(resp.body) == length(docs), """
- Expected 201 or 202 and the same number of response rows as in the request, but got
- #{pretty_inspect(resp)}
- """
-
- resp
- end
-end
diff --git a/test/elixir/test/method_override_test.exs b/test/elixir/test/method_override_test.exs
deleted file mode 100644
index c67fe3966..000000000
--- a/test/elixir/test/method_override_test.exs
+++ /dev/null
@@ -1,55 +0,0 @@
-defmodule MethodOverrideTest do
- use CouchTestCase
-
- @moduletag :http
-
- @moduledoc """
- Allow broken HTTP clients to fake a full method vocabulary with an
- X-HTTP-METHOD-OVERRIDE header
- """
-
- @tag :with_db
- test "method override PUT", context do
- db_name = context[:db_name]
-
- resp =
- Couch.post("/#{db_name}/fnord",
- body: %{bob: "connie"},
- headers: ["X-HTTP-Method-Override": "PUT"]
- )
-
- assert resp.status_code == 201
-
- resp = Couch.get("/#{db_name}/fnord")
- assert resp.body["bob"] == "connie"
- end
-
- @tag :with_db
- test "method override DELETE", context do
- db_name = context[:db_name]
- {:ok, resp} = create_doc(db_name, %{_id: "fnord", bob: "connie"})
-
- resp =
- Couch.post("/#{db_name}/fnord?rev=#{resp.body["rev"]}",
- headers: ["X-HTTP-Method-Override": "DELETE"]
- )
-
- assert resp.status_code == 200
-
- resp = Couch.get("/#{db_name}/fnord")
- assert resp.status_code == 404
- end
-
- @tag :with_db
- test "Method Override is ignored when original Method isn't POST", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/fnord2",
- body: %{bob: "connie"},
- headers: ["X-HTTP-Method-Override": "PUT"]
- )
-
- assert resp.status_code == 404
- end
-end
diff --git a/test/elixir/test/multiple_rows_test.exs b/test/elixir/test/multiple_rows_test.exs
deleted file mode 100644
index 646682823..000000000
--- a/test/elixir/test/multiple_rows_test.exs
+++ /dev/null
@@ -1,136 +0,0 @@
-defmodule MultipleRowsTest do
- use CouchTestCase
-
- @moduletag :multiple_rows
-
- @north_carolina_cities ["Charlotte", "Raleigh"]
- @massachussets_cities ["Boston", "Lowell", "Worcester", "Cambridge", "Springfield"]
- @florida_cities ["Miami", "Tampa", "Orlando", "Springfield"]
-
- @moduledoc """
- Test checking multiple rows
- This is a port of the multiple_rows.js suite
- """
-
- @tag :with_db
- test "multiple rows", context do
- db_name = context[:db_name]
-
- resp1 =
- Couch.put(
- "/#{db_name}/NC",
- body: %{:_id => "NC", :cities => @north_carolina_cities}
- ).body
-
- resp2 =
- Couch.put(
- "/#{db_name}/MA",
- body: %{
- :_id => "MA",
- :cities => @massachussets_cities
- }
- ).body
-
- resp3 =
- Couch.put("/#{db_name}/FL", body: %{:_id => "FL", :cities => @florida_cities}).body
-
- assert resp1["ok"]
- assert resp2["ok"]
- assert resp3["ok"]
-
- %{"rows" => rows, "total_rows" => total_rows} = query_list_cities_and_state(db_name)
-
- assert Enum.at(rows, 0)["key"] == "Boston, MA"
- assert Enum.at(rows, 1)["key"] == "Cambridge, MA"
- assert Enum.at(rows, 2)["key"] == "Charlotte, NC"
- assert Enum.at(rows, 3)["key"] == "Lowell, MA"
- assert Enum.at(rows, 4)["key"] == "Miami, FL"
- assert Enum.at(rows, 5)["key"] == "Orlando, FL"
- assert Enum.at(rows, 6)["key"] == "Raleigh, NC"
- assert Enum.at(rows, 7)["key"] == "Springfield, FL"
- assert Enum.at(rows, 8)["key"] == "Springfield, MA"
- assert Enum.at(rows, 9)["key"] == "Tampa, FL"
- assert Enum.at(rows, 10)["key"] == "Worcester, MA"
-
- assert total_rows === 11
-
- new_insert_resp =
- Couch.put(
- "/#{db_name}/NC",
- body: %{
- :id => "NC",
- :cities => List.insert_at(@north_carolina_cities, -1, "Wilmington"),
- :_rev => resp1["rev"]
- }
- ).body
-
- assert new_insert_resp["ok"]
-
- %{"rows" => rows, "total_rows" => total_rows} = query_list_cities_and_state(db_name)
-
- assert Enum.at(rows, 0)["key"] == "Boston, MA"
- assert Enum.at(rows, 1)["key"] == "Cambridge, MA"
- assert Enum.at(rows, 2)["key"] == "Charlotte, NC"
- assert Enum.at(rows, 3)["key"] == "Lowell, MA"
- assert Enum.at(rows, 4)["key"] == "Miami, FL"
- assert Enum.at(rows, 5)["key"] == "Orlando, FL"
- assert Enum.at(rows, 6)["key"] == "Raleigh, NC"
- assert Enum.at(rows, 7)["key"] == "Springfield, FL"
- assert Enum.at(rows, 8)["key"] == "Springfield, MA"
- assert Enum.at(rows, 9)["key"] == "Tampa, FL"
- assert Enum.at(rows, 10)["key"] == "Wilmington, NC"
- assert Enum.at(rows, 11)["key"] == "Worcester, MA"
-
- assert total_rows === 12
-
- delete_resp = Couch.delete("/#{db_name}/MA", query: %{:rev => resp2["rev"]}).body
- assert delete_resp["ok"]
-
- %{"rows" => rows, "total_rows" => total_rows} = query_list_cities_and_state(db_name)
-
- assert Enum.at(rows, 0)["key"] == "Charlotte, NC"
- assert Enum.at(rows, 1)["key"] == "Miami, FL"
- assert Enum.at(rows, 2)["key"] == "Orlando, FL"
- assert Enum.at(rows, 3)["key"] == "Raleigh, NC"
- assert Enum.at(rows, 4)["key"] == "Springfield, FL"
- assert Enum.at(rows, 5)["key"] == "Tampa, FL"
- assert Enum.at(rows, 6)["key"] == "Wilmington, NC"
-
- assert total_rows === 7
- end
-
- def query_list_cities_and_state(db_name) do
- design_doc = %{
- :_id => "_design/list_cities_and_state",
- :language => "javascript",
- :views => %{
- :view => %{
- :map => """
- function(doc) {
- for (var i = 0; i < doc.cities.length; i++)
- emit(doc.cities[i] + \", \" + doc._id, null);
- }
- """
- }
- }
- }
-
- design_resp =
- Couch.put(
- "/#{db_name}/_design/list_cities_and_state",
- body: design_doc,
- query: %{w: 3}
- )
-
- assert design_resp.status_code in [201, 202]
-
- %{:body => result} = Couch.get("/#{db_name}/_design/list_cities_and_state/_view/view")
-
- Couch.delete(
- "/#{db_name}/_design/list_cities_and_state",
- query: %{rev: design_resp.body["rev"]}
- )
-
- result
- end
-end
diff --git a/test/elixir/test/partition_all_docs_test.exs b/test/elixir/test/partition_all_docs_test.exs
deleted file mode 100644
index 816a8d6ed..000000000
--- a/test/elixir/test/partition_all_docs_test.exs
+++ /dev/null
@@ -1,204 +0,0 @@
-defmodule PartitionAllDocsTest do
- use CouchTestCase
- import PartitionHelpers
-
- @moduledoc """
- Test Partition functionality for _all_docs
- """
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- create_partition_docs(db_name)
-
- {:ok, [db_name: db_name]}
- end
-
- test "all_docs with partitioned:true returns partitioned fields", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["foo"]
-
- url = "/#{db_name}/_partition/bar/_all_docs"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["bar"]
- end
-
- test "partition all_docs errors with incorrect partition supplied", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/_bar/_all_docs"
- resp = Couch.get(url)
- assert resp.status_code == 400
-
- url = "/#{db_name}/_partition//_all_docs"
- resp = Couch.get(url)
- assert resp.status_code == 400
- end
-
- test "partitioned _all_docs works with startkey, endkey range", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{start_key: "\"foo:12\"", end_key: "\"foo:2\""})
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert Enum.dedup(partitions) == ["foo"]
- end
-
- test "partitioned _all_docs works with keys", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.post(url, body: %{keys: ["foo:2", "foo:4", "foo:6"]})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 3
- assert ids == ["foo:2", "foo:4", "foo:6"]
- end
-
- test "partition _all_docs works with limit", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{limit: 5})
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert Enum.dedup(partitions) == ["foo"]
- end
-
- test "partition _all_docs with descending", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{descending: true, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:98", "foo:96", "foo:94", "foo:92", "foo:90"]
-
- resp = Couch.get(url, query: %{descending: false, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:10", "foo:100", "foo:12", "foo:14", "foo:16"]
- end
-
- test "partition _all_docs with skip", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{skip: 5, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:18", "foo:2", "foo:20", "foo:22", "foo:24"]
- end
-
- test "partition _all_docs with key", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{key: "\"foo:22\""})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 1
- assert ids == ["foo:22"]
- end
-
- test "partition all docs can set query limits", context do
- set_config({"query_server_config", "partition_query_limit", "2000"})
-
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_all_docs"
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 20
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 50
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2000
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 50
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2001
- }
- )
-
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Limit is too large/, reason)
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2000,
- skip: 25
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 25
- end
-
- # This test is timing-based, so it could be a little flaky.
- # If that turns out to be the case, we should probably just skip it.
- @tag :pending
- test "partition _all_docs with timeout", context do
- set_config({"fabric", "partition_view_timeout", "1"})
-
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- retry_until(fn ->
- url = "/#{db_name}/_partition/foo/_all_docs"
-
- case Couch.get(url) do
- %{:body => %{"reason" => reason}} ->
- Regex.match?(~r/not be processed in a reasonable amount of time./, reason)
-
- _ ->
- false
- end
- end)
- end
-end
diff --git a/test/elixir/test/partition_crud_test.exs b/test/elixir/test/partition_crud_test.exs
deleted file mode 100644
index 7e32abbdc..000000000
--- a/test/elixir/test/partition_crud_test.exs
+++ /dev/null
@@ -1,369 +0,0 @@
-defmodule PartitionCrudTest do
- use CouchTestCase
-
- @tag :with_partitioned_db
- test "Sets partition in db info", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}")
- %{body: body} = resp
- assert body["props"] == %{"partitioned" => true}
- end
-
- @tag :with_partitioned_db
- test "PUT and GET document", context do
- db_name = context[:db_name]
- id = "my-partition:doc"
- url = "/#{db_name}/#{id}"
-
- resp = Couch.put(url, body: %{partitioned_doc: true})
- %{body: doc} = resp
- assert resp.status_code in [201, 202]
- assert doc["id"] == id
-
- resp = Couch.get(url)
- assert resp.status_code == 200
-
- %{body: doc} = resp
- assert doc["_id"] == id
- end
-
- @tag :with_partitioned_db
- test "PUT fails if a partition key is not supplied", context do
- db_name = context[:db_name]
- id = "not-partitioned"
- url = "/#{db_name}/#{id}"
-
- resp = Couch.put(url, body: %{partitioned_doc: false})
- assert resp.status_code == 400
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Doc id must be of form partition:id"
- }
-
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "PUT fails for partitions with _", context do
- db_name = context[:db_name]
- id = "_bad:partitioned"
- url = "/#{db_name}/#{id}"
-
- resp = Couch.put(url, body: %{partitioned_doc: false})
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Only reserved document ids may start with underscore."
- }
-
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "PUT fails for bad partitions", context do
- db_name = context[:db_name]
- id = "bad:"
- url = "/#{db_name}/#{id}"
-
- resp = Couch.put(url, body: %{partitioned_doc: false})
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Document id must not be empty"
- }
-
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "POST and GET document", context do
- db_name = context[:db_name]
- id = "my-partition-post:doc"
- url = "/#{db_name}"
-
- resp = Couch.post(url, body: %{_id: id, partitioned_doc: true})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("#{url}/#{id}")
- assert resp.status_code == 200
-
- %{body: doc} = resp
- assert doc["_id"] == id
- end
-
- @tag :with_partitioned_db
- test "GET to partition returns 400", context do
- db_name = context[:db_name]
- url = "/#{db_name}/_partition"
-
- resp = Couch.get("#{url}")
- assert resp.status_code == 400
- end
-
- @tag :with_partitioned_db
- test "POST and _bulk_get document", context do
- db_name = context[:db_name]
- id = "my-partition-post:doc"
- url = "/#{db_name}"
-
- resp = Couch.post(url, body: %{_id: id, partitioned_doc: true})
- assert resp.status_code in [201, 202]
-
- resp = Couch.post("#{url}/_bulk_get", body: %{docs: [%{id: id}]})
- assert resp.status_code == 200
-
- %{body: body} = resp
-
- assert %{
- "results" => [
- %{
- "docs" => [
- %{
- "ok" => %{
- "_id" => "my-partition-post:doc",
- "_rev" => "1-43d86359741cb629c0953a2beb6e9d7a",
- "partitioned_doc" => true
- }
- }
- ],
- "id" => "my-partition-post:doc"
- }
- ]
- } == body
- end
-
- @tag :with_partitioned_db
- test "_bulk_get bad partitioned document", context do
- db_name = context[:db_name]
- id = "my-partition-post"
- url = "/#{db_name}"
-
- resp = Couch.post("#{url}/_bulk_get", body: %{docs: [%{id: id}]})
- assert resp.status_code == 200
- %{:body => body} = resp
-
- assert %{
- "results" => [
- %{
- "docs" => [
- %{
- "error" => %{
- "error" => "illegal_docid",
- "id" => "my-partition-post",
- "reason" => "Doc id must be of form partition:id",
- "rev" => :null
- }
- }
- ],
- "id" => "my-partition-post"
- }
- ]
- } == body
- end
-
- @tag :with_partitioned_db
- test "POST fails if a partition key is not supplied", context do
- db_name = context[:db_name]
- id = "not-partitioned-post"
- url = "/#{db_name}"
-
- resp = Couch.post(url, body: %{_id: id, partitioned_doc: false})
- assert resp.status_code == 400
- end
-
- @tag :with_partitioned_db
- test "_bulk_docs saves docs with partition key", context do
- db_name = context[:db_name]
-
- docs = [
- %{_id: "foo:1"},
- %{_id: "bar:1"}
- ]
-
- url = "/#{db_name}"
- resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("#{url}/foo:1")
- assert resp.status_code == 200
-
- resp = Couch.get("#{url}/bar:1")
- assert resp.status_code == 200
- end
-
- @tag :with_partitioned_db
- test "_bulk_docs errors with missing partition key", context do
- db_name = context[:db_name]
-
- docs = [
- %{_id: "foo1"}
- ]
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Doc id must be of form partition:id"
- }
-
- url = "/#{db_name}"
- resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "_bulk_docs errors with bad partition key", context do
- db_name = context[:db_name]
-
- docs = [
- %{_id: "_foo:1"}
- ]
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Only reserved document ids may start with underscore."
- }
-
- url = "/#{db_name}"
- resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "_bulk_docs errors with bad doc key", context do
- db_name = context[:db_name]
-
- docs = [
- %{_id: "foo:"}
- ]
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Document id must not be empty"
- }
-
- url = "/#{db_name}"
- resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "saves attachment with partitioned doc", context do
- db_name = context[:db_name]
- id = "foo:doc-with-attachment"
-
- doc = %{
- _id: id,
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: Base.encode64("This is a text document to save")
- }
- }
- }
-
- resp = Couch.put("/#{db_name}/#{id}", body: doc)
-
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/#{id}")
- assert resp.status_code == 200
- body = Map.get(resp, :body)
- rev = Map.get(body, "_rev")
-
- assert body["_attachments"] == %{
- "foo.txt" => %{
- "content_type" => "text/plain",
- # "digest" => "md5-OW2BoZAtMqs1E+fAnLpNBw==",
- # Temporarily skip the digest check since the digest value
- # seems to differ on Travis
- "digest" => body["_attachments"]["foo.txt"]["digest"],
- "length" => 31,
- "revpos" => 1,
- "stub" => true
- }
- }
-
- resp = Couch.get("/#{db_name}/#{id}/foo.txt")
- assert Map.get(resp, :body) == "This is a text document to save"
-
- resp =
- Couch.put(
- "/#{db_name}/#{id}/bar.txt?rev=#{rev}",
- headers: ["Content-Type": "text/plain"],
- body: "This is another document"
- )
-
- assert resp.status_code in [201, 202]
- %{:body => body} = resp
- assert body["ok"] == true
- assert body["id"] == id
- end
-
- @tag :with_partitioned_db
- test "can purge partitioned db docs", context do
- db_name = context[:db_name]
-
- doc = %{
- _id: "foo:bar",
- value: "some value"
- }
-
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
- rev = body["rev"]
-
- resp = Couch.get("/#{db_name}/foo:bar")
- assert resp.status_code == 200
-
- body = %{"foo:bar" => [rev]}
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/foo:bar")
- assert resp.status_code == 404
- assert resp.body == %{"error" => "not_found", "reason" => "missing"}
- end
-
- @tag :with_partitioned_db
- test "purge rejects unpartitioned docid", context do
- db_name = context[:db_name]
- body = %{"no_partition" => ["1-967a00dff5e02add41819138abb3284d"]}
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
- assert resp.status_code == 400
- %{body: body} = resp
- assert body["error"] == "illegal_docid"
- end
-
- test "create database with bad `partitioned` value", _context do
- resp = Couch.put("/bad-db?partitioned=tru")
- assert resp.status_code == 400
-
- assert Map.get(resp, :body) == %{
- "error" => "bad_request",
- "reason" => "Invalid `partitioned` parameter"
- }
- end
-
- test "can create unpartitioned system db", _context do
- Couch.delete("/_replicator")
- resp = Couch.put("/_replicator")
- assert resp.status_code in [201, 202]
- assert resp.body == %{"ok" => true}
- end
-
- test "cannot create partitioned system db", _context do
- Couch.delete("/_replicator")
-
- resp = Couch.put("/_replicator?partitioned=true")
- assert resp.status_code == 400
-
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Cannot partition a system database/, reason)
- end
-end
diff --git a/test/elixir/test/partition_ddoc_test.exs b/test/elixir/test/partition_ddoc_test.exs
deleted file mode 100644
index 9fdfb9260..000000000
--- a/test/elixir/test/partition_ddoc_test.exs
+++ /dev/null
@@ -1,179 +0,0 @@
-defmodule PartitionDDocTest do
- use CouchTestCase
-
- @moduledoc """
- Test partition design doc interactions
- """
-
- setup do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- {:ok, [db_name: db_name]}
- end
-
- test "PUT /dbname/_design/foo", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
- end
-
- test "PUT /dbname/_design/foo to update", context do
- db_name = context[:db_name]
- ddoc_id = "_design/foo"
-
- ddoc = %{
- _id: ddoc_id,
- stuff: "here"
- }
-
- resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- ddoc = Map.put(ddoc, :_rev, body["rev"])
- ddoc = Map.put(ddoc, :other, "attribute")
- resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
- assert resp.status_code in [201, 202]
- end
-
- test "PUT /dbname/_design/foo/readme.txt", context do
- db_name = context[:db_name]
- ddoc_id = "_design/foo"
-
- ddoc = %{
- _id: ddoc_id,
- stuff: "here"
- }
-
- resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- att = "This is a readme.txt"
-
- opts = [
- headers: [{:"Content-Type", "text/plain"}],
- query: [rev: body["rev"]],
- body: att
- ]
-
- resp = Couch.put("/#{db_name}/#{ddoc_id}/readme.txt", opts)
- assert resp.status_code in [201, 202]
- end
-
- test "DELETE /dbname/_design/foo", context do
- db_name = context[:db_name]
- ddoc_id = "_design/foo"
-
- ddoc = %{
- _id: ddoc_id,
- stuff: "here"
- }
-
- resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- resp = Couch.delete("/#{db_name}/#{ddoc_id}", query: [rev: body["rev"]])
- assert resp.status_code == 200
- end
-
- test "POST /dbname with design doc", context do
- db_name = context[:db_name]
- body = %{_id: "_design/foo", stuff: "here"}
- resp = Couch.post("/#{db_name}", body: body)
- assert resp.status_code in [201, 202]
- end
-
- test "POST /dbname/_bulk_docs with design doc", context do
- db_name = context[:db_name]
- body = %{:docs => [%{_id: "_design/foo", stuff: "here"}]}
- resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
- assert resp.status_code in [201, 202]
- end
-
- test "GET /dbname/_design/foo", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/_design/foo")
- assert resp.status_code == 200
- end
-
- test "GET /dbname/_design/foo?rev=$rev", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- resp = Couch.get("/#{db_name}/_design/foo", query: [rev: body["rev"]])
- assert resp.status_code == 200
- end
-
- test "GET /dbname/_bulk_get", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
-
- body = %{docs: [%{id: "_design/foo"}]}
- resp = Couch.post("/#{db_name}/_bulk_get", body: body)
- assert resp.status_code == 200
- %{body: body} = resp
-
- assert length(body["results"]) == 1
-
- %{"results" => [%{"id" => "_design/foo", "docs" => [%{"ok" => _}]}]} = body
- end
-
- test "GET /dbname/_bulk_get with rev", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- body = %{docs: [%{id: "_design/foo", rev: body["rev"]}]}
- resp = Couch.post("/#{db_name}/_bulk_get", body: body)
- assert resp.status_code == 200
- %{body: body} = resp
-
- assert length(body["results"]) == 1
- %{"results" => [%{"id" => "_design/foo", "docs" => [%{"ok" => _}]}]} = body
- end
-
- test "GET /dbname/_all_docs?key=$ddoc_id", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"}, query: [w: 3])
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/_all_docs", query: [key: "\"_design/foo\""])
- assert resp.status_code == 200
- %{body: body} = resp
-
- assert length(body["rows"]) == 1
- assert %{"rows" => [%{"id" => "_design/foo"}]} = body
- end
-
- @tag :skip_on_jenkins
- test "GET /dbname/_design_docs", context do
- db_name = context[:db_name]
-
- retry_until(
- fn ->
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/_design_docs")
- assert resp.status_code == 200
- %{body: body} = resp
-
- assert length(body["rows"]) == 1
- %{"rows" => [%{"id" => "_design/foo"}]} = body
- end,
- 500,
- 10_000
- )
- end
-end
diff --git a/test/elixir/test/partition_design_docs_test.exs b/test/elixir/test/partition_design_docs_test.exs
deleted file mode 100644
index 4ccd63fe0..000000000
--- a/test/elixir/test/partition_design_docs_test.exs
+++ /dev/null
@@ -1,16 +0,0 @@
-defmodule PartitionDesignDocsTest do
- use CouchTestCase
-
- @moduledoc """
- Test Partition functionality for partition design docs
- """
-
- @tag :with_partitioned_db
- test "/_partition/:pk/_design/doc 404", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/fakekey/_design/mrtest/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 404
- end
-end
diff --git a/test/elixir/test/partition_helpers.exs b/test/elixir/test/partition_helpers.exs
deleted file mode 100644
index 3322ed7f5..000000000
--- a/test/elixir/test/partition_helpers.exs
+++ /dev/null
@@ -1,76 +0,0 @@
-defmodule PartitionHelpers do
- use ExUnit.Case
-
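- # Creates 100 docs split across the two partition keys: even ids go to pk1,
- # odd ids to pk2, and every third doc is tagged with group "one".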
- def create_partition_docs(db_name, pk1 \\ "foo", pk2 \\ "bar") do
- docs =
- for i <- 1..100 do
- id =
- if rem(i, 2) == 0 do
- "#{pk1}:#{i}"
- else
- "#{pk2}:#{i}"
- end
-
- group =
- if rem(i, 3) == 0 do
- "one"
- else
- "two"
- end
-
- %{
- :_id => id,
- :value => i,
- :some => "field",
- :group => group
- }
- end
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:w => 3, :docs => docs})
- assert resp.status_code in [201, 202]
- end
-
- def create_partition_ddoc(db_name, opts \\ %{}) do
- map_fn = """
- function(doc) {
- if (doc.some) {
- emit(doc.value, doc.some);
- }
- }
- """
-
- default_ddoc = %{
- views: %{
- some: %{
- map: map_fn
- }
- }
- }
-
- ddoc = Enum.into(opts, default_ddoc)
-
- resp = Couch.put("/#{db_name}/_design/mrtest", body: ddoc)
- assert resp.status_code in [201, 202]
- assert Map.has_key?(resp.body, "ok") == true
- end
-
- def get_ids(resp) do
- %{:body => %{"rows" => rows}} = resp
- Enum.map(rows, fn row -> row["id"] end)
- end
-
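- # Extracts the partition name (the part before the colon) from each row id.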
- def get_partitions(resp) do
- %{:body => %{"rows" => rows}} = resp
-
- Enum.map(rows, fn row ->
- [partition, _] = String.split(row["id"], ":")
- partition
- end)
- end
-
- def assert_correct_partition(partitions, correct_partition) do
- assert Enum.all?(partitions, fn partition ->
- partition == correct_partition
- end)
- end
-end
diff --git a/test/elixir/test/partition_mango_test.exs b/test/elixir/test/partition_mango_test.exs
deleted file mode 100644
index 9e4f1e783..000000000
--- a/test/elixir/test/partition_mango_test.exs
+++ /dev/null
@@ -1,736 +0,0 @@
-defmodule PartitionMangoTest do
- use CouchTestCase
- import PartitionHelpers, except: [get_partitions: 1]
-
- @moduledoc """
- Test Partition functionality for mango
- """
- def create_index(db_name, fields \\ ["some"], opts \\ %{}) do
- default_index = %{
- index: %{
- fields: fields
- }
- }
-
- index = Enum.into(opts, default_index)
- resp = Couch.post("/#{db_name}/_index", body: index)
-
- assert resp.status_code == 200
- assert resp.body["result"] == "created"
- assert resp.body["id"] != nil
- assert resp.body["name"] != nil
-
- # wait until the database reports the index as available
- retry_until(fn ->
- get_index(db_name, resp.body["id"], resp.body["name"]) != nil
- end)
- end
-
- def list_indexes(db_name) do
- resp = Couch.get("/#{db_name}/_index")
- assert resp.status_code == 200
- resp.body["indexes"]
- end
-
- def get_index(db_name, ddocid, name) do
- indexes = list_indexes(db_name)
- Enum.find(indexes, fn(index) ->
- match?(%{"ddoc" => ^ddocid, "name" => ^name}, index)
- end)
- end
-
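- # Same idea as PartitionHelpers.get_partitions/1, but reads Mango's "docs"
- # array instead of view "rows".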
- def get_partitions(resp) do
- %{:body => %{"docs" => docs}} = resp
-
- Enum.map(docs, fn doc ->
- [partition, _] = String.split(doc["_id"], ":")
- partition
- end)
- end
-
- @tag :with_partitioned_db
- test "query using _id and partition works", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- _id: %{
- "$gt": "foo:"
- }
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- _id: %{
- "$lt": "foo:"
- }
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_partitioned_db
- test "query using _id works for global and local query", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- _id: %{
- "$gt": 0
- }
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- _id: %{
- "$gt": 0
- }
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_partitioned_db
- test "query with partitioned:true using index and $eq", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_partitioned_db
- test "partitioned query using _all_docs with $eq", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_db
- test "non-partitioned query using _all_docs and $eq", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- url = "/#{db_name}/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- skip: 40,
- limit: 5
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert partitions == ["bar", "bar", "bar", "bar", "bar"]
-
- url = "/#{db_name}/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- skip: 50,
- limit: 5
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert partitions == ["foo", "foo", "foo", "foo", "foo"]
- end
-
- @tag :with_partitioned_db
- test "partitioned query using index and range scan", context do
- db_name = context[:db_name]
- create_partition_docs(db_name, "foo", "bar42")
- create_index(db_name, ["value"])
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar42/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "bar42")
- end
-
- @tag :with_partitioned_db
- test "partitioned query using _all_docs and range scan", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_partitioned_db
- test "partitioned query using _all_docs", context do
- db_name = context[:db_name]
- create_partition_docs(db_name, "foo", "bar42")
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar42/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "bar42")
- end
-
- @tag :with_partitioned_db
- test "explain works with partitions", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"])
-
- url = "/#{db_name}/_partition/foo/_explain"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- %{:body => body} = resp
-
- assert body["index"]["name"] == "_all_docs"
- assert body["mrargs"]["partition"] == "foo"
-
- url = "/#{db_name}/_partition/bar/_explain"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- }
- }
- )
-
- %{:body => body} = resp
-
- assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
- assert body["mrargs"]["partition"] == "bar"
- end
-
- @tag :with_db
- test "explain works with non partitioned db", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"])
-
- url = "/#{db_name}/_explain"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- %{:body => body} = resp
-
- assert body["index"]["name"] == "_all_docs"
- assert body["mrargs"]["partition"] == :null
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- }
- }
- )
-
- %{:body => body} = resp
-
- assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
- assert body["mrargs"]["partition"] == :null
- end
-
- @tag :with_partitioned_db
- test "partitioned query using bookmarks", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["value"])
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- },
- limit: 3
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 3
- assert_correct_partition(partitions, "foo")
-
- %{:body => %{"bookmark" => bookmark}} = resp
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- },
- limit: 3,
- bookmark: bookmark
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 2
- assert_correct_partition(partitions, "foo")
- end
-
- @tag :with_partitioned_db
- test "partitioned query with query server config set", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["value"])
-
- # this is to test that we bypass partition_query_limit for mango
- set_config({"query_server_config", "partition_query_limit", "1"})
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- },
- limit: 3
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 3
- assert_correct_partition(partitions, "foo")
-
- %{:body => %{"bookmark" => bookmark}} = resp
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- },
- limit: 3,
- bookmark: bookmark
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 2
- assert_correct_partition(partitions, "foo")
- end
-
- @tag :with_partitioned_db
- test "global query uses global index", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"], %{partitioned: false})
-
- url = "/#{db_name}/_explain"
-
- selector = %{
- selector: %{
- some: "field"
- },
- limit: 100
- }
-
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 200
- %{:body => body} = resp
- assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
-
- url = "/#{db_name}/_find"
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 200
-
- partitions = get_partitions(resp)
- assert length(partitions) == 100
- end
-
- @tag :with_partitioned_db
- test "global query does not use partition index", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"])
-
- url = "/#{db_name}/_explain"
-
- selector = %{
- selector: %{
- some: "field"
- },
- limit: 100
- }
-
- resp = Couch.post(url, body: selector)
- %{:body => body} = resp
- assert body["index"]["name"] == "_all_docs"
-
- url = "/#{db_name}/_find"
- resp = Couch.post(url, body: selector)
-
- assert resp.status_code == 200
-
- partitions = get_partitions(resp)
- assert length(partitions) == 100
- end
-
- @tag :with_partitioned_db
- test "partitioned query does not use global index", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"], %{partitioned: false})
-
- url = "/#{db_name}/_partition/foo/_explain"
-
- selector = %{
- selector: %{
- some: "field"
- },
- limit: 50
- }
-
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 200
- %{:body => body} = resp
- assert body["index"]["name"] == "_all_docs"
-
- url = "/#{db_name}/_partition/foo/_find"
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 200
-
- partitions = get_partitions(resp)
- assert length(partitions) == 50
- assert_correct_partition(partitions, "foo")
- end
-
- @tag :with_partitioned_db
- test "partitioned _find and _explain with missing partition returns 400", context do
- db_name = context[:db_name]
-
- selector = %{
- selector: %{
- some: "field"
- }
- }
-
- resp = Couch.get("/#{db_name}/_partition/_find", body: selector)
- validate_missing_partition(resp)
-
- resp = Couch.get("/#{db_name}/_partition/_explain", body: selector)
- validate_missing_partition(resp)
- end
-
- defp validate_missing_partition(resp) do
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Partition must not start/, reason)
- end
-
- @tag :with_partitioned_db
- test "partitioned query sends correct errors for sort errors", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- selector = %{
- selector: %{
- some: "field"
- },
- sort: ["some"],
- limit: 50
- }
-
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/No partitioned index exists for this sort/, reason)
-
- url = "/#{db_name}/_find"
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/No global index exists for this sort/, reason)
- end
-end
diff --git a/test/elixir/test/partition_size_limit_test.exs b/test/elixir/test/partition_size_limit_test.exs
deleted file mode 100644
index 6ef686611..000000000
--- a/test/elixir/test/partition_size_limit_test.exs
+++ /dev/null
@@ -1,293 +0,0 @@
-defmodule PartitionSizeLimitTest do
- use CouchTestCase
-
- @moduledoc """
- Test Partition size limit functionality
- """
-
- @max_size 10_240
-
- setup do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- set_config({"couchdb", "max_partition_size", Integer.to_string(@max_size)})
-
- {:ok, [db_name: db_name]}
- end
-
- defp get_db_info(dbname) do
- resp = Couch.get("/#{dbname}")
- assert resp.status_code in [200, 202]
- %{:body => body} = resp
- body
- end
-
- defp get_partition_info(dbname, partition) do
- resp = Couch.get("/#{dbname}/_partition/#{partition}")
- assert resp.status_code in [200, 202]
- %{:body => body} = resp
- body
- end
-
- defp open_doc(db_name, docid, status_assert \\ [200, 202]) do
- resp = Couch.get("/#{db_name}/#{docid}")
- assert resp.status_code in status_assert
- %{:body => body} = resp
- body
- end
-
- defp save_doc(db_name, doc, status_assert \\ [201, 202]) do
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
- assert resp.status_code in status_assert
- %{:body => body} = resp
- body["rev"]
- end
-
- defp delete_doc(db_name, doc, status_assert \\ [200, 202]) do
- url = "/#{db_name}/#{doc["_id"]}"
- rev = doc["_rev"]
- resp = Couch.delete(url, query: [w: 3, rev: rev])
- assert resp.status_code in status_assert
- %{:body => body} = resp
- body["rev"]
- end
-
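- # Bulk-writes 15 docs of roughly 1KB each so the partition exceeds @max_size.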
- defp fill_partition(db_name, partition \\ "foo") do
- docs =
- 1..15
- |> Enum.map(fn i ->
- id = i |> Integer.to_string() |> String.pad_leading(4, "0")
- docid = "#{partition}:#{id}"
- %{_id: docid, value: "0" |> String.pad_leading(1024)}
- end)
-
- body = %{:w => 3, :docs => docs}
- resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
- assert resp.status_code in [201, 202]
- end
-
- test "fill partition manually", context do
- db_name = context[:db_name]
- partition = "foo"
-
- resp =
- 1..1000
- |> Enum.find_value(0, fn i ->
- id = i |> Integer.to_string() |> String.pad_leading(4, "0")
- docid = "#{partition}:#{id}"
- doc = %{_id: docid, value: "0" |> String.pad_leading(1024)}
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
-
- if resp.status_code in [201, 202] do
- false
- else
- resp
- end
- end)
-
- assert resp.status_code == 403
- %{body: body} = resp
- assert body["error"] == "partition_overflow"
-
- info = get_partition_info(db_name, partition)
- assert info["sizes"]["external"] >= @max_size
- end
-
- test "full partitions reject POST /dbname", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- doc = %{_id: "foo:bar", value: "stuff"}
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
- assert resp.status_code == 403
- %{body: body} = resp
- assert body["error"] == "partition_overflow"
- end
-
- test "full partitions reject PUT /dbname/docid", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- doc = %{value: "stuff"}
- resp = Couch.put("/#{db_name}/foo:bar", query: [w: 3], body: doc)
- assert resp.status_code == 403
- %{body: body} = resp
- assert body["error"] == "partition_overflow"
- end
-
- test "full partitions reject POST /dbname/_bulk_docs", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- body = %{w: 3, docs: [%{_id: "foo:bar"}]}
- resp = Couch.post("/#{db_name}/_bulk_docs", query: [w: 3], body: body)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
- doc_resp = Enum.at(body, 0)
- assert doc_resp["error"] == "partition_overflow"
- end
-
- test "full partitions with mixed POST /dbname/_bulk_docs", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- body = %{w: 3, docs: [%{_id: "foo:bar"}, %{_id: "baz:bang"}]}
- resp = Couch.post("/#{db_name}/_bulk_docs", query: [w: 3], body: body)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- doc_resp1 = Enum.at(body, 0)
- assert doc_resp1["error"] == "partition_overflow"
-
- doc_resp2 = Enum.at(body, 1)
- assert doc_resp2["ok"]
- end
-
- test "full partitions are still readable", context do
- db_name = context[:db_name]
- fill_partition(db_name)
- open_doc(db_name, "foo:0001")
- end
-
- test "full partitions can accept deletes", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- doc = open_doc(db_name, "foo:0001")
- delete_doc(db_name, doc)
- end
-
- test "full partitions can accept updates that reduce size", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- doc = open_doc(db_name, "foo:0001")
- save_doc(db_name, %{doc | "value" => ""})
- end
-
- test "full partition does not affect other partitions", context do
- db_name = context[:db_name]
- fill_partition(db_name)
- save_doc(db_name, %{_id: "bar:foo", value: "stuff"})
- end
-
- test "full partition does not affect design documents", context do
- db_name = context[:db_name]
- fill_partition(db_name)
- rev1 = save_doc(db_name, %{_id: "_design/foo", value: "stuff"})
- save_doc(db_name, %{_id: "_design/foo", _rev: rev1, value: "hi"})
- doc = open_doc(db_name, "_design/foo")
- delete_doc(db_name, doc)
- end
-
- test "replication into a full partition works", context do
- db_name = context[:db_name]
- fill_partition(db_name)
- save_doc(db_name, %{_id: "foo:bar", value: "stuff"}, [403])
-
- doc = %{
- _id: "foo:bar",
- _rev: <<"1-23202479633c2b380f79507a776743d5">>,
- value: "stuff"
- }
-
- url = "/#{db_name}/#{doc[:_id]}"
- query = [new_edits: false, w: 3]
- resp = Couch.put(url, query: query, body: doc)
- assert resp.status_code in [201, 202]
- end
-
- test "compacting a full partition works", context do
- db_name = context[:db_name]
- db_info1 = get_db_info(db_name)
- fill_partition(db_name)
- compact(db_name)
- db_info2 = get_db_info(db_name)
- assert db_info2["sizes"]["file"] != db_info1["sizes"]["file"]
- end
-
- test "indexing a full partition works", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- ddoc = %{
- _id: "_design/foo",
- views: %{
- bar: %{
- map: "function(doc) {emit(doc.group, 1);}"
- }
- }
- }
-
- save_doc(db_name, ddoc)
-
- url = "/#{db_name}/_partition/foo/_design/foo/_view/bar"
- resp = Couch.get(url)
- assert resp.status_code in [200, 202]
- %{body: body} = resp
-
- assert length(body["rows"]) > 0
- end
-
- test "purging docs allows writes", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- info = get_partition_info(db_name, "foo")
- limit = info["doc_count"] - 1
-
- query = [
- start_key: "\"foo:0000\"",
- end_key: "\"foo:9999\"",
- limit: limit
- ]
-
- resp = Couch.get("/#{db_name}/_all_docs", query: query)
- assert resp.status_code in [200, 202]
- %{body: body} = resp
-
- pbody =
- body["rows"]
- |> Enum.reduce(%{}, fn row, acc ->
- Map.put(acc, row["id"], [row["value"]["rev"]])
- end)
-
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: pbody)
- assert resp.status_code in [201, 202]
-
- save_doc(db_name, %{_id: "foo:bar", value: "some value"})
- end
-
- test "increasing partition size allows more writes", context do
- db_name = context[:db_name]
- fill_partition(db_name)
-
- # We use set_config_raw so that we're not setting
- # on_exit handlers that might interfere with the original
- # config change done in setup of this test
- new_size = Integer.to_string(@max_size * 1000)
- set_config_raw("couchdb", "max_partition_size", new_size)
-
- save_doc(db_name, %{_id: "foo:bar", value: "stuff"})
- end
-
- test "decreasing partition size disables more writes", context do
- db_name = context[:db_name]
-
- # We use set_config_raw so that we're not setting
- # on_exit handlers that might interfere with the original
- # config change done in setup of this test
- new_size = Integer.to_string(@max_size * 1000)
- set_config_raw("couchdb", "max_partition_size", new_size)
-
- fill_partition(db_name)
- save_doc(db_name, %{_id: "foo:bar", value: "stuff"})
-
- old_size = Integer.to_string(@max_size)
- set_config_raw("couchdb", "max_partition_size", old_size)
-
- save_doc(db_name, %{_id: "foo:baz", value: "stuff"}, [403])
- end
-end
diff --git a/test/elixir/test/partition_size_test.exs b/test/elixir/test/partition_size_test.exs
deleted file mode 100644
index 2ba8139fc..000000000
--- a/test/elixir/test/partition_size_test.exs
+++ /dev/null
@@ -1,361 +0,0 @@
-defmodule PartitionSizeTest do
- use CouchTestCase
-
- @moduledoc """
- Test Partition size functionality
- """
-
- setup do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- {:ok, [db_name: db_name]}
- end
-
- def get_db_info(dbname) do
- resp = Couch.get("/#{dbname}")
- assert resp.status_code == 200
- %{:body => body} = resp
- body
- end
-
- def get_partition_info(dbname, partition) do
- resp = Couch.get("/#{dbname}/_partition/#{partition}")
- assert resp.status_code == 200
- %{:body => body} = resp
- body
- end
-
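- # Maps an integer to one of ten zero-padded partition names ("000".."009").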
- def mk_partition(i) do
- i |> rem(10) |> Integer.to_string() |> String.pad_leading(3, "0")
- end
-
- def mk_docid(i) do
- id = i |> Integer.to_string() |> String.pad_leading(4, "0")
- "#{mk_partition(i)}:#{id}"
- end
-
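- # Bulk-creates 1000 docs spread evenly across the ten partitions (100 per partition).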
- def mk_docs(db_name) do
- docs =
- for i <- 1..1000 do
- group = Integer.to_string(rem(i, 3))
-
- %{
- :_id => mk_docid(i),
- :value => i,
- :some => "field",
- :group => group
- }
- end
-
- body = %{:w => 3, :docs => docs}
-
- retry_until(fn ->
- resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
- assert resp.status_code in [201, 202]
- end)
- end
-
- def save_doc(db_name, doc) do
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
- assert resp.status_code in [201, 202]
- %{:body => body} = resp
- body["rev"]
- end
-
- test "get empty partition", context do
- db_name = context[:db_name]
- partition = "non_existent_partition"
-
- info = get_partition_info(db_name, partition)
-
- assert info["doc_count"] == 0
- assert info["doc_del_count"] == 0
- assert info["partition"] == partition
- assert info["sizes"]["external"] == 0
- assert info["sizes"]["active"] == 0
- end
-
- test "unknown partition return's zero", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- info = get_partition_info(db_name, "unknown")
- assert info["doc_count"] == 0
- assert info["doc_del_count"] == 0
- assert info["sizes"]["external"] == 0
- assert info["sizes"]["active"] == 0
- end
-
- test "simple partition size", context do
- db_name = context[:db_name]
- save_doc(db_name, %{_id: "foo:bar", val: 42})
-
- info = get_partition_info(db_name, "foo")
- assert info["doc_count"] == 1
- assert info["doc_del_count"] == 0
- assert info["sizes"]["external"] > 0
- assert info["sizes"]["active"] > 0
- end
-
- test "adding docs increases partition sizes", context do
- db_name = context[:db_name]
- save_doc(db_name, %{_id: "foo:bar", val: 42})
- pre_info = get_partition_info(db_name, "foo")
-
- save_doc(db_name, %{_id: "foo:baz", val: 24})
- post_info = get_partition_info(db_name, "foo")
-
- assert post_info["doc_count"] == 2
- assert post_info["doc_del_count"] == 0
- assert post_info["sizes"]["external"] > pre_info["sizes"]["external"]
- assert post_info["sizes"]["active"] > pre_info["sizes"]["active"]
- end
-
- test "updating docs affects partition sizes", context do
- db_name = context[:db_name]
- rev1 = save_doc(db_name, %{_id: "foo:bar", val: ""})
- info1 = get_partition_info(db_name, "foo")
-
- rev2 =
- save_doc(db_name, %{
- _id: "foo:bar",
- _rev: rev1,
- val: "this is a very long string that is so super long its beyond long"
- })
-
- info2 = get_partition_info(db_name, "foo")
-
- save_doc(db_name, %{
- _id: "foo:bar",
- _rev: rev2,
- val: "this string is shorter"
- })
-
- info3 = get_partition_info(db_name, "foo")
-
- assert info3["doc_count"] == 1
- assert info3["doc_del_count"] == 0
-
- assert info3["sizes"]["external"] > info1["sizes"]["external"]
- assert info2["sizes"]["external"] > info3["sizes"]["external"]
- end
-
- test "deleting a doc affects partition sizes", context do
- db_name = context[:db_name]
- rev1 = save_doc(db_name, %{_id: "foo:bar", val: "some stuff here"})
- info1 = get_partition_info(db_name, "foo")
-
- save_doc(db_name, %{_id: "foo:bar", _rev: rev1, _deleted: true})
- info2 = get_partition_info(db_name, "foo")
-
- assert info1["doc_count"] == 1
- assert info1["doc_del_count"] == 0
-
- assert info2["doc_count"] == 0
- assert info2["doc_del_count"] == 1
-
- assert info2["sizes"]["external"] < info1["sizes"]["external"]
- end
-
- test "design docs do not affect partition sizes", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- pre_infos =
- 0..9
- |> Enum.map(fn i ->
- get_partition_info(db_name, mk_partition(i))
- end)
-
- 0..5
- |> Enum.map(fn i ->
- base = i |> Integer.to_string() |> String.pad_leading(5, "0")
- docid = "_design/#{base}"
- save_doc(db_name, %{_id: docid, value: "some stuff here"})
- end)
-
- post_infos =
- 0..9
- |> Enum.map(fn i ->
- get_partition_info(db_name, mk_partition(i))
- end)
-
- assert post_infos == pre_infos
- end
-
- @tag :skip_on_jenkins
- test "get all partition sizes", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- {esum, asum} =
- 0..9
- |> Enum.reduce({0, 0}, fn i, {esize, asize} ->
- partition = mk_partition(i)
- info = get_partition_info(db_name, partition)
- assert info["doc_count"] == 100
- assert info["doc_del_count"] == 0
- assert info["sizes"]["external"] > 0
- assert info["sizes"]["active"] > 0
- {esize + info["sizes"]["external"], asize + info["sizes"]["active"]}
- end)
-
- db_info = get_db_info(db_name)
- assert db_info["sizes"]["external"] >= esum
- assert db_info["sizes"]["active"] >= asum
- end
-
- test "get partition size with attachment", context do
- db_name = context[:db_name]
-
- doc = %{
- _id: "foo:doc-with-attachment",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: Base.encode64("This is a text document to save")
- }
- }
- }
-
- save_doc(db_name, doc)
-
- db_info = get_db_info(db_name)
- foo_info = get_partition_info(db_name, "foo")
-
- assert foo_info["doc_count"] == 1
- assert foo_info["doc_del_count"] == 0
- assert foo_info["sizes"]["active"] > 0
- assert foo_info["sizes"]["external"] > 0
-
- assert foo_info["sizes"]["active"] <= db_info["sizes"]["active"]
- assert foo_info["sizes"]["external"] <= db_info["sizes"]["external"]
- end
-
- test "attachments don't affect other partitions", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- pre_infos =
- 0..9
- |> Enum.map(fn i ->
- get_partition_info(db_name, mk_partition(i))
- end)
-
- doc = %{
- _id: "foo:doc-with-attachment",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: Base.encode64("This is a text document to save")
- }
- }
- }
-
- save_doc(db_name, doc)
-
- att_info = get_partition_info(db_name, "foo")
- assert att_info["doc_count"] == 1
- assert att_info["sizes"]["external"] > 0
-
- post_infos =
- 0..9
- |> Enum.map(fn i ->
- get_partition_info(db_name, mk_partition(i))
- end)
-
- assert post_infos == pre_infos
-
- esize =
- ([att_info] ++ post_infos)
- |> Enum.reduce(0, fn info, acc ->
- info["sizes"]["external"] + acc
- end)
-
- db_info = get_db_info(db_name)
- assert esize == db_info["sizes"]["external"]
- end
-
- test "partition activity not affect other partition sizes", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- partition1 = "000"
- partition2 = "001"
-
- info2 = get_partition_info(db_name, partition2)
-
- doc_id = "#{partition1}:doc-with-attachment"
-
- doc = %{
- _id: doc_id,
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: Base.encode64("This is a text document to save")
- }
- }
- }
-
- doc_rev = save_doc(db_name, doc)
-
- info2_attach = get_partition_info(db_name, partition2)
- assert info2_attach == info2
-
- doc =
- Enum.into(
- %{
- another: "add another field",
- _rev: doc_rev
- },
- doc
- )
-
- doc_rev = save_doc(db_name, doc)
-
- info2_update = get_partition_info(db_name, partition2)
- assert info2_update == info2
-
- resp = Couch.delete("/#{db_name}/#{doc_id}", query: %{rev: doc_rev})
- assert resp.status_code == 200
-
- info2_delete = get_partition_info(db_name, partition2)
- assert info2_delete == info2
- end
-
- test "purging docs decreases partition size", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- partition = "000"
-
- query = [
- start_key: "\"#{partition}:0000\"",
- end_key: "\"#{partition}:9999\"",
- limit: 50
- ]
-
- resp = Couch.get("/#{db_name}/_all_docs", query: query)
- assert resp.status_code == 200
- %{body: body} = resp
-
- pre_info = get_partition_info(db_name, partition)
-
- pbody =
- body["rows"]
- |> Enum.reduce(%{}, fn row, acc ->
- Map.put(acc, row["id"], [row["value"]["rev"]])
- end)
-
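- # The _purge request body maps each doc id to the list of revisions to purge,
- # e.g. (shape only, the revision below is a placeholder):
- #   %{"000:0000" => ["1-967a00dff5e02add41819138abb3284d"]}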
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: pbody)
- assert resp.status_code in [201, 202]
-
- post_info = get_partition_info(db_name, partition)
- assert post_info["doc_count"] == pre_info["doc_count"] - 50
- assert post_info["doc_del_count"] == 0
- assert post_info["sizes"]["active"] < pre_info["sizes"]["active"]
- assert post_info["sizes"]["external"] < pre_info["sizes"]["external"]
- end
-end
diff --git a/test/elixir/test/partition_view_test.exs b/test/elixir/test/partition_view_test.exs
deleted file mode 100644
index 0a55c2443..000000000
--- a/test/elixir/test/partition_view_test.exs
+++ /dev/null
@@ -1,374 +0,0 @@
-defmodule ViewPartitionTest do
- use CouchTestCase
- import PartitionHelpers
-
- @moduledoc """
- Test Partition functionality for views
- """
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- create_partition_docs(db_name)
-
- map_fun1 = """
- function(doc) {
- if (doc.some) {
- emit(doc.value, doc.some);
- }
- }
- """
-
- map_fun2 = """
- function(doc) {
- if (doc.group) {
- emit([doc.some, doc.group], 1);
- }
- }
- """
-
- query = %{:w => 3}
-
- body = %{
- :docs => [
- %{
- _id: "_design/map",
- views: %{some: %{map: map_fun1}}
- },
- %{
- _id: "_design/map_some",
- views: %{some: %{map: map_fun2}}
- },
- %{
- _id: "_design/partitioned_true",
- views: %{some: %{map: map_fun1}},
- options: %{partitioned: true}
- },
- %{
- _id: "_design/partitioned_false",
- views: %{some: %{map: map_fun1}},
- options: %{partitioned: false}
- },
- %{
- _id: "_design/reduce",
- views: %{some: %{map: map_fun2, reduce: "_count"}}
- },
- %{
- _id: "_design/include_ddocs",
- views: %{some: %{map: map_fun1}},
- options: %{include_design: true}
- }
- ]
- }
-
- resp = Couch.post("/#{db_name}/_bulk_docs", query: query, body: body)
- Enum.each(resp.body, &assert(&1["ok"]))
-
- {:ok, [db_name: db_name]}
- end
-
- def get_reduce_result(resp) do
- %{:body => %{"rows" => rows}} = resp
- rows
- end
-
- test "query with partitioned:true returns partitioned fields", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/partitioned_true/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["foo"]
-
- url = "/#{db_name}/_partition/bar/_design/partitioned_true/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["bar"]
- end
-
- test "default view query returns partitioned fields", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["foo"]
-
- url = "/#{db_name}/_partition/bar/_design/map/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["bar"]
- end
-
- test "conflicting partitions in path and query string rejected", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{partition: "bar"})
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Conflicting value/, reason)
- end
-
- test "query will return zero results for wrong inputs", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{start_key: "\"foo:12\""})
- assert resp.status_code == 200
- assert Map.get(resp, :body)["rows"] == []
- end
-
- test "partitioned ddoc cannot be used in global query", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_design/map/_view/some"
- resp = Couch.get(url)
- %{:body => %{"reason" => reason}} = resp
- assert resp.status_code == 400
- assert Regex.match?(~r/mandatory for queries to this view./, reason)
- end
-
- test "partitioned query cannot be used with global ddoc", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/partitioned_false/_view/some"
- resp = Couch.get(url)
- %{:body => %{"reason" => reason}} = resp
- assert resp.status_code == 400
- assert Regex.match?(~r/is not supported in this design doc/, reason)
- end
-
- test "view query returns all docs for global query", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_design/partitioned_false/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 100
- end
-
- test "partition query errors with incorrect partition supplied", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/_bar/_design/map/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 400
-
- url = "/#{db_name}/_partition//_design/map/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 400
- end
-
- test "partitioned query works with startkey, endkey range", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{start_key: 12, end_key: 20})
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert Enum.dedup(partitions) == ["foo"]
- end
-
- test "partitioned query works with keys", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.post(url, body: %{keys: [2, 4, 6]})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 3
- assert ids == ["foo:2", "foo:4", "foo:6"]
- end
-
- test "global query works with keys", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_design/partitioned_false/_view/some"
- resp = Couch.post(url, body: %{keys: [2, 4, 6]})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 3
- assert ids == ["foo:2", "foo:4", "foo:6"]
- end
-
- test "partition query works with limit", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{limit: 5})
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert Enum.dedup(partitions) == ["foo"]
- end
-
- test "partition query with descending", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{descending: true, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:100", "foo:98", "foo:96", "foo:94", "foo:92"]
-
- resp = Couch.get(url, query: %{descending: false, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:2", "foo:4", "foo:6", "foo:8", "foo:10"]
- end
-
- test "partition query with skip", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{skip: 5, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:12", "foo:14", "foo:16", "foo:18", "foo:20"]
- end
-
- test "partition query with key", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{key: 22})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 1
- assert ids == ["foo:22"]
- end
-
- test "partition query with startkey_docid and endkey_docid", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map_some/_view/some"
-
- resp =
- Couch.get(
- url,
- query: %{
- startkey: "[\"field\",\"one\"]",
- endkey: "[\"field\",\"one\"]",
- startkey_docid: "foo:12",
- endkey_docid: "foo:30"
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:12", "foo:18", "foo:24", "foo:30"]
- end
-
- test "query with reduce works", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/reduce/_view/some"
- resp = Couch.get(url, query: %{reduce: true, group_level: 1})
- assert resp.status_code == 200
- results = get_reduce_result(resp)
- assert results == [%{"key" => ["field"], "value" => 50}]
-
- resp = Couch.get(url, query: %{reduce: true, group_level: 2})
- results = get_reduce_result(resp)
-
- assert results == [
- %{"key" => ["field", "one"], "value" => 16},
- %{"key" => ["field", "two"], "value" => 34}
- ]
-
- resp = Couch.get(url, query: %{reduce: true, group: true})
- results = get_reduce_result(resp)
-
- assert results == [
- %{"key" => ["field", "one"], "value" => 16},
- %{"key" => ["field", "two"], "value" => 34}
- ]
- end
-
- test "partition query can set query limits", context do
- set_config({"query_server_config", "partition_query_limit", "2000"})
-
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 20
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 50
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2000
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 50
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2001
- }
- )
-
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Limit is too large/, reason)
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2000,
- skip: 25
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 25
- end
-
- test "include_design works correctly", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/include_ddocs/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 50
- assert Enum.dedup(partitions) == ["foo"]
- end
-end
diff --git a/test/elixir/test/partition_view_update_test.exs b/test/elixir/test/partition_view_update_test.exs
deleted file mode 100644
index 5c1cb09f0..000000000
--- a/test/elixir/test/partition_view_update_test.exs
+++ /dev/null
@@ -1,160 +0,0 @@
-defmodule PartitionViewUpdateTest do
- use CouchTestCase
- import PartitionHelpers
-
- @moduledoc """
- Test Partition view update functionality
- """
- @tag :with_partitioned_db
- test "view updates properly remove old keys", context do
- db_name = context[:db_name]
- create_partition_docs(db_name, "foo", "bar")
- create_partition_ddoc(db_name)
-
- check_key = fn key, num_rows ->
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
- resp = Couch.get(url, query: [key: key])
- assert resp.status_code == 200
- assert length(resp.body["rows"]) == num_rows
- end
-
- check_key.(2, 1)
-
- resp = Couch.get("/#{db_name}/foo:2")
- doc = Map.put(resp.body, "value", 4)
- resp = Couch.put("/#{db_name}/foo:2", query: [w: 3], body: doc)
- assert resp.status_code >= 201 and resp.status_code <= 202
-
- check_key.(4, 2)
- check_key.(2, 0)
- end
-
- @tag :skip_on_jenkins
- @tag :with_partitioned_db
- test "query with update=false works", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
- resp =
- Couch.get(
- url,
- query: %{
- update: "true",
- limit: 3
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:2", "foo:4", "foo:6"]
-
- # Avoid race conditions by attempting to get a full response
- # from every shard before we do our update:false test
- for _ <- 1..12 do
- resp = Couch.get(url)
- assert resp.status_code == 200
- end
-
- Couch.put("/#{db_name}/foo:1", body: %{some: "field"})
-
- retry_until(fn ->
- resp =
- Couch.get(
- url,
- query: %{
- update: "false",
- limit: 3
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:2", "foo:4", "foo:6"]
- end)
- end
-
- @tag :with_partitioned_db
- test "purge removes view rows", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- %{body: body} = resp
- assert length(body["rows"]) == 50
-
- resp = Couch.get("/#{db_name}/foo:2")
- assert resp.status_code == 200
- %{body: body} = resp
- rev = body["_rev"]
-
- body = %{"foo:2" => [rev]}
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
- assert resp.status_code in [201, 202]
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- %{body: body} = resp
- assert length(body["rows"]) == 49
- end
-
- @tag :with_partitioned_db
- test "purged conflict changes view rows", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- %{body: body} = resp
- assert length(body["rows"]) == 50
-
- # Create a conflict on foo:2. Since the revision carrying 4096 is deeper
- # than the conflicting revision, we can assert that 4096 is in the view
- # before the purge and that 8192 is in the view after the purge.
- resp = Couch.get("/#{db_name}/foo:2")
- assert resp.status_code == 200
- %{body: body} = resp
- rev1 = body["_rev"]
-
- doc = %{_id: "foo:2", _rev: rev1, value: 4096, some: "field"}
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
- rev2 = body["rev"]
-
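- # new_edits=false performs a replication-style write: the supplied _rev is
- # stored verbatim instead of a new one being generated, which lets the
- # hard-coded conflict revision below become a sibling of the existing branch.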
- query = [w: 3, new_edits: false]
- conflict_rev = "1-4a75b4efa0804859b3dfd327cbc1c2f9"
- doc = %{_id: "foo:2", _rev: conflict_rev, value: 8192, some: "field"}
- resp = Couch.put("/#{db_name}/foo:2", query: query, body: doc)
- assert resp.status_code in [201, 202]
-
- # Check that our expected row exists
- resp = Couch.get(url, query: [key: 4096])
- assert resp.status_code == 200
- %{body: body} = resp
- [row] = body["rows"]
- assert row["id"] == "foo:2"
-
- # Remove the current row to be replaced with
- # a row from the conflict
- body = %{"foo:2" => [rev2]}
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
- assert resp.status_code in [201, 202]
-
- resp = Couch.get(url, query: [key: 8192])
- assert resp.status_code == 200
- %{body: body} = resp
- [row] = body["rows"]
- assert row["id"] == "foo:2"
- end
-end
diff --git a/test/elixir/test/proxyauth_test.exs b/test/elixir/test/proxyauth_test.exs
deleted file mode 100644
index 6bf21920b..000000000
--- a/test/elixir/test/proxyauth_test.exs
+++ /dev/null
@@ -1,167 +0,0 @@
-defmodule ProxyAuthTest do
- use CouchTestCase
-
- @moduletag :authentication
-
- @tag :with_db
- test "proxy auth with secret", context do
- db_name = context[:db_name]
-
- design_doc = %{
- _id: "_design/test",
- language: "javascript",
- shows: %{
- welcome: """
- function(doc,req) {
- return "Welcome " + req.userCtx["name"];
- }
- """,
- role: """
- function(doc, req) {
- return req.userCtx['roles'][0];
- }
- """
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc)
-
- users_db_name = random_db_name()
- create_db(users_db_name)
-
- secret = generate_secret(64)
-
- server_config = [
- %{
- :section => "chttpd_auth",
- :key => "authentication_db",
- :value => users_db_name
- },
- %{
- :section => "couch_httpd_auth",
- :key => "proxy_use_secret",
- :value => "true"
- },
- %{
- :section => "couch_httpd_auth",
- :key => "secret",
- :value => secret
- }
- ]
-
- run_on_modified_server(server_config, fn ->
- test_fun(db_name, users_db_name, secret)
- end)
- delete_db(users_db_name)
- end
-
- defp generate_secret(len) do
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
- |> String.splitter("", trim: true)
- |> Enum.take_random(len)
- |> Enum.join("")
- end
-
- defp hex_hmac_sha1(secret, message) do
- signature = case :erlang.system_info(:otp_release) do
- '20' -> :crypto.hmac(:sha, secret, message)
- '21' -> :crypto.hmac(:sha, secret, message)
- _ -> :crypto.mac(:hmac, :sha, secret, message)
- end
- Base.encode16(signature, case: :lower)
- end
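-
- # For reference, the same token can be computed outside the test, e.g. with
- # openssl (user name shown here matches the one used below; the secret is
- # whatever generate_secret/1 produced):
- #   echo -n "couch@apache.org" | openssl dgst -sha1 -hmac "$SECRET"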
-
- def test_fun(db_name, users_db_name, secret) do
- user = prepare_user_doc(name: "couch@apache.org", password: "test")
- create_doc(users_db_name, user)
-
- resp =
- Couch.get("/_session",
- headers: [authorization: "Basic Y291Y2hAYXBhY2hlLm9yZzp0ZXN0"]
- )
-
- assert resp.body["userCtx"]["name"] == "couch@apache.org"
- assert resp.body["info"]["authenticated"] == "default"
-
- headers = [
- "X-Auth-CouchDB-UserName": "couch@apache.org",
- "X-Auth-CouchDB-Roles": "test",
- "X-Auth-CouchDB-Token": hex_hmac_sha1(secret, "couch@apache.org")
- ]
- resp = Couch.get("/#{db_name}/_design/test/_show/welcome", headers: headers)
- assert resp.body == "Welcome couch@apache.org"
-
- resp = Couch.get("/#{db_name}/_design/test/_show/role", headers: headers)
- assert resp.body == "test"
- end
-
- @tag :with_db
- test "proxy auth without secret", context do
- db_name = context[:db_name]
-
- design_doc = %{
- _id: "_design/test",
- language: "javascript",
- shows: %{
- welcome: """
- function(doc,req) {
- return "Welcome " + req.userCtx["name"];
- }
- """,
- role: """
- function(doc, req) {
- return req.userCtx['roles'][0];
- }
- """
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc)
-
- users_db_name = random_db_name()
- create_db(users_db_name)
-
- server_config = [
- %{
- :section => "chttpd_auth",
- :key => "authentication_db",
- :value => users_db_name
- },
- %{
- :section => "couch_httpd_auth",
- :key => "proxy_use_secret",
- :value => "false"
- }
- ]
-
- run_on_modified_server(server_config, fn ->
- test_fun_no_secret(db_name, users_db_name)
- end)
-
- delete_db(users_db_name)
- end
-
- def test_fun_no_secret(db_name, users_db_name) do
- user = prepare_user_doc(name: "couch@apache.org", password: "test")
- create_doc(users_db_name, user)
-
- resp =
- Couch.get("/_session",
- headers: [authorization: "Basic Y291Y2hAYXBhY2hlLm9yZzp0ZXN0"]
- )
-
- assert resp.body["userCtx"]["name"] == "couch@apache.org"
- assert resp.body["info"]["authenticated"] == "default"
-
- headers = [
- "X-Auth-CouchDB-UserName": "couch@apache.org",
- "X-Auth-CouchDB-Roles": "test"
- ]
-
- resp = Couch.get("/#{db_name}/_design/test/_show/welcome", headers: headers)
- assert resp.body == "Welcome couch@apache.org"
-
- resp = Couch.get("/#{db_name}/_design/test/_show/role", headers: headers)
- assert resp.body == "test"
- end
-end
diff --git a/test/elixir/test/purge_test.exs b/test/elixir/test/purge_test.exs
deleted file mode 100644
index 5fc03f16b..000000000
--- a/test/elixir/test/purge_test.exs
+++ /dev/null
@@ -1,150 +0,0 @@
-defmodule PurgeTest do
- use CouchTestCase
-
- @moduletag :purge
-
- @tag :with_db
- test "purge documents", context do
- db_name = context[:db_name]
-
- design_doc = %{
- _id: "_design/test",
- language: "javascript",
- views: %{
- all_docs_twice: %{
- map: "function(doc) { emit(doc.integer, null); emit(doc.integer, null) }"
- },
- single_doc: %{
- map: "function(doc) { if (doc._id == \"1\") { emit(1, null) }}"
- }
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc)
-
- num_docs = 10
- bulk_save(db_name, make_docs(1..(num_docs + 1)))
-
- test_all_docs_twice(db_name, num_docs, 1)
-
- info = info(db_name)
-
- doc1 = open_doc(db_name, 1)
- doc2 = open_doc(db_name, 2)
-
- resp =
- Couch.post("/#{db_name}/_purge",
- body: %{"1": [doc1["_rev"]], "2": [doc2["_rev"]]}
- )
-
- assert resp.status_code == 201
- result = resp.body
-
- assert Enum.at(result["purged"]["1"], 0) == doc1["_rev"]
- assert Enum.at(result["purged"]["2"], 0) == doc2["_rev"]
-
- open_doc(db_name, 1, 404)
- open_doc(db_name, 2, 404)
-
- purged_info = info(db_name)
-
- assert purged_info["purge_seq"] != info["purge_seq"]
-
- test_all_docs_twice(db_name, num_docs, 0, 2)
-
- # purge sequences are preserved after compaction (COUCHDB-1021)
- compact(db_name)
-
- compacted_info = info(db_name)
- assert compacted_info["purge_seq"] == purged_info["purge_seq"]
-
- # purge documents twice in a row without loading views
- # (causes full view rebuilds)
-
- doc3 = open_doc(db_name, 3)
- doc4 = open_doc(db_name, 4)
-
- resp =
- Couch.post("/#{db_name}/_purge",
- body: %{"3": [doc3["_rev"]]}
- )
-
- assert resp.status_code == 201
-
- resp =
- Couch.post("/#{db_name}/_purge",
- body: %{"4": [doc4["_rev"]]}
- )
-
- assert resp.status_code == 201
-
- test_all_docs_twice(db_name, num_docs, 0, 4)
- end
-
- @tag :with_db
- test "COUCHDB-1065", context do
- db_name_a = context[:db_name]
- db_name_b = random_db_name()
- {:ok, _} = create_db(db_name_b)
-
- {:ok, doc_a_resp} = create_doc(db_name_a, %{_id: "test", a: 1})
- {:ok, doc_b_resp} = create_doc(db_name_b, %{_id: "test", a: 2})
- replicate(db_name_a, db_name_b)
-
- open_rev(db_name_b, "test", doc_a_resp.body["rev"], 200)
- open_rev(db_name_b, "test", doc_b_resp.body["rev"], 200)
-
- resp =
- Couch.post("/#{db_name_b}/_purge",
- body: %{test: [doc_a_resp.body["rev"]]}
- )
-
- assert resp.status_code == 201
-
- open_rev(db_name_b, "test", doc_a_resp.body["rev"], 404)
-
- resp =
- Couch.post("/#{db_name_b}/_purge",
- body: %{test: [doc_b_resp.body["rev"]]}
- )
-
- assert resp.status_code == 201
-
- open_rev(db_name_b, "test", doc_b_resp.body["rev"], 404)
-
- resp =
- Couch.post("/#{db_name_b}/_purge",
- body: %{test: [doc_a_resp.body["rev"], doc_b_resp.body["rev"]]}
- )
-
- assert resp.status_code == 201
-
- delete_db(db_name_b)
- end
-
- defp open_doc(db_name, id, expect \\ 200) do
- resp = Couch.get("/#{db_name}/#{id}")
- assert resp.status_code == expect
- resp.body
- end
-
- defp open_rev(db_name, id, rev, expect) do
- resp = Couch.get("/#{db_name}/#{id}?rev=#{rev}")
- assert resp.status_code == expect
- resp.body
- end
-
- defp test_all_docs_twice(db_name, num_docs, single_doc_expect, offset \\ 0) do
- resp = Couch.get("/#{db_name}/_design/test/_view/all_docs_twice")
- assert resp.status_code == 200
- rows = resp.body["rows"]
-
- for x <- 0..(num_docs - offset) do
- assert Map.get(Enum.at(rows, 2 * x), "key") == x + offset + 1
- assert Map.get(Enum.at(rows, 2 * x + 1), "key") == x + offset + 1
- end
-
- resp = Couch.get("/#{db_name}/_design/test/_view/single_doc")
- assert resp.body["total_rows"] == sigle_doc_expect
- end
-end
diff --git a/test/elixir/test/reader_acl_test.exs b/test/elixir/test/reader_acl_test.exs
deleted file mode 100644
index f65e7cbf6..000000000
--- a/test/elixir/test/reader_acl_test.exs
+++ /dev/null
@@ -1,254 +0,0 @@
-defmodule ReaderACLTest do
- use CouchTestCase
-
- @moduletag :authentication
-
- @users_db_name "custom-users"
- @password "funnybone"
-
- @moduletag config: [
- {
- "chttpd_auth",
- "authentication_db",
- @users_db_name
- },
- {
- "couch_httpd_auth",
- "authentication_db",
- @users_db_name
- }
- ]
- setup do
- # Create db if not exists
- Couch.put("/#{@users_db_name}")
-
- # create a user with top-secret-clearance
- user_doc =
- prepare_user_doc([
- {:name, "bond@apache.org"},
- {:password, @password},
- {:roles, ["top-secret"]}
- ])
-
- {:ok, _} = create_doc(@users_db_name, user_doc)
-
- # create a user without any special clearance
- user_doc =
- prepare_user_doc([
- {:name, "juanjo@apache.org"},
- {:password, @password}
- ])
-
- {:ok, _} = create_doc(@users_db_name, user_doc)
-
- on_exit(&tear_down/0)
-
- :ok
- end
-
- defp tear_down do
- delete_db(@users_db_name)
- end
-
- defp login(user, password) do
- sess = Couch.login(user, password)
- assert sess.cookie, "Login correct is expected"
- sess
- end
-
- defp logout(session) do
- assert Couch.Session.logout(session).body["ok"]
- end
-
- defp open_as(db_name, doc_id, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- expect_response = Keyword.get(options, :expect_response, 200)
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login(user, @password)
-
- resp =
- Couch.Session.get(
- session,
- "/#{db_name}/#{URI.encode(doc_id)}"
- )
-
- if use_session == nil do
- logout(session)
- end
-
- assert resp.status_code == expect_response
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp.body
- end
-
- defp set_security(db_name, security, expect_response \\ 200) do
- resp = Couch.put("/#{db_name}/_security", body: security)
- assert resp.status_code == expect_response
- end
-
- @tag :with_db
- test "unrestricted db can be read", context do
- db_name = context[:db_name]
-
- doc = %{_id: "baz", foo: "bar"}
- {:ok, _} = create_doc(db_name, doc)
-
- # any user can read unrestricted db
- open_as(db_name, "baz", user: "juanjo@apache.org")
- open_as(db_name, "baz", user: "bond@apache.org")
- end
-
- @tag :with_db
- test "restricted db can be read by authorized users", context do
- db_name = context[:db_name]
-
- doc = %{_id: "baz", foo: "bar"}
- {:ok, _} = create_doc(db_name, doc)
-
- security = %{
- members: %{
- roles: ["super-secret-club"],
- names: ["joe", "barb"]
- }
- }
-
- set_security(db_name, security)
-
- # can't read it as bond is missing the needed role
- open_as(db_name, "baz", user: "bond@apache.org", expect_response: 403)
-
- # make anyone with the top-secret role an admin
- # db admins are automatically members
- security = %{
- admins: %{
- roles: ["top-secret"],
- names: []
- },
- members: %{
- roles: ["super-secret-club"],
- names: ["joe", "barb"]
- }
- }
-
- set_security(db_name, security)
-
- # db admin can read
- open_as(db_name, "baz", user: "bond@apache.org")
-
- # admin now adds the top-secret role to the db's members
- # and removes db-admins
- security = %{
- admins: %{
- roles: [],
- names: []
- },
- members: %{
- roles: ["super-secret-club", "top-secret"],
- names: ["joe", "barb"]
- }
- }
-
- set_security(db_name, security)
-
- # server _admin can always read
- resp = Couch.get("/#{db_name}/baz")
- assert resp.status_code == 200
-
- open_as(db_name, "baz", user: "bond@apache.org")
- end
-
- @tag :with_db
- test "works with readers (backwards compat with 1.0)", context do
- db_name = context[:db_name]
-
- doc = %{_id: "baz", foo: "bar"}
- {:ok, _} = create_doc(db_name, doc)
-
- security = %{
- admins: %{
- roles: [],
- names: []
- },
- readers: %{
- roles: ["super-secret-club", "top-secret"],
- names: ["joe", "barb"]
- }
- }
-
- set_security(db_name, security)
- open_as(db_name, "baz", user: "bond@apache.org")
- end
-
- @tag :with_db
- test "can't set non string reader names or roles", context do
- db_name = context[:db_name]
-
- security = %{
- members: %{
- roles: ["super-secret-club", %{"top-secret": "awesome"}],
- names: ["joe", "barb"]
- }
- }
-
- set_security(db_name, security, 500)
-
- security = %{
- members: %{
- roles: ["super-secret-club", "top-secret"],
- names: ["joe", 22]
- }
- }
-
- set_security(db_name, security, 500)
-
- security = %{
- members: %{
- roles: ["super-secret-club", "top-secret"],
- names: "joe"
- }
- }
-
- set_security(db_name, security, 500)
- end
-
- @tag :with_db
- test "members can query views", context do
- db_name = context[:db_name]
-
- doc = %{_id: "baz", foo: "bar"}
- {:ok, _} = create_doc(db_name, doc)
-
- security = %{
- admins: %{
- roles: [],
- names: []
- },
- members: %{
- roles: ["super-secret-club", "top-secret"],
- names: ["joe", "barb"]
- }
- }
-
- set_security(db_name, security)
-
- view = %{
- _id: "_design/foo",
- views: %{
- bar: %{
- map: "function(doc){emit(null, null)}"
- }
- }
- }
-
- {:ok, _} = create_doc(db_name, view)
-
- # members can query views
- open_as(db_name, "_design/foo/_view/bar", user: "bond@apache.org")
- end
-end
diff --git a/test/elixir/test/recreate_doc_test.exs b/test/elixir/test/recreate_doc_test.exs
deleted file mode 100644
index 08f92293e..000000000
--- a/test/elixir/test/recreate_doc_test.exs
+++ /dev/null
@@ -1,165 +0,0 @@
-defmodule RecreateDocTest do
- use CouchTestCase
-
- @moduletag :recreate_doc
-
- @moduledoc """
- Test CouchDB document recreation
- This is a port of the recreate_doc.js suite
- """
-
- @tag :with_db
- test "recreate document", context do
- db_name = context[:db_name]
-
- # First create a new document with the ID "foo", and delete it again
- doc = %{_id: "foo", a: "bar", b: 42}
- {:ok, resp} = create_doc(db_name, doc)
- first_rev = resp.body["rev"]
-
- resp = Couch.delete("/#{db_name}/foo?rev=#{first_rev}")
- assert resp.status_code == 200
-
- # Now create a new document with the same ID, save it, and then modify it
- doc = %{_id: "foo"}
-
- for _i <- 0..9 do
- {:ok, _} = create_doc(db_name, doc)
- resp = Couch.get("/#{db_name}/foo")
-
- updated_doc =
- resp.body
- |> Map.put("a", "baz")
-
- resp = Couch.put("/#{db_name}/foo", body: updated_doc)
- assert resp.status_code == 201
- rev = resp.body["rev"]
- resp = Couch.delete("/#{db_name}/foo?rev=#{rev}")
- assert resp.status_code == 200
- end
- end
-
- @tag :with_db
- test "COUCHDB-292 - recreate a deleted document", context do
- db_name = context[:db_name]
- # First create a new document with the ID "foo", and delete it again
- doc = %{_id: "foo", a: "bar", b: 42}
- {:ok, resp} = create_doc(db_name, doc)
- first_rev = resp.body["rev"]
-
- resp = Couch.delete("/#{db_name}/foo?rev=#{first_rev}")
- assert resp.status_code == 200
-
- # COUCHDB-292: now attempt to save the document with a previous rev that has
- # since been deleted; this should generate a conflict
- updated_doc =
- doc
- |> Map.put(:_rev, first_rev)
-
- resp = Couch.put("/#{db_name}/foo", body: updated_doc)
- assert resp.status_code == 409
-
- # same as before, but with binary
- bin_att_doc = %{
- _id: "foo",
- _rev: first_rev,
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
-
- resp = Couch.put("/#{db_name}/foo", body: bin_att_doc)
- assert resp.status_code == 409
- end
-
- @tag :with_db
- test "Recreate a deleted document with non-exsistant rev", context do
- db_name = context[:db_name]
-
- doc = %{_id: "foo", a: "bar", b: 42}
- {:ok, resp} = create_doc(db_name, doc)
- first_rev = resp.body["rev"]
-
- resp = Couch.delete("/#{db_name}/foo?rev=#{first_rev}")
- assert resp.status_code == 200
-
- # random non-existent prev rev
- updated_doc =
- doc
- |> Map.put(:_rev, "1-asfafasdf")
-
- resp = Couch.put("/#{db_name}/foo", body: updated_doc)
- assert resp.status_code == 409
-
- # random non-existent prev rev with a binary attachment
- bin_att_doc = %{
- _id: "foo",
- _rev: "1-aasasfasdf",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
-
- resp = Couch.put("/#{db_name}/foo", body: bin_att_doc)
- assert resp.status_code == 409
- end
-
- @tag :with_db
- test "COUCHDB-1265 - changes feed after we try and break the update_seq tree",
- context do
- db_name = context[:db_name]
-
- # Test COUCHDB-1265 - Reinserting an old revision into the revision tree causes
- # duplicates in the update_seq tree.
- revs = create_rev_doc(db_name, "a", 3)
-
- resp =
- Couch.put("/#{db_name}/a",
- body: Enum.at(revs, 0),
- query: [new_edits: false]
- )
-
- assert resp.status_code == 201
-
- resp =
- Couch.put("/#{db_name}/a",
- body: Enum.at(revs, -1)
- )
-
- assert resp.status_code == 201
-
- resp = Couch.get("/#{db_name}/_changes")
- assert resp.status_code == 200
-
- assert length(resp.body["results"]) == 1
- end
-
- # function to create a doc with multiple revisions
- defp create_rev_doc(db_name, id, num_revs) do
- doc = %{_id: id, count: 0}
- {:ok, resp} = create_doc(db_name, doc)
- create_rev_doc(db_name, id, num_revs, [Map.put(doc, :_rev, resp.body["rev"])])
- end
-
- defp create_rev_doc(db_name, id, num_revs, revs) do
- if length(revs) < num_revs do
- doc = %{_id: id, _rev: Enum.at(revs, -1)[:_rev], count: length(revs)}
- {:ok, resp} = create_doc(db_name, doc)
-
- create_rev_doc(
- db_name,
- id,
- num_revs,
- revs ++ [Map.put(doc, :_rev, resp.body["rev"])]
- )
- else
- revs
- end
- end
-end
diff --git a/test/elixir/test/reduce_builtin_test.exs b/test/elixir/test/reduce_builtin_test.exs
deleted file mode 100644
index d13ada1b3..000000000
--- a/test/elixir/test/reduce_builtin_test.exs
+++ /dev/null
@@ -1,282 +0,0 @@
-defmodule ReduceBuiltinTest do
- use CouchTestCase
-
- @moduletag :views
-
- @moduledoc """
- Test CouchDB view builtin reduce functions
- This is a port of the reduce_builtin.js suite
- """
-
- def random_ddoc(db_name) do
- "/#{db_name}/_design/#{:erlang.monotonic_time()}"
- end
-
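- # For example, summate(500) => 125250.0, i.e. the sum 1 + 2 + ... + 500.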
- def summate(n) do
- (n + 1) * n / 2
- end
-
- def sumsqr(n) do
- 1..n |> Enum.reduce(0, fn i, acc -> acc + i * i end)
- end
-
- def check_approx_distinct(expected, estimated) do
- # see https://en.wikipedia.org/wiki/HyperLogLog
- err = 1.04 / :math.sqrt(:math.pow(2, 11 - 1))
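- # i.e. 1.04 / sqrt(2 ^ 10) ~= 0.0325, so estimates within roughly 3.3% of
- # the true cardinality are accepted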
- abs(expected - estimated) < expected * err
- end
-
- def query_rows(ddoc_url, builtin_fun, query \\ nil) do
- http_opts = if query, do: [query: query], else: []
- Couch.get("#{ddoc_url}/_view/builtin#{builtin_fun}", http_opts).body["rows"]
- end
-
- def query_value(ddoc_url, builtin_fun, query \\ nil) do
- hd(query_rows(ddoc_url, builtin_fun, query))["value"]
- end
-
- @tag :with_db
- test "Builtin reduce functions", context do
- db_name = context[:db_name]
- num_docs = 500
-
- docs = make_docs(1..num_docs)
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs}, query: %{w: 3})
- assert resp.status_code in [201, 202]
-
- ddoc_url = random_ddoc(db_name)
-
- map = ~s"""
- function (doc) {
- emit(doc.integer, doc.integer);
- emit(doc.integer, doc.integer);
- };
- """
-
- design_doc = %{
- :views => %{
- :builtin_sum => %{:map => map, :reduce => "_sum"},
- :builtin_count => %{:map => map, :reduce => "_count"},
- :builtin_stats => %{:map => map, :reduce => "_stats"},
- :builtin_approx_count_distinct => %{
- :map => map,
- :reduce => "_approx_count_distinct"
- }
- }
- }
-
- assert Couch.put(ddoc_url, body: design_doc).body["ok"]
-
- value = ddoc_url |> query_value("_sum")
- assert value == 2 * summate(num_docs)
- value = ddoc_url |> query_value("_count")
- assert value == 1000
- value = ddoc_url |> query_value("_stats")
- assert value["sum"] == 2 * summate(num_docs)
- assert value["count"] == 1000
- assert value["min"] == 1
- assert value["max"] == 500
- assert value["sumsqr"] == 2 * sumsqr(num_docs)
- value = ddoc_url |> query_value("_approx_count_distinct")
- assert check_approx_distinct(num_docs, value)
-
- value = ddoc_url |> query_value("_sum", %{startkey: 4, endkey: 4})
- assert value == 8
- value = ddoc_url |> query_value("_count", %{startkey: 4, endkey: 4})
- assert value == 2
- value = ddoc_url |> query_value("_approx_count_distinct", %{startkey: 4, endkey: 4})
- assert check_approx_distinct(1, value)
-
- value = ddoc_url |> query_value("_sum", %{startkey: 4, endkey: 5})
- assert value == 18
- value = ddoc_url |> query_value("_count", %{startkey: 4, endkey: 5})
- assert value == 4
- value = ddoc_url |> query_value("_approx_count_distinct", %{startkey: 4, endkey: 5})
- assert check_approx_distinct(2, value)
-
- value = ddoc_url |> query_value("_sum", %{startkey: 4, endkey: 6})
- assert value == 30
- value = ddoc_url |> query_value("_count", %{startkey: 4, endkey: 6})
- assert value == 6
- value = ddoc_url |> query_value("_approx_count_distinct", %{startkey: 4, endkey: 6})
- assert check_approx_distinct(3, value)
-
- assert [row0, row1, row2] = ddoc_url |> query_rows("_sum", %{group: true, limit: 3})
- assert row0["value"] == 2
- assert row1["value"] == 4
- assert row2["value"] == 6
-
- assert [row0, row1, row2] =
- ddoc_url |> query_rows("_approx_count_distinct", %{group: true, limit: 3})
-
- assert check_approx_distinct(1, row0["value"])
- assert check_approx_distinct(1, row1["value"])
- assert check_approx_distinct(1, row2["value"])
-
- 1..div(500, 2)
- |> Enum.take_every(30)
- |> Enum.each(fn i ->
- value = ddoc_url |> query_value("_sum", %{startkey: i, endkey: num_docs - i})
- assert value == 2 * (summate(num_docs - i) - summate(i - 1))
- end)
- end
-
- @tag :with_db
- test "Builtin reduce functions with trailings", context do
- db_name = context[:db_name]
- num_docs = 500
-
- docs = make_docs(1..num_docs)
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs}, query: %{w: 3})
- assert resp.status_code in [201, 202]
-
- # test for trailing characters after builtin functions; desired behaviour
- # is to disregard any trailing characters
- # I think the behavior should be a prefix test, so that even "_statsorama"
- # or "_stats\nare\nawesome" should work just as "_stats" does. - JChris
- ["\n", "orama", "\nare\nawesome", " ", " \n "]
- |> Enum.each(fn trailing ->
- ddoc_url = random_ddoc(db_name)
-
- map = ~s"""
- function (doc) {
- emit(doc.integer, doc.integer);
- emit(doc.integer, doc.integer);
- };
- """
-
- design_doc = %{
- :views => %{
- :builtin_sum => %{:map => map, :reduce => "_sum#{trailing}"},
- :builtin_count => %{:map => map, :reduce => "_count#{trailing}"},
- :builtin_stats => %{:map => map, :reduce => "_stats#{trailing}"},
- :builtin_approx_count_distinct => %{
- :map => map,
- :reduce => "_approx_count_distinct#{trailing}"
- }
- }
- }
-
- assert Couch.put(ddoc_url, body: design_doc).body["ok"]
-
- value = ddoc_url |> query_value("_sum")
- assert value == 2 * summate(num_docs)
- value = ddoc_url |> query_value("_count")
- assert value == 1000
- value = ddoc_url |> query_value("_stats")
- assert value["sum"] == 2 * summate(num_docs)
- assert value["count"] == 1000
- assert value["min"] == 1
- assert value["max"] == 500
- assert value["sumsqr"] == 2 * sumsqr(num_docs)
- end)
- end
-
- @tag :with_db
- test "Builtin count and sum reduce for key as array", context do
- db_name = context[:db_name]
-
- ddoc_url = random_ddoc(db_name)
-
- map_one = ~s"""
- function (doc) {
- emit(doc.keys, 1);
- };
- """
-
- map_ones_array = ~s"""
- function (doc) {
- emit(doc.keys, [1, 1]);
- };
- """
-
- design_doc = %{
- :views => %{
- :builtin_one_sum => %{:map => map_one, :reduce => "_sum"},
- :builtin_one_count => %{:map => map_one, :reduce => "_count"},
- :builtin_ones_array_sum => %{:map => map_ones_array, :reduce => "_sum"}
- }
- }
-
- assert Couch.put(ddoc_url, body: design_doc).body["ok"]
-
- for i <- 1..5 do
- for j <- 0..9 do
- docs = [
- %{keys: ["a"]},
- %{keys: ["a"]},
- %{keys: ["a", "b"]},
- %{keys: ["a", "b"]},
- %{keys: ["a", "b", "c"]},
- %{keys: ["a", "b", "d"]},
- %{keys: ["a", "c", "d"]},
- %{keys: ["d"]},
- %{keys: ["d", "a"]},
- %{keys: ["d", "b"]},
- %{keys: ["d", "c"]}
- ]
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs}, query: %{w: 3})
- assert resp.status_code in [201, 202]
-
- total_docs = 1 + (i - 1) * 10 * 11 + (j + 1) * 11
- assert Couch.get("/#{db_name}").body["doc_count"] == total_docs
- end
-
- ["_sum", "_count"]
- |> Enum.each(fn builtin ->
- builtin = "_one#{builtin}"
-
- # group by exact key match
- rows = query_rows(ddoc_url, builtin, %{group: true})
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 20 * i}
- assert Enum.at(rows, 2) == %{"key" => ["a", "b", "c"], "value" => 10 * i}
- assert Enum.at(rows, 3) == %{"key" => ["a", "b", "d"], "value" => 10 * i}
-
- # make sure group reduce and limit params provide valid json
- assert [row0, _] = query_rows(ddoc_url, builtin, %{group: true, limit: 2})
- assert row0 == %{"key" => ["a"], "value" => 20 * i}
-
- # group by the first element in the key array
- rows = query_rows(ddoc_url, builtin, %{group_level: 1})
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 70 * i}
- assert Enum.at(rows, 1) == %{"key" => ["d"], "value" => 40 * i}
-
- # group by the first 2 elements in the key array
- rows = query_rows(ddoc_url, builtin, %{group_level: 2})
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 40 * i}
- assert Enum.at(rows, 2) == %{"key" => ["a", "c"], "value" => 10 * i}
- assert Enum.at(rows, 3) == %{"key" => ["d"], "value" => 10 * i}
- assert Enum.at(rows, 4) == %{"key" => ["d", "a"], "value" => 10 * i}
- assert Enum.at(rows, 5) == %{"key" => ["d", "b"], "value" => 10 * i}
- assert Enum.at(rows, 6) == %{"key" => ["d", "c"], "value" => 10 * i}
- end)
-
- rows = query_rows(ddoc_url, "_ones_array_sum", %{group: true})
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => [20 * i, 20 * i]}
- assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => [20 * i, 20 * i]}
- assert Enum.at(rows, 2) == %{"key" => ["a", "b", "c"], "value" => [10 * i, 10 * i]}
- assert Enum.at(rows, 3) == %{"key" => ["a", "b", "d"], "value" => [10 * i, 10 * i]}
-
- assert [row0, _] = query_rows(ddoc_url, "_ones_array_sum", %{group: true, limit: 2})
- assert row0 == %{"key" => ["a"], "value" => [20 * i, 20 * i]}
-
- rows = query_rows(ddoc_url, "_ones_array_sum", %{group_level: 1})
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => [70 * i, 70 * i]}
- assert Enum.at(rows, 1) == %{"key" => ["d"], "value" => [40 * i, 40 * i]}
-
- rows = query_rows(ddoc_url, "_ones_array_sum", %{group_level: 2})
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => [20 * i, 20 * i]}
- assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => [40 * i, 40 * i]}
- assert Enum.at(rows, 2) == %{"key" => ["a", "c"], "value" => [10 * i, 10 * i]}
- assert Enum.at(rows, 3) == %{"key" => ["d"], "value" => [10 * i, 10 * i]}
- assert Enum.at(rows, 4) == %{"key" => ["d", "a"], "value" => [10 * i, 10 * i]}
- assert Enum.at(rows, 5) == %{"key" => ["d", "b"], "value" => [10 * i, 10 * i]}
- assert Enum.at(rows, 6) == %{"key" => ["d", "c"], "value" => [10 * i, 10 * i]}
- end
- end
-end
diff --git a/test/elixir/test/reduce_false_test.exs b/test/elixir/test/reduce_false_test.exs
deleted file mode 100644
index 675c11dbd..000000000
--- a/test/elixir/test/reduce_false_test.exs
+++ /dev/null
@@ -1,50 +0,0 @@
-defmodule ReduceFalseTest do
- use CouchTestCase
-
- @moduletag :views
-
- @moduledoc """
- Test CouchDB view without reduces
- This is a port of the reduce_false.js suite
- """
-
- def summate(n) do
- (n + 1) * n / 2
- end
-
- @tag :with_db
- test "Basic reduce functions", context do
- db_name = context[:db_name]
- view_url = "/#{db_name}/_design/foo/_view/summate"
- num_docs = 5
-
- map = ~s"""
- function (doc) {
- emit(doc.integer, doc.integer);
- };
- """
-
- reduce = "function (keys, values) { return sum(values); };"
- red_doc = %{:views => %{:summate => %{:map => map, :reduce => reduce}}}
- assert Couch.put("/#{db_name}/_design/foo", body: red_doc).body["ok"]
-
- docs = make_docs(1..num_docs)
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs}, query: %{w: 3})
- assert resp.status_code in [201, 202]
-
- # Test that the reduce works
- rows = Couch.get(view_url).body["rows"]
- assert length(rows) == 1
- assert hd(rows)["value"] == summate(num_docs)
-
- # Test that we got our docs back
- rows = Couch.get(view_url, query: %{reduce: false}).body["rows"]
- assert length(rows) == 5
-
- rows
- |> Enum.with_index(1)
- |> Enum.each(fn {row, i} ->
- assert i == row["value"]
- end)
- end
-end
diff --git a/test/elixir/test/reduce_test.exs b/test/elixir/test/reduce_test.exs
deleted file mode 100644
index 22f2fa6f2..000000000
--- a/test/elixir/test/reduce_test.exs
+++ /dev/null
@@ -1,632 +0,0 @@
-defmodule ReduceTest do
- use CouchTestCase
-
- @moduletag :views
-
- @moduledoc """
- Test CouchDB view reduces
- This is a port of the reduce.js suite
- """
-
- def summate(n) do
- (n + 1) * n / 2
- end
-
- @tag :with_db
- test "Basic reduce functions", context do
- db_name = context[:db_name]
- view_url = "/#{db_name}/_design/foo/_view/bar"
- num_docs = 500
-
- map = ~s"""
- function (doc) {
- emit(doc.integer, doc.integer);
- emit(doc.integer, doc.integer);
- };
- """
-
- reduce = "function (keys, values) { return sum(values); };"
- red_doc = %{:views => %{:bar => %{:map => map, :reduce => reduce}}}
-
- assert Couch.put("/#{db_name}/_design/foo", body: red_doc).body["ok"]
- docs = make_docs(1..num_docs)
-
- assert Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs}, query: %{w: 3}).status_code in
- [201, 202]
-
- rows = Couch.get(view_url).body["rows"]
- assert hd(rows)["value"] == 2 * summate(num_docs)
-
- query = %{:startkey => 4, :endkey => 4}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 8
-
- query = %{:startkey => 4, :endkey => 5}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 18
-
- query = %{:startkey => 4, :endkey => 6}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 30
-
- query = %{:group => true, :limit => 3}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert Enum.at(rows, 0)["value"] == 2
- assert Enum.at(rows, 1)["value"] == 4
- assert Enum.at(rows, 2)["value"] == 6
-
- half_num_docs = Integer.floor_div(num_docs, 2)
- max = Integer.floor_div(num_docs, 30) + 1
-
- for i <- 1..max, i * 30 + 1 < half_num_docs do
- i = i * 30 + 1
- query = %{:startkey => i, :endkey => num_docs - i}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 2 * (summate(num_docs - i) - summate(i - 1))
- end
- end
-
- @tag :with_db
- test "More complex array key view row testing", context do
- db_name = context[:db_name]
- view_url = "/#{db_name}/_design/foo/_view/bar"
- map = "function (doc) { emit(doc.keys, 1); };"
- reduce = "function (keys, values) { return sum(values); };"
- red_doc = %{:views => %{bar: %{map: map, reduce: reduce}}}
-
- assert Couch.put("/#{db_name}/_design/foo", body: red_doc).body["ok"]
-
- for i <- 1..5 do
- for j <- 0..9 do
- docs = [
- %{keys: ["a"]},
- %{keys: ["a"]},
- %{keys: ["a", "b"]},
- %{keys: ["a", "b"]},
- %{keys: ["a", "b", "c"]},
- %{keys: ["a", "b", "d"]},
- %{keys: ["a", "c", "d"]},
- %{keys: ["d"]},
- %{keys: ["d", "a"]},
- %{keys: ["d", "b"]},
- %{keys: ["d", "c"]}
- ]
-
- assert Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs}, query: %{w: 3}).status_code in
- [201, 202]
-
- total_docs = 1 + (i - 1) * 10 * 11 + (j + 1) * 11
- assert Couch.get("/#{db_name}").body["doc_count"] == total_docs
- end
-
- # test group by exact key match
- query = %{group: true}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 20 * i}
- assert Enum.at(rows, 2) == %{"key" => ["a", "b", "c"], "value" => 10 * i}
- assert Enum.at(rows, 3) == %{"key" => ["a", "b", "d"], "value" => 10 * i}
-
- # test group reduce and limit params provide valid json
- query = %{group: true, limit: 2}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert length(rows) == 2
-
- # test group by the first 2 elements in the key array
- query = %{group_level: 2}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 40 * i}
- assert Enum.at(rows, 2) == %{"key" => ["a", "c"], "value" => 10 * i}
- assert Enum.at(rows, 3) == %{"key" => ["d"], "value" => 10 * i}
- assert Enum.at(rows, 4) == %{"key" => ["d", "a"], "value" => 10 * i}
- assert Enum.at(rows, 5) == %{"key" => ["d", "b"], "value" => 10 * i}
- assert Enum.at(rows, 6) == %{"key" => ["d", "c"], "value" => 10 * i}
-
- # test endkey with inclusive_end=true
- query = %{group_level: 2, endkey: ~s(["d"]), inclusive_end: true}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 40 * i}
- assert Enum.at(rows, 2) == %{"key" => ["a", "c"], "value" => 10 * i}
- assert Enum.at(rows, 3) == %{"key" => ["d"], "value" => 10 * i}
- assert length(rows) == 4
-
- # test endkey with inclusive_end=false
- query = %{group_level: 2, endkey: ~s(["d"]), inclusive_end: false}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
- assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 40 * i}
- assert Enum.at(rows, 2) == %{"key" => ["a", "c"], "value" => 10 * i}
- assert length(rows) == 3
- end
- end
-
- @tag :with_db
- test "More complex reductions that need to use the combine option", context do
- db_name = context[:db_name]
- view_url = "/#{db_name}/_design/foo/_view/bar"
- map = "function (doc) { emit(doc.val, doc.val); };"
-
- reduce = ~s"""
- function (keys, values, rereduce) {
- // This computes the standard deviation of the mapped results
- var stdDeviation=0.0;
- var count=0;
- var total=0.0;
- var sqrTotal=0.0;
-
- if (!rereduce) {
- // This is the reduce phase; we are reducing over emitted values from
- // the map functions.
- for(var i in values) {
- total = total + values[i];
- sqrTotal = sqrTotal + (values[i] * values[i]);
- }
- count = values.length;
- } else {
- // This is the rereduce phase; we are re-reducing previously
- // reduced values.
- for(var i in values) {
- count = count + values[i].count;
- total = total + values[i].total;
- sqrTotal = sqrTotal + values[i].sqrTotal;
- }
- }
-
- var variance = (sqrTotal - ((total * total)/count)) / count;
- stdDeviation = Math.sqrt(variance);
-
- // Return the reduce result. It contains enough information to be rereduced
- // with other reduce results.
- return {"stdDeviation":stdDeviation,"count":count,
- "total":total,"sqrTotal":sqrTotal};
- }
- """
-
- red_doc = %{:views => %{:bar => %{:map => map, :reduce => reduce}}}
- assert Couch.put("/#{db_name}/_design/foo", body: red_doc).body["ok"]
-
- Enum.each(1..10, fn _ ->
- docs = for i <- 1..10, do: %{val: i * 10}
-
- assert Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs}, query: %{w: 3}).status_code in
- [201, 202]
- end)
-
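- # The view sees the values 10, 20, ..., 100 repeated 10 times each, so the
- # mean is 55, the mean of squares is 3850, the variance is 3850 - 55^2 = 825,
- # and the population standard deviation is sqrt(825) ~= 28.7228.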
- rows = Couch.get(view_url).body["rows"]
- assert_in_delta hd(rows)["value"]["stdDeviation"], 28.722813232690143, 0.0000000001
- end
-
- @tag :with_db
- test "Reduce pagination", context do
- db_name = context[:db_name]
- view_url = "/#{db_name}/_design/foo/_view/bar"
-
- ddoc = %{
- _id: "_design/foo",
- language: "javascript",
- views: %{
- bar: %{
- reduce: "_count",
- map: ~s"""
- function(doc) {
- emit(doc.int, doc._id);
- emit(doc.int + 1, doc._id);
- emit(doc.int + 2, doc._id);
- }
- """
- }
- }
- }
-
- assert Couch.put("/#{db_name}/_design/foo", body: ddoc).body["ok"]
- docs = for i <- 0..1122, do: %{_id: Integer.to_string(i), int: i}
-
- assert Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs}, query: %{w: 3}).status_code in
- [201, 202]
-
- rand_val = fn -> :rand.uniform(100_000_000) end
-
- # ?group=false tests
- query = %{startkey: 400, endkey: 402, foobar: rand_val.()}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 9
- query = %{startkey: 402, endkey: 400, foobar: rand_val.(), descending: true}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 9
-
- query = %{startkey: 400, endkey: 402, foobar: rand_val.(), inclusive_end: false}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 6
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- inclusive_end: false,
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 6
-
- query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "400"}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 7
-
- query = %{
- startkey: 400,
- endkey: 402,
- foobar: rand_val.(),
- endkey_docid: "400",
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 6
-
- query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "401"}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 8
-
- query = %{
- startkey: 400,
- endkey: 402,
- foobar: rand_val.(),
- endkey_docid: "401",
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 7
-
- query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "402"}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 9
-
- query = %{
- startkey: 400,
- endkey: 402,
- foobar: rand_val.(),
- endkey_docid: "402",
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 8
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "398",
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 9
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "398",
- descending: true,
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 8
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "399",
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 8
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "399",
- descending: true,
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 7
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "400",
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 7
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "400",
- descending: true,
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 6
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- startkey_docid: "400",
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 7
-
- query = %{
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- startkey_docid: "401",
- descending: true,
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert hd(rows)["value"] == 5
-
- # ?group=true tests
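- # With group=true the reduce is computed per key, so each of the three
- # keys in the range comes back as its own row with a _count of 3.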
- query = %{:group => true, startkey: 400, endkey: 402, foobar: rand_val.()}
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 400
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 402
- assert Enum.at(rows, 2)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 400
- assert Enum.at(rows, 2)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 400,
- endkey: 402,
- foobar: rand_val.(),
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 2
- assert Enum.at(rows, 0)["key"] == 400
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- inclusive_end: false,
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 2
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 400,
- endkey: 402,
- foobar: rand_val.(),
- endkey_docid: "401"
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 400
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 402
- assert Enum.at(rows, 2)["value"] == 2
-
- query = %{
- :group => true,
- startkey: 400,
- endkey: 402,
- foobar: rand_val.(),
- endkey_docid: "400"
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 400
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 402
- assert Enum.at(rows, 2)["value"] == 1
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- startkey_docid: "401",
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 2
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 400
- assert Enum.at(rows, 2)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- startkey_docid: "400",
- descending: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 1
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 400
- assert Enum.at(rows, 2)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- startkey_docid: "401",
- descending: true,
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 2
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 2
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- startkey_docid: "400",
- descending: true,
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 2
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 1
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "398",
- descending: true,
- inclusive_end: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 400
- assert Enum.at(rows, 2)["value"] == 3
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "399",
- descending: true,
- inclusive_end: true
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 400
- assert Enum.at(rows, 2)["value"] == 2
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "399",
- descending: true,
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- assert Enum.at(rows, 2)["key"] == 400
- assert Enum.at(rows, 2)["value"] == 1
-
- query = %{
- :group => true,
- startkey: 402,
- endkey: 400,
- foobar: rand_val.(),
- endkey_docid: "400",
- descending: true,
- inclusive_end: false
- }
-
- rows = Couch.get(view_url, query: query).body["rows"]
- assert length(rows) == 2
- assert Enum.at(rows, 0)["key"] == 402
- assert Enum.at(rows, 0)["value"] == 3
- assert Enum.at(rows, 1)["key"] == 401
- assert Enum.at(rows, 1)["value"] == 3
- end
-end
diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs
deleted file mode 100644
index 09f3e2417..000000000
--- a/test/elixir/test/replication_test.exs
+++ /dev/null
@@ -1,1773 +0,0 @@
-defmodule ReplicationTest do
- use CouchTestCase
-
- @moduledoc """
- Test CouchDB Replication Behavior
- This is a port of the replication.js suite
- """
-
- # TODO: Parameterize these
- @db_pairs_prefixes [
- {"remote-to-remote", "http://127.0.0.1:15984/", "http://127.0.0.1:15984/"}
- ]
-
- # This should probably go into `make elixir` like what
- # happens for JavaScript tests.
- @moduletag config: [{"replicator", "startup_jitter", "0"}]
-
- test "source database not found with host" do
- name = random_db_name()
- src_url = "http://127.0.0.1:15984/" <> name <> "_src"
- tgt_url = "http://127.0.0.1:15984/" <> name <> "_tgt"
- check_not_found(src_url, tgt_url)
- end
-
- def check_not_found(src, tgt) do
- body = %{:source => src, :target => tgt}
- resp = Couch.post("/_replicate", body: body)
- assert resp.body["error"] == "db_not_found"
- end
-
- test "replicating attachment without conflict - COUCHDB-885" do
- name = random_db_name()
- src_db_name = name <> "_src"
- tgt_db_name = name <> "_tgt"
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- doc = %{"_id" => "doc1"}
- [doc] = save_docs(src_db_name, [doc])
-
- repl_src = "http://127.0.0.1:15984/" <> src_db_name
- repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
- result = replicate(repl_src, repl_tgt)
- assert result["ok"]
- assert is_list(result["history"])
- history = Enum.at(result["history"], 0)
- assert history["docs_written"] == 1
- assert history["docs_read"] == 1
- assert history["doc_write_failures"] == 0
-
- doc =
- Map.put(doc, "_attachments", %{
- "hello.txt" => %{
- "content_type" => "text/plain",
- # base64:encode("hello world")
- "data" => "aGVsbG8gd29ybGQ="
- },
- "foo.dat" => %{
- "content_type" => "not/compressible",
- # base64:encode("i am not gziped")
- "data" => "aSBhbSBub3QgZ3ppcGVk"
- }
- })
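- # For reference, the base64 payloads above can be reproduced with
- # Base.encode64/1: Base.encode64("hello world") == "aGVsbG8gd29ybGQ=" and
- # Base.encode64("i am not gziped") == "aSBhbSBub3QgZ3ppcGVk".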
-
- [doc] = save_docs(src_db_name, [doc])
-
- repl_src = "http://127.0.0.1:15984/" <> src_db_name
- repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
- result = replicate(repl_src, repl_tgt)
-
- assert result["ok"]
- assert is_list(result["history"])
- assert length(result["history"]) == 2
- history = Enum.at(result["history"], 0)
- assert history["docs_written"] == 2
- assert history["docs_read"] == 2
- assert history["doc_write_failures"] == 0
-
- query = %{
- :conflicts => true,
- :deleted_conflicts => true,
- :attachments => true,
- :att_encoding_info => true
- }
-
- opts = [headers: [Accept: "application/json"], query: query]
- resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}", opts)
- assert HTTPotion.Response.success?(resp)
- assert is_map(resp.body)
- refute Map.has_key?(resp.body, "_conflicts")
- refute Map.has_key?(resp.body, "_deleted_conflicts")
-
- atts = resp.body["_attachments"]
-
- assert atts["hello.txt"]["content_type"] == "text/plain"
- assert atts["hello.txt"]["data"] == "aGVsbG8gd29ybGQ="
- assert atts["hello.txt"]["encoding"] == "gzip"
-
- assert atts["foo.dat"]["content_type"] == "not/compressible"
- assert atts["foo.dat"]["data"] == "aSBhbSBub3QgZ3ppcGVk"
- refute Map.has_key?(atts["foo.dat"], "encoding")
- end
-
- test "replication cancellation" do
- name = random_db_name()
- src_db_name = name <> "_src"
- tgt_db_name = name <> "_tgt"
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- save_docs(src_db_name, make_docs(1..6))
-
- repl_body = %{:continuous => true, :create_target => true}
- repl_src = "http://127.0.0.1:15984/" <> src_db_name
- repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
- result = replicate(repl_src, repl_tgt, body: repl_body)
-
- assert result["ok"]
- assert is_binary(result["_local_id"])
- repl_id = result["_local_id"]
-
- task = get_task(repl_id, 3_000)
- assert is_map(task)
-
- assert task["replication_id"] == repl_id
-
- repl_body = %{
- "replication_id" => repl_id,
- cancel: true
- }
-
- result = Couch.post("/_replicate", body: repl_body)
- assert result.status_code == 200
-
- wait_for_repl_stop(repl_id)
-
- assert get_task(repl_id, 0) == nil
-
- result = Couch.post("/_replicate", body: repl_body)
- assert result.status_code == 404
- end
-
- @tag user: [name: "joe", password: "erly", roles: ["erlanger"]]
- test "unauthorized replication cancellation", ctx do
- name = random_db_name()
- src_db_name = name <> "_src"
- tgt_db_name = name <> "_tgt"
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- save_docs(src_db_name, make_docs(1..6))
-
- repl_src = "http://127.0.0.1:15984/" <> src_db_name
- repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
- repl_body = %{"continuous" => true}
- result = replicate(repl_src, repl_tgt, body: repl_body)
-
- assert result["ok"]
- assert is_binary(result["_local_id"])
- repl_id = result["_local_id"]
-
- task = get_task(repl_id, 5_000)
- assert is_map(task)
-
- sess = Couch.login(ctx[:userinfo])
- resp = Couch.Session.get(sess, "/_session")
- assert resp.body["ok"]
- assert resp.body["userCtx"]["name"] == "joe"
-
- repl_body = %{
- "replication_id" => repl_id,
- cancel: true
- }
-
- resp = Couch.Session.post(sess, "/_replicate", body: repl_body)
- assert resp.status_code == 401
- assert resp.body["error"] == "unauthorized"
-
- assert Couch.Session.logout(sess).body["ok"]
-
- resp = Couch.post("/_replicate", body: repl_body)
- assert resp.status_code == 200
- end
-
- test "default headers returned for _scheduler/jobs" do
- resp = Couch.get("/_scheduler/jobs")
- assert resp.headers["Content-Type"] == "application/json"
- assert resp.headers["X-Couch-Request-ID"]
- assert resp.headers["X-CouchDB-Body-Time"]
- end
-
- test "default headers returned for _scheduler/docs " do
- resp = Couch.get("/_scheduler/docs")
- assert resp.headers["Content-Type"] == "application/json"
- assert resp.headers["X-Couch-Request-ID"]
- assert resp.headers["X-CouchDB-Body-Time"]
- end
-
- Enum.each(@db_pairs_prefixes, fn {name, src_prefix, tgt_prefix} ->
- @src_prefix src_prefix
- @tgt_prefix tgt_prefix
-
- test "simple #{name} replication - #{name}" do
- run_simple_repl(@src_prefix, @tgt_prefix)
- end
-
- test "replicate with since_seq - #{name}" do
- run_since_seq_repl(@src_prefix, @tgt_prefix)
- end
-
- test "validate_doc_update failure replications - #{name}" do
- run_vdu_repl(@src_prefix, @tgt_prefix)
- end
-
- test "create_target filter option - #{name}" do
- run_create_target_repl(@src_prefix, @tgt_prefix)
- end
-
- test "filtered replications - #{name}" do
- run_filtered_repl(@src_prefix, @tgt_prefix)
- end
-
- test "replication restarts after filter change - COUCHDB-892 - #{name}" do
- run_filter_changed_repl(@src_prefix, @tgt_prefix)
- end
-
- test "replication by doc ids - #{name}" do
- run_by_id_repl(@src_prefix, @tgt_prefix)
- end
-
- test "continuous replication - #{name}" do
- run_continuous_repl(@src_prefix, @tgt_prefix)
- end
-
- @tag config: [
- {"attachments", "compression_level", "8"},
- {"attachments", "compressible_types", "text/*"}
- ]
- test "compressed attachment replication - #{name}" do
- run_compressed_att_repl(@src_prefix, @tgt_prefix)
- end
-
- @tag user: [name: "joe", password: "erly", roles: ["erlanger"]]
- test "non-admin user on target - #{name}", ctx do
- run_non_admin_target_user_repl(@src_prefix, @tgt_prefix, ctx)
- end
-
- @tag user: [name: "joe", password: "erly", roles: ["erlanger"]]
- test "non-admin or reader user on source - #{name}", ctx do
- run_non_admin_or_reader_source_user_repl(@src_prefix, @tgt_prefix, ctx)
- end
- end)
-
- def run_simple_repl(src_prefix, tgt_prefix) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- att1_data = get_att1_data()
- att2_data = get_att2_data()
-
- ddoc = %{
- "_id" => "_design/foo",
- "language" => "javascript",
- "value" => "ddoc"
- }
-
- docs = make_docs(1..20) ++ [ddoc]
- docs = save_docs(src_db_name, docs)
-
- docs =
- for doc <- docs do
- if doc["integer"] >= 10 and doc["integer"] < 15 do
- add_attachment(src_db_name, doc, body: att1_data)
- else
- doc
- end
- end
-
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
- result = replicate(repl_src, repl_tgt)
- assert result["ok"]
-
- src_info =
- retry_until(fn ->
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert src_info["doc_count"] == tgt_info["doc_count"]
- src_info
- end)
-
- assert is_binary(result["session_id"])
- assert is_list(result["history"])
- assert length(result["history"]) == 1
- history = Enum.at(result["history"], 0)
- assert is_binary(history["start_time"])
- assert is_binary(history["end_time"])
- assert history["start_last_seq"] == 0
- assert history["missing_checked"] == src_info["doc_count"]
- assert history["missing_found"] == src_info["doc_count"]
- assert history["docs_read"] == src_info["doc_count"]
- assert history["docs_written"] == src_info["doc_count"]
- assert history["doc_write_failures"] == 0
-
- for doc <- docs do
- copy = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}").body
- assert cmp_json(doc, copy)
-
- if doc["integer"] >= 10 and doc["integer"] < 15 do
- atts = copy["_attachments"]
- assert is_map(atts)
- att = atts["readme.txt"]
- assert is_map(att)
- assert att["revpos"] == 2
- assert String.match?(att["content_type"], ~r/text\/plain/)
- assert att["stub"]
-
- resp = Couch.get!("/#{tgt_db_name}/#{copy["_id"]}/readme.txt")
- assert String.length(resp.body) == String.length(att1_data)
- assert resp.body == att1_data
- end
- end
-
- # Add one more doc to source and more attachments to existing docs
- new_doc = %{"_id" => "foo666", "value" => "d"}
- [new_doc] = save_docs(src_db_name, [new_doc])
-
- docs =
- for doc <- docs do
- if doc["integer"] >= 10 and doc["integer"] < 15 do
- ctype = "application/binary"
- opts = [name: "data.dat", body: att2_data, content_type: ctype]
- add_attachment(src_db_name, doc, opts)
- else
- doc
- end
- end
-
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
- assert result["ok"]
-
- retry_until(fn ->
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
- end)
-
- assert is_binary(result["session_id"])
- assert is_list(result["history"])
- assert length(result["history"]) == 2
- history = Enum.at(result["history"], 0)
- assert history["session_id"] == result["session_id"]
- assert is_binary(history["start_time"])
- assert is_binary(history["end_time"])
- assert history["missing_checked"] == 27
- assert history["missing_found"] == 27
- assert history["docs_read"] == 27
- assert history["docs_written"] == 27
- assert history["doc_write_failures"] == 0
-
- copy = Couch.get!("/#{tgt_db_name}/#{new_doc["_id"]}").body
- assert copy["_id"] == new_doc["_id"]
- assert copy["value"] == new_doc["value"]
-
- for i <- 10..14 do
- doc = Enum.at(docs, i - 1)
- copy = Couch.get!("/#{tgt_db_name}/#{i}").body
- assert cmp_json(doc, copy)
-
- atts = copy["_attachments"]
- assert is_map(atts)
- att = atts["readme.txt"]
- assert is_map(att)
- assert att["revpos"] == 2
- assert String.match?(att["content_type"], ~r/text\/plain/)
- assert att["stub"]
-
- resp = Couch.get!("/#{tgt_db_name}/#{i}/readme.txt")
- assert String.length(resp.body) == String.length(att1_data)
- assert resp.body == att1_data
-
- att = atts["data.dat"]
- assert is_map(att)
- assert att["revpos"] == 3
- assert String.match?(att["content_type"], ~r/application\/binary/)
- assert att["stub"]
-
- resp = Couch.get!("/#{tgt_db_name}/#{i}/data.dat")
- assert String.length(resp.body) == String.length(att2_data)
- assert resp.body == att2_data
- end
-
- # Test deletion is replicated
- del_doc = %{
- "_id" => "1",
- "_rev" => Enum.at(docs, 0)["_rev"],
- "_deleted" => true
- }
-
- [del_doc] = save_docs(src_db_name, [del_doc])
-
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
- assert result["ok"]
-
- retry_until(fn ->
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
- assert tgt_info["doc_del_count"] == src_info["doc_del_count"]
- assert tgt_info["doc_del_count"] == 1
- end)
-
- assert is_list(result["history"])
- assert length(result["history"]) == 3
- history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 28
- assert history["missing_found"] == 28
- assert history["docs_read"] == 28
- assert history["docs_written"] == 28
- assert history["doc_write_failures"] == 0
-
- resp = Couch.get("/#{tgt_db_name}/#{del_doc["_id"]}")
- assert resp.status_code == 404
-
- resp = Couch.get!("/#{tgt_db_name}/_changes")
- [change] = Enum.filter(resp.body["results"], &(&1["id"] == del_doc["_id"]))
- assert change["id"] == del_doc["_id"]
- assert change["deleted"]
-
- # Test replicating a conflict
- doc = Couch.get!("/#{src_db_name}/2").body
- [doc] = save_docs(src_db_name, [Map.put(doc, :value, "white")])
-
- copy = Couch.get!("/#{tgt_db_name}/2").body
- save_docs(tgt_db_name, [Map.put(copy, :value, "black")])
-
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
- assert result["ok"]
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- assert is_list(result["history"])
- assert length(result["history"]) == 4
- history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 29
- assert history["missing_found"] == 29
- assert history["docs_read"] == 29
- assert history["docs_written"] == 29
- assert history["doc_write_failures"] == 0
-
- copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
- assert String.match?(copy["_rev"], ~r/^2-/)
- assert is_list(copy["_conflicts"])
- assert length(copy["_conflicts"]) == 1
- conflict = Enum.at(copy["_conflicts"], 0)
- assert String.match?(conflict, ~r/^2-/)
-
- # Re-replicate updated conflict
- [doc] = save_docs(src_db_name, [Map.put(doc, :value, "yellow")])
-
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
- assert result["ok"]
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- assert is_list(result["history"])
- assert length(result["history"]) == 5
- history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 30
- assert history["missing_found"] == 30
- assert history["docs_read"] == 30
- assert history["docs_written"] == 30
- assert history["doc_write_failures"] == 0
-
- copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
- assert String.match?(copy["_rev"], ~r/^3-/)
- assert is_list(copy["_conflicts"])
- assert length(copy["_conflicts"]) == 1
- conflict = Enum.at(copy["_conflicts"], 0)
- assert String.match?(conflict, ~r/^2-/)
-
- # Resolve the conflict and re-replicate new revision
- resolve_doc = %{"_id" => "2", "_rev" => conflict, "_deleted" => true}
- save_docs(tgt_db_name, [resolve_doc])
- save_docs(src_db_name, [Map.put(doc, :value, "rainbow")])
-
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
- assert result["ok"]
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- assert is_list(result["history"])
- assert length(result["history"]) == 6
- history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 31
- assert history["missing_found"] == 31
- assert history["docs_read"] == 31
- assert history["docs_written"] == 31
- assert history["doc_write_failures"] == 0
-
- copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
-
- assert String.match?(copy["_rev"], ~r/^4-/)
- assert not Map.has_key?(copy, "_conflicts")
-
- # Test that existing revisions are not replicated
- src_docs = [
- %{"_id" => "foo1", "value" => 111},
- %{"_id" => "foo2", "value" => 222},
- %{"_id" => "foo3", "value" => 333}
- ]
-
- save_docs(src_db_name, src_docs)
- save_docs(tgt_db_name, Enum.filter(src_docs, &(&1["_id"] != "foo2")))
-
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
- assert result["ok"]
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- assert is_list(result["history"])
- assert length(result["history"]) == 7
- history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 34
- assert history["missing_found"] == 32
- assert history["docs_read"] == 32
- assert history["docs_written"] == 32
- assert history["doc_write_failures"] == 0
-
- docs = [
- %{"_id" => "foo4", "value" => 444},
- %{"_id" => "foo5", "value" => 555}
- ]
-
- save_docs(src_db_name, docs)
- save_docs(tgt_db_name, docs)
-
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
- assert result["ok"]
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- assert is_list(result["history"])
- assert length(result["history"]) == 8
- history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 36
- assert history["missing_found"] == 32
- assert history["docs_read"] == 32
- assert history["docs_written"] == 32
- assert history["doc_write_failures"] == 0
-
- # Test nothing to replicate
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
- assert result["ok"]
- assert result["no_changes"]
- end
-
- def run_since_seq_repl(src_prefix, tgt_prefix) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- docs = make_docs(1..5)
- docs = save_docs(src_db_name, docs)
-
- changes = get_db_changes(src_db_name)["results"]
- since_seq = Enum.at(changes, 2)["seq"]
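- # Starting from the sequence of the third change means only the last two
- # of the five docs should be replicated.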
-
- # TODO: In JS we re-fetch _changes with since_seq, is that
- # really necessary?
- expected_ids =
- for change <- Enum.drop(changes, 3) do
- change["id"]
- end
-
- assert length(expected_ids) == 2
-
- cancel_replication(repl_src, repl_tgt)
- result = replicate(repl_src, repl_tgt, body: %{:since_seq => since_seq})
- cancel_replication(repl_src, repl_tgt)
-
- assert result["ok"]
- assert is_list(result["history"])
- history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 2
- assert history["missing_found"] == 2
- assert history["docs_read"] == 2
- assert history["docs_written"] == 2
- assert history["doc_write_failures"] == 0
-
- Enum.each(docs, fn doc ->
- result = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
-
- if Enum.member?(expected_ids, doc["_id"]) do
- assert result.status_code < 300
- assert cmp_json(doc, result.body)
- else
- assert result.status_code == 404
- end
- end)
- end
-
- def run_vdu_repl(src_prefix, tgt_prefix) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- docs = make_docs(1..7)
-
- docs =
- for doc <- docs do
- if doc["integer"] == 2 do
- Map.put(doc, "_attachments", %{
- "hello.txt" => %{
- :content_type => "text/plain",
- # base64:encode("hello world")
- :data => "aGVsbG8gd29ybGQ="
- }
- })
- else
- doc
- end
- end
-
- docs = save_docs(src_db_name, docs)
-
- ddoc = %{
- "_id" => "_design/test",
- "language" => "javascript",
- "validate_doc_update" => """
- function(newDoc, oldDoc, userCtx, secObj) {
- if((newDoc.integer % 2) !== 0) {
- throw {forbidden: "I only like multiples of 2."};
- }
- }
- """
- }
-
- [_] = save_docs(tgt_db_name, [ddoc])
-
- result = replicate(repl_src, repl_tgt)
- assert result["ok"]
-
- assert is_list(result["history"])
- history = Enum.at(result["history"], 0)
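- # The target's VDU only accepts docs with an even integer, so docs 2, 4
- # and 6 are written and the four odd ones fail.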
- assert history["missing_checked"] == 7
- assert history["missing_found"] == 7
- assert history["docs_read"] == 7
- assert history["docs_written"] == 3
- assert history["doc_write_failures"] == 4
-
- for doc <- docs do
- result = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
-
- if rem(doc["integer"], 2) == 0 do
- assert result.status_code < 300
- assert result.body["integer"] == doc["integer"]
- else
- assert result.status_code == 404
- end
- end
- end
-
- def run_create_target_repl(src_prefix, tgt_prefix) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
- # tgt_db_name is created by the replication
-
- docs = make_docs(1..2)
- save_docs(src_db_name, docs)
-
- replicate(repl_src, repl_tgt, body: %{:create_target => true})
-
- retry_until(fn ->
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- src_shards = seq_to_shards(src_info["update_seq"])
- tgt_shards = seq_to_shards(tgt_info["update_seq"])
- assert tgt_shards == src_shards
- end)
- end
-
- def run_filtered_repl(src_prefix, tgt_prefix) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- docs = make_docs(1..30)
-
- ddoc = %{
- "_id" => "_design/mydesign",
- "language" => "javascript",
- "filters" => %{
- "myfilter" => """
- function(doc, req) {
- var modulus = Number(req.query.modulus);
- var special = req.query.special;
- return (doc.integer % modulus === 0) || (doc.string === special);
- }
- """
- }
- }
-
- [_ | docs] = save_docs(src_db_name, [ddoc | docs])
-
- repl_body = %{
- "filter" => "mydesign/myfilter",
- "query_params" => %{
- "modulus" => "2",
- "special" => "7"
- }
- }
-
- result = replicate(repl_src, repl_tgt, body: repl_body)
- assert result["ok"]
-
- Enum.each(docs, fn doc ->
- resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
-
- if rem(doc["integer"], 2) == 0 || doc["string"] == "7" do
- assert resp.status_code < 300
- assert cmp_json(doc, resp.body)
- else
- assert resp.status_code == 404
- end
- end)
-
- assert is_list(result["history"])
- assert length(result["history"]) == 1
- history = Enum.at(result["history"], 0)
-
- # We (incorrectly) don't record update sequences for documents that
- # don't pass the changes feed filter. Historically the last document
- # to pass was the second-to-last doc, which had an update sequence of
- # 30. Work done to keep duplicate IDs from breaking _bulk_docs updates
- # added a sort to that logic, which changes this: now the last document
- # to pass has a doc id of "8" and is at update_seq 29 (because only
- # "9" and the design doc come after it).
- #
- # In the future the fix ought to be that we record the update sequence
- # of the database. BigCouch has some existing work on this in the
- # clustered case, because if very few documents pass the filter then
- # (given single-node behavior) you end up rescanning a large portion
- # of the database.
- #
- # We also can't rely on sequences in a cluster: the same number can
- # appear more than once (at least for n > 1), and sequences now carry
- # hashes, so comparing seq == 29 would be a lottery (and cutting the
- # hashes off makes no sense). The JS test brute-force compared all
- # attributes of all docs; here we instead verify that excluded docs
- # did NOT make it across in any way, and leave the sequence check out.
-
- # 16 => 15 docs with even integer field + 1 doc with string field "7"
- assert history["missing_checked"] == 16
- assert history["missing_found"] == 16
- assert history["docs_read"] == 16
- assert history["docs_written"] == 16
- assert history["doc_write_failures"] == 0
-
- new_docs = make_docs(50..55)
- new_docs = save_docs(src_db_name, new_docs)
-
- result = replicate(repl_src, repl_tgt, body: repl_body)
- assert result["ok"]
-
- Enum.each(new_docs, fn doc ->
- resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
-
- if rem(doc["integer"], 2) == 0 do
- assert resp.status_code < 300
- assert cmp_json(doc, resp.body)
- else
- assert resp.status_code == 404
- end
- end)
-
- assert is_list(result["history"])
- assert length(result["history"]) == 2
- history = Enum.at(result["history"], 0)
-
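- # 19 => the previous 16 plus the three new docs (50, 52 and 54) with an
- # even integer field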
- assert history["missing_checked"] == 19
- assert history["missing_found"] == 19
- assert history["docs_read"] == 19
- assert history["docs_written"] == 19
- assert history["doc_write_failures"] == 0
- end
-
- def run_filter_changed_repl(src_prefix, tgt_prefix) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- filter_fun_1 = """
- function(doc, req) {
- if(doc.value < Number(req.query.maxvalue)) {
- return true;
- } else {
- return false;
- }
- }
- """
-
- filter_fun_2 = """
- function(doc, req) {
- return true;
- }
- """
-
- docs = [
- %{"_id" => "foo1", "value" => 1},
- %{"_id" => "foo2", "value" => 2},
- %{"_id" => "foo3", :value => 3},
- %{"_id" => "foo4", :value => 4}
- ]
-
- ddoc = %{
- "_id" => "_design/mydesign",
- :language => "javascript",
- :filters => %{
- :myfilter => filter_fun_1
- }
- }
-
- [ddoc | _] = save_docs(src_db_name, [ddoc | docs])
-
- repl_body = %{
- :filter => "mydesign/myfilter",
- :query_params => %{
- :maxvalue => "3"
- }
- }
-
- result = replicate(repl_src, repl_tgt, body: repl_body)
- assert result["ok"]
-
- assert is_list(result["history"])
- assert length(result["history"]) == 1
- history = Enum.at(result["history"], 0)
- assert history["docs_read"] == 2
- assert history["docs_written"] == 2
- assert history["doc_write_failures"] == 0
-
- resp = Couch.get!("/#{tgt_db_name}/foo1")
- assert HTTPotion.Response.success?(resp)
- assert resp.body["value"] == 1
-
- resp = Couch.get!("/#{tgt_db_name}/foo2")
- assert HTTPotion.Response.success?(resp)
- assert resp.body["value"] == 2
-
- resp = Couch.get!("/#{tgt_db_name}/foo3")
- assert resp.status_code == 404
-
- resp = Couch.get!("/#{tgt_db_name}/foo4")
- assert resp.status_code == 404
-
- # Replication should start from scratch after the filter's code changed
- ddoc = Map.put(ddoc, :filters, %{:myfilter => filter_fun_2})
- [_] = save_docs(src_db_name, [ddoc])
-
- result = replicate(repl_src, repl_tgt, body: repl_body)
- assert result["ok"]
-
- assert is_list(result["history"])
- assert length(result["history"]) == 1
- history = Enum.at(result["history"], 0)
- assert history["docs_read"] == 3
- assert history["docs_written"] == 3
- assert history["doc_write_failures"] == 0
-
- resp = Couch.get!("/#{tgt_db_name}/foo1")
- assert HTTPotion.Response.success?(resp)
- assert resp.body["value"] == 1
-
- resp = Couch.get!("/#{tgt_db_name}/foo2")
- assert HTTPotion.Response.success?(resp)
- assert resp.body["value"] == 2
-
- resp = Couch.get!("/#{tgt_db_name}/foo3")
- assert HTTPotion.Response.success?(resp)
- assert resp.body["value"] == 3
-
- resp = Couch.get!("/#{tgt_db_name}/foo4")
- assert HTTPotion.Response.success?(resp)
- assert resp.body["value"] == 4
-
- resp = Couch.get!("/#{tgt_db_name}/_design/mydesign")
- assert HTTPotion.Response.success?(resp)
- end
-
- def run_by_id_repl(src_prefix, tgt_prefix) do
- target_doc_ids = [
- %{
- :initial => ["1", "2", "10"],
- :after => [],
- :conflict_id => "2"
- },
- %{
- :initial => ["1", "2"],
- :after => ["7"],
- :conflict_id => "1"
- },
- %{
- :initial => ["1", "foo_666", "10"],
- :after => ["7"],
- :conflict_id => "10"
- },
- %{
- :initial => ["_design/foo", "8"],
- :after => ["foo_5"],
- :conflict_id => "8"
- },
- %{
- :initial => ["_design%2Ffoo", "8"],
- :after => ["foo_5"],
- :conflict_id => "8"
- },
- %{
- :initial => [],
- :after => ["foo_1000", "_design/foo", "1"],
- :conflict_id => "1"
- }
- ]
-
- Enum.each(target_doc_ids, fn test_data ->
- run_by_id_repl_impl(src_prefix, tgt_prefix, test_data)
- end)
- end
-
- def run_by_id_repl_impl(src_prefix, tgt_prefix, test_data) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- retry_until(fn ->
- create_db(src_db_name)
- create_db(tgt_db_name)
- end)
-
- delete_on_exit([src_db_name, tgt_db_name])
-
- docs = make_docs(1..10)
-
- ddoc = %{
- "_id" => "_design/foo",
- :language => "javascript",
- "integer" => 1
- }
-
- doc_ids = test_data[:initial]
-
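- # Doc ids starting with "foo_" do not exist in the source database, so
- # they are expected to be skipped.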
- num_missing =
- Enum.count(doc_ids, fn doc_id ->
- String.starts_with?(doc_id, "foo_")
- end)
-
- total_replicated = length(doc_ids) - num_missing
-
- [_ | docs] = save_docs(src_db_name, [ddoc | docs])
-
- repl_body = %{:doc_ids => doc_ids}
- result = replicate(repl_src, repl_tgt, body: repl_body)
- assert result["ok"]
-
- if total_replicated == 0 do
- assert result["no_changes"]
- else
- assert is_binary(result["start_time"])
- assert is_binary(result["end_time"])
- assert result["docs_read"] == total_replicated
- assert result["docs_written"] == total_replicated
- assert result["doc_write_failures"] == 0
- end
-
- Enum.each(doc_ids, fn doc_id ->
- doc_id = URI.decode(doc_id)
- orig = Couch.get!("/#{src_db_name}/#{doc_id}")
- copy = Couch.get!("/#{tgt_db_name}/#{doc_id}")
-
- if String.starts_with?(doc_id, "foo_") do
- assert orig.status_code == 404
- assert copy.status_code == 404
- else
- assert HTTPotion.Response.success?(orig)
- assert HTTPotion.Response.success?(copy)
- assert cmp_json(orig.body, copy.body)
- end
- end)
-
- # Be absolutely sure that other docs were not replicated
- Enum.each(docs, fn doc ->
- encoded_id = URI.encode_www_form(doc["_id"])
- copy = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
- is_doc_id = &Enum.member?(doc_ids, &1)
-
- if is_doc_id.(doc["_id"]) or is_doc_id.(encoded_id) do
- assert HTTPotion.Response.success?(copy)
- else
- assert copy.status_code == 404
- end
- end)
-
- retry_until(fn ->
- tgt_info = get_db_info(tgt_db_name)
- assert tgt_info["doc_count"] == total_replicated
- end)
-
- doc_ids_after = test_data[:after]
-
- num_missing_after =
- Enum.count(doc_ids_after, fn doc_id ->
- String.starts_with?(doc_id, "foo_")
- end)
-
- repl_body = %{:doc_ids => doc_ids_after}
- result = replicate(repl_src, repl_tgt, body: repl_body)
- assert result["ok"]
-
- total_replicated_after = length(doc_ids_after) - num_missing_after
-
- if total_replicated_after == 0 do
- assert result["no_changes"]
- else
- assert is_binary(result["start_time"])
- assert is_binary(result["end_time"])
- assert result["docs_read"] == total_replicated_after
- assert result["docs_written"] == total_replicated_after
- assert result["doc_write_failures"] == 0
- end
-
- Enum.each(doc_ids_after, fn doc_id ->
- orig = Couch.get!("/#{src_db_name}/#{doc_id}")
- copy = Couch.get!("/#{tgt_db_name}/#{doc_id}")
-
- if String.starts_with?(doc_id, "foo_") do
- assert orig.status_code == 404
- assert copy.status_code == 404
- else
- assert HTTPotion.Response.success?(orig)
- assert HTTPotion.Response.success?(copy)
- assert cmp_json(orig.body, copy.body)
- end
- end)
-
- # Be absolutely sure that other docs were not replicated
- all_doc_ids = doc_ids ++ doc_ids_after
-
- Enum.each(docs, fn doc ->
- encoded_id = URI.encode_www_form(doc["_id"])
- copy = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
- is_doc_id = &Enum.member?(all_doc_ids, &1)
-
- if is_doc_id.(doc["_id"]) or is_doc_id.(encoded_id) do
- assert HTTPotion.Response.success?(copy)
- else
- assert copy.status_code == 404
- end
- end)
-
- retry_until(fn ->
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == total_replicated + total_replicated_after,
- "#{inspect(test_data)}"
- end)
-
- # Update a source document and re-replicate (no conflict introduced)
- conflict_id = test_data[:conflict_id]
- doc = Couch.get!("/#{src_db_name}/#{conflict_id}").body
- assert is_map(doc)
- doc = Map.put(doc, "integer", 666)
- [doc] = save_docs(src_db_name, [doc])
-
- att1 = [
- name: "readme.txt",
- body: get_att1_data(),
- content_type: "text/plain"
- ]
-
- att2 = [
- name: "data.dat",
- body: get_att2_data(),
- content_type: "application/binary"
- ]
-
- doc = add_attachment(src_db_name, doc, att1)
- doc = add_attachment(src_db_name, doc, att2)
-
- repl_body = %{:doc_ids => [conflict_id]}
- result = replicate(repl_src, repl_tgt, body: repl_body)
- assert result["ok"]
-
- assert result["docs_read"] == 1
- assert result["docs_written"] == 1
- assert result["doc_write_failures"] == 0
-
- query = %{"conflicts" => "true"}
- copy = Couch.get!("/#{tgt_db_name}/#{conflict_id}", query: query)
- assert HTTPotion.Response.success?(copy)
- assert copy.body["integer"] == 666
- assert String.starts_with?(copy.body["_rev"], "4-")
- assert not Map.has_key?(copy.body, "_conflicts")
-
- atts = copy.body["_attachments"]
- assert is_map(atts)
- assert is_map(atts["readme.txt"])
- assert atts["readme.txt"]["revpos"] == 3
- assert String.match?(atts["readme.txt"]["content_type"], ~r/text\/plain/)
- assert atts["readme.txt"]["stub"]
-
- att1_data = Couch.get!("/#{tgt_db_name}/#{conflict_id}/readme.txt").body
- assert String.length(att1_data) == String.length(att1[:body])
- assert att1_data == att1[:body]
-
- assert is_map(atts["data.dat"])
- assert atts["data.dat"]["revpos"] == 4
- ct_re = ~r/application\/binary/
- assert String.match?(atts["data.dat"]["content_type"], ct_re)
- assert atts["data.dat"]["stub"]
-
- att2_data = Couch.get!("/#{tgt_db_name}/#{conflict_id}/data.dat").body
- assert String.length(att2_data) == String.length(att2[:body])
- assert att2_data == att2[:body]
-
- # Generate a conflict using replication by doc ids
- orig = Couch.get!("/#{src_db_name}/#{conflict_id}").body
- orig = Map.update!(orig, "integer", &(&1 + 100))
- [_] = save_docs(src_db_name, [orig])
-
- copy = Couch.get!("/#{tgt_db_name}/#{conflict_id}").body
- copy = Map.update!(copy, "integer", &(&1 + 1))
- [_] = save_docs(tgt_db_name, [copy])
-
- result = replicate(repl_src, repl_tgt, body: repl_body)
- assert result["ok"]
- assert result["docs_read"] == 2
- assert result["docs_written"] == 2
- assert result["doc_write_failures"] == 0
-
- retry_until(fn ->
- copy = Couch.get!("/#{tgt_db_name}/#{conflict_id}", query: query).body
- assert String.match?(copy["_rev"], ~r/^5-/)
- assert is_list(copy["_conflicts"])
- assert length(copy["_conflicts"]) == 1
- conflict_rev = Enum.at(copy["_conflicts"], 0)
- assert String.match?(conflict_rev, ~r/^5-/)
- end)
- end
-
- def run_continuous_repl(src_prefix, tgt_prefix) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- ddoc = %{
- "_id" => "_design/mydesign",
- "language" => "javascript",
- "filters" => %{
- "myfilter" => "function(doc, req) { return true; }"
- }
- }
-
- docs = make_docs(1..25)
- docs = save_docs(src_db_name, docs ++ [ddoc])
-
- att1_data = get_att1_data()
-
- docs =
- for doc <- docs do
- if doc["integer"] >= 10 and doc["integer"] < 15 do
- add_attachment(src_db_name, doc)
- else
- doc
- end
- end
-
- repl_body = %{:continuous => true}
- result = replicate(repl_src, repl_tgt, body: repl_body)
-
- assert result["ok"]
- assert is_binary(result["_local_id"])
-
- repl_id = result["_local_id"]
- task = get_task(repl_id, 30_000)
- assert is_map(task), "Error waiting for replication to start"
-
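- # 26 = the 25 numbered docs plus the design doc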
- wait_for_repl(src_db_name, repl_id, 26)
-
- Enum.each(docs, fn doc ->
- resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
- assert resp.status_code < 300
- assert cmp_json(doc, resp.body)
-
- if doc["integer"] >= 10 and doc["integer"] < 15 do
- atts = resp.body["_attachments"]
- assert is_map(atts)
- att = atts["readme.txt"]
- assert is_map(att)
- assert att["revpos"] == 2
- assert String.match?(att["content_type"], ~r/text\/plain/)
- assert att["stub"]
-
- resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}/readme.txt")
- assert String.length(resp.body) == String.length("some text")
- assert resp.body == "some text"
- end
- end)
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- # Add attachments to more source docs
- docs =
- for doc <- docs do
- is_ddoc = String.starts_with?(doc["_id"], "_design/")
-
- case doc["integer"] do
- n when n >= 10 and n < 15 ->
- ctype = "application/binary"
- opts = [name: "data.dat", body: att1_data, content_type: ctype]
- add_attachment(src_db_name, doc, opts)
-
- _ when is_ddoc ->
- add_attachment(src_db_name, doc)
-
- _ ->
- doc
- end
- end
-
- wait_for_repl(src_db_name, repl_id, 32)
-
- Enum.each(docs, fn doc ->
- is_ddoc = String.starts_with?(doc["_id"], "_design/")
-
- case doc["integer"] do
- n when (n >= 10 and n < 15) or is_ddoc ->
- resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
- atts = resp.body["_attachments"]
- assert is_map(atts)
- att = atts["readme.txt"]
- assert is_map(att)
- assert att["revpos"] == 2
- assert String.match?(att["content_type"], ~r/text\/plain/)
- assert att["stub"]
-
- resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}/readme.txt")
- assert String.length(resp.body) == String.length("some text")
- assert resp.body == "some text"
-
- if not is_ddoc do
- att = atts["data.dat"]
- assert is_map(att)
- assert att["revpos"] == 3
- assert String.match?(att["content_type"], ~r/application\/binary/)
- assert att["stub"]
-
- resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}/data.dat")
- assert String.length(resp.body) == String.length(att1_data)
- assert resp.body == att1_data
- end
-
- _ ->
- :ok
- end
- end)
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- ddoc = List.last(docs)
- ctype = "application/binary"
- opts = [name: "data.dat", body: att1_data, content_type: ctype]
- add_attachment(src_db_name, ddoc, opts)
-
- wait_for_repl(src_db_name, repl_id, 33)
-
- resp = Couch.get("/#{tgt_db_name}/#{ddoc["_id"]}")
- atts = resp.body["_attachments"]
- assert is_map(atts)
- att = atts["readme.txt"]
- assert is_map(att)
- assert att["revpos"] == 2
- assert String.match?(att["content_type"], ~r/text\/plain/)
- assert att["stub"]
-
- resp = Couch.get!("/#{tgt_db_name}/#{ddoc["_id"]}/readme.txt")
- assert String.length(resp.body) == String.length("some text")
- assert resp.body == "some text"
-
- att = atts["data.dat"]
- assert is_map(att)
- assert att["revpos"] == 3
- assert String.match?(att["content_type"], ~r/application\/binary/)
- assert att["stub"]
-
- resp = Couch.get!("/#{tgt_db_name}/#{ddoc["_id"]}/data.dat")
- assert String.length(resp.body) == String.length(att1_data)
- assert resp.body == att1_data
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- # Check creating new normal documents
- new_docs = make_docs(26..35)
- new_docs = save_docs(src_db_name, new_docs)
-
- wait_for_repl(src_db_name, repl_id, 43)
-
- Enum.each(new_docs, fn doc ->
- resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
- assert resp.status_code < 300
- assert cmp_json(doc, resp.body)
- end)
-
- src_info = get_db_info(src_db_name)
- tgt_info = get_db_info(tgt_db_name)
-
- assert tgt_info["doc_count"] == src_info["doc_count"]
-
- # Delete docs from the source
-
- doc1 = Enum.at(new_docs, 0)
- query = %{:rev => doc1["_rev"]}
- Couch.delete!("/#{src_db_name}/#{doc1["_id"]}", query: query)
-
- doc2 = Enum.at(new_docs, 6)
- query = %{:rev => doc2["_rev"]}
- Couch.delete!("/#{src_db_name}/#{doc2["_id"]}", query: query)
-
- wait_for_repl(src_db_name, repl_id, 45)
-
- resp = Couch.get("/#{tgt_db_name}/#{doc1["_id"]}")
- assert resp.status_code == 404
- resp = Couch.get("/#{tgt_db_name}/#{doc2["_id"]}")
- assert resp.status_code == 404
-
- changes = get_db_changes(tgt_db_name, %{:since => tgt_info["update_seq"]})
- # Quite unfortunately, there is no way of relying on ordering in a
- # cluster, but we can assume a length of 2.
- changes =
- for change <- changes["results"] do
- {change["id"], change["deleted"]}
- end
-
- assert Enum.sort(changes) == [{doc1["_id"], true}, {doc2["_id"], true}]
-
- # Cancel the replication
- repl_body = %{:continuous => true, :cancel => true}
- resp = replicate(repl_src, repl_tgt, body: repl_body)
- assert resp["ok"]
- assert resp["_local_id"] == repl_id
-
- doc = %{"_id" => "foobar", "value" => 666}
- [doc] = save_docs(src_db_name, [doc])
-
- wait_for_repl_stop(repl_id, 30_000)
-
- resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
- assert resp.status_code == 404
- end
-
- def run_compressed_att_repl(src_prefix, tgt_prefix) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- doc = %{"_id" => "foobar"}
- [doc] = save_docs(src_db_name, [doc])
-
- att1_data = get_att1_data()
- num_copies = 1 + round(128 * 1024 / String.length(att1_data))
-
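- # Repeat the lorem text enough times to build an attachment just over
- # 128 KiB.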
- big_att =
- List.foldl(Enum.to_list(1..num_copies), "", fn _, acc ->
- acc <> att1_data
- end)
-
- doc = add_attachment(src_db_name, doc, body: big_att)
-
- # Disable attachment compression
- set_config_raw("attachments", "compression_level", "0")
-
- result = replicate(repl_src, repl_tgt)
- assert result["ok"]
- assert is_list(result["history"])
- assert length(result["history"]) == 1
- history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 1
- assert history["missing_found"] == 1
- assert history["docs_read"] == 1
- assert history["docs_written"] == 1
- assert history["doc_write_failures"] == 0
-
- token = Enum.random(1..1_000_000)
- query = %{att_encoding_info: true, bypass_cache: token}
- resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}", query: query)
- assert resp.status_code < 300
- assert is_map(resp.body["_attachments"])
- att = resp.body["_attachments"]["readme.txt"]
- assert att["encoding"] == "gzip"
- assert is_integer(att["length"])
- assert is_integer(att["encoded_length"])
- assert att["encoded_length"] < att["length"]
- end
-
- def run_non_admin_target_user_repl(src_prefix, tgt_prefix, ctx) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- set_security(tgt_db_name, %{
- :admins => %{
- :names => ["superman"],
- :roles => ["god"]
- }
- })
-
- docs = make_docs(1..6)
- ddoc = %{"_id" => "_design/foo", "language" => "javascript"}
- docs = save_docs(src_db_name, [ddoc | docs])
-
- sess = Couch.login(ctx[:userinfo])
- resp = Couch.Session.get(sess, "/_session")
- assert resp.body["ok"]
- assert resp.body["userCtx"]["name"] == "joe"
-
- opts = [
- userinfo: ctx[:userinfo],
- headers: [cookie: sess.cookie]
- ]
-
- result = replicate(repl_src, repl_tgt, opts)
-
- assert Couch.Session.logout(sess).body["ok"]
-
- assert result["ok"]
- history = Enum.at(result["history"], 0)
- assert history["docs_read"] == length(docs)
- # ddoc write failed
- assert history["docs_written"] == length(docs) - 1
- # ddoc write failed
- assert history["doc_write_failures"] == 1
-
- Enum.each(docs, fn doc ->
- resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
-
- if String.starts_with?(doc["_id"], "_design/") do
- assert resp.status_code == 404
- else
- assert HTTPotion.Response.success?(resp)
- assert cmp_json(doc, resp.body)
- end
- end)
- end
-
- def run_non_admin_or_reader_source_user_repl(src_prefix, tgt_prefix, ctx) do
- base_db_name = random_db_name()
- src_db_name = base_db_name <> "_src"
- tgt_db_name = base_db_name <> "_tgt"
- repl_src = src_prefix <> src_db_name
- repl_tgt = tgt_prefix <> tgt_db_name
-
- create_db(src_db_name)
- create_db(tgt_db_name)
- delete_on_exit([src_db_name, tgt_db_name])
-
- set_security(tgt_db_name, %{
- :admins => %{
- :names => ["superman"],
- :roles => ["god"]
- },
- :readers => %{
- :names => ["john"],
- :roles => ["secret"]
- }
- })
-
- docs = make_docs(1..6)
- ddoc = %{"_id" => "_design/foo", "language" => "javascript"}
- docs = save_docs(src_db_name, [ddoc | docs])
-
- sess = Couch.login(ctx[:userinfo])
- resp = Couch.Session.get(sess, "/_session")
- assert resp.body["ok"]
- assert resp.body["userCtx"]["name"] == "joe"
-
- opts = [
- userinfo: ctx[:userinfo],
- headers: [cookie: sess.cookie]
- ]
-
- assert_raise(ExUnit.AssertionError, fn ->
- replicate(repl_src, repl_tgt, opts)
- end)
-
- assert Couch.Session.logout(sess).body["ok"]
-
- Enum.each(docs, fn doc ->
- resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
- assert resp.status_code == 404
- end)
- end
-
- def get_db_info(db_name) do
- resp = Couch.get("/#{db_name}")
- assert HTTPotion.Response.success?(resp)
- resp.body
- end
-
- def cancel_replication(src, tgt) do
- body = %{:cancel => true}
-
- try do
- replicate(src, tgt, body: body)
- rescue
- ExUnit.AssertionError -> :ok
- end
- end
-
- def get_db_changes(db_name, query \\ %{}) do
- resp = Couch.get("/#{db_name}/_changes", query: query)
- assert HTTPotion.Response.success?(resp), "#{inspect(resp)} #{inspect(query)}"
- resp.body
- end
-
- def save_docs(db_name, docs) do
- query = %{w: 3}
- body = %{docs: docs}
- resp = Couch.post("/#{db_name}/_bulk_docs", query: query, body: body)
- assert HTTPotion.Response.success?(resp)
-
- for {doc, resp} <- Enum.zip(docs, resp.body) do
- assert resp["ok"], "Error saving doc: #{doc["_id"]}"
- Map.put(doc, "_rev", resp["rev"])
- end
- end
-
- def set_security(db_name, sec_props) do
- resp = Couch.put("/#{db_name}/_security", body: :jiffy.encode(sec_props))
- assert HTTPotion.Response.success?(resp)
- assert resp.body["ok"]
- end
-
- def add_attachment(db_name, doc, att \\ []) do
- defaults = [
- name: <<"readme.txt">>,
- body: <<"some text">>,
- content_type: "text/plain"
- ]
-
- att = defaults |> Keyword.merge(att) |> Enum.into(%{})
- uri = "/#{db_name}/#{URI.encode(doc["_id"])}/#{att[:name]}"
- headers = ["Content-Type": att[:content_type]]
-
- params =
- if doc["_rev"] do
- %{:w => 3, :rev => doc["_rev"]}
- else
- %{:w => 3}
- end
-
- retry_until(fn ->
- resp = Couch.put(uri, headers: headers, query: params, body: att[:body])
- assert HTTPotion.Response.success?(resp)
- Map.put(doc, "_rev", resp.body["rev"])
- end)
- end
-
- def wait_for_repl(src_db_name, repl_id, expect_revs_checked) do
- wait_for_repl(src_db_name, repl_id, expect_revs_checked, 30_000)
- end
-
- def wait_for_repl(_, _, _, wait_left) when wait_left <= 0 do
- assert false, "Timeout waiting for replication"
- end
-
- def wait_for_repl(src_db_name, repl_id, expect_revs_checked, wait_left) do
- task = get_task(repl_id, 0)
- through_seq = task["through_seq"] || "0"
- revs_checked = task["revisions_checked"]
- changes = get_db_changes(src_db_name, %{:since => through_seq})
-
- if length(changes["results"]) > 0 or revs_checked < expect_revs_checked do
- :timer.sleep(500)
- wait_for_repl(src_db_name, repl_id, expect_revs_checked, wait_left - 500)
- end
-
- task
- end
-
- def wait_for_repl_stop(repl_id) do
- wait_for_repl_stop(repl_id, 30_000)
- end
-
- def wait_for_repl_stop(repl_id, wait_left) when wait_left <= 0 do
- assert false, "Timeout waiting for replication task to stop: #{repl_id}"
- end
-
- def wait_for_repl_stop(repl_id, wait_left) do
- task = get_task(repl_id, 0)
-
- if is_map(task) do
- :timer.sleep(500)
- wait_for_repl_stop(repl_id, wait_left - 500)
- end
- end
-
- def get_last_seq(db_name) do
- body = get_db_changes(db_name, %{:since => "now"})
- body["last_seq"]
- end
-
- def get_task(repl_id, delay) when delay <= 0 do
- try_get_task(repl_id)
- end
-
- def get_task(repl_id, delay) do
- case try_get_task(repl_id) do
- result when is_map(result) ->
- result
-
- _ ->
- :timer.sleep(500)
- get_task(repl_id, delay - 500)
- end
- end
-
- def try_get_task(repl_id) do
- resp = Couch.get("/_active_tasks")
- assert HTTPotion.Response.success?(resp)
- assert is_list(resp.body)
-
- Enum.find(resp.body, nil, fn task ->
- task["replication_id"] == repl_id
- end)
- end
-
- def get_att1_data do
- File.read!(Path.expand("data/lorem.txt", __DIR__))
- end
-
- def get_att2_data do
- File.read!(Path.expand("data/lorem_b64.txt", __DIR__))
- end
-
- def cmp_json(lhs, rhs) when is_map(lhs) and is_map(rhs) do
- Enum.reduce_while(lhs, true, fn {k, v}, true ->
- if Map.has_key?(rhs, k) do
- if cmp_json(v, rhs[k]) do
- {:cont, true}
- else
- Logger.error("#{inspect(lhs)} != #{inspect(rhs)}")
- {:halt, false}
- end
- else
- Logger.error("#{inspect(lhs)} != #{inspect(rhs)}")
- {:halt, false}
- end
- end)
- end
-
- def cmp_json(lhs, rhs), do: lhs == rhs
-
- def seq_to_shards(seq) do
- for {_node, range, {seq_num, _uuid, _epoch}} <- decode_seq(seq) do
- {range, seq_num}
- end
- end
-
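- # Cluster update sequences look like "<n>-<base64url-encoded term>"; strip
- # the numeric prefix and decode the remainder back into an Erlang term.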
- def decode_seq(seq) do
- seq = String.replace(seq, ~r/\d+-/, "", global: false)
- :erlang.binary_to_term(Base.url_decode64!(seq, padding: false))
- end
-
- def delete_on_exit(db_names) when is_list(db_names) do
- on_exit(fn ->
- Enum.each(db_names, fn name ->
- delete_db(name)
- end)
- end)
- end
-end
diff --git a/test/elixir/test/reshard_all_docs_test.exs b/test/elixir/test/reshard_all_docs_test.exs
deleted file mode 100644
index ab8c6b75b..000000000
--- a/test/elixir/test/reshard_all_docs_test.exs
+++ /dev/null
@@ -1,79 +0,0 @@
-defmodule ReshardAllDocsTest do
- use CouchTestCase
- import ReshardHelpers
-
- @moduledoc """
- Test _all_docs interaction with resharding
- """
-
- setup do
- db = random_db_name()
- {:ok, _} = create_db(db, query: %{q: 2})
-
- on_exit(fn ->
- reset_reshard_state()
- delete_db(db)
- end)
-
- {:ok, [db: db]}
- end
-
- test "all_docs after splitting all shards on node1", context do
- db = context[:db]
- node1 = get_first_node()
- docs = add_docs(1..100, db)
-
- before_split_all_docs = all_docs(db)
- assert docs == before_split_all_docs
-
- resp = post_job_node(db, node1)
- assert resp.status_code in [201, 202]
- jobid = hd(resp.body)["id"]
- wait_job_completed(jobid)
-
- assert before_split_all_docs == all_docs(db)
-
- assert remove_job(jobid).status_code == 200
- end
-
- test "all_docs after splitting the same range on all nodes", context do
- db = context[:db]
- docs = add_docs(1..100, db)
-
- before_split_all_docs = all_docs(db)
- assert docs == before_split_all_docs
-
- resp = post_job_range(db, "00000000-7fffffff")
- assert resp.status_code in [201, 202]
-
- resp.body
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> wait_job_completed(id) end)
-
- assert before_split_all_docs == all_docs(db)
-
- get_jobs()
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> remove_job(id) end)
- end
-
- defp add_docs(range, db) do
- docs = create_docs(range)
- w3 = %{:w => 3}
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs}, query: w3)
- assert resp.status_code in [201, 202]
- assert length(resp.body) == length(docs)
-
- docs
- |> rev(resp.body)
- |> Enum.into(%{}, fn %{:_id => id, :_rev => rev} -> {id, rev} end)
- end
-
- defp all_docs(db, query \\ %{}) do
- resp = Couch.get("/#{db}/_all_docs", query: query)
- assert resp.status_code == 200
-
- resp.body["rows"]
- |> Enum.into(%{}, fn %{"id" => id, "value" => v} -> {id, v["rev"]} end)
- end
-end
diff --git a/test/elixir/test/reshard_basic_test.exs b/test/elixir/test/reshard_basic_test.exs
deleted file mode 100644
index dcb198c46..000000000
--- a/test/elixir/test/reshard_basic_test.exs
+++ /dev/null
@@ -1,174 +0,0 @@
-defmodule ReshardBasicTest do
- use CouchTestCase
- import ReshardHelpers
-
- @moduledoc """
- Test resharding basic functionality
- """
-
- setup_all do
- db1 = random_db_name()
- {:ok, _} = create_db(db1, query: %{q: 1})
- db2 = random_db_name()
- {:ok, _} = create_db(db2, query: %{q: 2})
-
- on_exit(fn ->
- reset_reshard_state()
- delete_db(db1)
- delete_db(db2)
- end)
-
- {:ok, [db1: db1, db2: db2]}
- end
-
- test "basic api querying, no jobs present" do
- summary = get_summary()
- assert summary["state"] == "running"
- assert summary["state_reason"] == :null
- assert summary["total"] == 0
- assert summary["completed"] == 0
- assert summary["failed"] == 0
- assert summary["stopped"] == 0
- assert get_state() == %{"state" => "running", "reason" => :null}
- assert get_jobs() == []
- end
-
- test "check validation of invalid parameters", context do
- db1 = context[:db1]
- node1 = get_first_node()
-
- resp = post_job_node(db1, "badnode")
- assert resp.status_code == 400
-
- resp = post_job_node("badresharddb", node1)
- assert resp.status_code == 400
-
- resp = post_job_db("badresharddb")
- assert resp.status_code == 400
-
- resp = post_job_range("badresharddb", "randomgarbage")
- assert resp.status_code == 400
-
- resp = get_job("badjobid")
- assert resp.status_code == 404
-
- resp = remove_job("badjobid")
- assert resp.status_code == 404
- end
-
- test "toggle global state" do
- assert get_state() == %{"state" => "running", "reason" => :null}
- put_state_stopped("xyz")
- assert get_state() == %{"state" => "stopped", "reason" => "xyz"}
- put_state_running()
- assert get_state() == %{"state" => "running", "reason" => :null}
- end
-
- test "split q=1 db shards on node1 (1 job)", context do
- db = context[:db1]
- node1 = get_first_node()
-
- resp = post_job_node(db, node1)
- assert resp.status_code in [201, 202]
-
- body = resp.body
- assert is_list(body)
- assert length(body) == 1
-
- [job] = body
- id = job["id"]
- assert is_binary(id)
- node = job["node"]
- assert is_binary(node)
- assert node == node1
- assert job["ok"] == true
- shard = job["shard"]
- assert is_binary(shard)
-
- resp = get_job(id)
- assert resp.status_code == 200
-
- body = resp.body
- assert body["type"] == "split"
- assert body["id"] == id
- assert body["source"] == shard
- assert is_list(body["history"])
- assert body["job_state"] in ["new", "running", "completed"]
- assert is_list(body["target"])
- assert length(body["target"]) == 2
-
- wait_job_completed(id)
-
- resp = get_job(id)
- assert resp.status_code == 200
-
- body = resp.body
- assert body["job_state"] == "completed"
- assert body["split_state"] == "completed"
-
- resp = Couch.get("/#{db}/_shards")
- assert resp.status_code == 200
- shards = resp.body["shards"]
- assert node1 not in Map.get(shards, "00000000-ffffffff", [])
- assert shards["00000000-7fffffff"] == [node1]
- assert shards["80000000-ffffffff"] == [node1]
-
- summary = get_summary()
- assert summary["total"] == 1
- assert summary["completed"] == 1
-
- resp = remove_job(id)
- assert resp.status_code == 200
-
- assert get_jobs() == []
-
- summary = get_summary()
- assert summary["total"] == 0
- assert summary["completed"] == 0
- end
-
- test "split q=2 shards on node1 (2 jobs)", context do
- db = context[:db2]
- node1 = get_first_node()
-
- resp = post_job_node(db, node1)
- assert resp.status_code in [201, 202]
-
- body = resp.body
- assert is_list(body)
- assert length(body) == 2
-
- [job1, job2] = Enum.sort(body)
- {id1, id2} = {job1["id"], job2["id"]}
-
- assert get_job(id1).body["id"] == id1
- assert get_job(id2).body["id"] == id2
-
- summary = get_summary()
- assert summary["total"] == 2
-
- wait_job_completed(id1)
- wait_job_completed(id2)
-
- summary = get_summary()
- assert summary["completed"] == 2
-
- resp = Couch.get("/#{db}/_shards")
- assert resp.status_code == 200
- shards = resp.body["shards"]
- assert node1 not in Map.get(shards, "00000000-7fffffff", [])
- assert node1 not in Map.get(shards, "80000000-ffffffff", [])
- assert shards["00000000-3fffffff"] == [node1]
- assert shards["40000000-7fffffff"] == [node1]
- assert shards["80000000-bfffffff"] == [node1]
- assert shards["c0000000-ffffffff"] == [node1]
-
- # deleting the source db should remove the jobs
- delete_db(db)
- wait_job_removed(id1)
- wait_job_removed(id2)
-
- summary = get_summary()
- assert summary["total"] == 0
- end
-end
diff --git a/test/elixir/test/reshard_changes_feed.exs b/test/elixir/test/reshard_changes_feed.exs
deleted file mode 100644
index 5498ded7b..000000000
--- a/test/elixir/test/reshard_changes_feed.exs
+++ /dev/null
@@ -1,81 +0,0 @@
-defmodule ReshardChangesFeedTest do
- use CouchTestCase
- import ReshardHelpers
-
- @moduledoc """
- Test _changes interaction with resharding
- """
-
- setup do
- db = random_db_name()
- {:ok, _} = create_db(db, query: %{q: 2})
-
- on_exit(fn ->
- reset_reshard_state()
- delete_db(db)
- end)
-
- {:ok, [db: db]}
- end
-
- test "all_docs after splitting all shards on node1", context do
- db = context[:db]
- add_docs(1..3, db)
-
- all_before = changes(db)
- first_seq = hd(all_before["results"])["seq"]
- last_seq = all_before["last_seq"]
- since_1_before = docset(changes(db, %{:since => first_seq}))
- since_last_before = docset(changes(db, %{:since => last_seq}))
-
- resp = post_job_range(db, "00000000-7fffffff")
- assert resp.status_code in [201, 202]
-
- resp.body
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> wait_job_completed(id) end)
-
- all_after = changes(db)
- since_1_after = docset(changes(db, %{:since => first_seq}))
- since_last_after = docset(changes(db, %{:since => last_seq}))
-
- assert docset(all_before) == docset(all_after)
- assert MapSet.subset?(since_1_before, since_1_after)
- assert MapSet.subset?(since_last_before, since_last_after)
-
- get_jobs()
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> remove_job(id) end)
- end
-
- defp docset(changes) do
- changes["results"]
- |> Enum.map(fn %{"id" => id} -> id end)
- |> MapSet.new()
- end
-
- defp changes(db, query \\ %{}) do
- resp = Couch.get("/#{db}/_changes", query: query)
- assert resp.status_code == 200
- resp.body
- end
-
- defp add_docs(range, db) do
- docs = create_docs(range)
- w3 = %{:w => 3}
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs}, query: w3)
- assert resp.status_code in [201, 202]
- assert length(resp.body) == length(docs)
-
- docs
- |> rev(resp.body)
- |> Enum.into(%{}, fn %{:_id => id, :_rev => rev} -> {id, rev} end)
- end
-
- # (Keep for debugging)
- # defp unpack_seq(seq) when is_binary(seq) do
- # [_, opaque] = String.split(seq, "-")
- # {:ok, binblob} = Base.url_decode64(opaque, padding: false)
- # :erlang.binary_to_term(binblob)
- # end
-end
diff --git a/test/elixir/test/reshard_helpers.exs b/test/elixir/test/reshard_helpers.exs
deleted file mode 100644
index 282d98c82..000000000
--- a/test/elixir/test/reshard_helpers.exs
+++ /dev/null
@@ -1,114 +0,0 @@
-defmodule ReshardHelpers do
- use CouchTestCase
-
- def get_summary do
- resp = Couch.get("/_reshard")
- assert resp.status_code == 200
- resp.body
- end
-
- def get_state do
- resp = Couch.get("/_reshard/state")
- assert resp.status_code == 200
- resp.body
- end
-
- def put_state_running do
- resp = Couch.put("/_reshard/state", body: %{:state => "running"})
- assert resp.status_code == 200
- resp
- end
-
- def put_state_stopped(reason \\ "") do
- body = %{:state => "stopped", :reason => reason}
- resp = Couch.put("/_reshard/state", body: body)
- assert resp.status_code == 200
- resp
- end
-
- def get_jobs do
- resp = Couch.get("/_reshard/jobs")
- assert resp.status_code == 200
- resp.body["jobs"]
- end
-
- def post_job_db(db) do
- body = %{:type => :split, :db => db}
- Couch.post("/_reshard/jobs", body: body)
- end
-
- def post_job_node(db, node) do
- body = %{:type => :split, :db => db, :node => node}
- Couch.post("/_reshard/jobs", body: body)
- end
-
- def post_job_range(db, range) do
- body = %{:type => :split, :db => db, :range => range}
- Couch.post("/_reshard/jobs", body: body)
- end
-
- def post_job_node_and_range(db, node, range) do
- body = %{:type => :split, :db => db, :node => node, :range => range}
- Couch.post("/_reshard/jobs", body: body)
- end
-
- def get_job(id) when is_binary(id) do
- Couch.get("/_reshard/jobs/#{id}")
- end
-
- def remove_job(id) when is_binary(id) do
- Couch.delete("/_reshard/jobs/#{id}")
- end
-
- def get_job_state(id) when is_binary(id) do
- resp = Couch.get("/_reshard/jobs/#{id}/state")
- assert resp.status_code == 200
- resp.body["state"]
- end
-
- def stop_job(id, reason \\ "") when is_binary(id) do
- body = %{:state => "stopped", :reason => reason}
- Couch.post("/_reshard/jobs/#{id}/state", body: body)
- end
-
- def resume_job(id) when is_binary(id) do
- body = %{:state => "running"}
- Couch.post("/_reshard/jobs/#{id}/state", body: body)
- end
-
- def job_ids(jobs) do
- Enum.map(jobs, fn job -> job["id"] end)
- end
-
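- # Return the first (sorted) cluster node that is also listed in all_nodes,
- # i.e. a configured node that is currently part of the cluster.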
- def get_first_node do
- mresp = Couch.get("/_membership")
- assert mresp.status_code == 200
- all_nodes = mresp.body["all_nodes"]
-
- mresp.body["cluster_nodes"]
- |> Enum.filter(fn n -> n in all_nodes end)
- |> Enum.sort()
- |> hd()
- end
-
- def wait_job_removed(id) do
- retry_until(fn -> get_job(id).status_code == 404 end, 200, 60_000)
- end
-
- def wait_job_completed(id) do
- wait_job_state(id, "completed")
- end
-
- def wait_job_state(id, state) do
- retry_until(fn -> get_job_state(id) == state end, 200, 60_000)
- end
-
- def reset_reshard_state do
- get_jobs()
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> remove_job(id) end)
-
- assert get_jobs() == []
- put_state_running()
- end
-end
diff --git a/test/elixir/test/rev_stemming_test.exs b/test/elixir/test/rev_stemming_test.exs
deleted file mode 100644
index 9a16d481d..000000000
--- a/test/elixir/test/rev_stemming_test.exs
+++ /dev/null
@@ -1,157 +0,0 @@
-defmodule RevStemmingTest do
- use CouchTestCase
-
- @moduletag :revs
-
- @moduledoc """
- This is a port of the rev_stemming.js suite
- """
-
- @new_limit 5
-
- @tag :with_db
- test "revs limit update", context do
- db_name = context[:db_name]
-
- resp = Couch.get("/#{db_name}/_revs_limit")
- assert resp.body == 1000
-
- create_rev_doc(db_name, "foo", @new_limit + 1)
- resp = Couch.get("/#{db_name}/foo?revs=true")
- assert length(resp.body["_revisions"]["ids"]) == @new_limit + 1
-
- resp =
- Couch.put("/#{db_name}/_revs_limit",
- body: "#{@new_limit}",
- headers: ["Content-type": "application/json"]
- )
-
- assert resp.status_code == 200
-
- create_rev_doc(db_name, "foo", @new_limit + 1)
- resp = Couch.get("/#{db_name}/foo?revs=true")
- assert length(resp.body["_revisions"]["ids"]) == @new_limit
- end
-
- @tag :with_db
- test "revs limit produces replication conflict ", context do
- db_name = context[:db_name]
-
- db_name_b = "#{db_name}_b"
- create_db(db_name_b)
- delete_db_on_exit([db_name_b])
-
- resp =
- Couch.put("/#{db_name}/_revs_limit",
- body: "#{@new_limit}",
- headers: ["Content-type": "application/json"]
- )
-
- assert resp.status_code == 200
-
- create_rev_doc(db_name, "foo", @new_limit + 1)
- resp = Couch.get("/#{db_name}/foo?revs=true")
- assert length(resp.body["_revisions"]["ids"]) == @new_limit
-
- # If you replicate after you make more edits than the limit, you'll
- # cause a spurious edit conflict.
- replicate(db_name, db_name_b)
- resp = Couch.get("/#{db_name_b}/foo?conflicts=true")
- assert not Map.has_key?(resp.body, "_conflicts")
-
- create_rev_doc(db_name, "foo", @new_limit - 1)
-
- # one less edit than limit, no conflict
- replicate(db_name, db_name_b)
- resp = Couch.get("/#{db_name_b}/foo?conflicts=true")
- assert not Map.has_key?(resp.body, "_conflicts")
- prev_conflicted_rev = resp.body["_rev"]
-
- # now we hit the limit
- create_rev_doc(db_name, "foo", @new_limit + 1)
-
- replicate(db_name, db_name_b)
- resp = Couch.get("/#{db_name_b}/foo?conflicts=true")
- assert Map.has_key?(resp.body, "_conflicts")
-
- conflicted_rev =
- resp.body["_conflicts"]
- |> Enum.at(0)
-
- # we have a conflict, but the previously replicated rev is always the
- # losing side of the conflict
- assert conflicted_rev == prev_conflicted_rev
- end
-
- @tag :with_db
- test "revs limit is kept after compaction", context do
- db_name = context[:db_name]
-
- create_rev_doc(db_name, "bar", @new_limit + 1)
- resp = Couch.get("/#{db_name}/bar?revs=true")
- assert length(resp.body["_revisions"]["ids"]) == @new_limit + 1
-
- resp =
- Couch.put("/#{db_name}/_revs_limit",
- body: "#{@new_limit}",
- headers: ["Content-type": "application/json"]
- )
-
- assert resp.status_code == 200
-
- # We had already updated bar before setting the limit, so it still has a
- # long rev history. Compact to stem the revs.
- resp = Couch.get("/#{db_name}/bar?revs=true")
- assert length(resp.body["_revisions"]["ids"]) == @new_limit
-
- compact(db_name)
-
- # force reload because ETags don't honour compaction
- resp =
- Couch.get("/#{db_name}/bar?revs=true",
- headers: ["if-none-match": "pommes"]
- )
-
- assert length(resp.body["_revisions"]["ids"]) == @new_limit
- end
-
- # function to create a doc with multiple revisions
- defp create_rev_doc(db_name, id, num_revs) do
- resp = Couch.get("/#{db_name}/#{id}")
-
- doc =
- if resp.status_code == 200 do
- resp.body
- else
- %{_id: id, count: 0}
- end
-
- {:ok, resp} = create_doc(db_name, doc)
- create_rev_doc(db_name, id, num_revs, [Map.put(doc, :_rev, resp.body["rev"])])
- end
-
- defp create_rev_doc(db_name, id, num_revs, revs) do
- if length(revs) < num_revs do
- doc = %{_id: id, _rev: Enum.at(revs, -1)[:_rev], count: length(revs)}
- {:ok, resp} = create_doc(db_name, doc)
-
- create_rev_doc(
- db_name,
- id,
- num_revs,
- revs ++ [Map.put(doc, :_rev, resp.body["rev"])]
- )
- else
- revs
- end
- end
-
- def delete_db_on_exit(db_names) when is_list(db_names) do
- on_exit(fn ->
- Enum.each(db_names, fn name ->
- delete_db(name)
- end)
- end)
- end
-end
diff --git a/test/elixir/test/rewrite_js_test.exs b/test/elixir/test/rewrite_js_test.exs
deleted file mode 100644
index a3adb3e7d..000000000
--- a/test/elixir/test/rewrite_js_test.exs
+++ /dev/null
@@ -1,411 +0,0 @@
-defmodule RewriteJSTest do
- use CouchTestCase
-
- @moduletag :js_engine
- @moduletag kind: :single_node
-
- @moduledoc """
- Test CouchDB rewrites JS
- This is a port of the rewrite_js.js suite
- """
-
- @ddoc %{
- _id: "_design/test",
- language: "javascript",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- },
- rewrites: """
- function(req) {
- prefix = req.path[4];
- if (prefix === 'foo') {
- return 'foo.txt';
- }
- if (prefix === 'foo2') {
- return {path: 'foo.txt', method: 'GET'};
- }
- if (prefix === 'hello') {
- if (req.method != 'PUT') {
- return
- }
- id = req.path[5];
- return {path: '_update/hello/' + id};
- }
- if (prefix === 'welcome') {
- if (req.path.length == 6){
- name = req.path[5];
- return {path: '_show/welcome', query: {'name': name}};
- }
- return '_show/welcome';
- }
- if (prefix === 'welcome2') {
- return {path: '_show/welcome', query: {'name': 'user'}};
- }
- if (prefix === 'welcome3') {
- name = req.path[5];
- if (req.method == 'PUT') {
- path = '_update/welcome2/' + name;
- } else if (req.method == 'GET') {
- path = '_show/welcome2/' + name;
- } else {
- return;
- }
- return path;
- }
- if (prefix === 'welcome4') {
- return {path: '_show/welcome3', query: {name: req.path[5]}};
- }
- if (prefix === 'welcome5') {
- rest = req.path.slice(5).join('/');
- return {path: '_show/' + rest, query: {name: rest}};
- }
- if (prefix === 'basicView') {
- rest = req.path.slice(5).join('/');
- return {path: '_view/basicView'};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/basicView') {
- return {path: '_list/simpleForm/basicView'};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/basicViewFixed') {
- return {path: '_list/simpleForm/basicView',
- query: {startkey: '"3"', endkey: '"8"'}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/complexView') {
- return {path: '_list/simpleForm/complexView',
- query: {key: JSON.stringify([1,2])}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/complexView2') {
- return {path: '_list/simpleForm/complexView',
- query: {key: JSON.stringify(['test', {}])}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/complexView3') {
- return {path: '_list/simpleForm/complexView',
- query: {key: JSON.stringify(['test', ['test', 'essai']])}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/complexView4') {
- return {path: '_list/simpleForm/complexView2',
- query: {key: JSON.stringify({"c": 1})}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/sendBody1') {
- return {path: '_list/simpleForm/complexView2',
- method: 'POST',
- query: {limit: '1'},
- headers:{'Content-type':'application/json'},
- body: JSON.stringify( {keys: [{"c": 1}]} )};
- }
- if (req.path.slice(4).join('/') === '/') {
- return {path: '_view/basicView'};
- }
- if (prefix === 'db') {
- return {path: '../../' + req.path.slice(5).join('/')};
- }
- }
- """,
- lists: %{
- simpleForm: """
- function(head, req) {
- send('<ul>');
- var row, row_number = 0, prevKey, firstKey = null;
- while (row = getRow()) {
- row_number += 1;
- if (!firstKey) firstKey = row.key;
- prevKey = row.key;
- send('\\n<li>Key: '+row.key
- +' Value: '+row.value
- +' LineNo: '+row_number+'</li>');
- }
- return '</ul><p>FirstKey: '+ firstKey + ' LastKey: '+ prevKey+'</p>';
- }
- """
- },
- shows: %{
- welcome: """
- function(doc,req) {
- return "Welcome " + req.query["name"];
- }
- """,
- welcome2: """
- function(doc, req) {
- return "Welcome " + doc.name;
- }
- """,
- welcome3: """
- function(doc,req) {
- return "Welcome " + req.query["name"];
- }
- """
- },
- updates: %{
- hello: """
- function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [{
- _id : req.id
- }, "New World"]
- }
- return [null, "Empty World"];
- }
- doc.world = "hello";
- doc.edited_by = req.userCtx;
- return [doc, "hello doc"];
- }
- """,
- welcome2: """
- function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [{
- _id: req.id,
- name: req.id
- }, "New World"]
- }
- return [null, "Empty World"];
- }
- return [doc, "hello doc"];
- }
- """
- },
- views: %{
- basicView: %{
- map: """
- function(doc) {
- if (doc.integer) {
- emit(doc.integer, doc.string);
- }
- }
- """
- },
- complexView: %{
- map: """
- function(doc) {
- if (doc.type == "complex") {
- emit([doc.a, doc.b], doc.string);
- }
- }
- """
- },
- complexView2: %{
- map: """
- function(doc) {
- if (doc.type == "complex") {
- emit(doc.a, doc.string);
- }
- }
- """
- },
- complexView3: %{
- map: """
- function(doc) {
- if (doc.type == "complex") {
- emit(doc.b, doc.string);
- }
- }
- """
- }
- }
- }
-
- Enum.each(
- ["test_rewrite_suite_db", "test_rewrite_suite_db%2Fwith_slashes"],
- fn db_name ->
- @tag with_random_db: db_name
- test "Test basic js rewrites on #{db_name}", context do
- db_name = context[:db_name]
-
- create_doc(db_name, @ddoc)
-
- docs1 = make_docs(0..9)
- bulk_save(db_name, docs1)
-
- docs2 = [
- %{"a" => 1, "b" => 1, "string" => "doc 1", "type" => "complex"},
- %{"a" => 1, "b" => 2, "string" => "doc 2", "type" => "complex"},
- %{"a" => "test", "b" => %{}, "string" => "doc 3", "type" => "complex"},
- %{
- "a" => "test",
- "b" => ["test", "essai"],
- "string" => "doc 4",
- "type" => "complex"
- },
- %{"a" => %{"c" => 1}, "b" => "", "string" => "doc 5", "type" => "complex"}
- ]
-
- bulk_save(db_name, docs2)
-
- # Test simple rewriting
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/foo")
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "text/plain"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/foo2")
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "text/plain"
-
- # Test POST, hello update world
- resp =
- Couch.post("/#{db_name}", body: %{"word" => "plankton", "name" => "Rusty"}).body
-
- assert resp["ok"]
- doc_id = resp["id"]
- assert doc_id
-
- resp = Couch.put("/#{db_name}/_design/test/_rewrite/hello/#{doc_id}")
- assert resp.status_code in [201, 202]
- assert resp.body == "hello doc"
- assert String.match?(resp.headers["Content-Type"], ~r/charset=utf-8/)
-
- assert Couch.get("/#{db_name}/#{doc_id}").body["world"] == "hello"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome?name=user")
- assert resp.body == "Welcome user"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome/user")
- assert resp.body == "Welcome user"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome2")
- assert resp.body == "Welcome user"
-
- resp = Couch.put("/#{db_name}/_design/test/_rewrite/welcome3/test")
- assert resp.status_code in [201, 202]
- assert resp.body == "New World"
- assert String.match?(resp.headers["Content-Type"], ~r/charset=utf-8/)
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome3/test")
- assert resp.body == "Welcome test"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome4/user")
- assert resp.body == "Welcome user"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome5/welcome3")
- assert resp.body == "Welcome welcome3"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/basicView")
- assert resp.status_code == 200
- assert resp.body["total_rows"] == 9
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/complexView")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/FirstKey: [1, 2]/)
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/complexView2")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 3/)
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/complexView3")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 4/)
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/complexView4")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 5/)
-
- # COUCHDB-1612 - send body rewriting get to post
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/sendBody1")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 5 LineNo: 1/)
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/db/_design/test?meta=true")
- assert resp.status_code == 200
- assert resp.body["_id"] == "_design/test"
- assert Map.has_key?(resp.body, "_revs_info")
- end
-
- @tag with_random_db: db_name
- test "early response on #{db_name}", context do
- db_name = context[:db_name]
-
- ddoc = %{
- _id: "_design/response",
- rewrites: """
- function(req){
- status = parseInt(req.query.status);
- return {code: status,
- body: JSON.stringify({"status": status}),
- headers: {'x-foo': 'bar', 'Content-Type': 'application/json'}};
- }
- """
- }
-
- create_doc(db_name, ddoc)
-
- resp = Couch.get("/#{db_name}/_design/response/_rewrite?status=200")
- assert resp.status_code == 200
- assert resp.headers["x-foo"] == "bar"
- assert resp.body["status"] == 200
-
- resp = Couch.get("/#{db_name}/_design/response/_rewrite?status=451")
- assert resp.status_code == 451
- assert resp.headers["Content-Type"] == "application/json"
-
- resp = Couch.get("/#{db_name}/_design/response/_rewrite?status=500")
- assert resp.status_code == 500
- end
-
- @tag with_random_db: db_name
- test "path relative to server on #{db_name}", context do
- db_name = context[:db_name]
-
- ddoc = %{
- _id: "_design/relative",
- rewrites: """
- function(req){
- return '../../../_uuids'
- }
- """
- }
-
- create_doc(db_name, ddoc)
- resp = Couch.get("/#{db_name}/_design/relative/_rewrite/uuids")
- assert resp.status_code == 200
- assert length(resp.body["uuids"]) == 1
- end
-
- @tag with_random_db: db_name
- test "loop on #{db_name}", context do
- db_name = context[:db_name]
-
- ddoc_loop = %{
- _id: "_design/loop",
- rewrites: """
- function(req) {
- return '_rewrite/loop';
- }
- """
- }
-
- create_doc(db_name, ddoc_loop)
- resp = Couch.get("/#{db_name}/_design/loop/_rewrite/loop")
- assert resp.status_code == 400
- end
-
- @tag with_random_db: db_name
- test "requests with body preserve the query string rewrite on #{db_name}",
- context do
- db_name = context[:db_name]
-
- ddoc_qs = %{
- _id: "_design/qs",
- rewrites:
- "function (r) { return {path: '../../_changes', query: {'filter': '_doc_ids'}};};"
- }
-
- create_doc(db_name, ddoc_qs)
- create_doc(db_name, %{_id: "qs1"})
- create_doc(db_name, %{_id: "qs2"})
-
- resp =
- Couch.post("/#{db_name}/_design/qs/_rewrite",
- body: %{doc_ids: ["qs2"]}
- )
-
- assert resp.status_code == 200
- assert length(resp.body["results"]) == 1
- assert Enum.at(resp.body["results"], 0)["id"] == "qs2"
- end
- end
- )
-end
diff --git a/test/elixir/test/rewrite_test.exs b/test/elixir/test/rewrite_test.exs
deleted file mode 100644
index e23d63609..000000000
--- a/test/elixir/test/rewrite_test.exs
+++ /dev/null
@@ -1,526 +0,0 @@
-defmodule RewriteTest do
- use CouchTestCase
-
- @moduletag :js_engine
-
- @moduledoc """
- Test CouchDB rewrites
- This is a port of the rewrite.js suite
- """
-
- Enum.each(
- ["test_rewrite_suite_db", "test_rewrite_suite_db%2Fwith_slashes"],
- fn db_name ->
- @tag with_random_db: db_name
- @tag config: [
- {"httpd", "authentication_handlers",
- "{couch_httpd_auth, special_test_authentication_handler}"},
- {"chttpd", "WWW-Authenticate", "X-Couch-Test-Auth"}
- ]
- test "Test basic rewrites on #{db_name}", context do
- db_name = context[:db_name]
-
- ddoc = ~S"""
- {
- "_id": "_design/test",
- "language": "javascript",
- "_attachments": {
- "foo.txt": {
- "content_type":"text/plain",
- "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- },
- "rewrites": [
- {
- "from": "foo",
- "to": "foo.txt"
- },
- {
- "from": "foo2",
- "to": "foo.txt",
- "method": "GET"
- },
- {
- "from": "hello/:id",
- "to": "_update/hello/:id",
- "method": "PUT"
- },
- {
- "from": "/welcome",
- "to": "_show/welcome"
- },
- {
- "from": "/welcome/:name",
- "to": "_show/welcome",
- "query": {
- "name": ":name"
- }
- },
- {
- "from": "/welcome2",
- "to": "_show/welcome",
- "query": {
- "name": "user"
- }
- },
- {
- "from": "/welcome3/:name",
- "to": "_update/welcome2/:name",
- "method": "PUT"
- },
- {
- "from": "/welcome3/:name",
- "to": "_show/welcome2/:name",
- "method": "GET"
- },
- {
- "from": "/welcome4/*",
- "to" : "_show/welcome3",
- "query": {
- "name": "*"
- }
- },
- {
- "from": "/welcome5/*",
- "to" : "_show/*",
- "query": {
- "name": "*"
- }
- },
- {
- "from": "basicView",
- "to": "_view/basicView"
- },
- {
- "from": "simpleForm/basicView",
- "to": "_list/simpleForm/basicView"
- },
- {
- "from": "simpleForm/basicViewFixed",
- "to": "_list/simpleForm/basicView",
- "query": {
- "startkey": 3,
- "endkey": 8
- }
- },
- {
- "from": "simpleForm/basicViewPath/:start/:end",
- "to": "_list/simpleForm/basicView",
- "query": {
- "startkey": ":start",
- "endkey": ":end"
- },
- "formats": {
- "start": "int",
- "end": "int"
- }
- },
- {
- "from": "simpleForm/complexView",
- "to": "_list/simpleForm/complexView",
- "query": {
- "key": [1, 2]
- }
- },
- {
- "from": "simpleForm/complexView2",
- "to": "_list/simpleForm/complexView",
- "query": {
- "key": ["test", {}]
- }
- },
- {
- "from": "simpleForm/complexView3",
- "to": "_list/simpleForm/complexView",
- "query": {
- "key": ["test", ["test", "essai"]]
- }
- },
- {
- "from": "simpleForm/complexView4",
- "to": "_list/simpleForm/complexView2",
- "query": {
- "key": {"c": 1}
- }
- },
- {
- "from": "simpleForm/complexView5/:a/:b",
- "to": "_list/simpleForm/complexView3",
- "query": {
- "key": [":a", ":b"]
- }
- },
- {
- "from": "simpleForm/complexView6",
- "to": "_list/simpleForm/complexView3",
- "query": {
- "key": [":a", ":b"]
- }
- },
- {
- "from": "simpleForm/complexView7/:a/:b",
- "to": "_view/complexView3",
- "query": {
- "key": [":a", ":b"],
- "include_docs": ":doc"
- },
- "format": {
- "doc": "bool"
- }
-
- },
- {
- "from": "/",
- "to": "_view/basicView"
- },
- {
- "from": "/db/*",
- "to": "../../*"
- }
- ],
- "lists": {
- "simpleForm": "function(head, req) {
- log(\"simpleForm\");
- send(\"<ul>\");
- var row, row_number = 0, prevKey, firstKey = null;
- while (row = getRow()) {
- row_number += 1;
- if (!firstKey) firstKey = row.key;
- prevKey = row.key;
- send(\"\\n<li>Key: \"+row.key
- +\" Value: \"+row.value
- +\" LineNo: \"+row_number+\"</li>\");
- }
- return \"</ul><p>FirstKey: \"+ firstKey + \" LastKey: \"+ prevKey+\"</p>\";
- }"
- },
- "shows": {
- "welcome": "(function(doc,req) {
- return \"Welcome \" + req.query[\"name\"];
- })",
- "welcome2": "(function(doc, req) {
- return \"Welcome \" + doc.name;
- })",
- "welcome3": "(function(doc,req) {
- return \"Welcome \" + req.query[\"name\"];
- })"
- },
- "updates": {
- "hello" : "(function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [{
- _id : req.id
- }, \"New World\"]
- }
- return [null, \"Empty World\"];
- }
- doc.world = \"hello\";
- doc.edited_by = req.userCtx;
- return [doc, \"hello doc\"];
- })",
- "welcome2": "(function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [{
- _id: req.id,
- name: req.id
- }, \"New World\"]
- }
- return [null, \"Empty World\"];
- }
- return [doc, \"hello doc\"];
- })"
- },
- "views" : {
- "basicView" : {
- "map" : "(function(doc) {
- if (doc.integer) {
- emit(doc.integer, doc.string);
- }
-
- })"
- },
- "complexView": {
- "map": "(function(doc) {
- if (doc.type == \"complex\") {
- emit([doc.a, doc.b], doc.string);
- }
- })"
- },
- "complexView2": {
- "map": "(function(doc) {
- if (doc.type == \"complex\") {
- emit(doc.a, doc.string);
- }
- })"
- },
- "complexView3": {
- "map": "(function(doc) {
- if (doc.type == \"complex\") {
- emit(doc.b, doc.string);
- }
- })"
- }
- }
- }
- """
-
- ddoc = String.replace(ddoc, ~r/[\r\n]+/, "")
-
- docs1 = make_docs(0..9)
-
- docs2 = [
- %{"a" => 1, "b" => 1, "string" => "doc 1", "type" => "complex"},
- %{"a" => 1, "b" => 2, "string" => "doc 2", "type" => "complex"},
- %{"a" => "test", "b" => %{}, "string" => "doc 3", "type" => "complex"},
- %{
- "a" => "test",
- "b" => ["test", "essai"],
- "string" => "doc 4",
- "type" => "complex"
- },
- %{"a" => %{"c" => 1}, "b" => "", "string" => "doc 5", "type" => "complex"}
- ]
-
- assert Couch.put("/#{db_name}/_design/test", body: ddoc).body["ok"]
-
- assert Couch.post(
- "/#{db_name}/_bulk_docs",
- body: %{:docs => docs1},
- query: %{w: 3}
- ).status_code in [201, 202]
-
- assert Couch.post(
- "/#{db_name}/_bulk_docs",
- body: %{:docs => docs2},
- query: %{w: 3}
- ).status_code in [201, 202]
-
- # Test simple rewriting
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/foo")
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "text/plain"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/foo2")
- assert resp.body == "This is a base64 encoded text"
- assert resp.headers["Content-Type"] == "text/plain"
-
- # Test POST, hello update world
- resp =
- Couch.post("/#{db_name}", body: %{"word" => "plankton", "name" => "Rusty"}).body
-
- assert resp["ok"]
- doc_id = resp["id"]
- assert doc_id
-
- resp = Couch.put("/#{db_name}/_design/test/_rewrite/hello/#{doc_id}")
- assert resp.status_code in [201, 202]
- assert resp.body == "hello doc"
- assert String.match?(resp.headers["Content-Type"], ~r/charset=utf-8/)
-
- assert Couch.get("/#{db_name}/#{doc_id}").body["world"] == "hello"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome?name=user")
- assert resp.body == "Welcome user"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome/user")
- assert resp.body == "Welcome user"
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome2")
- assert resp.body == "Welcome user"
-
- resp = Couch.put("/#{db_name}/_design/test/_rewrite/welcome3/test")
- assert resp.status_code in [201, 202]
- assert resp.body == "New World"
- assert String.match?(resp.headers["Content-Type"], ~r/charset=utf-8/)
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome3/test")
- assert resp.body == "Welcome test"
-
- # TODO: port the two "bugged" tests from rewrite.js
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/basicView")
- assert resp.status_code == 200
- assert resp.body["total_rows"] == 9
-
- resp = Couch.get("/#{db_name}/_design/test/_rewrite")
- assert resp.status_code == 200
- assert resp.body["total_rows"] == 9
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/test/_rewrite/simpleForm/basicView?startkey=3&endkey=8"
- )
-
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: 1/)
- assert String.match?(resp.body, ~r/FirstKey: 3/)
- assert String.match?(resp.body, ~r/LastKey: 8/)
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/basicViewFixed")
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: 1/)
- assert String.match?(resp.body, ~r/FirstKey: 3/)
- assert String.match?(resp.body, ~r/LastKey: 8/)
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/test/_rewrite/simpleForm/basicViewFixed?startkey=4"
- )
-
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: 1/)
- assert String.match?(resp.body, ~r/FirstKey: 3/)
- assert String.match?(resp.body, ~r/LastKey: 8/)
-
- resp =
- Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/basicViewPath/3/8")
-
- assert resp.status_code == 200
- assert not String.match?(resp.body, ~r/Key: 1/)
- assert String.match?(resp.body, ~r/FirstKey: 3/)
- assert String.match?(resp.body, ~r/LastKey: 8/)
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/complexView")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/FirstKey: [1, 2]/)
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/complexView2")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 3/)
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/complexView3")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 4/)
-
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/simpleForm/complexView4")
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 5/)
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/test/_rewrite/simpleForm/complexView5/test/essai"
- )
-
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 4/)
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/test/_rewrite/simpleForm/complexView6?a=test&b=essai"
- )
-
- assert resp.status_code == 200
- assert String.match?(resp.body, ~r/Value: doc 4/)
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/test/_rewrite/simpleForm/complexView7/test/essai?doc=true"
- )
-
- assert resp.status_code == 200
- result = resp.body |> IO.iodata_to_binary() |> :jiffy.decode([:return_maps])
- first_row = Enum.at(result["rows"], 0)
- assert Map.has_key?(first_row, "doc")
-
- # COUCHDB-2031 - path normalization versus qs params
- resp = Rawresp.get("/#{db_name}/_design/test/_rewrite/db/_design/test?meta=true")
- assert resp.status_code == 200
- result = resp.body |> IO.iodata_to_binary() |> :jiffy.decode([:return_maps])
- assert result["_id"] == "_design/test"
- assert Map.has_key?(result, "_revs_info")
-
- ddoc2 = %{
- _id: "_design/test2",
- rewrites: [
- %{
- from: "uuids",
- to: "../../../_uuids"
- }
- ]
- }
-
- create_doc(db_name, ddoc2)
- resp = Couch.get("/#{db_name}/_design/test2/_rewrite/uuids")
- assert resp.status_code == 500
- assert resp.body["error"] == "insecure_rewrite_rule"
- end
-
- @tag with_random_db: db_name
- @tag config: [
- {"chttpd", "secure_rewrites", "false"}
- ]
- test "path relative to server on #{db_name}", context do
- db_name = context[:db_name]
-
- ddoc = %{
- _id: "_design/test2",
- rewrites: [
- %{
- from: "uuids",
- to: "../../../_uuids"
- }
- ]
- }
-
- create_doc(db_name, ddoc)
-
- resp = Couch.get("/#{db_name}/_design/test2/_rewrite/uuids")
- assert resp.status_code == 200
- assert length(resp.body["uuids"]) == 1
- end
-
- @tag with_random_db: db_name
- @tag config: [
- {"chttpd", "rewrite_limit", "2"}
- ]
- test "loop detection on #{db_name}", context do
- db_name = context[:db_name]
-
- ddoc_loop = %{
- _id: "_design/loop",
- rewrites: [%{from: "loop", to: "_rewrite/loop"}]
- }
-
- create_doc(db_name, ddoc_loop)
-
- resp = Couch.get("/#{db_name}/_design/loop/_rewrite/loop")
- assert resp.status_code == 400
- end
-
- @tag with_random_db: db_name
- @tag config: [
- {"chttpd", "rewrite_limit", "2"},
- {"chttpd", "secure_rewrites", "false"}
- ]
- test "serial execution is not spuriously counted as loop on #{db_name}", context do
- db_name = context[:db_name]
-
- ddoc = %{
- _id: "_design/test",
- language: "javascript",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- },
- rewrites: [
- %{
- from: "foo",
- to: "foo.txt"
- }
- ]
- }
-
- create_doc(db_name, ddoc)
-
- for _i <- 0..4 do
- resp = Couch.get("/#{db_name}/_design/test/_rewrite/foo")
- assert resp.status_code == 200
- end
- end
- end
- )
-end
diff --git a/test/elixir/test/security_validation_test.exs b/test/elixir/test/security_validation_test.exs
deleted file mode 100644
index adc282a9e..000000000
--- a/test/elixir/test/security_validation_test.exs
+++ /dev/null
@@ -1,324 +0,0 @@
-defmodule SecurityValidationTest do
- use CouchTestCase
-
- @moduletag :security
-
- @moduledoc """
- Test CouchDB Security Validations
- This is a port of the security_validation.js suite
- """
-
- @auth_headers %{
- jerry: [
- # jerry:mouse
- authorization: "Basic amVycnk6bW91c2U="
- ],
- tom: [
- # tom:cat
- authorization: "Basic dG9tOmNhdA=="
- ],
- spike_cat: [
- # spike:cat - which is wrong
- authorization: "Basic c3Bpa2U6Y2F0"
- ],
- spike: [
- # spike:dog
- authorization: "Basic c3Bpa2U6ZG9n"
- ]
- }
-
- @ddoc %{
- _id: "_design/test",
- language: "javascript",
- validate_doc_update: ~s"""
- (function (newDoc, oldDoc, userCtx, secObj) {
- if (secObj.admin_override) {
- if (userCtx.roles.indexOf('_admin') != -1) {
- // user is admin, they can do anything
- return true;
- }
- }
- // docs should have an author field.
- if (!newDoc._deleted && !newDoc.author) {
- throw {forbidden:
- \"Documents must have an author field\"};
- }
- if (oldDoc && oldDoc.author != userCtx.name) {
- throw {unauthorized:
- \"You are '\" + userCtx.name + \"', not the author '\" + oldDoc.author + \"' of this document. You jerk.\"};
- }
- })
- """
- }
-
- setup_all do
- auth_db_name = random_db_name()
- {:ok, _} = create_db(auth_db_name)
- on_exit(fn -> delete_db(auth_db_name) end)
-
- configs = [
- {"httpd", "authentication_handlers",
- "{couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}"},
- {"couch_httpd_auth", "authentication_db", auth_db_name},
- {"chttpd_auth", "authentication_db", auth_db_name}
- ]
-
- Enum.each(configs, &set_config/1)
-
- # port of comment from security_validation.js
- # the special case handler does not exist (any longer) in clusters, so we have
- # to replicate the behavior using a "normal" DB even though tests might no
- # longer run universally (which is why the "X-Couch-Test-Auth" header was
- # introduced).
- # btw: this needs to be INSIDE the configured server to propagate correctly ;-)
- # At least they'd run in the build, though
- users = [{"tom", "cat"}, {"jerry", "mouse"}, {"spike", "dog"}]
-
- Enum.each(users, fn {name, pass} ->
- doc = %{
- :_id => "org.couchdb.user:#{name}",
- :name => name,
- :roles => [],
- :password => pass
- }
-
- assert Couch.post("/#{auth_db_name}", body: doc).body["ok"]
- end)
-
- {:ok, [auth_db_name: auth_db_name]}
- end
-
- @tag :with_db_name
- test "Saving document using the wrong credentials", context do
- # spike:cat - which is wrong
- headers = @auth_headers[:spike_cat]
- resp = Couch.post("/#{context[:db_name]}", body: %{foo: 1}, headers: headers)
- assert resp.body["error"] == "unauthorized"
- assert resp.status_code == 401
- end
-
- test "Force basic login" do
- # spike:cat - which is wrong
- headers = @auth_headers[:spike_cat]
- resp = Couch.get("/_session", query: %{basic: true}, headers: headers)
- assert resp.status_code == 401
- assert resp.body["error"] == "unauthorized"
- end
-
- @tag :with_db
- test "Jerry can save a document normally", context do
- headers = @auth_headers[:jerry]
- assert Couch.get("/_session", headers: headers).body["userCtx"]["name"] == "jerry"
-
- doc = %{_id: "testdoc", foo: 1, author: "jerry"}
- assert Couch.post("/#{context[:db_name]}", body: doc).body["ok"]
- end
-
- @tag :with_db
- test "Non-admin user cannot save a ddoc", context do
- headers = @auth_headers[:jerry]
- resp = Couch.post("/#{context[:db_name]}", body: @ddoc, headers: headers)
- assert resp.status_code == 403
- assert resp.body["error"] == "forbidden"
- end
-
- @tag :with_db
- test "Ddoc writes with admin and replication contexts", context do
- db_name = context[:db_name]
- sec_obj = %{admins: %{names: ["jerry"]}}
-
- assert Couch.put("/#{db_name}/_security", body: sec_obj).body["ok"]
- assert Couch.post("/#{db_name}", body: @ddoc).body["ok"]
-
- new_rev = "2-642e20f96624a0aae6025b4dba0c6fb2"
- ddoc = @ddoc |> Map.put(:_rev, new_rev) |> Map.put(:foo, "bar")
- headers = @auth_headers[:tom]
- # attempt to save doc in replication context, eg ?new_edits=false
- resp =
- Couch.put(
- "/#{db_name}/#{ddoc[:_id]}",
- body: ddoc,
- headers: headers,
- query: %{new_edits: false}
- )
-
- assert resp.status_code == 403
- assert resp.body["error"] == "forbidden"
- end
-
- test "_session API" do
- headers = @auth_headers[:jerry]
- resp = Couch.get("/_session", headers: headers)
- assert resp.body["userCtx"]["name"] == "jerry"
- assert resp.body["userCtx"]["roles"] == []
- end
-
- @tag :with_db
- test "try to set a wrong value for _security", context do
- db_name = context[:db_name]
- # try to do something lame
- resp = Couch.put("/#{db_name}/_security", body: ["foo"])
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- end
-
- @tag :with_db
- test "Author presence and user security", context do
- db_name = context[:db_name]
- sec_obj = %{admin_override: false, admins: %{names: ["jerry"]}}
-
- jerry = @auth_headers[:jerry]
- tom = @auth_headers[:tom]
-
- assert Couch.put("/#{db_name}/_security", body: sec_obj).body["ok"]
- assert Couch.post("/#{db_name}", body: @ddoc).body["ok"]
-
- retry_until(fn ->
- resp = Couch.put("/#{db_name}/test_doc", body: %{foo: 1}, headers: jerry)
- assert resp.status_code == 403
- assert resp.body["error"] == "forbidden"
- assert resp.body["reason"] == "Documents must have an author field"
- end)
-
- # Jerry can write the document
- assert Couch.put(
- "/#{db_name}/test_doc",
- body: %{foo: 1, author: "jerry"},
- headers: jerry
- ).body["ok"]
-
- test_doc = Couch.get("/#{db_name}/test_doc").body
-
- # Tom cannot write the document
- resp = Couch.post("/#{db_name}", body: %{foo: 1}, headers: tom)
- assert resp.status_code == 403
- assert resp.body["error"] == "forbidden"
-
- # Admin cannot write the document (admin_override = false)
- test_doc = Map.put(test_doc, "foo", 3)
- resp = Couch.put("/#{db_name}/test_doc", body: test_doc)
- assert resp.status_code == 401
- assert resp.body["error"] == "unauthorized"
-
- # Enable admin override for changing author values
- assert Couch.put("/#{db_name}/_security", body: %{sec_obj | admin_override: true}).body[
- "ok"
- ]
-
- # Change owner to Tom
- test_doc = Map.put(test_doc, "author", "tom")
- resp = Couch.put("/#{db_name}/test_doc", body: test_doc)
- assert resp.body["ok"]
- test_doc = Map.put(test_doc, "_rev", resp.body["rev"])
-
- # Now Tom can update the document
- test_doc = Map.put(test_doc, "foo", "asdf")
- resp = Couch.put("/#{db_name}/test_doc", body: test_doc, headers: tom)
- assert resp.body["ok"]
- test_doc = Map.put(test_doc, "_rev", resp.body["rev"])
-
- # Jerry can't delete it
- retry_until(fn ->
- opts = [headers: jerry]
- resp = Couch.delete("/#{db_name}/test_doc?rev=#{test_doc["_rev"]}", opts)
- resp.status_code == 401 and resp.body["error"] == "unauthorized"
- end)
-
- # Admin can write the document (admin_override = true)
- test_doc = Map.put(test_doc, "foo", 4)
- resp = Couch.put("/#{db_name}/test_doc", body: test_doc)
- assert resp.body["ok"]
-
- # Disable admin override
- assert Couch.put("/#{db_name}/_security", body: %{sec_obj | admin_override: false}).body[
- "ok"
- ]
-
- docs = [%{_id: "bahbah", author: "jerry", foo: "bar"}, %{_id: "fahfah", foo: "baz"}]
-
- resp =
- Couch.post(
- "/#{db_name}/_bulk_docs",
- body: %{
- docs: docs
- },
- headers: jerry
- )
-
- assert Enum.at(resp.body, 0)["rev"]
- assert !Enum.at(resp.body, 0)["error"]
- assert !Enum.at(resp.body, 1)["rev"]
- assert Enum.at(resp.body, 1)["error"] == "forbidden"
-
- resp = Couch.get("/#{db_name}/bahbah")
- assert resp.status_code == 200
-
- resp = Couch.get("/#{db_name}/fahfah")
- assert resp.status_code == 404
- end
-
- test "Author presence and user security when replicated", _context do
- db_name = random_db_name()
- db_name_a = "#{db_name}_a"
- db_name_b = "#{db_name}_b"
- create_db(db_name_a)
- create_db(db_name_b)
- on_exit(fn -> delete_db(db_name_a) end)
- on_exit(fn -> delete_db(db_name_b) end)
-
- spike = @auth_headers[:spike]
-
- # save and replicate documents that will and will not pass our design
- # doc validation function.
- {:ok, _} = create_doc(db_name_a, %{_id: "foo1", value: "a", author: "tom"})
- {:ok, _} = create_doc(db_name_a, %{_id: "foo2", value: "a", author: "spike"})
- {:ok, _} = create_doc(db_name_a, %{_id: "bad1", value: "a"})
- replicate(db_name_a, db_name_b, headers: spike)
- replicate(db_name_b, db_name_a, headers: spike)
-
- assert Couch.get("/#{db_name_a}/foo1").status_code == 200
- assert Couch.get("/#{db_name_b}/foo1").status_code == 200
- assert Couch.get("/#{db_name_a}/foo2").status_code == 200
- assert Couch.get("/#{db_name_b}/foo2").status_code == 200
-
- {:ok, _} = create_doc(db_name_a, @ddoc)
-
- # no effect on already saved docs
- assert Couch.get("/#{db_name_a}/bad1").status_code == 200
-
- # Update some docs on dbB. Since the design hasn't replicated, anything
- # is allowed.
-
- # this edit will fail validation on replication to dbA (no author)
- assert Couch.post(
- "/#{db_name_b}",
- body: %{id: "bad2", value: "a"},
- headers: spike
- ).body["ok"]
-
- # this edit will fail security on replication to dbA (wrong author
- # replicating the change)
- foo1 = Couch.get("/#{db_name_b}/foo1").body
- foo1 = Map.put(foo1, "value", "b")
- assert Couch.put("/#{db_name_b}/foo1", body: foo1, headers: spike).body["ok"]
-
- # this is a legal edit
- foo2 = Couch.get("/#{db_name_b}/foo2").body
- foo2 = Map.put(foo2, "value", "b")
- assert Couch.put("/#{db_name_b}/foo2", body: foo2, headers: spike).body["ok"]
-
- result = replicate(db_name_b, db_name_a, headers: spike)
- assert Enum.at(result["history"], 0)["docs_written"] == 1
- assert Enum.at(result["history"], 0)["doc_write_failures"] == 2
-
- # bad2 should not be on dbA
- assert Couch.get("/#{db_name_a}/bad2").status_code == 404
-
- # The edit to foo1 should not have replicated.
- resp = Couch.get("/#{db_name_a}/foo1")
- assert resp.body["value"] == "a"
-
- # The edit to foo2 should have replicated.
- resp = Couch.get("/#{db_name_a}/foo2")
- assert resp.body["value"] == "b"
- end
-end
diff --git a/test/elixir/test/show_documents_test.exs b/test/elixir/test/show_documents_test.exs
deleted file mode 100644
index a574c72b1..000000000
--- a/test/elixir/test/show_documents_test.exs
+++ /dev/null
@@ -1,448 +0,0 @@
-defmodule ShowDocumentsTest do
- use CouchTestCase
-
- @moduletag kind: :single_node
-
- @ddoc %{
- _id: "_design/template",
- language: "javascript",
- shows: %{
- hello: """
- function(doc, req) {
- if (doc) {
- return "Hello World";
- } else {
- if(req.id) {
- return "New World";
- } else {
- return "Empty World";
- }
- }
- }
- """,
- "just-name": """
- function(doc, req) {
- if (doc) {
- return {
- body : "Just " + doc.name
- };
- } else {
- return {
- body : "No such doc",
- code : 404
- };
- }
- }
- """,
- json: """
- function(doc, req) {
- return {
- json : doc
- }
- }
- """,
- "req-info": """
- function(doc, req) {
- return {
- json : req
- }
- }
- """,
- "show-deleted": """
- function(doc, req) {
- if(doc) {
- return doc._id;
- } else {
- return "No doc " + req.id;
- }
- }
- """,
- "render-error": """
- function(doc, req) {
- return noSuchVariable;
- }
- """,
- empty: """
- function(doc, req) {
- return "";
- }
- """,
- fail: """
- function(doc, req) {
- return doc._id;
- }
- """,
- "no-set-etag": """
- function(doc, req) {
- return {
- headers : {
- "Etag" : "skipped"
- },
- "body" : "something"
- }
- }
- """,
- "list-api": """
- function(doc, req) {
- start({"X-Couch-Test-Header": "Yeah"});
- send("Hey");
- }
- """,
- "list-api-provides": """
- function(doc, req) {
- provides("text", function(){
- send("foo, ");
- send("bar, ");
- send("baz!");
- })
- }
- """,
- "list-api-provides-and-return": """
- function(doc, req) {
- provides("text", function(){
- send("4, ");
- send("5, ");
- send("6, ");
- return "7!";
- })
- send("1, ");
- send("2, ");
- return "3, ";
- }
- """,
- "list-api-mix": """
- function(doc, req) {
- start({"X-Couch-Test-Header": "Yeah"});
- send("Hey ");
- return "Dude";
- }
- """,
- "list-api-mix-with-header": """
- function(doc, req) {
- start({"X-Couch-Test-Header": "Yeah"});
- send("Hey ");
- return {
- headers: {
- "X-Couch-Test-Header-Awesome": "Oh Yeah!"
- },
- body: "Dude"
- };
- }
- """,
- "accept-switch": """
- function(doc, req) {
- if (req.headers["Accept"].match(/image/)) {
- return {
- // a 16x16 px version of the CouchDB logo
- "base64" :
- ["iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAsV",
- "BMVEUAAAD////////////////////////5ur3rEBn////////////////wDBL/",
- "AADuBAe9EB3IEBz/7+//X1/qBQn2AgP/f3/ilpzsDxfpChDtDhXeCA76AQH/v7",
- "/84eLyWV/uc3bJPEf/Dw/uw8bRWmP1h4zxSlD6YGHuQ0f6g4XyQkXvCA36MDH6",
- "wMH/z8/yAwX64ODeh47BHiv/Ly/20dLQLTj98PDXWmP/Pz//39/wGyJ7Iy9JAA",
- "AADHRSTlMAbw8vf08/bz+Pv19jK/W3AAAAg0lEQVR4Xp3LRQ4DQRBD0QqTm4Y5",
- "zMxw/4OleiJlHeUtv2X6RbNO1Uqj9g0RMCuQO0vBIg4vMFeOpCWIWmDOw82fZx",
- "vaND1c8OG4vrdOqD8YwgpDYDxRgkSm5rwu0nQVBJuMg++pLXZyr5jnc1BaH4GT",
- "LvEliY253nA3pVhQqdPt0f/erJkMGMB8xucAAAAASUVORK5CYII="].join(''),
- headers : {
- "Content-Type" : "image/png",
- "Vary" : "Accept" // we set this for proxy caches
- }
- };
- } else {
- return {
- "body" : "accepting text requests",
- headers : {
- "Content-Type" : "text/html",
- "Vary" : "Accept"
- }
- };
- }
- }
- """,
- provides: """
- function(doc, req) {
- registerType("foo", "application/foo","application/x-foo");
-
- provides("html", function() {
- return "Ha ha, you said \\"" + doc.word + "\\".";
- });
-
- provides("foo", function() {
- return "foofoo";
- });
- }
- """,
- withSlash: """
- function(doc, req) {
- return { json: doc }
- }
- """,
- secObj: """
- function(doc, req) {
- return { json: req.secObj };
- }
- """
- }
- }
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- {:ok, _} = create_doc(db_name, @ddoc)
-
- create_doc(db_name, %{_id: "test-doc-id", word: "plankton", name: "Rusty"})
-
- {:ok, [db_name: db_name]}
- end
-
- test "show error", context do
- db_name = context[:db_name]
-
- resp = Couch.get("/#{db_name}/_design/template/_show/")
- assert resp.status_code == 404
- assert resp.body["reason"] == "Invalid path."
- end
-
- test "show with existing doc", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/template/_show/hello/test-doc-id")
- assert resp.body == "Hello World"
- assert String.match?(resp.headers["Content-Type"], ~r/charset=utf-8/)
-
- # Fix for COUCHDB-379
- assert String.match?(resp.headers["Server"], ~r/^CouchDB/)
- end
-
- test "show without docid", context do
- db_name = context[:db_name]
- resp = Rawresp.get("/#{db_name}/_design/template/_show/hello")
- assert resp.body == "Empty World"
-
- resp = Rawresp.get("/#{db_name}/_design/template/_show/empty")
- assert resp.body == ""
- end
-
- test "show fail with non-existing docid", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design/template/_show/fail/nonExistingDoc")
- assert resp.status_code == 404
- assert resp.body["error"] == "not_found"
- end
-
- test "show with doc", context do
- db_name = context[:db_name]
- resp = Rawresp.get("/#{db_name}/_design/template/_show/just-name/test-doc-id")
- assert resp.body == "Just Rusty"
- end
-
- test "show with missing doc", context do
- db_name = context[:db_name]
- resp = Rawresp.get("/#{db_name}/_design/template/_show/just-name/missingdoc")
- assert resp.status_code == 404
- assert resp.body == "No such doc"
- end
-
- test "missing design doc", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}/_design/missingddoc/_show/just-name/test-doc-id")
- assert resp.status_code == 404
- assert resp.body["error"] == "not_found"
- end
-
- test "show query parameters", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design/template/_show/req-info/test-doc-id?foo=bar",
- headers: [Accept: "text/html;text/plain;*/*", "X-Foo": "bar"]
- )
-
- assert resp.body["headers"]["X-Foo"] == "bar"
- assert resp.body["query"] == %{"foo" => "bar"}
- assert resp.body["method"] == "GET"
- assert Enum.at(resp.body["path"], 5) == "test-doc-id"
- assert resp.body["info"]["db_name"] == db_name
- end
-
- test "accept header switching - different mime has different etag", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design/template/_show/accept-switch/test-doc-id",
- headers: [Accept: "text/html;text/plain;*/*"]
- )
-
- assert String.match?(resp.headers["Content-Type"], ~r/text\/html/)
- assert resp.headers["Vary"] == "Accept"
-
- etag = resp.headers["etag"]
-
- resp =
- Rawresp.get("/#{db_name}/_design/template/_show/accept-switch/test-doc-id",
- headers: [Accept: "image/png;*/*"]
- )
-
- assert String.match?(resp.body, ~r/PNG/)
- assert resp.headers["Content-Type"] == "image/png"
-
- etag2 = resp.headers["etag"]
-
- assert etag != etag2
- end
-
- test "show with doc - etags", context do
- db_name = context[:db_name]
-
- doc = %{"_id" => "test-doc-id2", word: "plankton", name: "Rusty"}
- doc = save(db_name, doc)
-
- resp = Couch.get("/#{db_name}/_design/template/_show/just-name/test-doc-id2")
-
- etag = resp.headers["etag"]
-
- resp =
- Couch.get("/#{db_name}/_design/template/_show/just-name/test-doc-id2",
- headers: ["if-none-match": etag]
- )
-
- assert resp.status_code == 304
-
- doc = Map.put(doc, "name", "Crusty")
- save(db_name, doc)
-
- resp =
- Couch.get("/#{db_name}/_design/template/_show/just-name/test-doc-id2",
- headers: ["if-none-match": etag]
- )
-
- assert resp.status_code == 200
- end
-
- test "JS can't set etag", context do
- db_name = context[:db_name]
-
- resp = Couch.get("/#{db_name}/_design/template/_show/no-set-etag/test-doc-id")
- assert resp.headers["etag"] != "skipped"
- end
-
- test "the provides mime matcher", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get("/#{db_name}/_design/template/_show/provides/test-doc-id",
- headers: [Accept: "text/html,application/atom+xml; q=0.9"]
- )
-
- assert String.match?(resp.headers["Content-Type"], ~r/text\/html/)
- assert String.match?(resp.headers["Content-Type"], ~r/charset=utf-8/)
- assert resp.body == "Ha ha, you said \"plankton\"."
- end
-
- test "registering types works", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get("/#{db_name}/_design/template/_show/provides/test-doc-id",
- headers: [Accept: "application/x-foo"]
- )
-
- assert resp.headers["Content-Type"] == "application/x-foo"
- assert String.match?(resp.body, ~r/foofoo/)
- end
-
- test "the provides mime matcher without a match", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design/template/_show/provides/test-doc-id",
- headers: [Accept: "text/monkeys"]
- )
-
- assert resp.body["error"] == "not_acceptable"
- end
-
- test "id with slash", context do
- db_name = context[:db_name]
-
- doc3 = %{"_id" => "a/b/c", "a" => 1}
- save(db_name, doc3)
- resp = Couch.get("/#{db_name}/_design/template/_show/withSlash/a/b/c")
- assert resp.status_code == 200
- end
-
- test "show with non-existing docid", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/template/_show/hello/nonExistingDoc")
- assert resp.body == "New World"
- end
-
- test "list() compatible API", context do
- db_name = context[:db_name]
-
- resp = Rawresp.get("/#{db_name}/_design/template/_show/list-api/foo")
- assert resp.body == "Hey"
- assert resp.headers["X-Couch-Test-Header"] == "Yeah"
- end
-
- test "list() compatible API with provides function", context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get("/#{db_name}/_design/template/_show/list-api-provides/foo?format=text")
-
- assert resp.body == "foo, bar, baz!"
- end
-
- test "should keep next result order: chunks + return value + provided chunks + provided return value",
- context do
- db_name = context[:db_name]
-
- resp =
- Rawresp.get(
- "/#{db_name}/_design/template/_show/list-api-provides-and-return/foo?format=text"
- )
-
- assert resp.body == "1, 2, 3, 4, 5, 6, 7!"
-
- resp = Rawresp.get("/#{db_name}/_design/template/_show/list-api-mix/foo")
- assert resp.body == "Hey Dude"
- assert resp.headers["X-Couch-Test-Header"] == "Yeah"
-
- resp = Rawresp.get("/#{db_name}/_design/template/_show/list-api-mix-with-header/foo")
- assert resp.body == "Hey Dude"
- assert resp.headers["X-Couch-Test-Header"] == "Yeah"
- assert resp.headers["X-Couch-Test-Header-Awesome"] == "Oh Yeah!"
- end
-
- test "deleted docs", context do
- db_name = context[:db_name]
-
- doc = save(db_name, %{"_id" => "testdoc", "foo" => 1})
-
- resp = Rawresp.get("/#{db_name}/_design/template/_show/show-deleted/testdoc")
- assert resp.body == "testdoc"
-
- Couch.delete("/#{db_name}/testdoc?rev=#{doc["_rev"]}")
- resp = Rawresp.get("/#{db_name}/_design/template/_show/show-deleted/testdoc")
- assert resp.body == "No doc testdoc"
- end
-
- @tag :with_db
- test "security object", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @ddoc)
- {:ok, _} = create_doc(db_name, %{_id: "testdoc", foo: 1})
-
- Couch.put("/#{db_name}/_security", body: %{foo: true})
-
- retry_until(fn ->
- resp = Couch.get("/#{db_name}/_design/template/_show/secObj")
- assert resp.body["foo"]
- end)
- end
-end
diff --git a/test/elixir/test/support/couch_test_case.ex b/test/elixir/test/support/couch_test_case.ex
deleted file mode 100644
index 2d29425f0..000000000
--- a/test/elixir/test/support/couch_test_case.ex
+++ /dev/null
@@ -1,29 +0,0 @@
-defmodule CouchTestCase do
- @moduledoc false
-
- use ExUnit.CaseTemplate
-
- using do
- quote do
- require Logger
- use ExUnit.Case
-
- import Couch.DBTest
- end
- end
-
- setup context do
- setup_funs = [
- &Couch.DBTest.set_db_context/1,
- &Couch.DBTest.set_config_context/1,
- &Couch.DBTest.set_user_context/1
- ]
-
- context =
- Enum.reduce(setup_funs, context, fn setup_fun, acc ->
- setup_fun.(acc)
- end)
-
- {:ok, context}
- end
-end
diff --git a/test/elixir/test/test_helper.exs b/test/elixir/test/test_helper.exs
deleted file mode 100644
index 86f20ed59..000000000
--- a/test/elixir/test/test_helper.exs
+++ /dev/null
@@ -1,3 +0,0 @@
-Couch.Test.Suite.start()
-Code.require_file("partition_helpers.exs", __DIR__)
-Code.require_file("reshard_helpers.exs", __DIR__)
diff --git a/test/elixir/test/update_documents_test.exs b/test/elixir/test/update_documents_test.exs
deleted file mode 100644
index c29b31a4d..000000000
--- a/test/elixir/test/update_documents_test.exs
+++ /dev/null
@@ -1,324 +0,0 @@
-defmodule UpdateDocumentsTest do
- use CouchTestCase
-
- @ddoc %{
- _id: "_design/update",
- language: "javascript",
- updates: %{
- hello: """
- function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [
- // Creates a new document with the PUT docid,
- { _id : req.id,
- reqs : [req] },
- // and returns an HTML response to the client.
- "<p>New World</p>"];
- };
- //
- return [null, "<p>Empty World</p>"];
- };
- // we can update the document inline
- doc.world = "hello";
- // we can record aspects of the request or use them in application logic.
- doc.reqs && doc.reqs.push(req);
- doc.edited_by = req.userCtx;
- return [doc, "<p>hello doc</p>"];
- }
- """,
- "in-place": """
- function(doc, req) {
- var field = req.query.field;
- var value = req.query.value;
- var message = "set "+field+" to "+value;
- doc[field] = value;
- return [doc, message];
- }
- """,
- "form-update": """
- function(doc, req) {
- for (var field in req.form) {
- doc[field] = req.form[field];
- }
- var message = "updated doc from form";
- return [doc, message];
- }
- """,
- "bump-counter": """
- function(doc, req) {
- if (!doc.counter) doc.counter = 0;
- doc.counter += 1;
- var message = "<h1>bumped it!</h1>";
- return [doc, message];
- }
- """,
- error: """
- function(doc, req) {
- superFail.badCrash;
- }
- """,
- "get-uuid": """
- function(doc, req) {
- return [null, req.uuid];
- }
- """,
- "code-n-bump": """
- function(doc,req) {
- if (!doc.counter) doc.counter = 0;
- doc.counter += 1;
- var message = "<h1>bumped it!</h1>";
- resp = {"code": 302, "body": message}
- return [doc, resp];
- }
- """,
- "resp-code": """
- function(doc,req) {
- resp = {"code": 302}
- return [null, resp];
- }
- """,
- "resp-code-and-json": """
- function(doc,req) {
- resp = {"code": 302, "json": {"ok": true}}
- return [{"_id": req["uuid"]}, resp];
- }
- """,
- binary: """
- function(doc, req) {
- var resp = {
- "headers" : {
- "Content-Type" : "application/octet-stream"
- },
- "base64" : "aGVsbG8gd29ybGQh" // "hello world!" encoded
- };
- return [doc, resp];
- }
- """,
- empty: """
- function(doc, req) {
- return [{}, 'oops'];
- }
- """
- }
- }
-
- @document %{word: "plankton", name: "Rusty"}
-
- @tag :with_db
- test "update error invalid path", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- resp = Couch.post("/#{db_name}/_design/update/_update/")
- assert resp.status_code == 404
- assert resp.body["reason"] == "Invalid path."
- end
-
- @tag :with_db
- test "update document", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
- {:ok, resp} = create_doc(db_name, @document)
- docid = resp.body["id"]
-
- resp = Couch.put("/#{db_name}/_design/update/_update/hello/#{docid}")
- assert resp.status_code == 201
- assert resp.body == "<p>hello doc</p>"
- assert String.contains?(resp.headers["Content-Type"], "charset=utf-8")
- assert resp.headers["X-Couch-Id"] == docid
-
- resp = Couch.get("/#{db_name}/#{docid}")
- assert resp.status_code == 200
- assert resp.body["world"] == "hello"
-
- # Fix for COUCHDB-379
- assert String.starts_with?(resp.headers["Server"], "CouchDB")
-
- resp = Couch.put("/#{db_name}/_design/update/_update/hello")
- assert resp.status_code == 200
- assert resp.body == "<p>Empty World</p>"
- end
-
- @tag :with_db
- test "GET is not allowed", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- resp = Couch.get("/#{db_name}/_design/update/_update/hello")
- assert resp.body["error"] == "method_not_allowed"
- end
-
- @tag :with_db
- test "doc can be created", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- resp = Couch.get("/#{db_name}/nonExistingDoc")
- assert resp.status_code == 404
-
- resp = Couch.put("/#{db_name}/_design/update/_update/hello/nonExistingDoc")
- assert resp.status_code == 201
- assert resp.body == "<p>New World</p>"
-
- resp = Couch.get("/#{db_name}/nonExistingDoc")
- assert resp.status_code == 200
- end
-
- @tag :with_db
- test "in place update", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- {:ok, resp} = create_doc(db_name, @document)
- docid = resp.body["id"]
-
- resp =
- Couch.put(
- "/#{db_name}/_design/update/_update/in-place/#{docid}?field=title&value=test"
- )
-
- assert resp.status_code == 201
- assert resp.body == "set title to test"
- resp = Couch.get("/#{db_name}/#{docid}")
- assert resp.status_code == 200
- assert resp.body["title"] == "test"
- end
-
- @tag :with_db
- test "form update via application/x-www-form-urlencoded", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- {:ok, resp} = create_doc(db_name, @document)
- docid = resp.body["id"]
-
- resp =
- Couch.put(
- "/#{db_name}/_design/update/_update/form-update/#{docid}",
- headers: ["Content-Type": "application/x-www-form-urlencoded"],
- body: "formfoo=bar&formbar=foo"
- )
-
- assert resp.status_code == 201
- assert resp.body == "updated doc from form"
-
- resp = Couch.get("/#{db_name}/#{docid}")
- assert resp.status_code == 200
- assert resp.body["formfoo"] == "bar"
- assert resp.body["formbar"] == "foo"
- end
-
- @tag :with_db
- test "bump counter", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- {:ok, resp} = create_doc(db_name, @document)
- docid = resp.body["id"]
-
- resp =
- Couch.put("/#{db_name}/_design/update/_update/bump-counter/#{docid}",
- headers: ["X-Couch-Full-Commit": "true"]
- )
-
- assert resp.status_code == 201
- assert resp.body == "<h1>bumped it!</h1>"
-
- resp = Couch.get("/#{db_name}/#{docid}")
- assert resp.status_code == 200
- assert resp.body["counter"] == 1
-
- resp =
- Couch.put("/#{db_name}/_design/update/_update/bump-counter/#{docid}",
- headers: ["X-Couch-Full-Commit": "true"]
- )
-
- newrev = resp.headers["X-Couch-Update-NewRev"]
-
- resp = Couch.get("/#{db_name}/#{docid}")
- assert resp.status_code == 200
- assert resp.body["counter"] == 2
- assert resp.body["_rev"] == newrev
- end
-
- @tag :with_db
- test "Server provides UUID when POSTing without an ID in the URL", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
- resp = Couch.put("/#{db_name}/_design/update/_update/get-uuid/")
- assert resp.status_code == 200
- assert String.length(resp.body) == 32
- end
-
- @tag :with_db
- test "COUCHDB-1229 - allow slashes in doc ids for update handlers", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- create_doc(db_name, %{_id: "with/slash", counter: 1})
-
- resp = Couch.put("/#{db_name}/_design/update/_update/bump-counter/with/slash")
- assert resp.status_code == 201
- assert resp.body == "<h1>bumped it!</h1>"
-
- resp = Couch.get("/#{db_name}/with%2Fslash")
- assert resp.status_code == 200
- assert resp.body["counter"] == 2
- end
-
- @tag :with_db
- test "COUCHDB-648 - the code in the JSON response should be honored", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- {:ok, resp} = create_doc(db_name, @document)
- docid = resp.body["id"]
-
- Couch.put("/#{db_name}/_design/update/_update/bump-counter/#{docid}")
- Couch.put("/#{db_name}/_design/update/_update/bump-counter/#{docid}")
-
- resp = Couch.put("/#{db_name}/_design/update/_update/code-n-bump/#{docid}")
- assert resp.status_code == 302
- assert resp.body == "<h1>bumped it!</h1>"
-
- resp = Couch.get("/#{db_name}/#{docid}")
- assert resp.status_code == 200
- assert resp.body["counter"] == 3
-
- resp = Couch.put("/#{db_name}/_design/update/_update/resp-code/")
- assert resp.status_code == 302
-
- resp = Couch.put("/#{db_name}/_design/update/_update/resp-code-and-json/")
- assert resp.status_code == 302
- assert resp.body["ok"] == true
- end
-
- @tag :with_db
- test "base64 response", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- {:ok, resp} = create_doc(db_name, @document)
- docid = resp.body["id"]
-
- resp =
- Couch.put("/#{db_name}/_design/update/_update/binary/#{docid}",
- body: "rubbish"
- )
-
- assert resp.status_code == 201
- assert resp.body == "hello world!"
- assert String.contains?(resp.headers["Content-Type"], "application/octet-stream")
- end
-
- @tag :with_db
- test "Insert doc with empty id", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- resp = Couch.put("/#{db_name}/_design/update/_update/empty/foo")
- assert resp.status_code == 400
- assert resp.body["reason"] == "Document id must not be empty"
- end
-end
diff --git a/test/elixir/test/users_db_security_test.exs b/test/elixir/test/users_db_security_test.exs
deleted file mode 100644
index 656749040..000000000
--- a/test/elixir/test/users_db_security_test.exs
+++ /dev/null
@@ -1,520 +0,0 @@
-defmodule UsersDbSecurityTest do
- use CouchTestCase
-
- @moduletag :authentication
- @moduletag kind: :single_node
-
- @users_db "_users"
-
- @login_user %{
- jerry: "apple",
- tom: "mp3",
- spike: "foobar",
- speedy: "test",
- silvestre: "anchovy"
- }
-
- setup_all do
- # Create db if not exists
- Couch.put("/#{@users_db}")
-
- retry_until(fn ->
- resp =
- Couch.get(
- "/#{@users_db}/_changes",
- query: [feed: "longpoll", timeout: 5000, filter: "_design"]
- )
-
- length(resp.body["results"]) > 0
- end)
-
- on_exit(&tear_down/0)
-
- :ok
- end
-
- defp tear_down do
- users = Map.keys(@login_user)
- Enum.each(users, fn name ->
- resp = Couch.get("/#{@users_db}/org.couchdb.user:#{name}")
- if resp.status_code == 200 do
- rev = resp.body["_rev"]
- Couch.delete("/#{@users_db}/org.couchdb.user:#{name}?rev=#{rev}")
- end
- end)
- end
-
- defp login_as(user, password \\ nil) do
- pwd =
- case password do
- nil -> @login_user[String.to_atom(user)]
- _ -> password
- end
-
- sess = Couch.login(user, pwd)
- assert sess.cookie, "Login correct is expected"
- sess
- end
-
- defp logout(session) do
- assert Couch.Session.logout(session).body["ok"]
- end
-
- defp open_as(db_name, doc_id, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- pwd = Keyword.get(options, :pwd)
- expect_response = Keyword.get(options, :expect_response, 200)
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user, pwd)
-
- resp =
- Couch.Session.get(
- session,
- "/#{db_name}/#{URI.encode(doc_id)}"
- )
-
- if use_session == nil do
- logout(session)
- end
-
- assert resp.status_code == expect_response
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp.body
- end
-
- defp save_as(db_name, doc, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- expect_response = Keyword.get(options, :expect_response, [201, 202])
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user)
-
- resp =
- Couch.Session.put(
- session,
- "/#{db_name}/#{URI.encode(doc["_id"])}",
- body: doc
- )
-
- if use_session == nil do
- logout(session)
- end
-
- if is_list(expect_response) do
- assert resp.status_code in expect_response
- else
- assert resp.status_code == expect_response
- end
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp
- end
-
- defp view_as(db_name, view_name, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- pwd = Keyword.get(options, :pwd)
- expect_response = Keyword.get(options, :expect_response, 200)
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user, pwd)
-
- [view_root, view_name] = String.split(view_name, "/")
-
- resp =
- Couch.Session.get(session, "/#{db_name}/_design/#{view_root}/_view/#{view_name}")
-
- if use_session == nil do
- logout(session)
- end
-
- if is_list(expect_response) do
- assert resp.status_code in expect_response
- else
- assert resp.status_code == expect_response
- end
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp
- end
-
- defp changes_as(db_name, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- expect_response = Keyword.get(options, :expect_response, [200, 202])
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user)
-
- resp =
- Couch.Session.get(
- session,
- "/#{db_name}/_changes"
- )
-
- if use_session == nil do
- logout(session)
- end
-
- if is_list(expect_response) do
- assert resp.status_code in expect_response
- else
- assert resp.status_code == expect_response
- end
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp
- end
-
- defp request_raw_as(db_name, path, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- pwd = Keyword.get(options, :pwd)
- expect_response = Keyword.get(options, :expect_response, 200)
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user, pwd)
-
- resp =
- Couch.Session.get(
- session,
- "/#{db_name}/#{path}",
- parse_response: false
- )
-
- if use_session == nil do
- logout(session)
- end
-
- if is_list(expect_response) do
- assert resp.status_code in expect_response
- else
- assert resp.status_code == expect_response
- end
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp
- end
-
- defp request_as(db_name, path, options) do
- use_session = Keyword.get(options, :use_session)
- user = Keyword.get(options, :user)
- pwd = Keyword.get(options, :pwd)
- expect_response = Keyword.get(options, :expect_response, 200)
- expect_message = Keyword.get(options, :error_message)
-
- session = use_session || login_as(user, pwd)
-
- resp =
- Couch.Session.get(
- session,
- "/#{db_name}/#{path}"
- )
-
- if use_session == nil do
- logout(session)
- end
-
- if is_list(expect_response) do
- assert resp.status_code in expect_response
- else
- assert resp.status_code == expect_response
- end
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- resp
- end
-
- defp set_security(db_name, security, expect_response \\ 200) do
- resp = Couch.put("/#{db_name}/_security", body: security)
- assert resp.status_code == expect_response
- end
-
- @tag config: [
- {
- "couchdb",
- "users_db_security_editable",
- "true"
- },
- {
- "chttpd_auth",
- "iterations",
- "1"
- },
- {
- "admins",
- "jerry",
- "apple"
- }
- ]
- test "user db security" do
- # _users db
- # a doc with a 'password' field should have it hashed into 'derived_key'
- # using a salt stored in 'salt'; 'password' is then set to null.
- # Existing 'derived_key' and 'salt' fields are overwritten with new values
- # when a non-null 'password' field exists.
- # anonymous should be able to create a user document
- user_doc = %{
- _id: "org.couchdb.user:tom",
- type: "user",
- name: "tom",
- password: "mp3",
- roles: []
- }
-
- resp =
- Couch.post("/#{@users_db}", body: user_doc, headers: [authorization: "annonymous"])
-
- assert resp.status_code in [201, 202]
- assert resp.body["ok"]
-
- user_doc =
- retry_until(fn ->
- user_doc = open_as(@users_db, "org.couchdb.user:tom", user: "tom")
- assert !user_doc["password"]
- assert String.length(user_doc["derived_key"]) == 40
- assert String.length(user_doc["salt"]) == 32
- user_doc
- end)
-
- # anonymous should not be able to read an existing user's user document
- resp =
- Couch.get("/#{@users_db}/org.couchdb.user:tom",
- headers: [authorization: "annonymous"]
- )
-
- assert resp.status_code == 404
-
- # anonymous should not be able to read /_users/_changes
- resp = Couch.get("/#{@users_db}/_changes", headers: [authorization: "annonymous"])
- assert resp.status_code == 401
- assert resp.body["error"] == "unauthorized"
-
- # user should be able to read their own document
- tom_doc = open_as(@users_db, "org.couchdb.user:tom", user: "tom")
- assert tom_doc["_id"] == "org.couchdb.user:tom"
-
- # user should not be able to read /_users/_changes
- changes_as(@users_db,
- user: "tom",
- expect_response: 401,
- expect_message: "unauthorized"
- )
-
- tom_doc = Map.put(tom_doc, "password", "couch")
- save_as(@users_db, tom_doc, user: "tom")
-
- tom_doc = open_as(@users_db, "org.couchdb.user:tom", user: "jerry")
- assert !tom_doc["password"]
- assert String.length(tom_doc["derived_key"]) == 40
- assert String.length(tom_doc["salt"]) == 32
- assert tom_doc["derived_key"] != user_doc["derived_key"]
- assert tom_doc["salt"] != user_doc["salt"]
-
- # user should not be able to read another user's user document
- spike_doc = %{
- _id: "org.couchdb.user:spike",
- type: "user",
- name: "spike",
- password: "foobar",
- roles: []
- }
-
- {:ok, _} = create_doc(@users_db, spike_doc)
-
- open_as(@users_db, "org.couchdb.user:spike",
- user: "tom",
- pwd: "couch",
- expect_response: 404
- )
-
- speedy_doc = %{
- _id: "org.couchdb.user:speedy",
- type: "user",
- name: "speedy",
- password: "test",
- roles: ["user_admin"]
- }
-
- {:ok, _} = create_doc(@users_db, speedy_doc)
-
- security = %{
- admins: %{
- roles: [],
- names: ["speedy"]
- }
- }
-
- set_security(@users_db, security)
-
- # user should not be able to read from any view
- ddoc = %{
- _id: "_design/user_db_auth",
- views: %{
- test: %{
- map: "function(doc) { emit(doc._id, null); }"
- }
- },
- lists: %{
- names: """
- function(head, req) {
- var row; while (row = getRow()) { send(row.key + \"\\n\"); }
- }
- """
- },
- shows: %{
- name: "function(doc, req) { return doc.name; }"
- }
- }
-
- create_doc(@users_db, ddoc)
-
- resp =
- Couch.get("/#{@users_db}/_design/user_db_auth/_view/test",
- headers: [authorization: "annonymous"]
- )
-
- assert resp.body["error"] == "forbidden"
-
- # admin should be able to read from any view
- resp = view_as(@users_db, "user_db_auth/test", user: "jerry")
- assert resp.body["total_rows"] == 3
-
- # db admin should be able to read from any view
- resp = view_as(@users_db, "user_db_auth/test", user: "speedy")
- assert resp.body["total_rows"] == 3
-
- # non-admins can't read design docs
- open_as(@users_db, "_design/user_db_auth",
- user: "tom",
- pwd: "couch",
- expect_response: 403,
- expect_message: "forbidden"
- )
-
- # admin should be able to read _list
- result =
- request_raw_as(@users_db, "_design/user_db_auth/_list/names/test", user: "jerry")
-
- assert result.status_code == 200
- assert length(String.split(result.body, "\n")) == 4
-
- # non-admins can't read _list
- request_raw_as(@users_db, "_design/user_db_auth/_list/names/test",
- user: "tom",
- pwd: "couch",
- expect_response: 403
- )
-
- # admin should be able to read _show
- result =
- request_raw_as(@users_db, "_design/user_db_auth/_show/name/org.couchdb.user:tom",
- user: "jerry"
- )
-
- assert result.status_code == 200
- assert result.body == "tom"
-
- # non-admin should be able to access own _show
- result =
- request_raw_as(@users_db, "_design/user_db_auth/_show/name/org.couchdb.user:tom",
- user: "tom",
- pwd: "couch"
- )
-
- assert result.status_code == 200
- assert result.body == "tom"
-
- # non-admin can't read other's _show
- request_raw_as(@users_db, "_design/user_db_auth/_show/name/org.couchdb.user:jerry",
- user: "tom",
- pwd: "couch",
- expect_response: 404
- )
-
- # admin should be able to read and edit any user doc
- spike_doc = open_as(@users_db, "org.couchdb.user:spike", user: "jerry")
- spike_doc = Map.put(spike_doc, "password", "mobile")
- save_as(@users_db, spike_doc, user: "jerry")
-
- # db admin should also be able to read and edit any user doc
- spike_doc = open_as(@users_db, "org.couchdb.user:spike", user: "jerry")
- spike_doc = Map.put(spike_doc, "password", "mobile1")
- save_as(@users_db, spike_doc, user: "speedy")
-
- security = %{
- admins: %{
- roles: ["user_admin"],
- names: []
- }
- }
-
- set_security(@users_db, security)
-
- # db admin should be able to read and edit any user doc
- spike_doc = open_as(@users_db, "org.couchdb.user:spike", user: "jerry")
- spike_doc = Map.put(spike_doc, "password", "mobile2")
- save_as(@users_db, spike_doc, user: "speedy")
-
- # ensure creation of old-style docs still works
- silvestre_doc = prepare_user_doc(name: "silvestre", password: "anchovy")
-
- resp =
- Couch.post("/#{@users_db}",
- body: silvestre_doc,
- headers: [authorization: "annonymous"]
- )
-
- assert resp.body["ok"]
-
- run_on_modified_server(
- [
- %{
- :section => "couch_httpd_auth",
- :key => "public_fields",
- :value => "name"
- },
- %{
- :section => "couch_httpd_auth",
- :key => "users_db_public",
- :value => "false"
- }
- ],
- fn ->
- request_as(@users_db, "_all_docs?include_docs=true",
- user: "tom",
- pwd: "couch",
- expect_response: 401,
- expect_message: "unauthorized"
- )
-
- # COUCHDB-1888 make sure admins always get all fields
- resp = request_as(@users_db, "_all_docs?include_docs=true", user: "jerry")
- rows = resp.body["rows"]
- assert Enum.at(rows, 2)["doc"]["type"] == "user"
- end
- )
- end
-end
diff --git a/test/elixir/test/users_db_test.exs b/test/elixir/test/users_db_test.exs
deleted file mode 100644
index 0b7ee8199..000000000
--- a/test/elixir/test/users_db_test.exs
+++ /dev/null
@@ -1,426 +0,0 @@
-defmodule UsersDbTest do
- use CouchTestCase
-
- @moduletag :authentication
-
- @users_db_name "_users"
-
- @moduletag config: [
- {
- "chttpd_auth",
- "authentication_db",
- @users_db_name
- },
- {
- "couch_httpd_auth",
- "authentication_db",
- @users_db_name
- },
- {
- "chttpd_auth",
- "iterations",
- "1"
- },
- {
- "admins",
- "jan",
- "apple"
- }
- ]
-
- setup do
- # Create db if not exists
- Couch.put("/#{@users_db_name}")
-
- resp =
- Couch.get(
- "/#{@users_db_name}/_changes",
- query: [feed: "longpoll", timeout: 5000, filter: "_design"]
- )
-
- assert resp.body
-
- on_exit(&tear_down/0)
-
- :ok
- end
-
- defp tear_down do
- delete_db(@users_db_name)
- create_db(@users_db_name)
- end
-
- defp save_as(db_name, doc, options) do
- session = Keyword.get(options, :use_session)
- expect_response = Keyword.get(options, :expect_response, [201, 202])
- expect_message = Keyword.get(options, :error_message)
- expect_reason = Keyword.get(options, :error_reason)
-
- headers =
- if session != nil do
- [
- Cookie: session.cookie,
- "X-CouchDB-www-Authenticate": "Cookie"
- ]
- else
- []
- end
-
- resp =
- Couch.put(
- "/#{db_name}/#{URI.encode(doc["_id"])}",
- headers: headers,
- body: doc
- )
-
- if is_list(expect_response) do
- assert resp.status_code in expect_response
- else
- assert resp.status_code == expect_response
- end
-
- if expect_message != nil do
- assert resp.body["error"] == expect_message
- end
-
- if expect_reason != nil do
- assert resp.body["reason"] == expect_reason
- end
-
- resp
- end
-
- defp login(user, password) do
- sess = Couch.login(user, password)
- assert sess.cookie, "Login correct is expected"
- sess
- end
-
- defp logout(session) do
- assert Couch.Session.logout(session).body["ok"]
- end
-
- @tag :with_db
- test "users db", context do
- db_name = context[:db_name]
- # test that the users db is born with the auth ddoc
- ddoc = Couch.get("/#{@users_db_name}/_design/_auth")
- assert ddoc.body["validate_doc_update"] != nil
-
- jchris_user_doc =
- prepare_user_doc([
- {:name, "jchris@apache.org"},
- {:password, "funnybone"}
- ])
-
- {:ok, resp} = create_doc(@users_db_name, jchris_user_doc)
- jchris_rev = resp.body["rev"]
-
- resp =
- Couch.get(
- "/_session",
- headers: [authorization: "Basic #{:base64.encode("jchris@apache.org:funnybone")}"]
- )
-
- assert resp.body["userCtx"]["name"] == "jchris@apache.org"
- assert resp.body["info"]["authenticated"] == "default"
- assert resp.body["info"]["authentication_db"] == @users_db_name
- assert Enum.member?(resp.body["info"]["authentication_handlers"], "cookie")
- assert Enum.member?(resp.body["info"]["authentication_handlers"], "default")
-
- resp =
- Couch.get(
- "/_session",
- headers: [authorization: "Basic Xzpf"]
- )
-
- assert resp.body["userCtx"]["name"] == :null
- assert not Enum.member?(resp.body["info"], "authenticated")
-
- # ok, now create a conflicting edit on the jchris doc, and make sure there's no login.
- # (use replication to create the conflict) - need to be admin
- session = login("jan", "apple")
- replicate(@users_db_name, db_name)
-
- jchris_user_doc = Map.put(jchris_user_doc, "_rev", jchris_rev)
-
- jchris_user_doc2 = Map.put(jchris_user_doc, "foo", "bar")
-
- save_as(@users_db_name, jchris_user_doc2, use_session: session)
- save_as(@users_db_name, jchris_user_doc, use_session: session, expect_response: 409)
-
- # then in the other
- jchris_user_doc3 = Map.put(jchris_user_doc, "foo", "barrrr")
- save_as(db_name, jchris_user_doc3, use_session: session)
- replicate(db_name, @users_db_name)
- # now we should have a conflict
-
- resp =
- Couch.get(
- "/#{@users_db_name}/#{jchris_user_doc3["_id"]}",
- query: [conflicts: true]
- )
-
- assert length(resp.body["_conflicts"]) == 1
- jchris_with_conflict = resp.body
-
- logout(session)
-
- # wait for auth_cache invalidation
- retry_until(
- fn ->
- resp =
- Couch.get(
- "/_session",
- headers: [
- authorization: "Basic #{:base64.encode("jchris@apache.org:funnybone")}"
- ]
- )
-
- assert resp.body["error"] == "unauthorized"
- assert String.contains?(resp.body["reason"], "conflict")
- resp
- end,
- 500,
- 20_000
- )
-
- # You can delete a user doc
- session = login("jan", "apple")
- info = Couch.Session.info(session)
- assert Enum.member?(info["userCtx"]["roles"], "_admin")
-
- resp =
- Couch.delete(
- "/#{@users_db_name}/#{jchris_with_conflict["_id"]}",
- query: [rev: jchris_with_conflict["_rev"]],
- headers: [
- Cookie: session.cookie,
- "X-CouchDB-www-Authenticate": "Cookie"
- ]
- )
-
- assert resp.body["ok"]
-
- # you can't change a doc's type from "user"
- resp =
- Couch.get(
- "/#{@users_db_name}/#{jchris_user_doc["_id"]}",
- headers: [
- Cookie: session.cookie,
- "X-CouchDB-www-Authenticate": "Cookie"
- ]
- )
-
- assert resp.status_code == 200
-
- jchris_user_doc = Map.replace!(resp.body, "type", "not user")
-
- save_as(
- @users_db_name,
- jchris_user_doc,
- use_session: session,
- expect_response: 403,
- error_message: "forbidden",
- error_reason: "doc.type must be user"
- )
-
- # "roles" must be an array
- jchris_user_doc =
- jchris_user_doc
- |> Map.replace!("type", "user")
- |> Map.replace!("roles", "not an array")
-
- save_as(
- @users_db_name,
- jchris_user_doc,
- use_session: session,
- expect_response: 403,
- error_message: "forbidden",
- error_reason: "doc.roles must be an array"
- )
-
- # "roles" must be and array of strings
- jchris_user_doc = Map.replace!(jchris_user_doc, "roles", [12])
-
- save_as(
- @users_db_name,
- jchris_user_doc,
- use_session: session,
- expect_response: 403,
- error_message: "forbidden",
- error_reason: "doc.roles can only contain strings"
- )
-
- # "roles" must exist
- jchris_user_doc = Map.drop(jchris_user_doc, ["roles"])
-
- save_as(
- @users_db_name,
- jchris_user_doc,
- use_session: session,
- expect_response: 403,
- error_message: "forbidden",
- error_reason: "doc.roles must exist"
- )
-
- # character : is not allowed in usernames
- joe_user_doc =
- prepare_user_doc([
- {:name, "joe:erlang"},
- {:password, "querty"}
- ])
-
- save_as(
- @users_db_name,
- joe_user_doc,
- use_session: session,
- expect_response: 403,
- error_message: "forbidden",
- error_reason: "Character `:` is not allowed in usernames."
- )
-
- # test that you can log in as a user with a password starting with :
- joe_user_doc =
- prepare_user_doc([
- {:name, "foo@example.org"},
- {:password, ":bar"}
- ])
-
- {:ok, _} = create_doc(@users_db_name, joe_user_doc)
- logout(session)
-
- resp =
- Couch.get(
- "/_session",
- headers: [authorization: "Basic #{:base64.encode("foo@example.org::bar")}"]
- )
-
- assert resp.body["userCtx"]["name"] == "foo@example.org"
- end
-
- test "users password requirements", _context do
- set_config({
- "couch_httpd_auth",
- "password_regexp",
- Enum.join(
- [
- "[{\".{10,}\"},", # 10 chars
- "{\"[A-Z]+\", \"Requirement 2.\"},", # a uppercase char
- "{\"[a-z]+\", \"\"},", # a lowercase char
- "{\"\\\\d+\", \"Req 4.\"},", # A number
- "\"[!\.,\(\)]+\"]" # A special char
- ],
- " "
- )
- })
-
- session = login("jan", "apple")
-
- # With a password that doesn't conform to any requirement.
- # Requirement doesn't have a reason text.
- jchris_user_doc =
- prepare_user_doc([
- {:name, "jchris@apache.org"},
- {:password, "funnybone"}
- ])
- save_as(
- @users_db_name,
- jchris_user_doc,
- use_session: session,
- expect_response: 400,
- error_message: "bad_request",
- error_reason: "Password does not conform to requirements."
- )
-
- # With a password that matches the first requirement.
- # Requirement does have a reason text.
- jchris_user_doc2 = Map.put(jchris_user_doc, "password", "funnnnnybone")
- save_as(
- @users_db_name,
- jchris_user_doc2,
- use_session: session,
- expect_response: 400,
- error_message: "bad_request",
- error_reason: "Password does not conform to requirements. Requirement 2."
- )
-
- # With a password that matches the first two requirements.
- # Requirement does have an empty string as reason text.
- jchris_user_doc3 = Map.put(jchris_user_doc, "password", "FUNNNNNYBONE")
- save_as(
- @users_db_name,
- jchris_user_doc3,
- use_session: session,
- expect_response: 400,
- error_message: "bad_request",
- error_reason: "Password does not conform to requirements."
- )
-
- # With a password that matches the first three requirements.
- # Requirement does have a reason text.
- jchris_user_doc4 = Map.put(jchris_user_doc, "password", "funnnnnyBONE")
- save_as(
- @users_db_name,
- jchris_user_doc4,
- use_session: session,
- expect_response: 400,
- error_message: "bad_request",
- error_reason: "Password does not conform to requirements. Req 4."
- )
-
- # With a password that matches all but the last requirement.
- # Requirement does have a reason text.
- jchris_user_doc5 = Map.put(jchris_user_doc, "password", "funnnnnyB0N3")
- save_as(
- @users_db_name,
- jchris_user_doc5,
- use_session: session,
- expect_response: 400,
- error_message: "bad_request",
- error_reason: "Password does not conform to requirements."
- )
-
- # With a password that matches all requirements.
- jchris_user_doc6 = Map.put(jchris_user_doc, "password", "funnnnnyB0N3!")
- save_as(@users_db_name, jchris_user_doc6, use_session: session, expect_response: 201)
-
- # with a non-list value
- set_config({
- "couch_httpd_auth",
- "password_regexp",
- "{{\".{10,}\"}}"
- })
-
- joe_user_doc =
- prepare_user_doc([
- {:name, "joe_erlang"},
- {:password, "querty"}
- ])
-
- save_as(
- @users_db_name,
- joe_user_doc,
- use_session: session,
- expect_response: 403,
- error_message: "forbidden",
- error_reason: "Server cannot hash passwords at this time."
- )
-
- # Incorrect syntax
- set_config({
- "couch_httpd_auth",
- "password_regexp",
- "[{\".{10,}\"]"
- })
-
- save_as(
- @users_db_name,
- joe_user_doc,
- use_session: session,
- expect_response: 403,
- error_message: "forbidden",
- error_reason: "Server cannot hash passwords at this time."
- )
- end
-end
diff --git a/test/elixir/test/utf8_test.exs b/test/elixir/test/utf8_test.exs
deleted file mode 100644
index 6b1c1cea9..000000000
--- a/test/elixir/test/utf8_test.exs
+++ /dev/null
@@ -1,65 +0,0 @@
-defmodule UTF8Test do
- use CouchTestCase
-
- @moduletag :utf8
-
- @moduledoc """
- Test CouchDB UTF8 support
- This is a port of the utf8.js test suite
- """
-
- @tag :with_db
- test "UTF8 support", context do
- db_name = context[:db_name]
- texts = [
- "1. Ascii: hello",
- "2. Russian: На берегу пустынных волн",
- "3. Math: ∮ E⋅da = Q, n → ∞, ∑ f(i) = ∏ g(i),",
- "4. Geek: STARGΛ̊TE SG-1",
- "5. Braille: ⡌⠁⠧⠑ ⠼⠁⠒ ⡍⠜⠇⠑⠹⠰⠎ ⡣⠕⠌",
- "6. null \u0000 byte",
- ]
-
- texts
- |> Enum.with_index()
- |> Enum.each(fn {string, index} ->
- status = Couch.post("/#{db_name}", query: [w: 3], body: %{"_id" => Integer.to_string(index), "text" => string}).status_code
- assert status in [201, 202]
- end)
-
- texts
- |> Enum.with_index()
- |> Enum.each(fn {_string, index} ->
- resp = Couch.get("/#{db_name}/#{index}")
- %{"_id" => id, "text" => text} = resp.body
- assert resp.status_code == 200
- assert Enum.at(texts, String.to_integer(id)) === text
- end)
-
- design_doc = %{
- :_id => "_design/temp_utf8_support",
- :language => "javascript",
- :views => %{
- :view => %{
- :map => "function(doc) { emit(null, doc.text) }"
- }
- }
- }
-
- design_resp =
- Couch.put(
- "/#{db_name}/_design/temp_utf8_support",
- body: design_doc,
- query: %{w: 3}
- )
-
- assert design_resp.status_code in [201, 202]
-
- %{"rows" => values} = Couch.get("/#{db_name}/_design/temp_utf8_support/_view/view").body
- values
- |> Enum.with_index()
- |> Enum.each(fn {%{"value" => value}, index} ->
- assert Enum.at(texts, index) === value
- end)
- end
-end
diff --git a/test/elixir/test/uuids_test.exs b/test/elixir/test/uuids_test.exs
deleted file mode 100644
index bb9369b80..000000000
--- a/test/elixir/test/uuids_test.exs
+++ /dev/null
@@ -1,96 +0,0 @@
-defmodule UUIDsTest do
- use CouchTestCase
-
- @moduledoc """
- Test CouchDB UUIDs API
- This is a port of the uuids.js suite
- """
-
- test "cache busting headers are set" do
- resp = Couch.get("/_uuids")
- assert resp.status_code == 200
- assert Regex.match?(~r/no-cache/, resp.headers["Cache-Control"])
- assert resp.headers["Pragma"] == "no-cache"
- assert String.length(resp.headers["ETag"]) > 0
- end
-
- test "can return single uuid" do
- resp = Couch.get("/_uuids")
- assert resp.status_code == 200
- [uuid1] = resp.body["uuids"]
-
- resp = Couch.get("/_uuids", query: %{:count => 1})
- assert resp.status_code == 200
- [uuid2] = resp.body["uuids"]
-
- assert uuid1 != uuid2
- end
-
- test "no duplicates in 1,000 UUIDs" do
- resp = Couch.get("/_uuids", query: %{:count => 1000})
- assert resp.status_code == 200
- uuids = resp.body["uuids"]
-
- assert length(Enum.uniq(uuids)) == length(uuids)
- end
-
- test "Method Not Allowed error on POST" do
- resp = Couch.post("/_uuids", query: %{:count => 1000})
- assert resp.status_code == 405
- end
-
- test "Bad Request error when exceeding max UUID count" do
- resp = Couch.get("/_uuids", query: %{:count => 1001})
- assert resp.status_code == 400
- end
-
- @tag config: [
- {"uuids", "algorithm", "sequential"}
- ]
- test "sequential uuids are sequential" do
- resp = Couch.get("/_uuids", query: %{:count => 1000})
- assert resp.status_code == 200
-
- Enum.reduce(resp.body["uuids"], fn curr, acc ->
- assert String.length(curr) == 32
- assert acc < curr
- curr
- end)
- end
-
- @tag config: [
- {"uuids", "algorithm", "utc_random"}
- ]
- test "utc_random uuids are roughly random" do
- resp = Couch.get("/_uuids", query: %{:count => 1000})
- assert resp.status_code == 200
- uuids = resp.body["uuids"]
-
- assert String.length(Enum.at(uuids, 1)) == 32
-
- # Assert no collisions
- assert length(Enum.uniq(uuids)) == length(uuids)
-
- # Assert rough ordering of UUIDs
- u1 = String.slice(Enum.at(uuids, 1), 0..13)
- u2 = String.slice(Enum.at(uuids, -1), 0..13)
- assert u1 < u2
- end
-
- @utc_id_suffix "frog"
- @tag config: [
- {"uuids", "algorithm", "utc_id"},
- {"uuids", "utc_id_suffix", @utc_id_suffix}
- ]
- test "utc_id uuids are correct" do
- resp = Couch.get("/_uuids", query: %{:count => 10})
- assert resp.status_code == 200
-
- Enum.reduce(resp.body["uuids"], fn curr, acc ->
- assert String.length(curr) == 14 + String.length(@utc_id_suffix)
- assert String.slice(curr, 14..-1) == @utc_id_suffix
- assert curr > acc
- curr
- end)
- end
-end
diff --git a/test/elixir/test/view_collation_raw_test.exs b/test/elixir/test/view_collation_raw_test.exs
deleted file mode 100644
index ee272d72e..000000000
--- a/test/elixir/test/view_collation_raw_test.exs
+++ /dev/null
@@ -1,159 +0,0 @@
-defmodule ViewCollationRawTest do
- use CouchTestCase
-
- @moduledoc """
- Test CouchDB View Raw Collation Behavior
- This is a port of the view_collation_raw.js suite
- """
-
- @values [
- # Then numbers
- 1,
- 2,
- 3,
- 4,
- false,
- :null,
- true,
-
- # Then objects, compared one key-value pair at a time until a difference is found.
- # Larger objects sort after their subset objects
- {[a: 1]},
- {[a: 2]},
- {[b: 1]},
- {[b: 2]},
- # Member order does matter for collation
- {[b: 2, a: 1]},
- {[b: 2, c: 2]},
-
- # Then arrays, compared element by element until different.
- # Longer arrays sort after their prefixes
- ["a"],
- ["b"],
- ["b", "c"],
- ["b", "c", "a"],
- ["b", "d"],
- ["b", "d", "e"],
-
- # Then text, case sensitive
- "A",
- "B",
- "a",
- "aa",
- "b",
- "ba",
- "bb"
- ]
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- {docs, _} =
- Enum.flat_map_reduce(@values, 1, fn value, idx ->
- doc = %{:_id => Integer.to_string(idx), :foo => value}
- {[doc], idx + 1}
- end)
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs})
- Enum.each(resp.body, &assert(&1["ok"]))
-
- map_fun = "function(doc) { emit(doc.foo, null); }"
-
- map_doc = %{
- :language => "javascript",
- :views => %{:test => %{:map => map_fun, :options => %{:collation => "raw"}}}
- }
-
- resp = Couch.put("/#{db_name}/_design/test", body: map_doc)
- assert resp.body["ok"]
-
- {:ok, [db_name: db_name]}
- end
-
- test "ascending collation order", context do
- retry_until(fn ->
- resp = Couch.get(url(context))
- pairs = Enum.zip(resp.body["rows"], @values)
-
- Enum.each(pairs, fn {row, value} ->
- assert row["key"] == convert(value)
- end)
- end)
- end
-
- test "raw semantics in key ranges", context do
- retry_until(fn ->
- resp =
- Couch.get(url(context),
- query: %{"startkey" => :jiffy.encode("Z"), "endkey" => :jiffy.encode("a")}
- )
-
- assert length(resp.body["rows"]) == 1
- assert Enum.at(resp.body["rows"], 0)["key"] == "a"
- end)
- end
-
- test "descending collation order", context do
- retry_until(fn ->
- resp = Couch.get(url(context), query: %{"descending" => "true"})
- pairs = Enum.zip(resp.body["rows"], Enum.reverse(@values))
-
- Enum.each(pairs, fn {row, value} ->
- assert row["key"] == convert(value)
- end)
- end)
- end
-
- test "key query option", context do
- Enum.each(@values, fn value ->
- retry_until(fn ->
- resp = Couch.get(url(context), query: %{:key => :jiffy.encode(value)})
- assert length(resp.body["rows"]) == 1
- assert Enum.at(resp.body["rows"], 0)["key"] == convert(value)
- end)
- end)
- end
-
- test "inclusive_end=true", context do
- query = %{:endkey => :jiffy.encode("b"), :inclusive_end => true}
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "b"
-
- query = Map.put(query, :descending, true)
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "b"
- end
-
- test "inclusive_end=false", context do
- query = %{:endkey => :jiffy.encode("b"), :inclusive_end => false}
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "aa"
-
- query = Map.put(query, :descending, true)
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "ba"
-
- query = %{
- :endkey => :jiffy.encode("b"),
- :endkey_docid => 10,
- :inclusive_end => false
- }
-
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "aa"
-
- query = Map.put(query, :endkey_docid, 11)
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "aa"
- end
-
- def url(context) do
- "/#{context[:db_name]}/_design/test/_view/test"
- end
-
- def convert(value) do
- :jiffy.decode(:jiffy.encode(value), [:return_maps])
- end
-end
diff --git a/test/elixir/test/view_collation_test.exs b/test/elixir/test/view_collation_test.exs
deleted file mode 100644
index 7563ba416..000000000
--- a/test/elixir/test/view_collation_test.exs
+++ /dev/null
@@ -1,144 +0,0 @@
-defmodule ViewCollationTest do
- use CouchTestCase
-
- @moduledoc """
- Test CouchDB View Collation Behavior
- This is a port of the view_collation.js suite
- """
-
- @values [
- # Special values sort before all other types
- :null,
- false,
- true,
-
- # Then numbers
- 1,
- 2,
- 3.0,
- 4,
-
- # Then text, case sensitive
- "a",
- "A",
- "aa",
- "b",
- "B",
- "ba",
- "bb",
-
- # Then arrays, compared element by element until different.
- # Longer arrays sort after their prefixes
- ["a"],
- ["b"],
- ["b", "c"],
- ["b", "c", "a"],
- ["b", "d"],
- ["b", "d", "e"],
-
- # Then objects, compared one key-value pair at a time until a difference is found.
- # Larger objects sort after their subset objects
- {[a: 1]},
- {[a: 2]},
- {[b: 1]},
- {[b: 2]},
- # Member order does matter for collation
- {[b: 2, a: 1]},
- {[b: 2, c: 2]}
- ]
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- {docs, _} =
- Enum.flat_map_reduce(@values, 1, fn value, idx ->
- doc = %{:_id => Integer.to_string(idx), :foo => value}
- {[doc], idx + 1}
- end)
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs})
- Enum.each(resp.body, &assert(&1["ok"]))
-
- map_fun = "function(doc) { emit(doc.foo, null); }"
- map_doc = %{:views => %{:foo => %{:map => map_fun}}}
- resp = Couch.put("/#{db_name}/_design/foo", body: map_doc)
- assert resp.body["ok"]
-
- {:ok, [db_name: db_name]}
- end
-
- test "ascending collation order", context do
- retry_until(fn ->
- resp = Couch.get(url(context))
- pairs = Enum.zip(resp.body["rows"], @values)
-
- Enum.each(pairs, fn {row, value} ->
- assert row["key"] == convert(value)
- end)
- end)
- end
-
- test "descending collation order", context do
- retry_until(fn ->
- resp = Couch.get(url(context), query: %{"descending" => "true"})
- pairs = Enum.zip(resp.body["rows"], Enum.reverse(@values))
-
- Enum.each(pairs, fn {row, value} ->
- assert row["key"] == convert(value)
- end)
- end)
- end
-
- test "key query option", context do
- Enum.each(@values, fn value ->
- retry_until(fn ->
- resp = Couch.get(url(context), query: %{:key => :jiffy.encode(value)})
- assert length(resp.body["rows"]) == 1
- assert Enum.at(resp.body["rows"], 0)["key"] == convert(value)
- end)
- end)
- end
-
- test "inclusive_end=true", context do
- query = %{:endkey => :jiffy.encode("b"), :inclusive_end => true}
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "b"
-
- query = Map.put(query, :descending, true)
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "b"
- end
-
- test "inclusive_end=false", context do
- query = %{:endkey => :jiffy.encode("b"), :inclusive_end => false}
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "aa"
-
- query = Map.put(query, :descending, true)
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "B"
-
- query = %{
- :endkey => :jiffy.encode("b"),
- :endkey_docid => 11,
- :inclusive_end => false
- }
-
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "aa"
-
- query = Map.put(query, :endkey_docid, 12)
- resp = Couch.get(url(context), query: query)
- assert Enum.at(resp.body["rows"], -1)["key"] == "b"
- end
-
- def url(context) do
- "/#{context[:db_name]}/_design/foo/_view/foo"
- end
-
- def convert(value) do
- :jiffy.decode(:jiffy.encode(value), [:return_maps])
- end
-end
diff --git a/test/elixir/test/view_compaction_test.exs b/test/elixir/test/view_compaction_test.exs
deleted file mode 100644
index d2bf060ba..000000000
--- a/test/elixir/test/view_compaction_test.exs
+++ /dev/null
@@ -1,105 +0,0 @@
-defmodule ViewCompactionTest do
- use CouchTestCase
-
- @moduledoc """
- Test CouchDB View Compaction Behavior
- This is a port of the view_compaction.js suite
- """
- @num_docs 1000
-
- @ddoc %{
- _id: "_design/foo",
- language: "javascript",
- views: %{
- view1: %{
- map: "function(doc) { emit(doc._id, doc.value) }"
- },
- view2: %{
- map:
- "function(doc) { if (typeof(doc.integer) === 'number') {emit(doc._id, doc.integer);} }",
- reduce: "function(keys, values, rereduce) { return sum(values); }"
- }
- }
- }
-
- defp bulk_save_for_update(db_name, docs) do
- resp = bulk_save(db_name, docs)
- revs = resp.body
-
- Enum.map(docs, fn m ->
- rev = Enum.at(revs, String.to_integer(m["_id"]))["rev"]
-
- m
- |> Map.put("_rev", rev)
- |> Map.update!("integer", &(&1 + 1))
- end)
- end
-
- @tag :with_db
- test "view compaction", context do
- db_name = context[:db_name]
- create_doc(db_name, @ddoc)
-
- docs = make_docs(0..(@num_docs - 1))
- docs = bulk_save_for_update(db_name, docs)
-
- resp = view(db_name, "foo/view1")
- assert length(resp.body["rows"]) == @num_docs
-
- resp = view(db_name, "foo/view2")
- assert length(resp.body["rows"]) == 1
-
- resp = Couch.get("/#{db_name}/_design/foo/_info")
- assert resp.body["view_index"]["update_seq"] == @num_docs + 1
-
- docs = bulk_save_for_update(db_name, docs)
-
- resp = view(db_name, "foo/view1")
- assert length(resp.body["rows"]) == @num_docs
-
- resp = view(db_name, "foo/view2")
- assert length(resp.body["rows"]) == 1
-
- resp = Couch.get("/#{db_name}/_design/foo/_info")
- assert resp.body["view_index"]["update_seq"] == 2 * @num_docs + 1
-
- bulk_save(db_name, docs)
- resp = view(db_name, "foo/view1")
- assert length(resp.body["rows"]) == @num_docs
-
- resp = view(db_name, "foo/view2")
- assert length(resp.body["rows"]) == 1
-
- resp = Couch.get("/#{db_name}/_design/foo/_info")
- assert resp.body["view_index"]["update_seq"] == 3 * @num_docs + 1
-
- disk_size_before_compact = resp.body["view_index"]["sizes"]["file"]
- data_size_before_compact = resp.body["view_index"]["sizes"]["active"]
-
- assert is_integer(disk_size_before_compact)
- assert data_size_before_compact < disk_size_before_compact
-
- resp = Couch.post("/#{db_name}/_compact/foo")
- assert resp.body["ok"] == true
-
- retry_until(fn ->
- resp = Couch.get("/#{db_name}/_design/foo/_info")
- resp.body["view_index"]["compact_running"] == false
- end)
-
- resp = view(db_name, "foo/view1")
- assert length(resp.body["rows"]) == @num_docs
-
- resp = view(db_name, "foo/view2")
- assert length(resp.body["rows"]) == 1
-
- resp = Couch.get("/#{db_name}/_design/foo/_info")
- assert resp.body["view_index"]["update_seq"] == 3 * @num_docs + 1
-
- disk_size_after_compact = resp.body["view_index"]["sizes"]["file"]
- data_size_after_compact = resp.body["view_index"]["sizes"]["active"]
- assert disk_size_after_compact < disk_size_before_compact
- assert is_integer(data_size_after_compact)
- assert data_size_after_compact < disk_size_after_compact
- end
-end
diff --git a/test/elixir/test/view_conflicts_test.exs b/test/elixir/test/view_conflicts_test.exs
deleted file mode 100644
index 9261b1ef0..000000000
--- a/test/elixir/test/view_conflicts_test.exs
+++ /dev/null
@@ -1,74 +0,0 @@
-defmodule ViewConflictsTest do
- use CouchTestCase
-
- @moduletag kind: :single_node
-
- setup_all do
- db_name_a = random_db_name()
- db_name_b = random_db_name()
-
- {:ok, _} = create_db(db_name_a)
- {:ok, _} = create_db(db_name_b)
-
- on_exit(fn -> delete_db(db_name_a) end)
- on_exit(fn -> delete_db(db_name_b) end)
- {:ok, [db_name_a: db_name_a, db_name_b: db_name_b]}
- end
-
- test "view conflict", context do
- db_name_a = context[:db_name_a]
- db_name_b = context[:db_name_b]
-
- create_doc(db_name_a, %{_id: "foo", bar: 42})
- replicate(db_name_a, db_name_b)
-
- resp = Couch.get("/#{db_name_b}/foo")
-
- docb =
- resp.body
- |> Map.put("bar", 43)
-
- docb = save(db_name_b, docb)
-
- resp = Couch.get("/#{db_name_a}/foo")
-
- doca =
- resp.body
- |> Map.put("bar", 41)
-
- doca = save(db_name_a, doca)
-
- replicate(db_name_a, db_name_b)
-
- resp = Couch.get("/#{db_name_b}/foo", query: [conflicts: true])
- doc = resp.body
- assert length(resp.body["_conflicts"]) == 1
-
- conflict_rev = Enum.at(resp.body["_conflicts"], 0)
-
- case doc["bar"] do
- 41 -> assert conflict_rev == docb["_rev"]
- 43 -> assert conflict_rev == doca["_rev"]
- _ -> assert false
- end
-
- map_fun = """
- function(doc) {
- if (doc._conflicts) {
- emit(doc._id, doc._conflicts);
- }
- }
- """
-
- results = query(db_name_b, map_fun)
-
- rev =
- results
- |> Map.get("rows")
- |> Enum.at(0)
- |> Map.get("value")
- |> Enum.at(0)
-
- assert conflict_rev == rev
- end
-end
diff --git a/test/elixir/test/view_errors_test.exs b/test/elixir/test/view_errors_test.exs
deleted file mode 100644
index 3ac0582ea..000000000
--- a/test/elixir/test/view_errors_test.exs
+++ /dev/null
@@ -1,290 +0,0 @@
-defmodule ViewErrorsTest do
- use CouchTestCase
-
- @moduletag kind: :single_node
-
- @document %{integer: 1, string: "1", array: [1, 2, 3]}
-
- @tag :with_db
- test "emit undefined key results as null", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- map_fun = """
- function(doc) {
- emit(doc.undef, null);
- }
- """
-
- # emitting a key value that is undefined should result in that row
- # being included in the view results as null
- results = query(db_name, map_fun)
- assert results["total_rows"] == 1
- assert Enum.at(results["rows"], 0)["key"] == :null
- end
-
- @tag :with_db
- test "exception in map function", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- map_fun = """
- function(doc) {
- doc.undef(); // throws an error
- }
- """
-
- # if a view function throws an exception, its results are not included in
- # the view index, but the view does not itself raise an error
- results = query(db_name, map_fun)
- assert results["total_rows"] == 0
- end
-
- @tag :with_db
- test "emit undefined value results as null", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- map_fun = """
- function(doc) {
- emit([doc._id, doc.undef], null);
- }
- """
-
- # if a view function includes an undefined value in the emitted key or
- # value, it is treated as null
- results = query(db_name, map_fun)
- assert results["total_rows"] == 1
-
- key =
- results["rows"]
- |> Enum.at(0)
- |> Map.get("key")
- |> Enum.at(1)
-
- assert key == :null
- end
-
- @tag :with_db
- test "query view with invalid params", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- body = %{
- language: "javascript",
- map: "function(doc){emit(doc.integer)}"
- }
-
- # querying a view with invalid params should give a reasonable error message
- resp =
- Couch.post("/#{db_name}/_all_docs?startkey=foo",
- headers: ["Content-Type": "application/json"],
- body: body
- )
-
- assert resp.body["error"] == "bad_request"
-
- resp =
- Couch.post("/#{db_name}/_all_docs",
- headers: ["Content-Type": "application/x-www-form-urlencoded"],
- body: body
- )
-
- assert resp.status_code == 415
- end
-
- @tag :with_db
- test "query parse error", context do
- db_name = context[:db_name]
-
- map_fun = """
- function(doc) {
- emit(doc.integer, doc.integer);
- }
- """
-
- ddoc_name = create_view(db_name, map_fun)
-
- resp = Couch.get("/#{db_name}/#{ddoc_name}/_view/view", query: [group: true])
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
-
- map_fun = "function() {emit(null, null)}"
- ddoc_name = create_view(db_name, map_fun)
-
- resp =
- Couch.get("/#{db_name}/#{ddoc_name}/_view/view", query: [startkey: 2, endkey: 1])
-
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
- assert String.contains?(resp.body["reason"], "No rows can match")
-
- design_doc = %{
- _id: "_design/test",
- language: "javascript",
- views: %{
- no_reduce: %{map: "function(doc) {emit(doc._id, null);}"},
- with_reduce: %{
- map: "function (doc) {emit(doc.integer, doc.integer)};",
- reduce: "function (keys, values) { return sum(values); };"
- }
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc)
-
- resp = Couch.get("/#{db_name}/_design/test/_view/no_reduce", query: [group: true])
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
-
- resp = Couch.get("/#{db_name}/_design/test/_view/no_reduce", query: [group_level: 1])
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
-
- resp = Couch.get("/#{db_name}/_design/test/_view/no_reduce", query: [reduce: true])
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
-
- resp = Couch.get("/#{db_name}/_design/test/_view/no_reduce", query: [reduce: false])
- assert resp.status_code == 200
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/with_reduce",
- query: [group: true, reduce: false]
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/with_reduce",
- query: [group_level: 1, reduce: false]
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
- end
-
- @tag :with_db
- test "infinite loop", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- design_doc3 = %{
- _id: "_design/infinite",
- language: "javascript",
- views: %{
- infinite_loop: %{
- map: "function(doc) {while(true){emit(doc,doc);}};"
- }
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc3)
-
- resp = Couch.get("/#{db_name}/_design/infinite/_view/infinite_loop")
- assert resp.status_code == 500
- # This test has two different races. The first is whether
- # the while loop exhausts the JavaScript RAM limits before
- timing out. The second is a race between which of two timeouts
- # fires first. The first timeout is the couch_os_process
- # waiting for data back from couchjs. The second is the
- # gen_server call to couch_os_process.
- assert resp.body["error"] == "os_process_error" or resp.body["error"] == "timeout"
- end
-
- @tag :with_db
- test "error responses for invalid multi-get bodies", context do
- db_name = context[:db_name]
-
- design_doc = %{
- _id: "_design/test",
- language: "javascript",
- views: %{
- no_reduce: %{map: "function(doc) {emit(doc._id, null);}"},
- with_reduce: %{
- map: "function (doc) {emit(doc.integer, doc.integer)};",
- reduce: "function (keys, values) { return sum(values); };"
- }
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc)
-
- resp =
- Couch.post("/#{db_name}/_design/test/_view/no_reduce",
- body: "[]"
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == "Request body must be a JSON object"
-
- resp =
- Couch.post("/#{db_name}/_design/test/_view/no_reduce",
- body: %{keys: 1}
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "bad_request"
- assert resp.body["reason"] == "`keys` member must be an array."
- end
-
- @tag :with_db
- test "reduce overflow error", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- design_doc2 = %{
- _id: "_design/testbig",
- language: "javascript",
- views: %{
- reduce_too_big: %{
- map: "function (doc) {emit(doc.integer, doc.integer)};",
- reduce:
- "function (keys, values) { var chars = []; for (var i=0; i < 1000; i++) {chars.push('wazzap');};return chars; };"
- }
- }
- }
-
- {:ok, _} = create_doc(db_name, design_doc2)
-
- resp = Couch.get("/#{db_name}/_design/testbig/_view/reduce_too_big")
- assert resp.status_code == 200
- # if the reduce output grows too fast, an overflow error is returned for that row
- assert Enum.at(resp.body["rows"], 0)["error"] == "reduce_overflow_error"
- end
-
- @tag :with_db
- test "temporary view should give error message", context do
- db_name = context[:db_name]
-
- resp =
- Couch.post("/#{db_name}/_temp_view",
- headers: ["Content-Type": "application/json"],
- body: %{
- language: "javascript",
- map: "function(doc){emit(doc.integer)}"
- }
- )
-
- assert resp.status_code == 410
- assert resp.body["error"] == "gone"
- assert resp.body["reason"] == "Temporary views are not supported in CouchDB"
- end
-
- defp create_view(db_name, map_fun) do
- ddoc_name = "_design/temp_#{System.unique_integer([:positive])}"
-
- ddoc = %{
- _id: ddoc_name,
- language: "javascript",
- views: %{
- view: %{map: map_fun}
- }
- }
-
- {:ok, _} = create_doc(db_name, ddoc)
- ddoc_name
- end
-end
diff --git a/test/elixir/test/view_include_docs_test.exs b/test/elixir/test/view_include_docs_test.exs
deleted file mode 100644
index a77753058..000000000
--- a/test/elixir/test/view_include_docs_test.exs
+++ /dev/null
@@ -1,263 +0,0 @@
-defmodule ViewIncludeDocsTest do
- use CouchTestCase
-
- @moduletag kind: :single_node
-
- @ddoc %{
- _id: "_design/test",
- language: "javascript",
- views: %{
- all_docs: %{
- map: "function(doc) { emit(doc.integer, doc.string) }"
- },
- with_prev: %{
- map:
- "function(doc){if(doc.prev) emit(doc._id,{'_rev':doc.prev}); else emit(doc._id,{'_rev':doc._rev});}"
- },
- with_id: %{
- map:
- "function(doc) {if(doc.link_id) { var value = {'_id':doc.link_id}; if (doc.link_rev) {value._rev = doc.link_rev}; emit(doc._id, value);}};"
- },
- summate: %{
- map:
- "function (doc) { if (typeof doc.integer === 'number') {emit(doc.integer, doc.integer)};}",
- reduce: "function (keys, values) { return sum(values); };"
- }
- }
- }
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- bulk_save(db_name, make_docs(0..99))
-
- create_doc(db_name, @ddoc)
-
- {:ok, [db_name: db_name]}
- end
-
- test "include docs in view", context do
- db_name = context[:db_name]
- resp = view(db_name, "test/all_docs", %{include_docs: true, limit: 2})
- assert length(resp.body["rows"]) == 2
- row0 = Enum.at(resp.body["rows"], 0)
- assert row0["id"] == "0"
- assert row0["doc"]["_id"] == "0"
- row1 = Enum.at(resp.body["rows"], 1)
- assert row1["id"] == "1"
- assert row1["doc"]["_id"] == "1"
-
- resp = view(db_name, "test/all_docs", %{include_docs: true}, [29, 74])
- assert length(resp.body["rows"]) == 2
- row0 = Enum.at(resp.body["rows"], 0)
- assert row0["doc"]["_id"] == "29"
- row1 = Enum.at(resp.body["rows"], 1)
- assert row1["doc"]["integer"] == 74
- end
-
- test "include docs in all_docs", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_all_docs",
- query: [limit: 2, skip: 1, include_docs: true]
- )
-
- assert length(resp.body["rows"]) == 2
- row0 = Enum.at(resp.body["rows"], 0)
- row1 = Enum.at(resp.body["rows"], 1)
- assert row0["doc"]["integer"] == 1
- assert row1["doc"]["integer"] == 10
-
- resp =
- Couch.post("/#{db_name}/_all_docs",
- query: [include_docs: true],
- headers: ["Content-Type": "application/json"],
- body: %{"keys" => ["not_a_doc"]}
- )
-
- assert length(resp.body["rows"]) == 1
- row0 = Enum.at(resp.body["rows"], 0)
- assert not Map.has_key?(row0, "doc")
-
- resp =
- Couch.post("/#{db_name}/_all_docs",
- query: [include_docs: true],
- headers: ["Content-Type": "application/json"],
- body: %{"keys" => ["1", "foo"]}
- )
-
- assert length(resp.body["rows"]) == 2
- row0 = Enum.at(resp.body["rows"], 0)
- row1 = Enum.at(resp.body["rows"], 1)
- assert row0["doc"]["integer"] == 1
- assert not Map.has_key?(row1, "doc")
-
- resp =
- Couch.get("/#{db_name}/_all_docs",
- query: [limit: 0, include_docs: true]
- )
-
- assert Enum.empty?(resp.body["rows"])
- end
-
- test "no reduce support", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate", query: [include_docs: true])
-
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
- end
-
- test "Reduce support when reduce=false", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate",
- query: [reduce: false, include_docs: true]
- )
-
- assert length(resp.body["rows"]) == 100
- end
-
- test "Not an error with include_docs=false&reduce=true", context do
- db_name = context[:db_name]
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate",
- query: [reduce: true, include_docs: false]
- )
-
- assert length(resp.body["rows"]) == 1
- row0 = Enum.at(resp.body["rows"], 0)
- assert row0["value"] == 4950
- end
-
- @tag :with_db
- test "link to another doc from a value", context do
- db_name = context[:db_name]
-
- bulk_save(db_name, make_docs(0..99))
- create_doc(db_name, @ddoc)
-
- doc_link = %{
- _id: "link-to-10",
- link_id: "10"
- }
-
- {:ok, _} = create_doc(db_name, doc_link)
- resp = view(db_name, "test/with_id", %{key: ~s("link-to-10")})
- assert length(resp.body["rows"]) == 1
- row0 = Enum.at(resp.body["rows"], 0)
- assert row0["key"] == "link-to-10"
- assert row0["value"]["_id"] == "10"
-
- resp = view(db_name, "test/with_id", %{key: ~s("link-to-10"), include_docs: true})
- assert length(resp.body["rows"]) == 1
- row0 = Enum.at(resp.body["rows"], 0)
- assert row0["value"]["_id"] == "10"
- assert row0["doc"]["_id"] == "10"
- end
-
- @tag :with_db
- test "emitted _rev controls things", context do
- db_name = context[:db_name]
-
- bulk_save(db_name, make_docs(0..99))
- create_doc(db_name, @ddoc)
-
- resp =
- Couch.post("/#{db_name}/_all_docs",
- query: [include_docs: true],
- headers: ["Content-Type": "application/json"],
- body: %{"keys" => ["0"]}
- )
-
- doc_before = Enum.at(resp.body["rows"], 0)["doc"]
-
- resp = Couch.get("/#{db_name}/0")
- assert resp.status_code == 200
- prev = resp.body["_rev"]
-
- doc_after =
- resp.body
- |> Map.put("integer", 100)
- |> Map.put("prev", prev)
-
- saved_doc = save(db_name, doc_after)
-
- resp = Couch.get("/#{db_name}/0")
- assert resp.status_code == 200
- doc_after = resp.body
- assert doc_after["_rev"] == saved_doc["_rev"]
- assert doc_after["_rev"] != doc_after["prev"]
- assert doc_after["integer"] == 100
-
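- # the with_prev view emits a value containing {'_rev': doc.prev}, so include_docs should return that earlier revision of the doc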
- resp = view(db_name, "test/with_prev", %{include_docs: true}, ["0"])
- row0 = Enum.at(resp.body["rows"], 0)["doc"]
- assert row0["_id"] == "0"
- assert row0["_rev"] == doc_before["_rev"]
- assert not Map.has_key?(row0, "prev")
- assert row0["integer"] == 0
- end
-
- test "COUCHDB-549 - include_docs=true with conflicts=true" do
- db_name_a = random_db_name()
- db_name_b = random_db_name()
- create_db(db_name_a)
- create_db(db_name_b)
- on_exit(fn -> delete_db(db_name_a) end)
- on_exit(fn -> delete_db(db_name_b) end)
-
- ddoc = %{
- _id: "_design/mydesign",
- language: "javascript",
- views: %{
- myview: %{
- map: """
- function(doc) {
- emit(doc.value, 1);
- }
- """
- }
- }
- }
-
- {:ok, _} = create_doc(db_name_a, ddoc)
-
- doc1a = %{_id: "foo", value: 1, str: "1"}
- {:ok, _} = create_doc(db_name_a, doc1a)
-
- doc1b = %{_id: "foo", value: 1, str: "666"}
- {:ok, _} = create_doc(db_name_b, doc1b)
-
- doc2 = %{_id: "bar", value: 2, str: "2"}
- {:ok, _} = create_doc(db_name_a, doc2)
-
- replicate(db_name_a, db_name_b)
-
- resp = Couch.get("/#{db_name_b}/foo", query: [conflicts: true])
- assert resp.status_code == 200
- doc1b = resp.body
- assert Map.has_key?(doc1b, "_conflicts")
- assert length(doc1b["_conflicts"]) == 1
- conflict_rev = Enum.at(doc1b["_conflicts"], 0)
-
- resp = Couch.get("/#{db_name_b}/bar", query: [conflicts: true])
- assert resp.status_code == 200
- doc2 = resp.body
- assert not Map.has_key?(doc2, "_conflicts")
-
- resp = view(db_name_b, "mydesign/myview", %{include_docs: true, conflicts: true})
- assert length(resp.body["rows"]) == 2
- row0 = Enum.at(resp.body["rows"], 0)["doc"]
- assert length(row0["_conflicts"]) == 1
- assert Enum.at(row0["_conflicts"], 0) == conflict_rev
- row1 = Enum.at(resp.body["rows"], 1)["doc"]
- assert not Map.has_key?(row1, "_conflicts")
- end
-end
diff --git a/test/elixir/test/view_multi_key_all_docs_test.exs b/test/elixir/test/view_multi_key_all_docs_test.exs
deleted file mode 100644
index d9fa41e23..000000000
--- a/test/elixir/test/view_multi_key_all_docs_test.exs
+++ /dev/null
@@ -1,191 +0,0 @@
-defmodule ViewMultiKeyAllDocsTest do
- use CouchTestCase
-
- @keys ["10", "15", "30", "37", "50"]
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- bulk_save(db_name, make_docs(0..99))
-
- {:ok, [db_name: db_name]}
- end
-
- test "keys in POST body", context do
- db_name = context[:db_name]
-
- resp = all_docs(db_name, nil, @keys)
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == length(@keys)
-
- rows_id = Enum.map(rows, & &1["id"])
- assert rows_id == @keys
- end
-
- test "keys in GET parameters", context do
- db_name = context[:db_name]
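- # when sent as a query parameter, keys must be a JSON-encoded array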
- resp = all_docs(db_name, keys: :jiffy.encode(@keys))
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == length(@keys)
- rows_id = Enum.map(rows, & &1["id"])
- assert rows_id == @keys
- end
-
- test "keys in POST body (limit)", context do
- db_name = context[:db_name]
-
- resp = all_docs(db_name, [limit: 1], @keys)
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == 1
- assert Enum.at(rows, 0)["id"] == Enum.at(@keys, 0)
- end
-
- test "keys in GET parameters (limit)", context do
- db_name = context[:db_name]
- resp = all_docs(db_name, limit: 1, keys: :jiffy.encode(@keys))
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == 1
- assert Enum.at(rows, 0)["id"] == Enum.at(@keys, 0)
- end
-
- test "keys in POST body (skip)", context do
- db_name = context[:db_name]
-
- resp = all_docs(db_name, [skip: 2], @keys)
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == 3
-
- rows_id = Enum.map(rows, & &1["id"])
- assert rows_id == Enum.drop(@keys, 2)
- end
-
- test "keys in GET parameters (skip)", context do
- db_name = context[:db_name]
- resp = all_docs(db_name, skip: 2, keys: :jiffy.encode(@keys))
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == 3
- rows_id = Enum.map(rows, & &1["id"])
- assert rows_id == Enum.drop(@keys, 2)
- end
-
- test "keys in POST body (descending)", context do
- db_name = context[:db_name]
-
- resp = all_docs(db_name, [descending: true], @keys)
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == length(@keys)
-
- rows_id = Enum.map(rows, & &1["id"])
- assert rows_id == Enum.reverse(@keys)
- end
-
- test "keys in GET parameters (descending)", context do
- db_name = context[:db_name]
- resp = all_docs(db_name, descending: true, keys: :jiffy.encode(@keys))
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == length(@keys)
- rows_id = Enum.map(rows, & &1["id"])
- assert rows_id == Enum.reverse(@keys)
- end
-
- test "keys in POST body (descending, skip, limit)", context do
- db_name = context[:db_name]
-
- resp = all_docs(db_name, [descending: "true", skip: 3, limit: 1], @keys)
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == 1
-
- key =
- @keys
- |> Enum.reverse()
- |> Enum.drop(3)
- |> Enum.at(0)
-
- assert Enum.at(rows, 0)["id"] == key
- end
-
- test "keys in GET parameters (descending, skip, limit)", context do
- db_name = context[:db_name]
-
- resp =
- all_docs(db_name, descending: "true", skip: 3, limit: 1, keys: :jiffy.encode(@keys))
-
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == 1
-
- key =
- @keys
- |> Enum.reverse()
- |> Enum.drop(3)
- |> Enum.at(0)
-
- assert Enum.at(rows, 0)["id"] == key
- end
-
- test "POST - get invalid rows when the key doesn't exist", context do
- db_name = context[:db_name]
-
- resp = all_docs(db_name, nil, ["1211", "i_dont_exist", "0"])
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["error"] == "not_found"
- assert not Map.has_key?(Enum.at(rows, 0), "id")
- assert Enum.at(rows, 1)["error"] == "not_found"
- assert not Map.has_key?(Enum.at(rows, 1), "id")
- assert Enum.at(rows, 2)["id"] == Enum.at(rows, 2)["key"]
- assert Enum.at(rows, 2)["key"] == "0"
- end
-
- test "GET - get invalid rows when the key doesn't exist", context do
- db_name = context[:db_name]
-
- resp = all_docs(db_name, keys: :jiffy.encode(["1211", "i_dont_exist", "0"]))
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert length(rows) == 3
- assert Enum.at(rows, 0)["error"] == "not_found"
- assert not Map.has_key?(Enum.at(rows, 0), "id")
- assert Enum.at(rows, 1)["error"] == "not_found"
- assert not Map.has_key?(Enum.at(rows, 1), "id")
- assert Enum.at(rows, 2)["id"] == Enum.at(rows, 2)["key"]
- assert Enum.at(rows, 2)["key"] == "0"
- end
-
- test "empty keys", context do
- db_name = context[:db_name]
-
- resp = all_docs(db_name, keys: :jiffy.encode([]))
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert Enum.empty?(rows)
- end
-
- defp all_docs(db_name, options, keys \\ nil) do
- resp =
- case keys do
- nil ->
- Couch.get("/#{db_name}/_all_docs", query: options)
-
- _ ->
- Couch.post("/#{db_name}/_all_docs",
- query: options,
- body: %{"keys" => keys}
- )
- end
-
- resp
- end
-end
diff --git a/test/elixir/test/view_multi_key_design_test.exs b/test/elixir/test/view_multi_key_design_test.exs
deleted file mode 100644
index c33491620..000000000
--- a/test/elixir/test/view_multi_key_design_test.exs
+++ /dev/null
@@ -1,346 +0,0 @@
-defmodule ViewMultiKeyDesignTest do
- use CouchTestCase
-
- @keys [10, 15, 30, 37, 50]
-
- @ddoc %{
- _id: "_design/test",
- language: "javascript",
- views: %{
- all_docs: %{
- map: "function(doc) { emit(doc.integer, doc.string) }"
- },
- multi_emit: %{
- map: "function(doc) {for(var i = 0 ; i < 3 ; i++) { emit(i, doc.integer) ; } }"
- },
- summate: %{
- map: "function (doc) {emit(doc.integer, doc.integer)};",
- reduce: "function (keys, values) { return sum(values); };"
- }
- }
- }
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- bulk_save(db_name, make_docs(0..99))
- {:ok, _} = create_doc(db_name, @ddoc)
-
- {:ok, [db_name: db_name]}
- end
-
- test "that missing keys work too", context do
- db_name = context[:db_name]
- keys = [101, 30, 15, 37, 50]
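- # key 101 has no matching document, so it contributes no row to the result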
- resp = view(db_name, "test/summate", [group: true], keys)
- rows = resp.body["rows"]
- assert length(rows) == length(keys) - 1
-
- assert Enum.all?(rows, &Enum.member?(keys, &1["key"]))
- assert Enum.all?(rows, &(&1["key"] == &1["value"]))
- end
-
- test "keys in POST body", context do
- db_name = context[:db_name]
- resp = view(db_name, "test/all_docs", nil, @keys)
- rows = resp.body["rows"]
- assert length(rows) == length(@keys)
- assert Enum.all?(rows, &Enum.member?(@keys, &1["key"]))
- assert Enum.all?(rows, &(&1["key"] == String.to_integer(&1["value"])))
- end
-
- test "keys in GET parameters", context do
- db_name = context[:db_name]
- resp = view(db_name, "test/all_docs", keys: :jiffy.encode(@keys))
- rows = resp.body["rows"]
- assert length(rows) == length(@keys)
- assert Enum.all?(rows, &Enum.member?(@keys, &1["key"]))
- assert Enum.all?(rows, &(&1["key"] == String.to_integer(&1["value"])))
- end
-
- test "empty keys", context do
- db_name = context[:db_name]
-
- resp = view(db_name, "test/all_docs", keys: :jiffy.encode([]))
- assert resp.status_code == 200
- rows = resp.body["rows"]
- assert Enum.empty?(rows)
- end
-
- test "keys in POST body (group)", context do
- db_name = context[:db_name]
- resp = view(db_name, "test/summate", [group: true], @keys)
- rows = resp.body["rows"]
- assert length(rows) == length(@keys)
- assert Enum.all?(rows, &Enum.member?(@keys, &1["key"]))
- assert Enum.all?(rows, &(&1["key"] == &1["value"]))
- end
-
- test "keys in GET body (group)", context do
- db_name = context[:db_name]
- resp = view(db_name, "test/summate", group: true, keys: :jiffy.encode(@keys))
- rows = resp.body["rows"]
- assert length(rows) == length(@keys)
- assert Enum.all?(rows, &Enum.member?(@keys, &1["key"]))
- assert Enum.all?(rows, &(&1["key"] == &1["value"]))
- end
-
- test "POST - invalid parameter combinations get rejected ", context do
- db_name = context[:db_name]
-
- badargs = [[startkey: 0], [endkey: 0], [key: 0], [group_level: 2]]
-
- Enum.each(badargs, fn args ->
- resp =
- Couch.post("/#{db_name}/_design/test/_view/all_docs",
- query: args,
- body: %{"keys" => @keys}
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
- end)
-
- resp =
- Couch.post("/#{db_name}/_design/test/_view/summate",
- query: nil,
- body: %{"keys" => @keys}
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
- end
-
- test "GET - invalid parameter combinations get rejected ", context do
- db_name = context[:db_name]
-
- badargs = [
- [startkey: 0, keys: :jiffy.encode(@keys)],
- [endkey: 0, keys: :jiffy.encode(@keys)],
- [key: 0, keys: :jiffy.encode(@keys)],
- [group_level: 2, keys: :jiffy.encode(@keys)]
- ]
-
- Enum.each(badargs, fn args ->
- resp =
- Couch.get("/#{db_name}/_design/test/_view/all_docs",
- query: args
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
- end)
-
- resp =
- Couch.get("/#{db_name}/_design/test/_view/summate",
- query: [keys: :jiffy.encode(@keys)],
- body: %{"keys" => @keys}
- )
-
- assert resp.status_code == 400
- assert resp.body["error"] == "query_parse_error"
- end
-
- test "that a map & reduce containing func support keys when reduce=false", context do
- db_name = context[:db_name]
- resp = view(db_name, "test/summate", [reduce: false], @keys)
- assert length(resp.body["rows"]) == 5
-
- resp = view(db_name, "test/summate", reduce: false, keys: :jiffy.encode(@keys))
- assert length(resp.body["rows"]) == 5
- end
-
- test "that limiting by startkey_docid and endkey_docid get applied", context do
- db_name = context[:db_name]
-
- exp_key = [0, 0, 0, 2, 2, 2]
- exp_val = [21, 22, 23, 21, 22, 23]
-
- resp =
- view(db_name, "test/multi_emit", [startkey_docid: 21, endkey_docid: 23], [0, 2])
-
- rows = resp.body["rows"]
- rows_key = Enum.map(rows, & &1["key"])
- assert rows_key == exp_key
-
- rows_value = Enum.map(rows, & &1["value"])
- assert rows_value == exp_val
-
- resp =
- view(db_name, "test/multi_emit",
- startkey_docid: 21,
- endkey_docid: 23,
- keys: :jiffy.encode([0, 2])
- )
-
- rows = resp.body["rows"]
- rows_key = Enum.map(rows, & &1["key"])
- assert rows_key == exp_key
-
- rows_value = Enum.map(rows, & &1["value"])
- assert rows_value == exp_val
- end
-
- test "limit works", context do
- db_name = context[:db_name]
-
- resp = view(db_name, "test/all_docs", [limit: 1], @keys)
- rows = resp.body["rows"]
- assert length(rows) == 1
- assert Enum.at(rows, 0)["key"] == 10
-
- resp = view(db_name, "test/all_docs", limit: 1, keys: :jiffy.encode(@keys))
- rows = resp.body["rows"]
- assert length(rows) == 1
- assert Enum.at(rows, 0)["key"] == 10
- end
-
- test "offset works", context do
- db_name = context[:db_name]
-
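- # multi_emit emits key 0 once per document (100 rows), so skipping 1 leaves 99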
- resp = view(db_name, "test/multi_emit", [skip: 1], [0])
- rows = resp.body["rows"]
- assert length(rows) == 99
-
- resp = view(db_name, "test/multi_emit", skip: 1, keys: :jiffy.encode([0]))
- rows = resp.body["rows"]
- assert length(rows) == 99
- end
-
- test "dir ascending works", context do
- db_name = context[:db_name]
-
- expect_rows = mk_rows(0..99, 1, &</2) ++ mk_rows(0..99, 2, &</2)
-
- resp = view(db_name, "test/multi_emit", [descending: false], [1, 2])
- rows = resp.body["rows"]
- assert length(rows) == 200
- assert expect_rows == rows
-
- keys = :jiffy.encode([1, 2])
- resp = view(db_name, "test/multi_emit", descending: false, keys: keys)
- rows = resp.body["rows"]
- assert length(rows) == 200
- assert expect_rows == rows
- end
-
- test "dir descending works", context do
- db_name = context[:db_name]
-
- expect_rows = mk_rows(0..99, 2, &>/2) ++ mk_rows(0..99, 1, &>/2)
-
- resp = view(db_name, "test/multi_emit", [descending: true], [1, 2])
- rows = resp.body["rows"]
- assert length(rows) == 200
- assert expect_rows == rows
-
- keys = :jiffy.encode([1, 2])
- resp = view(db_name, "test/multi_emit", descending: true, keys: keys)
- rows = resp.body["rows"]
- assert length(rows) == 200
- assert expect_rows == rows
- end
-
- test "argument combinations", context do
- db_name = context[:db_name]
-
- resp = view(db_name, "test/multi_emit", [descending: true, skip: 3, limit: 2], [2])
- rows = resp.body["rows"]
- assert length(rows) == 2
-
- resp =
- view(db_name, "test/multi_emit",
- descending: true,
- skip: 3,
- limit: 2,
- keys: :jiffy.encode([2])
- )
-
- rows = resp.body["rows"]
- assert length(rows) == 2
-
- resp =
- view(db_name, "test/multi_emit", [skip: 0, limit: 1, startkey_docid: "13"], [0])
-
- rows = resp.body["rows"]
- assert length(rows) == 1
- assert Enum.at(rows, 0)["value"] == 13
-
- resp =
- view(db_name, "test/multi_emit", [skip: 2, limit: 3, startkey_docid: "13"], [0])
-
- rows = resp.body["rows"]
- assert length(rows) == 3
-
- resp =
- view(db_name, "test/multi_emit",
- skip: 2,
- limit: 3,
- startkey_docid: "13",
- keys: :jiffy.encode([0])
- )
-
- rows = resp.body["rows"]
- assert length(rows) == 3
-
- resp =
- view(
- db_name,
- "test/multi_emit",
- [skip: 1, limit: 5, startkey_docid: "25", endkey_docid: "27"],
- [1]
- )
-
- rows = resp.body["rows"]
- assert length(rows) == 2
- assert Enum.at(rows, 0)["value"] == 26 or assert(Enum.at(rows, 0)["value"] == 27)
-
- resp =
- view(db_name, "test/multi_emit",
- skip: 1,
- limit: 5,
- startkey_docid: "25",
- endkey_docid: "27",
- keys: :jiffy.encode([1])
- )
-
- rows = resp.body["rows"]
- assert length(rows) == 2
- assert Enum.at(rows, 0)["value"] == 26 or assert(Enum.at(rows, 0)["value"] == 27)
-
- resp =
- view(
- db_name,
- "test/multi_emit",
- [skip: 1, limit: 5, startkey_docid: "28", endkey_docid: "26", descending: true],
- [1]
- )
-
- rows = resp.body["rows"]
- assert length(rows) == 2
- assert Enum.at(rows, 0)["value"] == 26 or assert(Enum.at(rows, 0)["value"] == 27)
-
- resp =
- view(db_name, "test/multi_emit",
- skip: 1,
- limit: 5,
- startkey_docid: "28",
- endkey_docid: "26",
- descending: true,
- keys: :jiffy.encode([1])
- )
-
- rows = resp.body["rows"]
- assert length(rows) == 2
- end
-
- defp mk_rows(range, key, sort_fun) do
- row_fun = fn(i) -> %{"id" => "#{i}", "key" => key, "value" => i} end
- sort_mapper = fn(row) -> {row["key"], row["id"]} end
- range
- |> Enum.map(row_fun)
- |> Enum.sort_by(sort_mapper, sort_fun)
- end
-end
diff --git a/test/elixir/test/view_offsets_test.exs b/test/elixir/test/view_offsets_test.exs
deleted file mode 100644
index edb5a58f6..000000000
--- a/test/elixir/test/view_offsets_test.exs
+++ /dev/null
@@ -1,100 +0,0 @@
-defmodule ViewOffsetTest do
- use CouchTestCase
-
- @moduletag :view_offsets
-
- @moduledoc """
- Tests about view offsets.
- This is a port of the view_offsets.js javascript test suite.
- """
-
- @docs [
- %{"_id" => "a1", "letter" => "a", "number" => 1, "foo" => "bar"},
- %{"_id" => "a2", "letter" => "a", "number" => 2, "foo" => "bar"},
- %{"_id" => "a3", "letter" => "a", "number" => 3, "foo" => "bar"},
- %{"_id" => "b1", "letter" => "b", "number" => 1, "foo" => "bar"},
- %{"_id" => "b2", "letter" => "b", "number" => 2, "foo" => "bar"},
- %{"_id" => "b3", "letter" => "b", "number" => 3, "foo" => "bar"},
- %{"_id" => "b4", "letter" => "b", "number" => 4, "foo" => "bar"},
- %{"_id" => "b5", "letter" => "b", "number" => 5, "foo" => "bar"},
- %{"_id" => "c1", "letter" => "c", "number" => 1, "foo" => "bar"},
- %{"_id" => "c2", "letter" => "c", "number" => 2, "foo" => "bar"}
- ]
-
- @design_doc %{
- "_id" => "_design/test",
- "views" => %{
- "offset" => %{
- "map" => "function(doc) { emit([doc.letter, doc.number], doc); }"
- }
- }
- }
-
- @tag :with_db
- test "basic view offsets", context do
- db_name = context[:db_name]
- save(db_name, @design_doc)
- bulk_save(db_name, @docs)
-
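- # with descending=true, the reported offset is the number of rows that precede the start key in the descending sort order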
- [
- [["c", 2], 0],
- [["c", 1], 1],
- [["b", 5], 2],
- [["b", 4], 3],
- [["b", 3], 4],
- [["b", 2], 5],
- [["b", 1], 6],
- [["a", 3], 7],
- [["a", 2], 8],
- [["a", 1], 9]
- ]
- |> Enum.each(fn [start_key, offset] ->
- result =
- view(db_name, "test/offset", %{
- "startkey" => :jiffy.encode(start_key),
- "descending" => true
- })
-
- assert result.body["offset"] === offset
- end)
- end
-
- test "repeated view offsets" do
- 0..14 |> Enum.each(fn _ -> repeated_view_offset_test_fun() end)
- end
-
- def repeated_view_offset_test_fun do
- db_name = random_db_name()
- create_db(db_name)
-
- save(db_name, @design_doc)
- bulk_save(db_name, @docs)
-
- first_response =
- view(db_name, "test/offset", %{
- "startkey" => :jiffy.encode(["b", 4]),
- "startkey_docid" => "b4",
- "endkey" => :jiffy.encode(["b"]),
- "descending" => true,
- "limit" => 2,
- "skip" => 1
- })
-
- second_response =
- view(db_name, "test/offset", %{
- "startkey" => :jiffy.encode(["c", 3])
- })
-
- third_response =
- view(db_name, "test/offset", %{
- "startkey" => :jiffy.encode(["b", 6]),
- "endkey" => :jiffy.encode(["b", 7])
- })
-
- assert first_response.body["offset"] === 4
- assert second_response.body["offset"] === length(@docs)
- assert third_response.body["offset"] === 8
-
- delete_db(db_name)
- end
-end
diff --git a/test/elixir/test/view_pagination_test.exs b/test/elixir/test/view_pagination_test.exs
deleted file mode 100644
index 322b653cb..000000000
--- a/test/elixir/test/view_pagination_test.exs
+++ /dev/null
@@ -1,189 +0,0 @@
-defmodule ViewPaginationTest do
- use CouchTestCase
-
- @moduletag :view_pagination
-
- @moduledoc """
- Integration tests for pagination.
- This is a port of the view_pagination.js test suite.
- """
-
- @tag :with_db
- test "basic view pagination", context do
- db_name = context[:db_name]
-
- docs = make_docs(0..99)
- bulk_save(db_name, docs)
-
- query_function = "function(doc) { emit(doc.integer, null); }"
-
- 0..99
- |> Enum.filter(fn number -> rem(number, 10) === 0 end)
- |> Enum.each(fn i ->
- query_options = %{"startkey" => i, "startkey_docid" => i, limit: 10}
- result = query(db_name, query_function, nil, query_options)
- assert result["total_rows"] === length(docs)
- assert length(result["rows"]) === 10
- assert result["offset"] === i
- Enum.each(0..9, &assert(Enum.at(result["rows"], &1)["key"] === &1 + i))
- end)
- end
-
- @tag :with_db
- test "aliases start_key and start_key_doc_id should work", context do
- db_name = context[:db_name]
-
- docs = make_docs(0..99)
- bulk_save(db_name, docs)
-
- query_function = "function(doc) { emit(doc.integer, null); }"
-
- 0..99
- |> Enum.filter(fn number -> rem(number, 10) === 0 end)
- |> Enum.each(fn i ->
- query_options = %{"start_key" => i, "start_key_docid" => i, limit: 10}
- result = query(db_name, query_function, nil, query_options)
- assert result["total_rows"] === length(docs)
- assert length(result["rows"]) === 10
- assert result["offset"] === i
- Enum.each(0..9, &assert(Enum.at(result["rows"], &1)["key"] === &1 + i))
- end)
- end
-
- @tag :with_db
- test "descending view pagination", context do
- db_name = context[:db_name]
-
- docs = make_docs(0..99)
- bulk_save(db_name, docs)
-
- query_function = "function(doc) { emit(doc.integer, null); }"
-
- 100..0
- |> Enum.filter(fn number -> rem(number, 10) === 0 end)
- |> Enum.map(&(&1 - 1))
- |> Enum.filter(&(&1 > 0))
- |> Enum.each(fn i ->
- query_options = %{
- "startkey" => i,
- "startkey_docid" => i,
- limit: 10,
- descending: true
- }
-
- result = query(db_name, query_function, nil, query_options)
- assert result["total_rows"] === length(docs)
- assert length(result["rows"]) === 10
- assert result["offset"] === length(docs) - i - 1
- Enum.each(0..9, &assert(Enum.at(result["rows"], &1)["key"] === i - &1))
- end)
- end
-
- @tag :with_db
- test "descending=false parameter should just be ignored", context do
- db_name = context[:db_name]
-
- docs = make_docs(0..99)
- bulk_save(db_name, docs)
-
- query_function = "function(doc) { emit(doc.integer, null); }"
-
- 0..99
- |> Enum.filter(fn number -> rem(number, 10) === 0 end)
- |> Enum.each(fn i ->
- query_options = %{
- "start_key" => i,
- "start_key_docid" => i,
- limit: 10,
- descending: false
- }
-
- result = query(db_name, query_function, nil, query_options)
- assert result["total_rows"] === length(docs)
- assert length(result["rows"]) === 10
- assert result["offset"] === i
- Enum.each(0..9, &assert(Enum.at(result["rows"], &1)["key"] === &1 + i))
- end)
- end
-
- @tag :with_db
- test "endkey document id", context do
- db_name = context[:db_name]
-
- docs = make_docs(0..99)
- bulk_save(db_name, docs)
-
- query_function = "function(doc) { emit(null, null); }"
-
- query_options = %{
- "startkey" => :null,
- "startkey_docid" => 1,
- "endkey" => :null,
- "endkey_docid" => 40,
- }
-
- result = query(db_name, query_function, nil, query_options)
- test_end_key_doc_id(result, docs)
- end
-
- @tag :with_db
- test "endkey document id, but with end_key_doc_id alias", context do
- db_name = context[:db_name]
-
- docs = make_docs(0..99)
- bulk_save(db_name, docs)
-
- query_function = "function(doc) { emit(null, null); }"
-
- query_options = %{
- "start_key" => :null,
- "start_key_doc_id" => 1,
- "end_key" => :null,
- "end_key_doc_id" => 40,
- }
-
- result = query(db_name, query_function, nil, query_options)
- test_end_key_doc_id(result, docs)
- end
-
- defp test_end_key_doc_id(query_result, docs) do
- assert length(query_result["rows"]) === 35
- assert query_result["total_rows"] === length(docs)
- assert query_result["offset"] === 1
-
- # doc ids are strings and sort lexicographically: "1", "10".."19", "2", "20".."29", "3", "30".."39", "4", "40"
- expected_ids =
- ["1"] ++ Enum.map(10..19, &Integer.to_string/1) ++
- ["2"] ++ Enum.map(20..29, &Integer.to_string/1) ++
- ["3"] ++ Enum.map(30..39, &Integer.to_string/1) ++
- ["4", "40"]
-
- assert Enum.map(query_result["rows"], & &1["id"]) === expected_ids
- end
-end
diff --git a/test/elixir/test/view_sandboxing_test.exs b/test/elixir/test/view_sandboxing_test.exs
deleted file mode 100644
index af0928efa..000000000
--- a/test/elixir/test/view_sandboxing_test.exs
+++ /dev/null
@@ -1,191 +0,0 @@
-defmodule ViewSandboxingTest do
- use CouchTestCase
-
- @document %{integer: 1, string: "1", array: [1, 2, 3]}
-
- @tag :with_db
- test "attempting to change the document has no effect", context do
- db_name = context[:db_name]
-
- {:ok, _} = create_doc(db_name, @document)
-
- map_fun = """
- function(doc) {
- doc.integer = 2;
- emit(null, doc);
- }
- """
-
- resp = query(db_name, map_fun, nil, %{include_docs: true})
- rows = resp["rows"]
- # either we have an error or our doc is unchanged
- assert resp["total_rows"] == 0 or Enum.at(rows, 0)["doc"]["integer"] == 1
-
- map_fun = """
- function(doc) {
- doc.array[0] = 0;
- emit(null, doc);
- }
- """
-
- resp = query(db_name, map_fun, nil, %{include_docs: true})
- row = Enum.at(resp["rows"], 0)
- # either we have an error or our doc is unchanged
- assert resp["total_rows"] == 0 or Enum.at(row["doc"]["array"], 0) == 1
- end
-
- @tag :with_db
- test "view cannot invoke interpreter internals", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- map_fun = """
- function(doc) {
- gc();
- emit(null, doc);
- }
- """
-
- # make sure that a view cannot invoke interpreter internals such as the
- # garbage collector
- resp = query(db_name, map_fun)
- assert resp["total_rows"] == 0
- end
-
- @tag :with_db
- test "view cannot access the map_funs and map_results array", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- map_fun = """
- function(doc) {
- map_funs.push(1);
- emit(null, doc);
- }
- """
-
- resp = query(db_name, map_fun)
- assert resp["total_rows"] == 0
-
- map_fun = """
- function(doc) {
- map_results.push(1);
- emit(null, doc);
- }
- """
-
- resp = query(db_name, map_fun)
- assert resp["total_rows"] == 0
- end
-
- @tag :with_db
- test "COUCHDB-925 - altering 'doc' variable in map function affects other map functions",
- context do
- db_name = context[:db_name]
-
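- # view1 mutates the doc object it receives; view2 emits the same fields and must still see the original values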
- ddoc = %{
- _id: "_design/foobar",
- language: "javascript",
- views: %{
- view1: %{
- map: """
- function(doc) {
- if (doc.values) {
- doc.values = [666];
- }
- if (doc.tags) {
- doc.tags.push("qwerty");
- }
- if (doc.tokens) {
- doc.tokens["c"] = 3;
- }
- }
- """
- },
- view2: %{
- map: """
- function(doc) {
- if (doc.values) {
- emit(doc._id, doc.values);
- }
- if (doc.tags) {
- emit(doc._id, doc.tags);
- }
- if (doc.tokens) {
- emit(doc._id, doc.tokens);
- }
- }
- """
- }
- }
- }
-
- doc1 = %{
- _id: "doc1",
- values: [1, 2, 3]
- }
-
- doc2 = %{
- _id: "doc2",
- tags: ["foo", "bar"],
- tokens: %{a: 1, b: 2}
- }
-
- {:ok, _} = create_doc(db_name, ddoc)
- {:ok, _} = create_doc(db_name, doc1)
- {:ok, _} = create_doc(db_name, doc2)
-
- resp1 = view(db_name, "foobar/view1")
- resp2 = view(db_name, "foobar/view2")
-
- assert Enum.empty?(resp1.body["rows"])
- assert length(resp2.body["rows"]) == 3
-
- assert doc1[:_id] == Enum.at(resp2.body["rows"], 0)["key"]
- assert doc2[:_id] == Enum.at(resp2.body["rows"], 1)["key"]
- assert doc2[:_id] == Enum.at(resp2.body["rows"], 2)["key"]
-
- assert length(Enum.at(resp2.body["rows"], 0)["value"]) == 3
-
- row0_values = Enum.at(resp2.body["rows"], 0)["value"]
-
- assert Enum.at(row0_values, 0) == 1
- assert Enum.at(row0_values, 1) == 2
- assert Enum.at(row0_values, 2) == 3
-
- row1_values = Enum.at(resp2.body["rows"], 1)["value"]
- row2_values = Enum.at(resp2.body["rows"], 2)["value"]
-
- # we can't be 100% sure about the order for the same key
- assert (is_map(row1_values) and row1_values["a"] == 1) or
- (is_list(row1_values) and Enum.at(row1_values, 0) == "foo")
-
- assert (is_map(row1_values) and row1_values["b"] == 2) or
- (is_list(row1_values) and Enum.at(row1_values, 1) == "bar")
-
- assert (is_map(row2_values) and row2_values["a"] == 1) or
- (is_list(row2_values) and Enum.at(row2_values, 0) == "foo")
-
- assert (is_map(row2_values) and row2_values["b"] == 2) or
- (is_list(row2_values) and Enum.at(row2_values, 1) == "bar")
-
- assert is_list(row1_values) or !Map.has_key?(row1_values, "c")
- assert is_list(row2_values) or !Map.has_key?(row2_values, "c")
- end
-
- @tag :with_db
- test "runtime code evaluation can be prevented", context do
- db_name = context[:db_name]
- {:ok, _} = create_doc(db_name, @document)
-
- map_fun = """
- function(doc) {
- var glob = emit.constructor('return this')();
- emit(doc._id, null);
- }
- """
-
- resp = query(db_name, map_fun)
- assert resp["total_rows"] == 0
- end
-end
diff --git a/test/elixir/test/view_test.exs b/test/elixir/test/view_test.exs
deleted file mode 100644
index 728a0ba51..000000000
--- a/test/elixir/test/view_test.exs
+++ /dev/null
@@ -1,155 +0,0 @@
-defmodule ViewTest do
- use CouchTestCase
-
- @moduletag :view
-
- @moduledoc """
- Test CouchDB /{db}/_design/{ddoc}/_view/{view}
- """
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name)
- on_exit(fn -> delete_db(db_name) end)
-
- {:ok, _} = create_doc(
- db_name,
- %{
- _id: "foo",
- bar: "baz"
- }
- )
-
- {:ok, _} = create_doc(
- db_name,
- %{
- _id: "foo2",
- bar: "baz2"
- }
- )
-
- map_fun = """
- function(doc) {
- emit(doc._id, doc.bar);
- }
- """
-
-
- body = %{
- :docs => [
- %{
- _id: "_design/map",
- views: %{
- some: %{
- map: map_fun
- }
- }
- }
- ]
- }
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
- Enum.each(resp.body, &assert(&1["ok"]))
-
- {:ok, [db_name: db_name]}
- end
-
- test "GET with no parameters", context do
- resp = Couch.get(
- "/#{context[:db_name]}/_design/map/_view/some"
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 2
- end
-
- test "GET with one key", context do
- resp = Couch.get(
- "/#{context[:db_name]}/_design/map/_view/some",
- query: %{
- :key => "\"foo\""
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- test "GET with multiple keys", context do
- resp = Couch.get(
- "/#{context[:db_name]}/_design/map/_view/some",
- query: %{
- :keys => "[\"foo\", \"foo2\"]"
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 2
- end
-
- test "POST with empty body", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_design/map/_view/some",
- body: %{}
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 2
- end
-
- test "POST with keys and limit", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_design/map/_view/some",
- body: %{
- :keys => ["foo", "foo2"],
- :limit => 1
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- test "POST with query parameter and JSON body", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_design/map/_view/some",
- query: %{
- :limit => 1
- },
- body: %{
- :keys => ["foo", "foo2"]
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- test "POST edge case with colliding parameters - query takes precedence", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_design/map/_view/some",
- query: %{
- :limit => 1
- },
- body: %{
- :keys => ["foo", "foo2"],
- :limit => 2
- }
- )
-
- assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
- end
-
- test "POST with boolean parameter", context do
- resp = Couch.post(
- "/#{context[:db_name]}/_design/map/_view/some",
- body: %{
- :stable => true,
- :update => true
- }
- )
-
- assert resp.status_code == 200
- end
-end
diff --git a/test/elixir/test/view_update_seq_test.exs b/test/elixir/test/view_update_seq_test.exs
deleted file mode 100644
index 38b42c7a7..000000000
--- a/test/elixir/test/view_update_seq_test.exs
+++ /dev/null
@@ -1,142 +0,0 @@
-defmodule ViewUpdateSeqTest do
- use CouchTestCase
-
- @moduletag :view_update_seq
-
- @moduledoc """
- This is a port of the view_update_seq.js test suite.
- """
-
- @design_doc %{
- _id: "_design/test",
- language: "javascript",
- autoupdate: false,
- views: %{
- all_docs: %{
- map: "function(doc) { emit(doc.integer, doc.string) }"
- },
- summate: %{
- map:
- "function (doc) { if (typeof doc.integer === 'number') { emit(doc.integer, doc.integer)}; }",
- reduce: "function (keys, values) { return sum(values); };"
- }
- }
- }
-
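- # update_seq values have the form "<integer>-<opaque string>"; only the integer prefix is compared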
- defp seq_int(seq) do
- {int, _} =
- seq
- |> String.split("-")
- |> Enum.at(0)
- |> Integer.parse()
-
- int
- end
-
- @tag :with_db
- test "db info update seq", context do
- db_name = context[:db_name]
-
- info = info(db_name)
- assert seq_int(info["update_seq"]) == 0
-
- create_doc(db_name, @design_doc)
-
- info = info(db_name)
- assert seq_int(info["update_seq"]) == 1
- end
-
- @tag :with_db
- test "_all_docs update seq", context do
- db_name = context[:db_name]
-
- resp = Couch.get("/#{db_name}/_all_docs", query: %{:update_seq => true})
- assert seq_int(resp.body["update_seq"]) == 0
-
- create_doc(db_name, @design_doc)
-
- resp = Couch.get("/#{db_name}/_all_docs", query: %{:update_seq => true})
- assert length(resp.body["rows"]) == 1
- assert seq_int(resp.body["update_seq"]) == 1
-
- docs = make_docs(0..99)
- bulk_save(db_name, docs)
-
- resp = Couch.get("/#{db_name}/_all_docs", query: %{:limit => 1})
- assert length(resp.body["rows"]) == 1
- assert Map.has_key?(resp.body, "update_seq") == false
-
- resp = Couch.get("/#{db_name}/_all_docs", query: %{:limit => 1, :update_seq => true})
- assert length(resp.body["rows"]) == 1
- assert seq_int(resp.body["update_seq"]) == 101
- end
-
- @tag :with_db
- test "view update seq", context do
- db_name = context[:db_name]
-
- create_doc(db_name, @design_doc)
- docs = make_docs(0..99)
- bulk_save(db_name, docs)
-
- resp = view(db_name, "test/all_docs", %{:limit => 1, :update_seq => true})
- assert length(resp.body["rows"]) == 1
- assert seq_int(resp.body["update_seq"]) == 101
-
- resp = view(db_name, "test/all_docs", %{:limit => 1, :update_seq => false})
- assert length(resp.body["rows"]) == 1
- assert Map.has_key?(resp.body, "update_seq") == false
-
- resp = view(db_name, "test/summate", %{:update_seq => true})
- assert length(resp.body["rows"]) == 1
- assert seq_int(resp.body["update_seq"]) == 101
-
- save(db_name, %{"_id" => "A", "integer" => 1})
-
- resp =
- view(db_name, "test/all_docs", %{:limit => 1, :stale => "ok", :update_seq => true})
-
- assert length(resp.body["rows"]) == 1
- assert seq_int(resp.body["update_seq"]) == 101
-
- save(db_name, %{"_id" => "AA", "integer" => 2})
-
- resp =
- view(db_name, "test/all_docs", %{
- :limit => 1,
- :stale => "update_after",
- :update_seq => true
- })
-
- assert length(resp.body["rows"]) == 1
- assert seq_int(resp.body["update_seq"]) == 101
-
- retry_until(fn ->
- resp =
- view(db_name, "test/all_docs", %{:limit => 1, :stale => "ok", :update_seq => true})
-
- assert length(resp.body["rows"]) == 1
- seq_int(resp.body["update_seq"]) == 103
- end)
-
- resp =
- view(db_name, "test/all_docs", %{:limit => 1, :stale => "ok", :update_seq => true})
-
- assert length(resp.body["rows"]) == 1
- assert seq_int(resp.body["update_seq"]) == 103
-
- resp = view(db_name, "test/all_docs", %{:limit => 1, :update_seq => true})
-
- assert length(resp.body["rows"]) == 1
- assert seq_int(resp.body["update_seq"]) == 103
-
- resp = view(db_name, "test/all_docs", %{:update_seq => true}, ["0", "1"])
- assert seq_int(resp.body["update_seq"]) == 103
-
- resp = view(db_name, "test/all_docs", %{:update_seq => true}, ["0", "1"])
- assert seq_int(resp.body["update_seq"]) == 103
-
- resp = view(db_name, "test/summate", %{:group => true, :update_seq => true}, [0, 1])
- assert seq_int(resp.body["update_seq"]) == 103
- end
-end
diff --git a/test/javascript/tests/list_views.js b/test/javascript/tests/list_views.js
deleted file mode 100644
index 2d74586fe..000000000
--- a/test/javascript/tests/list_views.js
+++ /dev/null
@@ -1,502 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-couchTests.elixir = true;
-couchTests.list_views = function(debug) {
-
- var db_name = get_random_db_name();
- var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
- db.createDb();
- if (debug) debugger;
-
- var designDoc = {
- _id:"_design/lists",
- language: "javascript",
- views : {
- basicView : {
- map : stringFun(function(doc) {
- emit(doc.integer, doc.string);
- })
- },
- withReduce : {
- map : stringFun(function(doc) {
- emit(doc.integer, doc.string);
- }),
- reduce : stringFun(function(keys, values, rereduce) {
- if (rereduce) {
- return sum(values);
- } else {
- return values.length;
- }
- })
- }
- },
- lists: {
- basicBasic : stringFun(function(head, req) {
- send("head");
- var row;
- while(row = getRow()) {
- log("row: "+toJSON(row));
- send(row.key);
- };
- return "tail";
- }),
- basicJSON : stringFun(function(head, req) {
- start({"headers":{"Content-Type" : "application/json"}});
- send('{"head":'+toJSON(head)+', ');
- send('"req":'+toJSON(req)+', ');
- send('"rows":[');
- var row, sep = '';
- while (row = getRow()) {
- send(sep + toJSON(row));
- sep = ', ';
- }
- return "]}";
- }),
- simpleForm: stringFun(function(head, req) {
- log("simpleForm");
- send('<ul>');
- var row, row_number = 0, prevKey, firstKey = null;
- while (row = getRow()) {
- row_number += 1;
- if (!firstKey) firstKey = row.key;
- prevKey = row.key;
- send('\n<li>Key: '+row.key
- +' Value: '+row.value
- +' LineNo: '+row_number+'</li>');
- }
- return '</ul><p>FirstKey: '+ firstKey + ' LastKey: '+ prevKey+'</p>';
- }),
- acceptSwitch: stringFun(function(head, req) {
- // respondWith takes care of setting the proper headers
- provides("html", function() {
- send("HTML <ul>");
-
- var row, num = 0;
- while (row = getRow()) {
- num ++;
- send('\n<li>Key: '
- +row.key+' Value: '+row.value
- +' LineNo: '+num+'</li>');
- }
-
- // tail
- return '</ul>';
- });
- }),
- qsParams: stringFun(function(head, req) {
- return toJSON(req.query) + "\n";
- }),
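- // a list function may stop calling getRow() early; the remaining rows are simply not sent to the client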
- stopIter: stringFun(function(req) {
- send("head");
- var row, row_number = 0;
- while(row = getRow()) {
- if(row_number > 2) break;
- send(" " + row_number);
- row_number += 1;
- };
- return " tail";
- }),
- stopIter2: stringFun(function(head, req) {
- provides("html", function() {
- send("head");
- var row, row_number = 0;
- while(row = getRow()) {
- if(row_number > 2) break;
- send(" " + row_number);
- row_number += 1;
- };
- return " tail";
- });
- }),
- tooManyGetRows : stringFun(function() {
- send("head");
- var row;
- while(row = getRow()) {
- send(row.key);
- };
- getRow();
- getRow();
- getRow();
- row = getRow();
- return "after row: "+toJSON(row);
- }),
- emptyList: stringFun(function() {
- return " ";
- }),
- rowError : stringFun(function(head, req) {
- send("head");
- var row = getRow();
- send(fooBarBam); // intentional error
- return "tail";
- }),
- docReference : stringFun(function(head, req) {
- send("head");
- var row = getRow();
- send(row.doc.integer);
- return "tail";
- }),
- secObj: stringFun(function(head, req) {
- return toJSON(req.secObj);
- }),
- setHeaderAfterGotRow: stringFun(function(head, req) {
- getRow();
- start({
- code: 400,
- headers: {
- "X-My-Header": "MyHeader"
- }
- });
- send("bad request");
- }),
- allDocs: stringFun(function(head, req){
- start({'headers': {'Content-Type': 'application/json'}});
- var resp = head;
- var rows = [];
- while(row=getRow()){
- rows.push(row);
- }
- resp.rows = rows;
- return toJSON(resp);
- })
- }
- };
- var viewOnlyDesignDoc = {
- _id:"_design/views",
- language: "javascript",
- views : {
- basicView : {
- map : stringFun(function(doc) {
- emit(-doc.integer, doc.string);
- })
- }
- }
- };
- var erlListDoc = {
- _id: "_design/erlang",
- language: "erlang",
- lists: {
- simple:
- 'fun(Head, {Req}) -> ' +
- ' Send(<<"[">>), ' +
- ' Fun = fun({Row}, Sep) -> ' +
- ' Val = couch_util:get_value(<<"key">>, Row, 23), ' +
- ' Send(list_to_binary(Sep ++ integer_to_list(Val))), ' +
- ' {ok, ","} ' +
- ' end, ' +
- ' {ok, _} = FoldRows(Fun, ""), ' +
- ' Send(<<"]">>) ' +
- 'end.'
- }
- };
-
- T(db.save(designDoc).ok);
-
- var docs = makeDocs(0, 10);
- db.bulkSave(docs);
-
- var view = db.view('lists/basicView');
- T(view.total_rows == 10);
-
- // standard get
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/basicBasic/basicView");
- T(xhr.status == 200, "standard get should be 200");
- T(/head0123456789tail/.test(xhr.responseText));
-
- // standard OPTIONS - works, though it does not make much sense
- var xhr = CouchDB.request("OPTIONS", "/" + db_name + "/_design/lists/_list/basicBasic/basicView");
- T(xhr.status == 200, "standard get should be 200");
- T(/head0123456789tail/.test(xhr.responseText));
-
- // TODO: test that etags are available - actually they're not (yet): https://issues.apache.org/jira/browse/COUCHDB-2859
- //var etag = xhr.getResponseHeader("etag");
- //xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/basicBasic/basicView", {
- // headers: {"if-none-match": etag}
- //});
- //T(xhr.status == 304);
-
- // confirm ETag changes with different POST bodies
- // (not yet - see above)
- //xhr = CouchDB.request("POST", "/" + db_name + "/_design/lists/_list/basicBasic/basicView",
- // {body: JSON.stringify({keys:[1]})}
- //);
- //var etag1 = xhr.getResponseHeader("etag");
- //xhr = CouchDB.request("POST", "/" + db_name + "/_design/lists/_list/basicBasic/basicView",
- // {body: JSON.stringify({keys:[2]})}
- //);
- //var etag2 = xhr.getResponseHeader("etag");
- //T(etag1 != etag2, "POST to map _list generates key-depdendent ETags");
-
- // test the richness of the arguments
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/basicJSON/basicView?update_seq=true");
- T(xhr.status == 200, "standard get should be 200");
- var resp = JSON.parse(xhr.responseText);
- TEquals(10, resp.head.total_rows);
- TEquals(0, resp.head.offset);
- // we don't have a (meaningful) update seq in a clustered env
- //TEquals(11, resp.head.update_seq);
-
- T(resp.rows.length == 10);
- TEquals(resp.rows[0], {"id": "0","key": 0,"value": "0"});
-
- TEquals(resp.req.info.db_name, "" + db_name + "");
- TEquals(resp.req.method, "GET");
- TEquals(resp.req.path, [
- "" + db_name + "",
- "_design",
- "lists",
- "_list",
- "basicJSON",
- "basicView"
- ]);
- T(resp.req.headers.Accept);
- T(resp.req.headers.Host);
- T(resp.req.headers["User-Agent"]);
- T(resp.req.cookie);
- TEquals("/" + db_name + "/_design/lists/_list/basicJSON/basicView?update_seq=true",
- resp.req.raw_path, "should include raw path");
-
- // get with query params
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/simpleForm/basicView?startkey=3&endkey=8");
- T(xhr.status == 200, "with query params");
- T(!(/Key: 1/.test(xhr.responseText)));
- T(/FirstKey: 3/.test(xhr.responseText));
- T(/LastKey: 8/.test(xhr.responseText));
-
- // with 0 rows
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/simpleForm/basicView?startkey=30");
- T(xhr.status == 200, "0 rows");
- T(/<\/ul>/.test(xhr.responseText));
-
- //too many Get Rows
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/tooManyGetRows/basicView");
- T(xhr.status == 200, "tooManyGetRows");
- T(/9after row: null/.test(xhr.responseText));
-
-
- // reduce with 0 rows
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/simpleForm/withReduce?startkey=30");
- T(xhr.status == 200, "reduce 0 rows");
- T(/LastKey: undefined/.test(xhr.responseText));
-
- // when there is a reduce present, but not used
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/simpleForm/withReduce?reduce=false");
- T(xhr.status == 200, "reduce false");
- T(/Key: 1/.test(xhr.responseText));
-
-
- // when there is a reduce present, and used
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/simpleForm/withReduce?group=true");
- T(xhr.status == 200, "group reduce");
- T(/Key: 1/.test(xhr.responseText));
-
- // there should be etags on reduce as well
- // (see above 4 etags)
- //var etag = xhr.getResponseHeader("etag");
- //T(etag, "Etags should be served with reduce lists");
- //xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/simpleForm/withReduce?group=true", {
- // headers: {"if-none-match": etag}
- //});
- //T(xhr.status == 304);
-
- // confirm ETag changes with different POST bodies
- // (see above)
- //xhr = CouchDB.request("POST", "/" + db_name + "/_design/lists/_list/simpleForm/withReduce?group=true",
- // {body: JSON.stringify({keys:[1]})}
- //);
- //var etag1 = xhr.getResponseHeader("etag");
- //xhr = CouchDB.request("POST", "/" + db_name + "/_design/lists/_list/simpleForm/withReduce?group=true",
- // {body: JSON.stringify({keys:[2]})}
- //);
- //var etag2 = xhr.getResponseHeader("etag");
- //T(etag1 != etag2, "POST to reduce _list generates key-depdendent ETags");
-
- // verify the etags expire correctly
- var docs = makeDocs(11, 12);
- db.bulkSave(docs);
-
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/simpleForm/withReduce?group=true", {
- // will always be 200 as etags don't make sense (see above)
- //headers: {"if-none-match": etag}
- });
- T(xhr.status == 200, "reduce etag");
-
- // empty list
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/emptyList/basicView");
- T(xhr.responseText.match(/^ $/));
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/emptyList/withReduce?group=true");
- T(xhr.responseText.match(/^ $/));
-
- // multi-key fetch
- var xhr = CouchDB.request("POST", "/" + db_name + "/_design/lists/_list/simpleForm/basicView", {
- body: '{"keys":[2,4,5,7]}'
- });
- T(xhr.status == 200, "multi key");
- T(!(/Key: 1 /.test(xhr.responseText)));
- T(/Key: 2/.test(xhr.responseText));
- T(/FirstKey: 2/.test(xhr.responseText));
- T(/LastKey: 7/.test(xhr.responseText));
-
- // multi-key fetch with GET
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/simpleForm/basicView" +
- "?keys=[2,4,5,7]");
-
- T(xhr.status == 200, "multi key");
- T(!(/Key: 1 /.test(xhr.responseText)));
- T(/Key: 2/.test(xhr.responseText));
- T(/FirstKey: 2/.test(xhr.responseText));
- T(/LastKey: 7/.test(xhr.responseText));
-
- // no multi-key fetch allowed when group=false
- xhr = CouchDB.request("POST", "/" + db_name + "/_design/lists/_list/simpleForm/withReduce?group=false", {
- body: '{"keys":[2,4,5,7]}'
- });
- T(xhr.status == 400);
- T(/query_parse_error/.test(xhr.responseText));
-
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/rowError/basicView");
- T(/ReferenceError/.test(xhr.responseText));
-
-
- // with include_docs and a reference to the doc.
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/docReference/basicView?include_docs=true");
- T(xhr.responseText.match(/head0tail/));
-
- // now with extra qs params
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/qsParams/basicView?foo=blam");
- T(xhr.responseText.match(/blam/));
-
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/stopIter/basicView");
- // T(xhr.getResponseHeader("Content-Type") == "text/plain");
- T(xhr.responseText.match(/^head 0 1 2 tail$/) && "basic stop");
-
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/stopIter2/basicView", {
- headers : {
- "Accept" : "text/html"
- }
- });
- T(xhr.responseText.match(/^head 0 1 2 tail$/) && "stop 2");
-
- // aborting iteration with reduce
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/stopIter/withReduce?group=true");
- T(xhr.responseText.match(/^head 0 1 2 tail$/) && "reduce stop");
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/stopIter2/withReduce?group=true", {
- headers : {
- "Accept" : "text/html"
- }
- });
- T(xhr.responseText.match(/^head 0 1 2 tail$/) && "reduce stop 2");
-
- // with accept headers for HTML
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/acceptSwitch/basicView", {
- headers: {
- "Accept": 'text/html'
- }
- });
- T(xhr.getResponseHeader("Content-Type") == "text/html; charset=utf-8");
- T(xhr.responseText.match(/HTML/));
- T(xhr.responseText.match(/Value/));
-
- // Test that we can run lists and views from separate docs.
- T(db.save(viewOnlyDesignDoc).ok);
- var url = "/" + db_name + "/_design/lists/_list/simpleForm/views/basicView" +
- "?startkey=-3";
- xhr = CouchDB.request("GET", url);
- T(xhr.status == 200, "multiple design docs.");
- T(!(/Key: -4/.test(xhr.responseText)));
- T(/FirstKey: -3/.test(xhr.responseText));
- T(/LastKey: 0/.test(xhr.responseText));
-
- // Test that we can do multi-key requests on lists and views in separate docs.
- var url = "/" + db_name + "/_design/lists/_list/simpleForm/views/basicView";
- xhr = CouchDB.request("POST", url, {
- body: '{"keys":[-2,-4,-5,-7]}'
- });
-
- T(xhr.status == 200, "multi key separate docs");
- T(!(/Key: -3/.test(xhr.responseText)));
- T(/Key: -7/.test(xhr.responseText));
- T(/FirstKey: -2/.test(xhr.responseText));
- T(/LastKey: -7/.test(xhr.responseText));
-
- // Test if secObj is available
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/secObj/basicView");
- T(xhr.status == 200, "standard get should be 200");
- var resp = JSON.parse(xhr.responseText);
- T(typeof(resp) == "object");
-
- var erlViewTest = function() {
- T(db.save(erlListDoc).ok);
- var url = "/" + db_name + "/_design/erlang/_list/simple/views/basicView" +
- "?startkey=-3";
- xhr = CouchDB.request("GET", url);
- T(xhr.status == 200, "multiple languages in design docs.");
- var list = JSON.parse(xhr.responseText);
- T(list.length == 4);
- for(var i = 0; i < list.length; i++)
- {
- T(list[i] + 3 == i);
- }
- };
-
- // make _config available for tests or leave commented out
- //run_on_modified_server([{
- // section: "native_query_servers",
- // key: "erlang",
- // value: "{couch_native_process, start_link, []}"
- //}], erlViewTest);
-
- // COUCHDB-1113
- var ddoc = {
- _id: "_design/test",
- views: {
- me: {
- map: (function(doc) { emit(null,null)}).toString()
- }
- },
- lists: {
- you: (function(head, req) {
- var row;
- while(row = getRow()) {
- send(row);
- }
- }).toString()
- }
- };
- db.save(ddoc);
-
- var resp = CouchDB.request("GET", "/" + db.name + "/_design/test/_list/you/me", {
- headers: {
- "Content-Type": "application/x-www-form-urlencoded"
- }
- });
- TEquals(200, resp.status, "should return a 200 response");
-
- // Test that HTTP response headers can be set after getRow() is called in a _list function.
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/setHeaderAfterGotRow/basicView");
- T(xhr.status == 400);
- T(xhr.getResponseHeader("X-My-Header") == "MyHeader");
- T(xhr.responseText.match(/^bad request$/));
-
- // test handling of _all_docs by _list functions. the results should be equal
- var xhr_lAllDocs = CouchDB.request("GET", "/" + db_name + "/_design/lists/_list/allDocs/_all_docs");
- T(xhr_lAllDocs.status == 200, "standard get should be 200");
- var xhr_allDocs = CouchDB.request("GET", "/" + db_name + "/_all_docs");
- var allDocs = JSON.parse(xhr_allDocs.responseText);
- var lAllDocs = JSON.parse(xhr_lAllDocs.responseText);
- TEquals(allDocs.total_rows, lAllDocs.total_rows, "total_rows mismatch");
- TEquals(allDocs.offset, lAllDocs.offset, "offset mismatch");
- TEquals(allDocs.rows.length, lAllDocs.rows.length, "amount of rows mismatch");
- TEquals(allDocs.rows, lAllDocs.rows, "rows mismatch");
-
- // cleanup
- db.deleteDb();
-};
diff --git a/test/javascript/tests/proxyauth.js b/test/javascript/tests/proxyauth.js
deleted file mode 100644
index 7f2e097e8..000000000
--- a/test/javascript/tests/proxyauth.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-
-couchTests.elixir = true;
-couchTests.proxyauth = function(debug) {
- // this tests the proxy authentication handler
- return console.log('done in test/elixir/test/proxyauth_test.exs');
- var users_db_name = get_random_db_name();
- var usersDb = new CouchDB(users_db_name, {"X-Couch-Full-Commit":"false"});
- usersDb.createDb();
-
- var db_name = get_random_db_name();
- var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
- db.createDb();
-
- if (debug) debugger;
-
- // Simple secret key generator
- function generateSecret(length) {
- var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
- var secret = '';
- for (var i=0; i<length; i++) {
- secret += tab.charAt(Math.floor(Math.random() * 64));
- }
- return secret;
- }
-
- var secret = generateSecret(64);
-
- function TestFun() {
-
- var benoitcUserDoc = CouchDB.prepareUserDoc({
- name: "benoitc@apache.org"
- }, "test");
- T(usersDb.save(benoitcUserDoc).ok);
-
- T(CouchDB.session().userCtx.name == null);
-
- // test that you can use basic auth against the users db
- var s = CouchDB.session({
- headers : {
- "Authorization" : "Basic YmVub2l0Y0BhcGFjaGUub3JnOnRlc3Q="
- }
- });
- T(s.userCtx.name == "benoitc@apache.org");
- T(s.info.authenticated == "default");
-
- CouchDB.logout();
-
-/* XXX: None of the rest of this is supported yet in 2.0
- var headers = {
- "X-Auth-CouchDB-UserName": "benoitc@apache.org",
- "X-Auth-CouchDB-Roles": "test",
- "X-Auth-CouchDB-Token": hex_hmac_sha1(secret, "benoitc@apache.org")
- };
-
- var designDoc = {
- _id:"_design/test",
- language: "javascript",
-
- shows: {
- "welcome": stringFun(function(doc,req) {
- return "Welcome " + req.userCtx["name"];
- }),
- "role": stringFun(function(doc, req) {
- return req.userCtx['roles'][0];
- })
- }
- };
-
- db.save(designDoc);
-
- var req = CouchDB.request("GET", "/" + db_name + "/_design/test/_show/welcome",
- {headers: headers});
- T(req.responseText == "Welcome benoitc@apache.org", req.responseText);
-
- req = CouchDB.request("GET", "/" + db_name + "/_design/test/_show/role",
- {headers: headers});
- T(req.responseText == "test");
-
- var xhr = CouchDB.request("PUT", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/proxy_use_secret",{
- body : JSON.stringify("true"),
- headers: {"X-Couch-Persist": "false"}
- });
- T(xhr.status == 200);
-
- req = CouchDB.request("GET", "/" + db_name + "/_design/test/_show/welcome",
- {headers: headers});
- T(req.responseText == "Welcome benoitc@apache.org");
-
- req = CouchDB.request("GET", "/" + db_name + "/_design/test/_show/role",
- {headers: headers});
- T(req.responseText == "test");
-*/
-
- }
-
- run_on_modified_server(
- [{section: "httpd",
- key: "authentication_handlers",
- value:"{chttpd_auth, proxy_authentication_handler}, {chttpd_auth, default_authentication_handler}"},
- {section: "chttpd_auth",
- key: "authentication_db",
- value: users_db_name},
- {section: "chttpd_auth",
- key: "secret",
- value: secret},
- {section: "chttpd_auth",
- key: "x_auth_username",
- value: "X-Auth-CouchDB-UserName"},
- {section: "chttpd_auth",
- key: "x_auth_roles",
- value: "X-Auth-CouchDB-Roles"},
- {section: "chttpd_auth",
- key: "x_auth_token",
- value: "X-Auth-CouchDB-Token"},
- {section: "chttpd_auth",
- key: "proxy_use_secret",
- value: "false"}],
- TestFun
- );
-
- // cleanup
- db.deleteDb();
- usersDb.deleteDb();
-
-};
diff --git a/test/javascript/tests/replicator_db_bad_rep_id.js b/test/javascript/tests/replicator_db_bad_rep_id.js
deleted file mode 100644
index 30a124505..000000000
--- a/test/javascript/tests/replicator_db_bad_rep_id.js
+++ /dev/null
@@ -1,103 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-couchTests.replicator_db_bad_rep_id = function(debug) {
- //return console.log('TODO');
- if (debug) debugger;
-
- var populate_db = replicator_db.populate_db;
- var docs1 = replicator_db.docs1;
- // TODO: dice DBs (at least target)
- var dbA = replicator_db.dbA;
- var dbB = replicator_db.dbB;
- //var repDb = replicator_db.repDb;
- var replDb = new CouchDB("_replicator");
- var wait = replicator_db.wait;
- var waitForRep = replicator_db.waitForRep;
- var waitForSeq = replicator_db.waitForSeq;
-
- function rep_doc_with_bad_rep_id() {
- populate_db(dbA, docs1);
- populate_db(dbB, []);
-
- var repDoc = {
- _id: "foo_rep",
-// TODO: fix DB name issue and remove absolute URL again
- source: 'http://localhost:15984/'+dbA.name,
- target: 'http://localhost:15984/'+dbB.name,
- replication_id: "1234abc"
- };
- T(replDb.save(repDoc).ok);
-
- T(waitForRep(replDb, repDoc, "completed", "error") == "completed");
- for (var i = 0; i < docs1.length; i++) {
- var doc = docs1[i];
- var copy = dbB.open(doc._id);
- T(copy !== null);
- T(copy.value === doc.value);
- }
-
- var repDoc1 = replDb.open(repDoc._id);
- T(repDoc1 !== null);
- T(repDoc1.source === repDoc.source);
- T(repDoc1.target === repDoc.target);
- T(repDoc1._replication_state === "completed",
- "replication document with bad replication id failed");
- T(typeof repDoc1._replication_state_time === "string");
- T(typeof repDoc1._replication_id === "undefined");
- }
-
- /*var server_config = [
- {
- section: "couch_httpd_auth",
- key: "iterations",
- value: "1"
- },
- {
- section: "replicator",
- key: "db",
- value: null //repDb.name
- }
- ];*/
-
- //repDb.deleteDb();
- // don't run on a modified server as it would be strange on a cluster;
- // instead use the "normal" replication DB, create a doc, and reliably clear it after the run
- // (if the delete fails, the next tests would all fail)
- function handleReplDoc(show) {
- var replDoc = replDb.open("foo_rep");
- if(replDoc!=null) {
- if(show) {
- //console.log(JSON.stringify(replDoc));
- }
- replDb.deleteDoc(replDoc);
- }
- }
-
- handleReplDoc();
- try {
- rep_doc_with_bad_rep_id();
- } finally {
- // cleanup or log
- try {
- handleReplDoc(true);
- } catch (e2) {
- console.log("Error during cleanup " + e2);
- }
- }
- //run_on_modified_server(server_config, rep_doc_with_bad_rep_id);
-
- // cleanup
- //repDb.deleteDb();
- dbA.deleteDb();
- dbB.deleteDb();
-}
diff --git a/test/javascript/tests/replicator_db_by_doc_id.js b/test/javascript/tests/replicator_db_by_doc_id.js
deleted file mode 100644
index a3ca05272..000000000
--- a/test/javascript/tests/replicator_db_by_doc_id.js
+++ /dev/null
@@ -1,128 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-couchTests.replicator_db_by_doc_id = function(debug) {
- //return console.log('TODO');
-
- if (debug) debugger;
-
- var populate_db = replicator_db.populate_db;
- var docs1 = replicator_db.docs1;
- // TODO: dice DBs (at least target)
- var dbA = replicator_db.dbA;
- var dbB = replicator_db.dbB;
- //var repDb = replicator_db.repDb;
- var replDb = new CouchDB("_replicator");
- var wait = replicator_db.wait;
- var waitForRep = replicator_db.waitForRep;
- var waitForSeq = replicator_db.waitForSeq;
-
- function by_doc_ids_replication() {
- // to test that we can replicate docs with slashes in their IDs
- var docs2 = docs1.concat([
- {
- _id: "_design/mydesign",
- language : "javascript"
- }
- ]);
-
- populate_db(dbA, docs2);
- populate_db(dbB, []);
-
- var repDoc = {
- _id: "foo_cont_rep_doc",
- source: "http://" + CouchDB.host + "/" + dbA.name,
- // TODO: fix DB name issue and remove absolute URL again
- target: 'http://localhost:15984/' + dbB.name,
- doc_ids: ["foo666", "foo3", "_design/mydesign", "foo999", "foo1"]
- };
- T(replDb.save(repDoc).ok);
-
- waitForRep(replDb, repDoc, "completed");
- var copy = dbB.open("foo1");
- T(copy !== null);
- T(copy.value === 11);
-
- copy = dbB.open("foo2");
- T(copy === null);
-
- copy = dbB.open("foo3");
- T(copy !== null);
- T(copy.value === 33);
-
- copy = dbB.open("foo666");
- T(copy === null);
-
- copy = dbB.open("foo999");
- T(copy === null);
-
- copy = dbB.open("_design/mydesign");
- // TODO: recheck - but I believe this should be in the target! (see also #written below)
- T(copy !== null);
-
- repDoc = replDb.open(repDoc._id);
- T(typeof repDoc._replication_stats === "object", "doc has stats");
- var stats = repDoc._replication_stats;
- TEquals(3, stats.revisions_checked, "right # of revisions_checked");
- TEquals(3, stats.missing_revisions_found, "right # of missing_revisions_found");
- TEquals(3, stats.docs_read, "right # of docs_read");
- TEquals(3, stats.docs_written, "right # of docs_written");
- TEquals(0, stats.doc_write_failures, "right # of doc_write_failures");
- // sequences are no longer meaningful in a cluster
- //TEquals(dbA.info().update_seq, stats.checkpointed_source_seq, "right checkpointed_source_seq");
- }
-
- /*var server_config = [
- {
- section: "chttpd_auth",
- key: "iterations",
- value: "1"
- },
- {
- section: "replicator",
- key: "db",
- value: repDb.name
- }
- ];*/
-
- //repDb.deleteDb();
- // don't run on a modified server as it would be strange on a cluster;
- // instead use the "normal" replication DB, create a doc, and reliably clear it after the run
- // (if the delete fails, the next tests would all fail)
- function handleReplDoc(show) {
- var replDoc = replDb.open("foo_cont_rep_doc");
- if(replDoc!=null) {
- if(show) {
- //console.log(JSON.stringify(replDoc));
- }
- replDb.deleteDoc(replDoc);
- }
- }
-
- handleReplDoc();
- try {
- by_doc_ids_replication();
- } finally {
- // cleanup or log
- try {
- handleReplDoc(true);
- } catch (e2) {
- console.log("Error during cleanup " + e2);
- }
- }
- //run_on_modified_server(server_config, by_doc_ids_replication);
-
- // cleanup
- //repDb.deleteDb();
- dbA.deleteDb();
- dbB.deleteDb();
-}
diff --git a/test/javascript/tests/rewrite.js b/test/javascript/tests/rewrite.js
deleted file mode 100644
index a470eaee6..000000000
--- a/test/javascript/tests/rewrite.js
+++ /dev/null
@@ -1,513 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-couchTests.elixir = true;
-
-couchTests.rewrite = function(debug) {
- if (debug) debugger;
- var dbNames = ["test_suite_db", "test_suite_db/with_slashes"];
- for (var i=0; i < dbNames.length; i++) {
- var db = new CouchDB(dbNames[i]);
- var dbName = encodeURIComponent(dbNames[i]);
- db.deleteDb();
- db.createDb();
-
-
- run_on_modified_server(
- [{section: "httpd",
- key: "authentication_handlers",
- value: "{couch_httpd_auth, special_test_authentication_handler}"},
- {section:"chttpd",
- key: "WWW-Authenticate",
- value: "X-Couch-Test-Auth"}],
-
- function(){
- var designDoc = {
- _id:"_design/test",
- language: "javascript",
- _attachments:{
- "foo.txt": {
- content_type:"text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- },
- rewrites: [
- {
- "from": "foo",
- "to": "foo.txt"
- },
- {
- "from": "foo2",
- "to": "foo.txt",
- "method": "GET"
- },
- {
- "from": "hello/:id",
- "to": "_update/hello/:id",
- "method": "PUT"
- },
- {
- "from": "/welcome",
- "to": "_show/welcome"
- },
- {
- "from": "/welcome/:name",
- "to": "_show/welcome",
- "query": {
- "name": ":name"
- }
- },
- {
- "from": "/welcome2",
- "to": "_show/welcome",
- "query": {
- "name": "user"
- }
- },
- {
- "from": "/welcome3/:name",
- "to": "_update/welcome2/:name",
- "method": "PUT"
- },
- {
- "from": "/welcome3/:name",
- "to": "_show/welcome2/:name",
- "method": "GET"
- },
- {
- "from": "/welcome4/*",
- "to" : "_show/welcome3",
- "query": {
- "name": "*"
- }
- },
- {
- "from": "/welcome5/*",
- "to" : "_show/*",
- "query": {
- "name": "*"
- }
- },
- {
- "from": "basicView",
- "to": "_view/basicView",
- },
- {
- "from": "simpleForm/basicView",
- "to": "_list/simpleForm/basicView",
- },
- {
- "from": "simpleForm/basicViewFixed",
- "to": "_list/simpleForm/basicView",
- "query": {
- "startkey": 3,
- "endkey": 8
- }
- },
- {
- "from": "simpleForm/basicViewPath/:start/:end",
- "to": "_list/simpleForm/basicView",
- "query": {
- "startkey": ":start",
- "endkey": ":end"
- },
- "formats": {
- "start": "int",
- "end": "int"
- }
- },
- {
- "from": "simpleForm/complexView",
- "to": "_list/simpleForm/complexView",
- "query": {
- "key": [1, 2]
- }
- },
- {
- "from": "simpleForm/complexView2",
- "to": "_list/simpleForm/complexView",
- "query": {
- "key": ["test", {}]
- }
- },
- {
- "from": "simpleForm/complexView3",
- "to": "_list/simpleForm/complexView",
- "query": {
- "key": ["test", ["test", "essai"]]
- }
- },
- {
- "from": "simpleForm/complexView4",
- "to": "_list/simpleForm/complexView2",
- "query": {
- "key": {"c": 1}
- }
- },
- {
- "from": "simpleForm/complexView5/:a/:b",
- "to": "_list/simpleForm/complexView3",
- "query": {
- "key": [":a", ":b"]
- }
- },
- {
- "from": "simpleForm/complexView6",
- "to": "_list/simpleForm/complexView3",
- "query": {
- "key": [":a", ":b"]
- }
- },
- {
- "from": "simpleForm/complexView7/:a/:b",
- "to": "_view/complexView3",
- "query": {
- "key": [":a", ":b"],
- "include_docs": ":doc"
- },
- "format": {
- "doc": "bool"
- }
-
- },
- {
- "from": "/",
- "to": "_view/basicView",
- },
- {
- "from": "/db/*",
- "to": "../../*"
- }
- ],
- lists: {
- simpleForm: stringFun(function(head, req) {
- log("simpleForm");
- send('<ul>');
- var row, row_number = 0, prevKey, firstKey = null;
- while (row = getRow()) {
- row_number += 1;
- if (!firstKey) firstKey = row.key;
- prevKey = row.key;
- send('\n<li>Key: '+row.key
- +' Value: '+row.value
- +' LineNo: '+row_number+'</li>');
- }
- return '</ul><p>FirstKey: '+ firstKey + ' LastKey: '+ prevKey+'</p>';
- }),
- },
- shows: {
- "welcome": stringFun(function(doc,req) {
- return "Welcome " + req.query["name"];
- }),
- "welcome2": stringFun(function(doc, req) {
- return "Welcome " + doc.name;
- }),
- "welcome3": stringFun(function(doc,req) {
- return "Welcome " + req.query["name"];
- })
- },
- updates: {
- "hello" : stringFun(function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [{
- _id : req.id
- }, "New World"]
- }
- return [null, "Empty World"];
- }
- doc.world = "hello";
- doc.edited_by = req.userCtx;
- return [doc, "hello doc"];
- }),
- "welcome2": stringFun(function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [{
- _id: req.id,
- name: req.id
- }, "New World"]
- }
- return [null, "Empty World"];
- }
- return [doc, "hello doc"];
- })
- },
- views : {
- basicView : {
- map : stringFun(function(doc) {
- if (doc.integer) {
- emit(doc.integer, doc.string);
- }
-
- })
- },
- complexView: {
- map: stringFun(function(doc) {
- if (doc.type == "complex") {
- emit([doc.a, doc.b], doc.string);
- }
- })
- },
- complexView2: {
- map: stringFun(function(doc) {
- if (doc.type == "complex") {
- emit(doc.a, doc.string);
- }
- })
- },
- complexView3: {
- map: stringFun(function(doc) {
- if (doc.type == "complex") {
- emit(doc.b, doc.string);
- }
- })
- }
- }
- }
-
- db.save(designDoc);
-
- var docs = makeDocs(0, 10);
- db.bulkSave(docs);
-
- var docs2 = [
- {"a": 1, "b": 1, "string": "doc 1", "type": "complex"},
- {"a": 1, "b": 2, "string": "doc 2", "type": "complex"},
- {"a": "test", "b": {}, "string": "doc 3", "type": "complex"},
- {"a": "test", "b": ["test", "essai"], "string": "doc 4", "type": "complex"},
- {"a": {"c": 1}, "b": "", "string": "doc 5", "type": "complex"}
- ];
-
- db.bulkSave(docs2);
-
- // test simple rewriting
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/foo");
- T(req.responseText == "This is a base64 encoded text");
- T(req.getResponseHeader("Content-Type") == "text/plain");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/foo2");
- T(req.responseText == "This is a base64 encoded text");
- T(req.getResponseHeader("Content-Type") == "text/plain");
-
-
- // test POST
- // hello update world
-
- var doc = {"word":"plankton", "name":"Rusty"}
- var resp = db.save(doc);
- T(resp.ok);
- var docid = resp.id;
-
- xhr = CouchDB.request("PUT", "/"+dbName+"/_design/test/_rewrite/hello/"+docid);
- T(xhr.status == 201);
- T(xhr.responseText == "hello doc");
- T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")))
-
- doc = db.open(docid);
- T(doc.world == "hello");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome?name=user");
- T(req.responseText == "Welcome user");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome/user");
- T(req.responseText == "Welcome user");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome2");
- T(req.responseText == "Welcome user");
-
- xhr = CouchDB.request("PUT", "/"+dbName+"/_design/test/_rewrite/welcome3/test");
- T(xhr.status == 201);
- T(xhr.responseText == "New World");
- T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome3/test");
- T(xhr.responseText == "Welcome test");
-
-/* // XXX: THESE ARE BUGGED and I don't know what the right response is
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome4/user");
- T(req.responseText == "Welcome user", req.responseText);
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome5/welcome3");
- T(req.responseText == "Welcome welcome3", req.responseText);
-*/
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/basicView");
- T(xhr.status == 200, "view call");
- T(/{"total_rows":9/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/");
- T(xhr.status == 200, "view call");
- T(/{"total_rows":9/.test(xhr.responseText));
-
-
- // get with query params
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/basicView?startkey=3&endkey=8");
- T(xhr.status == 200, "with query params");
- T(!(/Key: 1/.test(xhr.responseText)));
- T(/FirstKey: 3/.test(xhr.responseText));
- T(/LastKey: 8/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/basicViewFixed");
- T(xhr.status == 200, "with query params");
- T(!(/Key: 1/.test(xhr.responseText)));
- T(/FirstKey: 3/.test(xhr.responseText));
- T(/LastKey: 8/.test(xhr.responseText));
-
- // get with query params
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/basicViewFixed?startkey=4");
- T(xhr.status == 200, "with query params");
- T(!(/Key: 1/.test(xhr.responseText)));
- T(/FirstKey: 3/.test(xhr.responseText));
- T(/LastKey: 8/.test(xhr.responseText));
-
- // get with query params
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/basicViewPath/3/8");
- T(xhr.status == 200, "with query params");
- T(!(/Key: 1/.test(xhr.responseText)));
- T(/FirstKey: 3/.test(xhr.responseText));
- T(/LastKey: 8/.test(xhr.responseText));
-
- // get with query params
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView");
- T(xhr.status == 200, "with query params");
- T(/FirstKey: [1, 2]/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView2");
- T(xhr.status == 200, "with query params");
- T(/Value: doc 3/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView3");
- T(xhr.status == 200, "with query params");
- T(/Value: doc 4/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView4");
- T(xhr.status == 200, "with query params");
- T(/Value: doc 5/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView5/test/essai");
- T(xhr.status == 200, "with query params");
- T(/Value: doc 4/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView6?a=test&b=essai");
- T(xhr.status == 200, "with query params");
- T(/Value: doc 4/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView7/test/essai?doc=true");
- T(xhr.status == 200, "with query params");
- var result = JSON.parse(xhr.responseText);
- T(typeof(result.rows[0].doc) === "object");
-
- // COUCHDB-2031 - path normalization versus qs params
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/db/_design/test?meta=true");
- T(xhr.status == 200, "path normalization works with qs params");
- var result = JSON.parse(xhr.responseText);
- T(result['_id'] == "_design/test");
- T(typeof(result['_revs_info']) === "object");
-
- // test path relative to server
- T(db.save({
- _id: "_design/test2",
- rewrites: [{
- "from": "uuids",
- "to": "../../../_uuids"
- }]
- }).ok);
-
- var xhr = CouchDB.request("GET", "/"+dbName+"/_design/test2/_rewrite/uuids");
- T(xhr.status == 500);
- var result = JSON.parse(xhr.responseText);
- T(result.error == "insecure_rewrite_rule");
-
- run_on_modified_server(
- [{section: "chttpd",
- key: "secure_rewrites",
- value: "false"}],
- function() {
- var xhr = CouchDB.request("GET", "/"+dbName+"/_design/test2/_rewrite/uuids?cache=bust");
- T(xhr.status == 200);
- var result = JSON.parse(xhr.responseText);
- T(result.uuids.length == 1);
- var first = result.uuids[0];
- });
- });
-
-/* // XXX: we have actual failures here that we need to get to
- // test invalid rewrites
- // string
- var ddoc = {
- _id: "_design/invalid",
- rewrites: "[{\"from\":\"foo\",\"to\":\"bar\"}]"
- }
- db.save(ddoc);
- var res = CouchDB.request("GET", "/"+dbName+"/_design/invalid/_rewrite/foo");
- TEquals(400, res.status, "should return 400");
-
- var ddoc_requested_path = {
- _id: "_design/requested_path",
- rewrites:[
- {"from": "show", "to": "_show/origin/0"},
- {"from": "show_rewritten", "to": "_rewrite/show"}
- ],
- shows: {
- origin: stringFun(function(doc, req) {
- return req.headers["x-couchdb-requested-path"];
- })}
- };
-
- db.save(ddoc_requested_path);
- var url = "/"+dbName+"/_design/requested_path/_rewrite/show";
- var res = CouchDB.request("GET", url);
- TEquals(url, res.responseText, "should return the original url");
-
- var url = "/"+dbName+"/_design/requested_path/_rewrite/show_rewritten";
- var res = CouchDB.request("GET", url);
- TEquals(url, res.responseText, "returned the original url");
-*/
-
- var ddoc_loop = {
- _id: "_design/loop",
- rewrites: [{ "from": "loop", "to": "_rewrite/loop"}]
- };
- db.save(ddoc_loop);
-
- // Assert loop detection
- run_on_modified_server(
- [{section: "chttpd",
- key: "rewrite_limit",
- value: "2"}],
- function(){
- var url = "/"+dbName+"/_design/loop/_rewrite/loop";
- var xhr = CouchDB.request("GET", url);
- TEquals(400, xhr.status);
- });
-
- // Assert serial execution is not spuriously counted as loop
- run_on_modified_server(
- [{section: "chttpd",
- key: "rewrite_limit",
- value: "2"},
- {section: "chttpd",
- key: "secure_rewrites",
- value: "false"}],
- function(){
- var url = "/"+dbName+"/_design/test/_rewrite/foo";
- for (var i=0; i < 5; i++) {
- var xhr = CouchDB.request("GET", url);
- TEquals(200, xhr.status);
- }
- });
- // cleanup
- db.deleteDb();
- }
-}
diff --git a/test/javascript/tests/rewrite_js.js b/test/javascript/tests/rewrite_js.js
deleted file mode 100644
index 7179fc9f5..000000000
--- a/test/javascript/tests/rewrite_js.js
+++ /dev/null
@@ -1,366 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-
-couchTests.elixir = true;
-couchTests.rewrite = function(debug) {
- if (debug) debugger;
- var dbNames = [get_random_db_name(), get_random_db_name() + "test_suite_db/with_slashes"];
- for (var i=0; i < dbNames.length; i++) {
- var db = new CouchDB(dbNames[i]);
- var dbName = encodeURIComponent(dbNames[i]);
- db.deleteDb();
- db.createDb();
-
- var designDoc = {
- _id:"_design/test",
- language: "javascript",
- _attachments:{
- "foo.txt": {
- content_type:"text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- },
- rewrites: stringFun(function(req) {
- prefix = req.path[4];
- if (prefix === 'foo') {
- return 'foo.txt';
- }
- if (prefix === 'foo2') {
- return {path: 'foo.txt', method: 'GET'};
- }
- if (prefix === 'hello') {
- if (req.method != 'PUT') {
- return
- }
- id = req.path[5];
- return {path: '_update/hello/' + id};
- }
- if (prefix === 'welcome') {
- if (req.path.length == 6){
- name = req.path[5];
- return {path: '_show/welcome', query: {'name': name}};
- }
- return '_show/welcome';
- }
- if (prefix === 'welcome2') {
- return {path: '_show/welcome', query: {'name': 'user'}};
- }
- if (prefix === 'welcome3') {
- name = req.path[5];
- if (req.method == 'PUT') {
- path = '_update/welcome2/' + name;
- } else if (req.method == 'GET') {
- path = '_show/welcome2/' + name;
- } else {
- return;
- }
- return path;
- }
- if (prefix === 'welcome4') {
- return {path: '_show/welcome3', query: {name: req.path[5]}};
- }
- if (prefix === 'welcome5') {
- rest = req.path.slice(5).join('/');
- return {path: '_show/' + rest, query: {name: rest}};
- }
- if (prefix === 'basicView') {
- rest = req.path.slice(5).join('/');
- return {path: '_view/basicView'};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/basicView') {
- return {path: '_list/simpleForm/basicView'};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/basicViewFixed') {
- return {path: '_list/simpleForm/basicView',
- query: {startkey: '"3"', endkey: '"8"'}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/complexView') {
- return {path: '_list/simpleForm/complexView',
- query: {key: JSON.stringify([1,2])}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/complexView2') {
- return {path: '_list/simpleForm/complexView',
- query: {key: JSON.stringify(['test', {}])}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/complexView3') {
- return {path: '_list/simpleForm/complexView',
- query: {key: JSON.stringify(['test', ['test', 'essai']])}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/complexView4') {
- return {path: '_list/simpleForm/complexView2',
- query: {key: JSON.stringify({"c": 1})}};
- }
- if (req.path.slice(4).join('/') === 'simpleForm/sendBody1') {
- return {path: '_list/simpleForm/complexView2',
- method: 'POST',
- query: {limit: '1'},
- headers:{'Content-type':'application/json'},
- body: JSON.stringify( {keys: [{"c": 1}]} )};
- }
- if (req.path.slice(4).join('/') === '/') {
- return {path: '_view/basicView'};
- }
- if (prefix === 'db') {
- return {path: '../../' + req.path.slice(5).join('/')};
- }
- }),
- lists: {
- simpleForm: stringFun(function(head, req) {
- send('<ul>');
- var row, row_number = 0, prevKey, firstKey = null;
- while (row = getRow()) {
- row_number += 1;
- if (!firstKey) firstKey = row.key;
- prevKey = row.key;
- send('\n<li>Key: '+row.key
- +' Value: '+row.value
- +' LineNo: '+row_number+'</li>');
- }
- return '</ul><p>FirstKey: '+ firstKey + ' LastKey: '+ prevKey+'</p>';
- }),
- },
- shows: {
- "welcome": stringFun(function(doc,req) {
- return "Welcome " + req.query["name"];
- }),
- "welcome2": stringFun(function(doc, req) {
- return "Welcome " + doc.name;
- }),
- "welcome3": stringFun(function(doc,req) {
- return "Welcome " + req.query["name"];
- })
- },
- updates: {
- "hello" : stringFun(function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [{
- _id : req.id
- }, "New World"]
- }
- return [null, "Empty World"];
- }
- doc.world = "hello";
- doc.edited_by = req.userCtx;
- return [doc, "hello doc"];
- }),
- "welcome2": stringFun(function(doc, req) {
- if (!doc) {
- if (req.id) {
- return [{
- _id: req.id,
- name: req.id
- }, "New World"]
- }
- return [null, "Empty World"];
- }
- return [doc, "hello doc"];
- })
- },
- views : {
- basicView : {
- map : stringFun(function(doc) {
- if (doc.integer) {
- emit(doc.integer, doc.string);
- }
-
- })
- },
- complexView: {
- map: stringFun(function(doc) {
- if (doc.type == "complex") {
- emit([doc.a, doc.b], doc.string);
- }
- })
- },
- complexView2: {
- map: stringFun(function(doc) {
- if (doc.type == "complex") {
- emit(doc.a, doc.string);
- }
- })
- },
- complexView3: {
- map: stringFun(function(doc) {
- if (doc.type == "complex") {
- emit(doc.b, doc.string);
- }
- })
- }
- }
- }
-
- db.save(designDoc);
-
- var docs = makeDocs(0, 10);
- db.bulkSave(docs);
-
- var docs2 = [
- {"a": 1, "b": 1, "string": "doc 1", "type": "complex"},
- {"a": 1, "b": 2, "string": "doc 2", "type": "complex"},
- {"a": "test", "b": {}, "string": "doc 3", "type": "complex"},
- {"a": "test", "b": ["test", "essai"], "string": "doc 4", "type": "complex"},
- {"a": {"c": 1}, "b": "", "string": "doc 5", "type": "complex"}
- ];
-
- db.bulkSave(docs2);
-
- // test simple rewriting
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/foo");
- T(req.responseText == "This is a base64 encoded text");
- T(req.getResponseHeader("Content-Type") == "text/plain");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/foo2");
- T(req.responseText == "This is a base64 encoded text");
- T(req.getResponseHeader("Content-Type") == "text/plain");
-
-
- // test POST
- // hello update world
-
- var doc = {"word":"plankton", "name":"Rusty"}
- var resp = db.save(doc);
- T(resp.ok);
- var docid = resp.id;
-
- xhr = CouchDB.request("PUT", "/"+dbName+"/_design/test/_rewrite/hello/"+docid);
- T(xhr.status == 201);
- T(xhr.responseText == "hello doc");
- T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")))
-
- doc = db.open(docid);
- T(doc.world == "hello");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome?name=user");
- T(req.responseText == "Welcome user");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome/user");
- T(req.responseText == "Welcome user");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome2");
- T(req.responseText == "Welcome user");
-
- xhr = CouchDB.request("PUT", "/"+dbName+"/_design/test/_rewrite/welcome3/test");
- T(xhr.status == 201);
- T(xhr.responseText == "New World");
- T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome3/test");
- T(xhr.responseText == "Welcome test");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome4/user");
- T(req.responseText == "Welcome user");
-
- req = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/welcome5/welcome3");
- T(req.responseText == "Welcome welcome3");
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/basicView");
- T(xhr.status == 200, "view call");
- T(/{"total_rows":9/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView");
- T(xhr.status == 200, "with query params");
- T(/FirstKey: [1, 2]/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView2");
- T(xhr.status == 200, "with query params");
- T(/Value: doc 3/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView3");
- T(xhr.status == 200, "with query params");
- T(/Value: doc 4/.test(xhr.responseText));
-
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/complexView4");
- T(xhr.status == 200, "with query params");
- T(/Value: doc 5/.test(xhr.responseText));
-
- // COUCHDB-1612 - send body rewriting get to post
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/sendBody1");
- T(xhr.status == 200, "get->post rewrite failed:\n"+xhr.responseText);
- T(/Value: doc 5 LineNo: 1/.test(xhr.responseText), "get->post rewrite responded wrong:\n"+xhr.responseText);
-
- // COUCHDB-2031 - path normalization versus qs params
- xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/db/_design/test?meta=true");
- T(xhr.status == 200, "path normalization works with qs params");
- var result = JSON.parse(xhr.responseText);
- T(result['_id'] == "_design/test");
- T(typeof(result['_revs_info']) === "object");
-
- // test early response
- var ddoc = {
- _id: "_design/response",
- rewrites: stringFun(function(req){
- status = parseInt(req.query.status);
- return {code: status,
- body: JSON.stringify({"status": status}),
- headers: {'x-foo': 'bar', 'Content-Type': 'application/json'}};
- })
- }
- T(db.save(ddoc).ok);
- var xhr = CouchDB.request("GET", "/"+dbName+"/_design/response/_rewrite?status=200");
- T(xhr.status == 200);
- T(xhr.headers['x-foo'] == 'bar');
- T(xhr.responseText == '{"status":200}');
- var xhr = CouchDB.request("GET", "/"+dbName+"/_design/response/_rewrite?status=451");
- T(xhr.status == 451);
- T(xhr.headers['Content-Type'] == 'application/json');
- var xhr = CouchDB.request("GET", "/"+dbName+"/_design/response/_rewrite?status=600");
- T(xhr.status == 500);
-
-
- // test path relative to server
- var ddoc = {
- _id: "_design/relative",
- rewrites: stringFun(function(req){
- return '../../../_uuids'
- })
- }
- T(db.save(ddoc).ok);
- var xhr = CouchDB.request("GET", "/"+dbName+"/_design/relative/_rewrite/uuids");
- T(xhr.status == 200);
- var result = JSON.parse(xhr.responseText);
- T(result.uuids.length == 1);
-
- // test loop
- var ddoc_loop = {
- _id: "_design/loop",
- rewrites: stringFun(function(req) {
- return '_rewrite/loop';
- })
- };
- db.save(ddoc_loop);
- var url = "/"+dbName+"/_design/loop/_rewrite/loop";
- var xhr = CouchDB.request("GET", url);
- TEquals(400, xhr.status);
-
- // test requests with body preserve the query string rewrite
- var ddoc_qs = {
- "_id": "_design/qs",
- "rewrites": "function (r) { return {path: '../../_changes', query: {'filter': '_doc_ids'}};};"
- }
- db.save(ddoc_qs);
- db.save({"_id": "qs1", "foo": "bar"});
- db.save({"_id": "qs2", "foo": "bar"});
-
- var url = "/"+dbName+"/_design/qs/_rewrite";
-
- var xhr = CouchDB.request("POST", url, {body: JSON.stringify({"doc_ids": ["qs2"]})});
- var result = JSON.parse(xhr.responseText);
- T(xhr.status == 200);
- T(result.results.length == 1, "Only one doc is expected");
- TEquals(result.results[0].id, "qs2");
- // cleanup
- db.deleteDb();
- }
-} \ No newline at end of file
diff --git a/test/javascript/tests/security_validation.js b/test/javascript/tests/security_validation.js
deleted file mode 100644
index 365f716e6..000000000
--- a/test/javascript/tests/security_validation.js
+++ /dev/null
@@ -1,330 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-couchTests.elixir = true;
-couchTests.security_validation = function(debug) {
-
- var db_name = get_random_db_name();
- var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
- db.createDb();
- var authDb_name = get_random_db_name() + "_authdb";
- var authDb = new CouchDB(authDb_name, {"X-Couch-Full-Commit":"false"});
- authDb.createDb();
- var adminDbA, adminDbB; // used later
- if (debug) debugger;
-
- run_on_modified_server(
- [{section: "httpd",
- key: "authentication_handlers",
- value: "{couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}"},
- {section: "couch_httpd_auth",
- key: "authentication_db", value: authDb_name},
- {section: "chttpd_auth",
- key: "authentication_db", value: authDb_name}],
-
- function () {
- // the special case handler no longer exists in clusters, so we have to
- // replicate the behavior using a "normal" DB, even though the tests might no
- // longer run universally (which is why the "X-Couch-Test-Auth" header was introduced).
- // btw: this needs to be INSIDE the configured server to propagate correctly ;-)
- // At least they'd run in the build, though
- T(authDb.save(CouchDB.prepareUserDoc({name: "tom"}, "cat")).ok); // Basic dG9tOmNhdA==
- T(authDb.save(CouchDB.prepareUserDoc({name: "jerry"}, "mouse")).ok); // Basic amVycnk6bW91c2U=
- T(authDb.save(CouchDB.prepareUserDoc({name: "spike"}, "dog")).ok); // Basic c3Bpa2U6ZG9n
- authDb.ensureFullCommit();
-
- // try saving document using the wrong credentials
- var wrongPasswordDb = new CouchDB(db_name + "",
- {"Authorization": "Basic c3Bpa2U6Y2F0"} // spike:cat - which is wrong
- );
-
- try {
- wrongPasswordDb.save({foo:1,author:"Damien Katz"});
- T(false, "Can't get here. Should have thrown an error 1");
- } catch (e) {
- T(e.error == "unauthorized");
- T(wrongPasswordDb.last_req.status == 401);
- }
-
- // test force basic login
- var resp = wrongPasswordDb.request("GET", "/_session?basic=true");
- var err = JSON.parse(resp.responseText);
- T(err.error == "unauthorized");
- T(resp.status == 401);
-
- // Create the design doc that will run custom validation code
- var designDoc = {
- _id:"_design/test",
- language: "javascript",
- validate_doc_update: stringFun(function (newDoc, oldDoc, userCtx, secObj) {
- if (secObj.admin_override) {
- if (userCtx.roles.indexOf('_admin') != -1) {
- // user is admin, they can do anything
- return true;
- }
- }
- // docs should have an author field.
- if (!newDoc._deleted && !newDoc.author) {
- throw {forbidden:
- "Documents must have an author field"};
- }
- if (oldDoc && oldDoc.author != userCtx.name) {
- throw {unauthorized:
- "You are '" + userCtx.name + "', not the author '" + oldDoc.author + "' of this document. You jerk."};
- }
- })
- }
-
- // Save a document normally
- var userDb = new CouchDB("" + db_name + "",
- {"Authorization": "Basic amVycnk6bW91c2U="} // jerry
- );
- // test session
- TEquals("jerry", JSON.parse(userDb.request("GET", "/_session").responseText).userCtx.name);
-
- T(userDb.save({_id:"testdoc", foo:1, author:"jerry"}).ok);
-
- // Attempt to save the design as a non-admin
- try {
- userDb.save(designDoc);
- T(false && "Can't get here. Should have thrown an error on design doc");
- } catch (e) {
- // cluster changes from 401 unauthorized to 403 forbidden
- TEquals("forbidden", e.error);
- TEquals(403, userDb.last_req.status);
- }
-
- // set user as the admin
- T(db.setSecObj({
- admins : {names : ["jerry"]}
- }).ok);
-
- // TODO: when _security is correctly honored (COUCHDB-2990), switch back
- //T(userDb.save(designDoc).ok);
- T(db.save(designDoc).ok);
-
- var user2Db = new CouchDB("" + db_name + "",
- {"Authorization": "Basic dG9tOmNhdA=="} // tom
- );
- // Attempt to save the design as a non-admin (in replication scenario)
- designDoc.foo = "bar";
- designDoc._rev = "2-642e20f96624a0aae6025b4dba0c6fb2";
- try {
- user2Db.save(designDoc, {new_edits : false});
- T(false && "Can't get here. Should have thrown an error on design doc");
- } catch (e) {
- // cluster changes from 401 unauthorized to 403 forbidden
- TEquals("forbidden", e.error);
- TEquals(403, userDb.last_req.status);
- }
-
- // test the _session API
- var resp = userDb.request("GET", "/_session");
- var user = JSON.parse(resp.responseText).userCtx;
- T(user.name == "jerry");
- // test that the roles are listed properly
- TEquals(user.roles, []);
-
-
- // update the document
- var doc = userDb.open("testdoc");
- doc.foo=2;
- T(userDb.save(doc).ok);
-
- // Save a document that's missing an author field (before and after compaction)
- for (var i=0; i<2; i++) {
- try {
- userDb.save({foo:1});
- T(false && "Can't get here. Should have thrown an error 2");
- } catch (e) {
- T(e.error == "forbidden");
- T(userDb.last_req.status == 403);
- }
- // compact - no longer available on clusters (but: the test is still valid without compaction)
- /*T(db.compact().ok);
- T(db.last_req.status == 202);
- // compaction isn't instantaneous, loop until done
- while (db.info().compact_running) {};*/
- }
-
- // Now attempt to update the document as a different user, tom
- var doc = user2Db.open("testdoc");
- doc.foo=3;
- try {
- user2Db.save(doc);
- T(false && "Can't get here. Should have thrown an error 3");
- } catch (e) {
- T(e.error == "unauthorized");
- T(user2Db.last_req.status == 401);
- }
-
- // Now have jerry change the author to tom
- doc = userDb.open("testdoc");
- doc.author="tom";
- T(userDb.save(doc).ok);
-
- // Now update the document as tom
- doc = user2Db.open("testdoc");
- doc.foo = 3;
- T(user2Db.save(doc).ok);
-
- // jerry can't delete it
- try {
- userDb.deleteDoc(doc);
- T(false && "Can't get here. Should have thrown an error 4");
- } catch (e) {
- T(e.error == "unauthorized");
- T(userDb.last_req.status == 401);
- }
-
- // admin must save with author field unless admin override
- var resp = db.request("GET", "/_session");
- var user = JSON.parse(resp.responseText).userCtx;
- T(user.name == null);
- // test that we are admin
- TEquals(user.roles, ["_admin"]);
-
- // can't save the doc even though we are admin
- var doc = db.open("testdoc");
- doc.foo=3;
- try {
- db.save(doc);
- T(false && "Can't get here. Should have thrown an error 3");
- } catch (e) {
- T(e.error == "unauthorized");
- T(db.last_req.status == 401);
- }
-
- // now turn on admin override
- T(db.setDbProperty("_security", {admin_override : true}).ok);
- // TODO: re-include after COUCHDB-2990
- //T(db.save(doc).ok);
-
- // try to do something lame
- try {
- db.setDbProperty("_security", ["foo"]);
- T(false && "can't do this");
- } catch(e) {}
-
- // go back to normal
- T(db.setDbProperty("_security", {admin_override : false}).ok);
-
- // Now delete document
- T(user2Db.deleteDoc(doc).ok);
-
- // now test bulk docs
- var docs = [{_id:"bahbah",author:"jerry",foo:"bar"},{_id:"fahfah",foo:"baz"}];
-
- // Create the docs
- var results = db.bulkSave(docs);
-
- T(results[0].rev)
- T(results[0].error == undefined)
- T(results[1].rev === undefined)
- T(results[1].error == "forbidden")
-
- T(db.open("bahbah"));
- T(db.open("fahfah") == null);
-
-
- // now all or nothing with a failure - no longer available on clusters
-/* var docs = [{_id:"booboo",author:"Damien Katz",foo:"bar"},{_id:"foofoo",foo:"baz"}];
-
- // Create the docs
- var results = db.bulkSave(docs, {all_or_nothing:true});
-
- T(results.errors.length == 1);
- T(results.errors[0].error == "forbidden");
- T(db.open("booboo") == null);
- T(db.open("foofoo") == null);
-*/
-
- // Now test replication
- var AuthHeaders = {"Authorization": "Basic c3Bpa2U6ZG9n"}; // spike
- adminDbA = new CouchDB("" + db_name + "_a", {"X-Couch-Full-Commit":"false"});
- adminDbB = new CouchDB("" + db_name + "_b", {"X-Couch-Full-Commit":"false"});
- var dbA = new CouchDB("" + db_name + "_a", AuthHeaders);
- var dbB = new CouchDB("" + db_name + "_b", AuthHeaders);
- // looping does not really add value as the scenario is the same anyway (there's nothing to be gained from it)
- var A = CouchDB.protocol + CouchDB.host + "/" + db_name + "_a";
- var B = CouchDB.protocol + CouchDB.host + "/" + db_name + "_b";
-
- // (the databases never exist beforehand - and we make sure they're deleted below)
- //adminDbA.deleteDb();
- adminDbA.createDb();
- //adminDbB.deleteDb();
- adminDbB.createDb();
-
- // save and replicate documents that will and will not pass our design
- // doc validation function.
- T(dbA.save({_id:"foo1",value:"a",author:"tom"}).ok);
- T(dbA.save({_id:"foo2",value:"a",author:"spike"}).ok);
- T(dbA.save({_id:"bad1",value:"a"}).ok);
-
- T(CouchDB.replicate(A, B, {headers:AuthHeaders}).ok);
- T(CouchDB.replicate(B, A, {headers:AuthHeaders}).ok);
-
- T(dbA.open("foo1"));
- T(dbB.open("foo1"));
- T(dbA.open("foo2"));
- T(dbB.open("foo2"));
-
- // save the design doc to dbA
- delete designDoc._rev; // clear rev from previous saves
- T(adminDbA.save(designDoc).ok);
-
- // no effect on already saved docs
- T(dbA.open("bad1"));
-
- // Update some docs on dbB. Since the design hasn't replicated, anything
- // is allowed.
-
- // this edit will fail validation on replication to dbA (no author)
- T(dbB.save({_id:"bad2",value:"a"}).ok);
-
- // this edit will fail security on replication to dbA (wrong author
- // replicating the change)
- var foo1 = dbB.open("foo1");
- foo1.value = "b";
- T(dbB.save(foo1).ok);
-
- // this is a legal edit
- var foo2 = dbB.open("foo2");
- foo2.value = "b";
- T(dbB.save(foo2).ok);
-
- var results = CouchDB.replicate({"url": B, "headers": AuthHeaders}, {"url": A, "headers": AuthHeaders}, {headers:AuthHeaders});
- T(results.ok);
- TEquals(1, results.history[0].docs_written);
- TEquals(2, results.history[0].doc_write_failures);
-
- // bad2 should not be on dbA
- T(dbA.open("bad2") == null);
-
- // The edit to foo1 should not have replicated.
- T(dbA.open("foo1").value == "a");
-
- // The edit to foo2 should have replicated.
- T(dbA.open("foo2").value == "b");
- });
-
- // cleanup
- db.deleteDb();
- if(adminDbA){
- adminDbA.deleteDb();
- }
- if(adminDbB){
- adminDbB.deleteDb();
- }
- authDb.deleteDb();
- // don't have to clean the backend authDb since this test only calls
- // couch_auth_cache:get_admin/1 which doesn't auto-create the users db
-};
diff --git a/test/javascript/tests/show_documents.js b/test/javascript/tests/show_documents.js
deleted file mode 100644
index e604f3058..000000000
--- a/test/javascript/tests/show_documents.js
+++ /dev/null
@@ -1,376 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-couchTests.elixir = true
-couchTests.show_documents = function(debug) {
-
- var db_name = get_random_db_name();
- var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
- db.createDb();
- if (debug) debugger;
-
- var designDoc = {
- _id:"_design/template",
- language: "javascript",
- shows: {
- "hello" : stringFun(function(doc, req) {
- log("hello fun");
- if (doc) {
- return "Hello World";
- } else {
- if(req.id) {
- return "New World";
- } else {
- return "Empty World";
- }
- }
- }),
- "just-name" : stringFun(function(doc, req) {
- if (doc) {
- return {
- body : "Just " + doc.name
- };
- } else {
- return {
- body : "No such doc",
- code : 404
- };
- }
- }),
- "json" : stringFun(function(doc, req) {
- return {
- json : doc
- }
- }),
- "req-info" : stringFun(function(doc, req) {
- return {
- json : req
- }
- }),
- "show-deleted" : stringFun(function(doc, req) {
- if(doc) {
- return doc._id;
- } else {
- return "No doc " + req.id;
- }
- }),
- "render-error" : stringFun(function(doc, req) {
- return noSuchVariable;
- }),
- "empty" : stringFun(function(doc, req) {
- return "";
- }),
- "fail" : stringFun(function(doc, req) {
- return doc._id;
- }),
- "no-set-etag" : stringFun(function(doc, req) {
- return {
- headers : {
- "Etag" : "skipped"
- },
- "body" : "something"
- }
- }),
- "list-api" : stringFun(function(doc, req) {
- start({"X-Couch-Test-Header": "Yeah"});
- send("Hey");
- }),
- "list-api-provides" : stringFun(function(doc, req) {
- provides("text", function(){
- send("foo, ");
- send("bar, ");
- send("baz!");
- })
- }),
- "list-api-provides-and-return" : stringFun(function(doc, req) {
- provides("text", function(){
- send("4, ");
- send("5, ");
- send("6, ");
- return "7!";
- })
- send("1, ");
- send("2, ");
- return "3, ";
- }),
- "list-api-mix" : stringFun(function(doc, req) {
- start({"X-Couch-Test-Header": "Yeah"});
- send("Hey ");
- return "Dude";
- }),
- "list-api-mix-with-header" : stringFun(function(doc, req) {
- start({"X-Couch-Test-Header": "Yeah"});
- send("Hey ");
- return {
- headers: {
- "X-Couch-Test-Header-Awesome": "Oh Yeah!"
- },
- body: "Dude"
- };
- }),
- "accept-switch" : stringFun(function(doc, req) {
- if (req.headers["Accept"].match(/image/)) {
- return {
- // a 16x16 px version of the CouchDB logo
- "base64" :
-["iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAsV",
-"BMVEUAAAD////////////////////////5ur3rEBn////////////////wDBL/",
-"AADuBAe9EB3IEBz/7+//X1/qBQn2AgP/f3/ilpzsDxfpChDtDhXeCA76AQH/v7",
-"/84eLyWV/uc3bJPEf/Dw/uw8bRWmP1h4zxSlD6YGHuQ0f6g4XyQkXvCA36MDH6",
-"wMH/z8/yAwX64ODeh47BHiv/Ly/20dLQLTj98PDXWmP/Pz//39/wGyJ7Iy9JAA",
-"AADHRSTlMAbw8vf08/bz+Pv19jK/W3AAAAg0lEQVR4Xp3LRQ4DQRBD0QqTm4Y5",
-"zMxw/4OleiJlHeUtv2X6RbNO1Uqj9g0RMCuQO0vBIg4vMFeOpCWIWmDOw82fZx",
-"vaND1c8OG4vrdOqD8YwgpDYDxRgkSm5rwu0nQVBJuMg++pLXZyr5jnc1BaH4GT",
-"LvEliY253nA3pVhQqdPt0f/erJkMGMB8xucAAAAASUVORK5CYII="].join(''),
- headers : {
- "Content-Type" : "image/png",
- "Vary" : "Accept" // we set this for proxy caches
- }
- };
- } else {
- return {
- "body" : "accepting text requests",
- headers : {
- "Content-Type" : "text/html",
- "Vary" : "Accept"
- }
- };
- }
- }),
- "provides" : stringFun(function(doc, req) {
- registerType("foo", "application/foo","application/x-foo");
-
- provides("html", function() {
- return "Ha ha, you said \"" + doc.word + "\".";
- });
-
- provides("foo", function() {
- return "foofoo";
- });
- }),
- "withSlash": stringFun(function(doc, req) {
- return { json: doc }
- }),
- "secObj": stringFun(function(doc, req) {
- return { json: req.secObj };
- })
- }
- };
- T(db.save(designDoc).ok);
-
- var doc = {"word":"plankton", "name":"Rusty"}
- var resp = db.save(doc);
- T(resp.ok);
- var docid = resp.id;
-
- // show error
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/");
- T(xhr.status == 404, 'Should be missing');
- T(JSON.parse(xhr.responseText).reason == "Invalid path.");
-
- // hello template world
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/hello/"+docid);
- T(xhr.responseText == "Hello World", "hello");
- T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")));
-
-
- // Fix for COUCHDB-379
- T(equals(xhr.getResponseHeader("Server").substr(0,7), "CouchDB"));
-
- // // error stacktraces
- // xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/render-error/"+docid);
- // T(JSON.parse(xhr.responseText).error == "render_error");
-
- // hello template world (no docid)
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/hello");
- T(xhr.responseText == "Empty World");
-
- // empty show function (no docid)
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/empty");
- T(xhr.responseText == "");
-
- // fail show function (non-existing docid)
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/fail/nonExistingDoc");
- T(xhr.status == 404);
- var resp = JSON.parse(xhr.responseText);
- T(resp.error == "not_found");
-
- // show with doc
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/just-name/"+docid);
- T(xhr.responseText == "Just Rusty");
-
- // show with missing doc
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/just-name/missingdoc");
- T(xhr.status == 404);
- TEquals("No such doc", xhr.responseText);
-
- // show with missing func
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/missing/"+docid);
- T(xhr.status == 404, "function is missing");
-
- // missing design doc
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/missingddoc/_show/just-name/"+docid);
- T(xhr.status == 404);
- var resp = JSON.parse(xhr.responseText);
- T(resp.error == "not_found");
-
- // query parameters
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/req-info/"+docid+"?foo=bar", {
- headers: {
- "Accept": "text/html;text/plain;*/*",
- "X-Foo" : "bar"
- }
- });
- var resp = JSON.parse(xhr.responseText);
- T(equals(resp.headers["X-Foo"], "bar"));
- T(equals(resp.query, {foo:"bar"}));
- T(equals(resp.method, "GET"));
- T(equals(resp.path[5], docid));
- T(equals(resp.info.db_name, "" + db_name + ""));
-
- // accept header switching
- // different mime has different etag
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/accept-switch/"+docid, {
- headers: {"Accept": "text/html;text/plain;*/*"}
- });
- var ct = xhr.getResponseHeader("Content-Type");
- T(/text\/html/.test(ct))
- T("Accept" == xhr.getResponseHeader("Vary"));
- var etag = xhr.getResponseHeader("etag");
-
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/accept-switch/"+docid, {
- headers: {"Accept": "image/png;*/*"}
- });
- T(xhr.responseText.match(/PNG/))
- T("image/png" == xhr.getResponseHeader("Content-Type"));
- var etag2 = xhr.getResponseHeader("etag");
- T(etag2 != etag);
-
- // proper etags
- // show with doc
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/just-name/"+docid);
- // extract the ETag header values
- etag = xhr.getResponseHeader("etag");
- // get again with etag in request
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/just-name/"+docid, {
- headers: {"if-none-match": etag}
- });
- // should be 304
- T(xhr.status == 304);
-
- // update the doc
- doc.name = "Crusty";
- resp = db.save(doc);
- T(resp.ok);
- // req with same etag
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/just-name/"+docid, {
- headers: {"if-none-match": etag}
- });
- // status is 200
- T(xhr.status == 200);
-
- // JS can't set etag
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/no-set-etag/"+docid);
- // extract the ETag header values
- etag = xhr.getResponseHeader("etag");
- T(etag != "skipped")
-
- // test the provides mime matcher
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/provides/"+docid, {
- headers: {
- "Accept": 'text/html,application/atom+xml; q=0.9'
- }
- });
- var ct = xhr.getResponseHeader("Content-Type");
- T(/charset=utf-8/.test(ct))
- T(/text\/html/.test(ct))
- T(xhr.responseText == "Ha ha, you said \"plankton\".");
-
- // registering types works
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/provides/"+docid, {
- headers: {
- "Accept": "application/x-foo"
- }
- });
- T(xhr.getResponseHeader("Content-Type") == "application/x-foo");
- T(xhr.responseText.match(/foofoo/));
-
- // test the provides mime matcher without a match
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/provides/"+docid, {
- headers: {
- "Accept": 'text/monkeys'
- }
- });
- var rs = JSON.parse(xhr.responseText);
- T(rs.error == "not_acceptable")
-
-
- // test inclusion of conflict state
- var doc1 = {_id:"foo", a:1};
- var doc2 = {_id:"foo", a:2};
- db.save(doc1);
-
- var doc3 = {_id:"a/b/c", a:1};
- db.save(doc3);
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/withSlash/a/b/c");
- T(xhr.status == 200);
-
- // hello template world (non-existing docid)
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/hello/nonExistingDoc");
- T(xhr.responseText == "New World");
-
- // test list() compatible API
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/list-api/foo");
- T(xhr.responseText == "Hey");
- TEquals("Yeah", xhr.getResponseHeader("X-Couch-Test-Header"), "header should be cool");
-
- // test list() compatible API with provides function
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/list-api-provides/foo?format=text");
- TEquals(xhr.responseText, "foo, bar, baz!", "should join chunks to response body");
-
- // should keep next result order: chunks + return value + provided chunks + provided return value
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/list-api-provides-and-return/foo?format=text");
- TEquals(xhr.responseText, "1, 2, 3, 4, 5, 6, 7!", "should not break 1..7 range");
-
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/list-api-mix/foo");
- T(xhr.responseText == "Hey Dude");
- TEquals("Yeah", xhr.getResponseHeader("X-Couch-Test-Header"), "header should be cool");
-
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/list-api-mix-with-header/foo");
- T(xhr.responseText == "Hey Dude");
- TEquals("Yeah", xhr.getResponseHeader("X-Couch-Test-Header"), "header should be cool");
- TEquals("Oh Yeah!", xhr.getResponseHeader("X-Couch-Test-Header-Awesome"), "header should be cool");
-
- // test deleted docs
- var doc = {_id:"testdoc",foo:1};
- db.save(doc);
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/show-deleted/testdoc");
- TEquals("testdoc", xhr.responseText, "should return 'testdoc'");
-
- db.deleteDoc(doc);
- var xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/show-deleted/testdoc");
- TEquals("No doc testdoc", xhr.responseText, "should return 'no doc testdoc'");
-
- // (we don't need no modified server!)
- T(db.setDbProperty("_security", {foo: true}).ok);
- T(db.save({_id:"testdoc",foo:1}).ok);
- // nasty source of Heisenbugs - it replicates after a short time, so give it some tries
- // (needs PR #400 and #401 to be merged)
- retry_part(function(){
- xhr = CouchDB.request("GET", "/" + db_name + "/_design/template/_show/secObj");
- var resp = JSON.parse(xhr.responseText);
- T(resp.foo == true);
- }, 10);
-
- // cleanup
- db.deleteDb();
-
-};
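The show-function contract exercised by the test above boils down to a (doc, req) function stored as a string in a design document: it may return a plain string or an object carrying body, code, headers, json, or base64. A minimal sketch of that contract, assuming a hypothetical "_design/example" ddoc and using Function.prototype.toString as a rough stand-in for the stringFun helper the test relies on:

    // Sketch only; "greet" and "_design/example" are illustrative names.
    var exampleDdoc = {
      _id: "_design/example",
      shows: {
        // GET /<db>/_design/example/_show/greet/<docid>
        greet: (function(doc, req) {
          if (!doc) {
            // a missing doc (or no docid at all) still reaches the function
            return { code: 404, body: "no such doc" };
          }
          return {
            body: "Hello " + doc.name,
            headers: { "Content-Type": "text/plain" }
          };
        }).toString()
      }
    };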
diff --git a/test/javascript/tests/users_db_security.js b/test/javascript/tests/users_db_security.js
deleted file mode 100644
index 3e293c5eb..000000000
--- a/test/javascript/tests/users_db_security.js
+++ /dev/null
@@ -1,418 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-couchTests.elixir = true;
-couchTests.users_db_security = function(debug) {
- var db_name = '_users';
- var usersDb = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
- try { usersDb.createDb(); } catch (e) { /* ignore if exists*/ }
-
- if (debug) debugger;
-
- var loginUser = function(username) {
- var pws = {
- jan: "apple",
- jchris: "mp3",
- jchris1: "couch",
- fdmanana: "foobar",
- benoitc: "test"
- };
- // we are changing jchris’s password further down
- // the next two lines keep the code cleaner in
- // the actual tests
- var username1 = username.replace(/[0-9]$/, "");
- var password = pws[username];
- T(CouchDB.login(username1, password).ok);
- };
-
- var open_as = function(db, docId, username) {
- loginUser(username);
- try {
- return db.open(docId, {"anti-cache": Math.round(Math.random() * 100000)});
- } finally {
- CouchDB.logout();
- }
- };
-
- var view_as = function(db, viewname, username) {
- loginUser(username);
- try {
- return db.view(viewname);
- } finally {
- CouchDB.logout();
- }
- };
-
- var save_as = function(db, doc, username)
- {
- loginUser(username);
- try {
- return db.save(doc);
- } catch (ex) {
- return ex;
- } finally {
- CouchDB.logout();
- }
- };
-
- var changes_as = function(db, username)
- {
- loginUser(username);
- try {
- return db.changes();
- } catch(ex) {
- return ex;
- } finally {
- CouchDB.logout();
- }
- };
-
- var request_as = function(db, ddoc_path, username) {
- loginUser(username);
- try {
- var uri = db.uri + ddoc_path;
- var req = CouchDB.request("GET", uri);
- return req;
- } finally {
- CouchDB.logout();
- }
- };
-
- var testFun = function()
- {
-
- // _users db
- // a doc with a 'password' field should have that password hashed into
- // 'derived_key', with the salt stored in 'salt'; the plaintext 'password'
- // field is removed from the saved doc.
- // Existing 'derived_key' and 'salt' fields are overwritten with new values
- // when a non-null 'password' field exists.
- // anonymous should be able to create a user document
- var userDoc = {
- _id: "org.couchdb.user:jchris",
- type: "user",
- name: "jchris",
- password: "mp3",
- roles: []
- };
-
- // jan is the server admin (configured via run_on_modified_server below)
- TEquals(true, usersDb.save(userDoc).ok, "should save document");
- wait(5000)
- userDoc = open_as(usersDb, "org.couchdb.user:jchris", "jchris");
- TEquals(undefined, userDoc.password, "password field should be null 1");
- TEquals(40, userDoc.derived_key.length, "derived_key should exist");
- TEquals(32, userDoc.salt.length, "salt should exist");
-
- // create server admin
-
- // anonymous should not be able to read an existing user's user document
- var res = usersDb.open("org.couchdb.user:jchris");
- TEquals(null, res, "anonymous user doc read should be not found");
-
- // anonymous should not be able to read /_users/_changes
- try {
- var ch = usersDb.changes();
- T(false, "anonymous can read _changes");
- } catch(e) {
- TEquals("unauthorized", e.error, "anoymous can't read _changes");
- }
-
- // user should be able to read their own document
- var jchrisDoc = open_as(usersDb, "org.couchdb.user:jchris", "jchris");
- TEquals("org.couchdb.user:jchris", jchrisDoc._id);
-
- // user should not be able to read /_users/_changes
- var changes = changes_as(usersDb, "jchris");
- TEquals("unauthorized", changes.error, "user can't read _changes");
-
- // new 'password' fields should trigger new hashing routine
- jchrisDoc.password = "couch";
-
- TEquals(true, save_as(usersDb, jchrisDoc, "jchris").ok);
- // wait(10000);
- var jchrisDoc = open_as(usersDb, "org.couchdb.user:jchris", "jan");
-
- TEquals(undefined, jchrisDoc.password, "password field should be null 2");
- TEquals(40, jchrisDoc.derived_key.length, "derived_key should exist");
- TEquals(32, jchrisDoc.salt.length, "salt should exist");
-
- TEquals(true, userDoc.salt != jchrisDoc.salt, "should have new salt");
- TEquals(true, userDoc.derived_key != jchrisDoc.derived_key,
- "should have new derived_key");
-
- // user should not be able to read another user's user document
- var fdmananaDoc = {
- _id: "org.couchdb.user:fdmanana",
- type: "user",
- name: "fdmanana",
- password: "foobar",
- roles: []
- };
-
- usersDb.save(fdmananaDoc);
- var fdmananaDocAsReadByjchris = open_as(usersDb, "org.couchdb.user:fdmanana", "jchris1");
- TEquals(null, fdmananaDocAsReadByjchris,
- "should not_found opening another user's user doc");
-
-
- // save a db admin
- var benoitcDoc = {
- _id: "org.couchdb.user:benoitc",
- type: "user",
- name: "benoitc",
- password: "test",
- roles: ["user_admin"]
- };
- save_as(usersDb, benoitcDoc, "jan");
-
- TEquals(true, CouchDB.login("jan", "apple").ok);
- T(usersDb.setSecObj({
- "admins" : {
- roles : [],
- names : ["benoitc"]
- }
- }).ok);
- CouchDB.logout();
-
- // user should not be able to read from any view
- var ddoc = {
- _id: "_design/user_db_auth",
- views: {
- test: {
- map: "function(doc) { emit(doc._id, null); }"
- }
- },
- lists: {
- names: "function(head, req) { "
- + "var row; while (row = getRow()) { send(row.key + \"\\n\"); }"
- + "}"
- },
- shows: {
- name: "function(doc, req) { return doc.name; }"
- }
- };
-
- save_as(usersDb, ddoc, "jan");
-
- try {
- usersDb.view("user_db_auth/test");
- T(false, "user had access to view in admin db");
- } catch(e) {
- TEquals("forbidden", e.error,
- "non-admins should not be able to read a view");
- }
-
- // admin should be able to read from any view
- var result = view_as(usersDb, "user_db_auth/test", "jan");
- TEquals(3, result.total_rows, "should allow access and list four users to admin");
-
- // db admin should be able to read from any view
- var result = view_as(usersDb, "user_db_auth/test", "benoitc");
- TEquals(3, result.total_rows, "should allow access and list four users to db admin");
-
-
- // non-admins can't read design docs
- try {
- open_as(usersDb, "_design/user_db_auth", "jchris1");
- T(false, "non-admin read design doc, should not happen");
- } catch(e) {
- TEquals("forbidden", e.error, "non-admins can't read design docs");
- }
-
- // admin should be able to read _list
- var listPath = ddoc["_id"] + "/_list/names/test";
- var result = request_as(usersDb, listPath, "jan");
- var lines = result.responseText.split("\n");
- T(result.status == 200, "should allow access to db admin");
- TEquals(4, lines.length, "should list users to db admin");
-
- // non-admins can't read _list
- var result = request_as(usersDb, listPath, "jchris1");
- T(result.status == 403, "should deny access to non-admin");
-
- // admin should be able to read _show
- var showPath = ddoc["_id"] + "/_show/name/org.couchdb.user:jchris";
- var result = request_as(usersDb, showPath, "jan");
- T(result.status == 200, "should allow access to db admin");
- TEquals("jchris", result.responseText, "should show username to db admin");
-
- // non-admin should be able to access own _show
- var result = request_as(usersDb, showPath, "jchris1");
- T(result.status == 200, "should allow access to own user record");
- TEquals("jchris", result.responseText, "should show own username");
-
- // non-admin can't read other's _show
- var showPath = ddoc["_id"] + "/_show/name/org.couchdb.user:jan";
- var result = request_as(usersDb, showPath, "jchris1");
- T(result.status == 404, "non-admin can't read others's user docs");
-
- // admin should be able to read and edit any user doc
- fdmananaDoc.password = "mobile";
- var result = save_as(usersDb, fdmananaDoc, "jan");
- TEquals(true, result.ok, "admin should be able to update any user doc");
-
- // admin should be able to read and edit any user doc
- fdmananaDoc.password = "mobile1";
- var result = save_as(usersDb, fdmananaDoc, "benoitc");
- TEquals(true, result.ok, "db admin by role should be able to update any user doc");
-
- TEquals(true, CouchDB.login("jan", "apple").ok);
- T(usersDb.setSecObj({
- "admins" : {
- roles : ["user_admin"],
- names : []
- }
- }).ok);
- CouchDB.logout();
-
- // db admin should be able to read and edit any user doc
- fdmananaDoc.password = "mobile2";
- var result = save_as(usersDb, fdmananaDoc, "benoitc");
- TEquals(true, result.ok, "db admin should be able to update any user doc");
-
- // ensure creation of old-style docs still works
- var robertDoc = CouchDB.prepareUserDoc({ name: "robert" }, "anchovy");
- var result = usersDb.save(robertDoc);
- TEquals(true, result.ok, "old-style user docs should still be accepted");
-
- // log in one last time so run_on_modified_server can clean up the admin account
- TEquals(true, CouchDB.login("jan", "apple").ok);
-
- // run_on_modified_server([
- // {
- // section: "couch_httpd_auth",
- // key: "iterations",
- // value: "1"
- // },
- // {
- // section: "couch_httpd_auth",
- // key: "public_fields",
- // value: "name,type"
- // },
- // {
- // section: "couch_httpd_auth",
- // key: "users_db_public",
- // value: "true"
- // },
- // {
- // section: "admins",
- // key: "jan",
- // value: "apple"
- // }
- // ], function() {
- // var res = usersDb.open("org.couchdb.user:jchris");
- // TEquals("jchris", res.name);
- // TEquals("user", res.type);
- // TEquals(undefined, res.roles);
- // TEquals(undefined, res.salt);
- // TEquals(undefined, res.password_scheme);
- // TEquals(undefined, res.derived_key);
- //
- // TEquals(true, CouchDB.login("jan", "apple").ok);
- //
- // var all = usersDb.allDocs({ include_docs: true });
- // T(all.rows);
- // if (all.rows) {
- // T(all.rows.every(function(row) {
- // if (row.doc) {
- // return Object.keys(row.doc).every(function(key) {
- // return key === 'name' || key === 'type';
- // });
- // } else {
- // if(row.id[0] == "_") {
- // // ignore design docs
- // return true
- // } else {
- // return false;
- // }
- // }
- // }));
- // }
- // // log in one last time so run_on_modified_server can clean up the admin account
- // TEquals(true, CouchDB.login("jan", "apple").ok);
- // });
-
- run_on_modified_server([
- {
- section: "couch_httpd_auth",
- key: "public_fields",
- value: "name"
- },
- {
- section: "couch_httpd_auth",
- key: "users_db_public",
- value: "false"
- }
- ], function() {
- TEquals(true, CouchDB.login("jchris", "couch").ok);
-
- try {
- var all = usersDb.allDocs({ include_docs: true });
- T(false); // should never hit
- } catch(e) {
- TEquals("unauthorized", e.error, "should throw");
- }
-
- // COUCHDB-1888 make sure admins always get all fields
- TEquals(true, CouchDB.login("jan", "apple").ok);
- var all_admin = usersDb.allDocs({ include_docs: "true" });
- TEquals("user", all_admin.rows[2].doc.type,
- "should return type");
-
-
- // log in one last time so run_on_modified_server can clean up the admin account
- TEquals(true, CouchDB.login("jan", "apple").ok);
- });
- };
-
- run_on_modified_server(
- [
- {
- section:"couchdb",
- key:"users_db_security_editable",
- value:"true"
- },
- {
- section: "couch_httpd_auth",
- key: "iterations",
- value: "1"
- },
- {
- section: "admins",
- key: "jan",
- value: "apple"
- }],
- function() {
- try {
- testFun();
- } finally {
- CouchDB.login("jan", "apple");
- usersDb.deleteDb(); // cleanup
- waitForSuccess(function() {
- var req = CouchDB.request("GET", db_name);
- if (req.status == 404) {
- return true
- }
- throw({});
- }, 'usersDb.deleteDb')
- usersDb.createDb();
- waitForSuccess(function() {
- var req = CouchDB.request("GET", db_name);
- if (req.status == 200) {
- return true
- }
- throw({});
- }, 'usersDb.createDb')
- }
- }
- );
- CouchDB.logout();
-};
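The security model the test above asserts for the _users database comes down to three rules: anonymous clients may create their own user document, the plaintext password is replaced on save by derived_key and salt, and ordinary users can only read their own document. A condensed sketch using the same CouchDB/TEquals harness helpers, with a hypothetical user "alice":

    // Sketch only; "alice" and her password are illustrative.
    var usersDb = new CouchDB("_users");
    var alice = {
      _id: "org.couchdb.user:alice",
      type: "user",
      name: "alice",
      password: "whisper",
      roles: []
    };
    // anonymous signup is allowed
    TEquals(true, usersDb.save(alice).ok, "anonymous user creation should work");
    // on read-back (as alice herself) the plaintext password is gone
    TEquals(true, CouchDB.login("alice", "whisper").ok);
    var saved = usersDb.open("org.couchdb.user:alice");
    TEquals(undefined, saved.password, "password replaced by derived_key/salt");
    CouchDB.logout();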
diff --git a/test/random_port.ini b/test/random_port.ini
deleted file mode 100644
index 2b2d13027..000000000
--- a/test/random_port.ini
+++ /dev/null
@@ -1,19 +0,0 @@
-; Licensed to the Apache Software Foundation (ASF) under one
-; or more contributor license agreements. See the NOTICE file
-; distributed with this work for additional information
-; regarding copyright ownership. The ASF licenses this file
-; to you under the Apache License, Version 2.0 (the
-; "License"); you may not use this file except in compliance
-; with the License. You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing,
-; software distributed under the License is distributed on an
-; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-; KIND, either express or implied. See the License for the
-; specific language governing permissions and limitations
-; under the License.
-
-[httpd]
-port = 0
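Setting port = 0 asks the operating system for any free port, which is what lets test instances run side by side without clashing. The idiom is not CouchDB-specific; a generic Node.js illustration of the same trick:

    // Bind to port 0, then read back the port the OS actually assigned.
    const net = require("net");
    const srv = net.createServer();
    srv.listen(0, () => {
      console.log("OS assigned port", srv.address().port);
      srv.close();
    });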
diff --git a/test/view_server/query_server_spec.rb b/test/view_server/query_server_spec.rb
deleted file mode 100644
index 59883c0eb..000000000
--- a/test/view_server/query_server_spec.rb
+++ /dev/null
@@ -1,885 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# to run (requires ruby and rspec):
-# rspec test/view_server/query_server_spec.rb
-#
-# environment options:
-# QS_TRACE=true
-# shows full output from the query server
-# QS_LANG=lang
-# run tests on the query server (for now, one of: js, erlang)
-#
-
-COUCH_ROOT = "#{File.dirname(__FILE__)}/../.." unless defined?(COUCH_ROOT)
-LANGUAGE = ENV["QS_LANG"] || "js"
-
-puts "Running query server specs for #{LANGUAGE} query server"
-
-require 'rspec'
-require 'json'
-
-class OSProcessRunner
- def self.run
- trace = ENV["QS_TRACE"] || false
- puts "launching #{run_command}" if trace
- if block_given?
- IO.popen(run_command, "r+") do |io|
- qs = QueryServerRunner.new(io, trace)
- yield qs
- end
- else
- io = IO.popen(run_command, "r+")
- QueryServerRunner.new(io, trace)
- end
- end
- def initialize io, trace = false
- @qsio = io
- @trace = trace
- end
- def close
- @qsio.close
- end
- def reset!
- run(["reset"])
- end
- def add_fun(fun)
- run(["add_fun", fun])
- end
- def teach_ddoc(ddoc)
- run(["ddoc", "new", ddoc_id(ddoc), ddoc])
- end
- def ddoc_run(ddoc, fun_path, args)
- run(["ddoc", ddoc_id(ddoc), fun_path, args])
- end
- def ddoc_id(ddoc)
- d_id = ddoc["_id"]
- raise 'ddoc must have _id' unless d_id
- d_id
- end
- def get_chunks
- resp = jsgets
- raise "not a chunk" unless resp.first == "chunks"
- return resp[1]
- end
- def run json
- rrun json
- jsgets
- end
- def rrun json
- line = json.to_json
- puts "run: #{line}" if @trace
- @qsio.puts line
- end
- def rgets
- resp = @qsio.gets
- puts "got: #{resp}" if @trace
- resp
- end
- def jsgets
- resp = rgets
- # err = @qserr.gets
- # puts "err: #{err}" if err
- if resp
- begin
- rj = JSON.parse("[#{resp.chomp}]")[0]
- rescue JSON::ParserError
- puts "JSON ERROR (dump under trace mode)"
- # puts resp.chomp
- while resp = rgets
- # puts resp.chomp
- end
- end
- if rj.respond_to?(:[]) && rj.is_a?(Array)
- if rj[0] == "log"
- log = rj[1]
- puts "log: #{log}" if @trace
- rj = jsgets
- end
- end
- rj
- else
- raise "no response"
- end
- end
-end
-
-class QueryServerRunner < OSProcessRunner
-
- COMMANDS = {
- "js" => "#{COUCH_ROOT}/bin/couchjs #{COUCH_ROOT}/share/server/main.js",
- "erlang" => "#{COUCH_ROOT}/test/view_server/run_native_process.es"
- }
-
- def self.run_command
- COMMANDS[LANGUAGE]
- end
-end
-
-class ExternalRunner < OSProcessRunner
- def self.run_command
- "#{COUCH_ROOT}/src/couchdb/couchjs #{COUCH_ROOT}/share/server/echo.js"
- end
-end
-
-# we could organize this into a design document per language.
-# that would make testing future languages really easy.
-
-functions = {
- "emit-twice" => {
- "js" => %{function(doc){emit("foo",doc.a); emit("bar",doc.a)}},
- "erlang" => <<-ERLANG
- fun({Doc}) ->
- A = couch_util:get_value(<<"a">>, Doc, null),
- Emit(<<"foo">>, A),
- Emit(<<"bar">>, A)
- end.
- ERLANG
- },
- "emit-once" => {
- "js" => <<-JS,
- function(doc){
- emit("baz",doc.a)
- }
- JS
- "erlang" => <<-ERLANG
- fun({Doc}) ->
- A = couch_util:get_value(<<"a">>, Doc, null),
- Emit(<<"baz">>, A)
- end.
- ERLANG
- },
- "reduce-values-length" => {
- "js" => %{function(keys, values, rereduce) { return values.length; }},
- "erlang" => %{fun(Keys, Values, ReReduce) -> length(Values) end.}
- },
- "reduce-values-sum" => {
- "js" => %{function(keys, values, rereduce) { return sum(values); }},
- "erlang" => %{fun(Keys, Values, ReReduce) -> lists:sum(Values) end.}
- },
- "validate-forbidden" => {
- "js" => <<-JS,
- function(newDoc, oldDoc, userCtx) {
- if(newDoc.bad)
- throw({forbidden:"bad doc"}); "foo bar";
- }
- JS
- "erlang" => <<-ERLANG
- fun({NewDoc}, _OldDoc, _UserCtx) ->
- case couch_util:get_value(<<"bad">>, NewDoc) of
- undefined -> 1;
- _ -> {[{forbidden, <<"bad doc">>}]}
- end
- end.
- ERLANG
- },
- "show-simple" => {
- "js" => <<-JS,
- function(doc, req) {
- log("ok");
- return [doc.title, doc.body].join(' - ');
- }
- JS
- "erlang" => <<-ERLANG
- fun({Doc}, Req) ->
- Title = couch_util:get_value(<<"title">>, Doc),
- Body = couch_util:get_value(<<"body">>, Doc),
- Resp = <<Title/binary, " - ", Body/binary>>,
- {[{<<"body">>, Resp}]}
- end.
- ERLANG
- },
- "show-headers" => {
- "js" => <<-JS,
- function(doc, req) {
- var resp = {"code":200, "headers":{"X-Plankton":"Rusty"}};
- resp.body = [doc.title, doc.body].join(' - ');
- return resp;
- }
- JS
- "erlang" => <<-ERLANG
- fun({Doc}, Req) ->
- Title = couch_util:get_value(<<"title">>, Doc),
- Body = couch_util:get_value(<<"body">>, Doc),
- Resp = <<Title/binary, " - ", Body/binary>>,
- {[
- {<<"code">>, 200},
- {<<"headers">>, {[{<<"X-Plankton">>, <<"Rusty">>}]}},
- {<<"body">>, Resp}
- ]}
- end.
- ERLANG
- },
- "show-sends" => {
- "js" => <<-JS,
- function(head, req) {
- start({headers:{"Content-Type" : "text/plain"}});
- send("first chunk");
- send('second "chunk"');
- return "tail";
- };
- JS
- "erlang" => <<-ERLANG
- fun(Head, Req) ->
- Resp = {[
- {<<"headers">>, {[{<<"Content-Type">>, <<"text/plain">>}]}}
- ]},
- Start(Resp),
- Send(<<"first chunk">>),
- Send(<<"second \\\"chunk\\\"">>),
- <<"tail">>
- end.
- ERLANG
- },
- "show-while-get-rows" => {
- "js" => <<-JS,
- function(head, req) {
- send("first chunk");
- send(req.q);
- var row;
- log("about to getRow " + typeof(getRow));
- while(row = getRow()) {
- send(row.key);
- };
- return "tail";
- };
- JS
- "erlang" => <<-ERLANG,
- fun(Head, {Req}) ->
- Send(<<"first chunk">>),
- Send(couch_util:get_value(<<"q">>, Req)),
- Fun = fun({Row}, _) ->
- Send(couch_util:get_value(<<"key">>, Row)),
- {ok, nil}
- end,
- {ok, _} = FoldRows(Fun, nil),
- <<"tail">>
- end.
- ERLANG
- },
- "show-while-get-rows-multi-send" => {
- "js" => <<-JS,
- function(head, req) {
- send("bacon");
- var row;
- log("about to getRow " + typeof(getRow));
- while(row = getRow()) {
- send(row.key);
- send("eggs");
- };
- return "tail";
- };
- JS
- "erlang" => <<-ERLANG,
- fun(Head, Req) ->
- Send(<<"bacon">>),
- Fun = fun({Row}, _) ->
- Send(couch_util:get_value(<<"key">>, Row)),
- Send(<<"eggs">>),
- {ok, nil}
- end,
- FoldRows(Fun, nil),
- <<"tail">>
- end.
- ERLANG
- },
- "list-simple" => {
- "js" => <<-JS,
- function(head, req) {
- send("first chunk");
- send(req.q);
- var row;
- while(row = getRow()) {
- send(row.key);
- };
- return "early";
- };
- JS
- "erlang" => <<-ERLANG,
- fun(Head, {Req}) ->
- Send(<<"first chunk">>),
- Send(couch_util:get_value(<<"q">>, Req)),
- Fun = fun({Row}, _) ->
- Send(couch_util:get_value(<<"key">>, Row)),
- {ok, nil}
- end,
- FoldRows(Fun, nil),
- <<"early">>
- end.
- ERLANG
- },
- "list-chunky" => {
- "js" => <<-JS,
- function(head, req) {
- send("first chunk");
- send(req.q);
- var row, i=0;
- while(row = getRow()) {
- send(row.key);
- i += 1;
- if (i > 2) {
- return('early tail');
- }
- };
- };
- JS
- "erlang" => <<-ERLANG,
- fun(Head, {Req}) ->
- Send(<<"first chunk">>),
- Send(couch_util:get_value(<<"q">>, Req)),
- Fun = fun
- ({Row}, Count) when Count < 2 ->
- Send(couch_util:get_value(<<"key">>, Row)),
- {ok, Count+1};
- ({Row}, Count) when Count == 2 ->
- Send(couch_util:get_value(<<"key">>, Row)),
- {stop, <<"early tail">>}
- end,
- {ok, Tail} = FoldRows(Fun, 0),
- Tail
- end.
- ERLANG
- },
- "list-old-style" => {
- "js" => <<-JS,
- function(head, req, foo, bar) {
- return "stuff";
- }
- JS
- "erlang" => <<-ERLANG,
- fun(Head, Req, Foo, Bar) ->
- <<"stuff">>
- end.
- ERLANG
- },
- "list-capped" => {
- "js" => <<-JS,
- function(head, req) {
- send("bacon")
- var row, i = 0;
- while(row = getRow()) {
- send(row.key);
- i += 1;
- if (i > 2) {
- return('early');
- }
- };
- }
- JS
- "erlang" => <<-ERLANG,
- fun(Head, Req) ->
- Send(<<"bacon">>),
- Fun = fun
- ({Row}, Count) when Count < 2 ->
- Send(couch_util:get_value(<<"key">>, Row)),
- {ok, Count+1};
- ({Row}, Count) when Count == 2 ->
- Send(couch_util:get_value(<<"key">>, Row)),
- {stop, <<"early">>}
- end,
- {ok, Tail} = FoldRows(Fun, 0),
- Tail
- end.
- ERLANG
- },
- "list-raw" => {
- "js" => <<-JS,
- function(head, req) {
- // log(this.toSource());
- // log(typeof send);
- send("first chunk");
- send(req.q);
- var row;
- while(row = getRow()) {
- send(row.key);
- };
- return "tail";
- };
- JS
- "erlang" => <<-ERLANG,
- fun(Head, {Req}) ->
- Send(<<"first chunk">>),
- Send(couch_util:get_value(<<"q">>, Req)),
- Fun = fun({Row}, _) ->
- Send(couch_util:get_value(<<"key">>, Row)),
- {ok, nil}
- end,
- FoldRows(Fun, nil),
- <<"tail">>
- end.
- ERLANG
- },
- "filter-basic" => {
- "js" => <<-JS,
- function(doc, req) {
- if (doc.good) {
- return true;
- }
- }
- JS
- "erlang" => <<-ERLANG,
- fun({Doc}, Req) ->
- couch_util:get_value(<<"good">>, Doc)
- end.
- ERLANG
- },
- "update-basic" => {
- "js" => <<-JS,
- function(doc, req) {
- doc.world = "hello";
- var resp = [doc, "hello doc"];
- return resp;
- }
- JS
- "erlang" => <<-ERLANG,
- fun({Doc}, Req) ->
- Doc2 = [{<<"world">>, <<"hello">>}|Doc],
- [{Doc2}, {[{<<"body">>, <<"hello doc">>}]}]
- end.
- ERLANG
- },
- "rewrite-basic" => {
- "js" => <<-JS,
- function(req) {
- return "new/location";
- }
- JS
- "erlang" => <<-ERLANG,
- fun(Req) ->
- {[{"path", "new/location"}]}
- end.
- ERLANG
- },
- "rewrite-no-rule" => {
- "js" => <<-JS,
- function(req) {
- return;
- }
- JS
- "erlang" => <<-ERLANG,
- fun(Req) ->
- undefined
- end.
- ERLANG
- },
- "error" => {
- "js" => <<-JS,
- function() {
- throw(["error","error_key","testing"]);
- }
- JS
- "erlang" => <<-ERLANG
- fun(A, B) ->
- throw([<<"error">>,<<"error_key">>,<<"testing">>])
- end.
- ERLANG
- },
- "fatal" => {
- "js" => <<-JS,
- function() {
- throw(["fatal","error_key","testing"]);
- }
- JS
- "erlang" => <<-ERLANG
- fun(A, B) ->
- throw([<<"fatal">>,<<"error_key">>,<<"testing">>])
- end.
- ERLANG
- }
-}
-
-def make_ddoc(fun_path, fun_str)
- doc = {"_id"=>"foo"}
- d = doc
- while p = fun_path.shift
- l = p
- if !fun_path.empty?
- d[p] = {}
- d = d[p]
- end
- end
- d[l] = fun_str
- doc
-end
-
-describe "query server normal case" do
- before(:all) do
- `cd #{COUCH_ROOT} && make`
- @qs = QueryServerRunner.run
- end
- after(:all) do
- @qs.close
- end
- it "should reset" do
- @qs.run(["reset"]).should == true
- end
- it "should not erase ddocs on reset" do
- @fun = functions["show-simple"][LANGUAGE]
- @ddoc = make_ddoc(["shows","simple"], @fun)
- @qs.teach_ddoc(@ddoc)
- @qs.run(["reset"]).should == true
- @qs.ddoc_run(@ddoc,
- ["shows","simple"],
- [{:title => "Best ever", :body => "Doc body"}, {}]).should ==
- ["resp", {"body" => "Best ever - Doc body"}]
- end
-
- it "should run map funs" do
- @qs.reset!
- @qs.run(["add_fun", functions["emit-twice"][LANGUAGE]]).should == true
- @qs.run(["add_fun", functions["emit-once"][LANGUAGE]]).should == true
- rows = @qs.run(["map_doc", {:a => "b"}])
- rows[0][0].should == ["foo", "b"]
- rows[0][1].should == ["bar", "b"]
- rows[1][0].should == ["baz", "b"]
- end
- describe "reduce" do
- before(:all) do
- @fun = functions["reduce-values-length"][LANGUAGE]
- @qs.reset!
- end
- it "should reduce" do
- kvs = (0...10).collect{|i|[i,i*2]}
- @qs.run(["reduce", [@fun], kvs]).should == [true, [10]]
- end
- end
- describe "rereduce" do
- before(:all) do
- @fun = functions["reduce-values-sum"][LANGUAGE]
- @qs.reset!
- end
- it "should rereduce" do
- vs = (0...10).collect{|i|i}
- @qs.run(["rereduce", [@fun], vs]).should == [true, [45]]
- end
- end
-
- describe "design docs" do
- before(:all) do
- @ddoc = {
- "_id" => "foo"
- }
- @qs.reset!
- end
- it "should learn design docs" do
- @qs.teach_ddoc(@ddoc).should == true
- end
- end
-
- # it "should validate"
- describe "validation" do
- before(:all) do
- @fun = functions["validate-forbidden"][LANGUAGE]
- @ddoc = make_ddoc(["validate_doc_update"], @fun)
- @qs.teach_ddoc(@ddoc)
- end
- it "should allow good updates" do
- @qs.ddoc_run(@ddoc,
- ["validate_doc_update"],
- [{"good" => true}, {}, {}]).should == 1
- end
- it "should reject invalid updates" do
- @qs.ddoc_run(@ddoc,
- ["validate_doc_update"],
- [{"bad" => true}, {}, {}]).should == {"forbidden"=>"bad doc"}
- end
- end
-
- describe "show" do
- before(:all) do
- @fun = functions["show-simple"][LANGUAGE]
- @ddoc = make_ddoc(["shows","simple"], @fun)
- @qs.teach_ddoc(@ddoc)
- end
- it "should show" do
- @qs.ddoc_run(@ddoc,
- ["shows","simple"],
- [{:title => "Best ever", :body => "Doc body"}, {}]).should ==
- ["resp", {"body" => "Best ever - Doc body"}]
- end
- end
-
- describe "show with headers" do
- before(:all) do
- # TODO we can make real ddocs up there.
- @fun = functions["show-headers"][LANGUAGE]
- @ddoc = make_ddoc(["shows","headers"], @fun)
- @qs.teach_ddoc(@ddoc)
- end
- it "should show headers" do
- @qs.ddoc_run(
- @ddoc,
- ["shows","headers"],
- [{:title => "Best ever", :body => "Doc body"}, {}]
- ).
- should == ["resp", {"code"=>200,"headers" => {"X-Plankton"=>"Rusty"}, "body" => "Best ever - Doc body"}]
- end
- end
-
- describe "recoverable error" do
- before(:all) do
- @fun = functions["error"][LANGUAGE]
- @ddoc = make_ddoc(["shows","error"], @fun)
- @qs.teach_ddoc(@ddoc)
- end
- it "should not exit" do
- @qs.ddoc_run(@ddoc, ["shows","error"],
- [{"foo"=>"bar"}, {"q" => "ok"}]).
- should == ["error", "error_key", "testing"]
- # still running
- @qs.run(["reset"]).should == true
- end
- end
-
- describe "changes filter" do
- before(:all) do
- @fun = functions["filter-basic"][LANGUAGE]
- @ddoc = make_ddoc(["filters","basic"], @fun)
- @qs.teach_ddoc(@ddoc)
- end
- it "should only return true for good docs" do
- @qs.ddoc_run(@ddoc,
- ["filters","basic"],
- [[{"key"=>"bam", "good" => true}, {"foo" => "bar"}, {"good" => true}], {"req" => "foo"}]
- ).
- should == [true, [true, false, true]]
- end
- end
-
- describe "update" do
- before(:all) do
- # in another patch we can remove this duplication
- # by setting up the design doc for each language ahead of time.
- @fun = functions["update-basic"][LANGUAGE]
- @ddoc = make_ddoc(["updates","basic"], @fun)
- @qs.teach_ddoc(@ddoc)
- end
- it "should return a doc and a resp body" do
- up, doc, resp = @qs.ddoc_run(@ddoc,
- ["updates","basic"],
- [{"foo" => "gnarly"}, {"method" => "POST"}]
- )
- up.should == "up"
- doc.should == {"foo" => "gnarly", "world" => "hello"}
- resp["body"].should == "hello doc"
- end
- end
-
-# end
-# LIST TESTS
-# __END__
-
- describe "ddoc list" do
- before(:all) do
- @ddoc = {
- "_id" => "foo",
- "lists" => {
- "simple" => functions["list-simple"][LANGUAGE],
- "headers" => functions["show-sends"][LANGUAGE],
- "rows" => functions["show-while-get-rows"][LANGUAGE],
- "buffer-chunks" => functions["show-while-get-rows-multi-send"][LANGUAGE],
- "chunky" => functions["list-chunky"][LANGUAGE]
- }
- }
- @qs.teach_ddoc(@ddoc)
- end
-
- describe "example list" do
- it "should run normal" do
- @qs.ddoc_run(@ddoc,
- ["lists","simple"],
- [{"foo"=>"bar"}, {"q" => "ok"}]
- ).should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
- @qs.run(["list_row", {"key"=>"baz"}]).should == ["chunks", ["baz"]]
- @qs.run(["list_row", {"key"=>"bam"}]).should == ["chunks", ["bam"]]
- @qs.run(["list_row", {"key"=>"foom"}]).should == ["chunks", ["foom"]]
- @qs.run(["list_row", {"key"=>"fooz"}]).should == ["chunks", ["fooz"]]
- @qs.run(["list_row", {"key"=>"foox"}]).should == ["chunks", ["foox"]]
- @qs.run(["list_end"]).should == ["end" , ["early"]]
- end
- end
-
- describe "headers" do
- it "should do headers proper" do
- @qs.ddoc_run(@ddoc, ["lists","headers"],
- [{"total_rows"=>1000}, {"q" => "ok"}]
- ).should == ["start", ["first chunk", 'second "chunk"'],
- {"headers"=>{"Content-Type"=>"text/plain"}}]
- @qs.rrun(["list_end"])
- @qs.jsgets.should == ["end", ["tail"]]
- end
- end
-
- describe "with rows" do
- it "should list em" do
- @qs.ddoc_run(@ddoc, ["lists","rows"],
- [{"foo"=>"bar"}, {"q" => "ok"}]).
- should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
- @qs.rrun(["list_row", {"key"=>"baz"}])
- @qs.get_chunks.should == ["baz"]
- @qs.rrun(["list_row", {"key"=>"bam"}])
- @qs.get_chunks.should == ["bam"]
- @qs.rrun(["list_end"])
- @qs.jsgets.should == ["end", ["tail"]]
- end
- it "should work with zero rows" do
- @qs.ddoc_run(@ddoc, ["lists","rows"],
- [{"foo"=>"bar"}, {"q" => "ok"}]).
- should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
- @qs.rrun(["list_end"])
- @qs.jsgets.should == ["end", ["tail"]]
- end
- end
-
- describe "should buffer multiple chunks sent for a single row." do
- it "should should buffer em" do
- @qs.ddoc_run(@ddoc, ["lists","buffer-chunks"],
- [{"foo"=>"bar"}, {"q" => "ok"}]).
- should == ["start", ["bacon"], {"headers"=>{}}]
- @qs.rrun(["list_row", {"key"=>"baz"}])
- @qs.get_chunks.should == ["baz", "eggs"]
- @qs.rrun(["list_row", {"key"=>"bam"}])
- @qs.get_chunks.should == ["bam", "eggs"]
- @qs.rrun(["list_end"])
- @qs.jsgets.should == ["end", ["tail"]]
- end
- end
- it "should end after 2" do
- @qs.ddoc_run(@ddoc, ["lists","chunky"],
- [{"foo"=>"bar"}, {"q" => "ok"}]).
- should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
-
- @qs.run(["list_row", {"key"=>"baz"}]).
- should == ["chunks", ["baz"]]
-
- @qs.run(["list_row", {"key"=>"bam"}]).
- should == ["chunks", ["bam"]]
-
- @qs.run(["list_row", {"key"=>"foom"}]).
- should == ["end", ["foom", "early tail"]]
- # here's where the js server has to handle the early quit properly
- @qs.run(["reset"]).
- should == true
- end
- end
-
- describe "ddoc rewrites" do
- describe "simple rewrite" do
- before(:all) do
- @ddoc = {
- "_id" => "foo",
- "rewrites" => functions["rewrite-basic"][LANGUAGE]
- }
- @qs.teach_ddoc(@ddoc)
- end
- it "should run normal" do
- ok, resp = @qs.ddoc_run(@ddoc,
- ["rewrites"],
- [{"path" => "foo/bar"}, {"method" => "POST"}]
- )
- ok.should == "ok"
- resp["path"].should == "new/location"
- end
- end
-
- describe "no rule" do
- before(:all) do
- @ddoc = {
- "_id" => "foo",
- "rewrites" => functions["rewrite-no-rule"][LANGUAGE]
- }
- @qs.teach_ddoc(@ddoc)
- end
- it "should run normal" do
- resp = @qs.ddoc_run(@ddoc,
- ["rewrites"],
- [{"path" => "foo/bar"}, {"method" => "POST"}]
- )
- resp.should == ['no_dispatch_rule']
- end
- end
- end
-end
-
-
-
-def should_have_exited qs
- begin
- qs.run(["reset"])
- "raise before this (except Erlang)".should == true
- rescue RuntimeError => e
- e.message.should == "no response"
- rescue Errno::EPIPE
- true.should == true
- end
-end
-
-describe "query server that exits" do
- before(:each) do
- @qs = QueryServerRunner.run
- @ddoc = {
- "_id" => "foo",
- "lists" => {
- "capped" => functions["list-capped"][LANGUAGE],
- "raw" => functions["list-raw"][LANGUAGE]
- },
- "shows" => {
- "fatal" => functions["fatal"][LANGUAGE]
- }
- }
- @qs.teach_ddoc(@ddoc)
- end
- after(:each) do
- @qs.close
- end
-
- describe "only goes to 2 list" do
- it "should exit if erlang sends too many rows" do
- @qs.ddoc_run(@ddoc, ["lists","capped"],
- [{"foo"=>"bar"}, {"q" => "ok"}]).
- should == ["start", ["bacon"], {"headers"=>{}}]
- @qs.run(["list_row", {"key"=>"baz"}]).should == ["chunks", ["baz"]]
- @qs.run(["list_row", {"key"=>"foom"}]).should == ["chunks", ["foom"]]
- @qs.run(["list_row", {"key"=>"fooz"}]).should == ["end", ["fooz", "early"]]
- e = @qs.run(["list_row", {"key"=>"foox"}])
- e[0].should == "error"
- e[1].should == "unknown_command"
- should_have_exited @qs
- end
- end
-
- describe "raw list" do
- it "should exit if it gets a non-row in the middle" do
- @qs.ddoc_run(@ddoc, ["lists","raw"],
- [{"foo"=>"bar"}, {"q" => "ok"}]).
- should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
- e = @qs.run(["reset"])
- e[0].should == "error"
- e[1].should == "list_error"
- should_have_exited @qs
- end
- end
-
- describe "fatal error" do
- it "should exit" do
- @qs.ddoc_run(@ddoc, ["shows","fatal"],
- [{"foo"=>"bar"}, {"q" => "ok"}]).
- should == ["error", "error_key", "testing"]
- should_have_exited @qs
- end
- end
-end
-
-describe "thank you for using the tests" do
- it "for more info run with QS_TRACE=true or see query_server_spec.rb file header" do
- end
-end
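The spec above drives the query server over a simple line protocol: one JSON command array per line on stdin, one JSON reply per line on stdout. A minimal Node.js sketch of the same exchange, assuming a built ./bin/couchjs and share/server/main.js as in QueryServerRunner::COMMANDS; the replies noted in comments are what the spec expects:

    const { spawn } = require("child_process");
    const readline = require("readline");

    const qs = spawn("./bin/couchjs", ["share/server/main.js"]);
    readline.createInterface({ input: qs.stdout })
      .on("line", (line) => console.log("reply:", line));

    // one JSON array per line, newline-terminated
    const send = (cmd) => qs.stdin.write(JSON.stringify(cmd) + "\n");

    send(["reset"]);                                          // -> true
    send(["add_fun", "function(doc){ emit(doc._id, 1); }"]);  // -> true
    send(["map_doc", { _id: "a" }]);                          // -> [[["a", 1]]]

    setTimeout(() => qs.kill(), 500);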
diff --git a/test/view_server/run_native_process.es b/test/view_server/run_native_process.es
deleted file mode 100755
index fcf16d75d..000000000
--- a/test/view_server/run_native_process.es
+++ /dev/null
@@ -1,59 +0,0 @@
-#! /usr/bin/env escript
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-read() ->
- case io:get_line('') of
- eof -> stop;
- Data -> couch_util:json_decode(Data)
- end.
-
-send(Data) when is_binary(Data) ->
- send(binary_to_list(Data));
-send(Data) when is_list(Data) ->
- io:format(Data ++ "\n", []).
-
-write(Data) ->
- % log("~p", [Data]),
- case (catch couch_util:json_encode(Data)) of
- % when testing, this is what prints your errors
- {json_encode, Error} -> write({[{<<"error">>, Error}]});
- Json -> send(Json)
- end.
-
-% log(Mesg) ->
-% log(Mesg, []).
-% log(Mesg, Params) ->
-% io:format(standard_error, Mesg, Params).
-% jlog(Mesg) ->
-% write([<<"log">>, list_to_binary(io_lib:format("~p",[Mesg]))]).
-
-loop(Pid) ->
- case read() of
- stop -> ok;
- Json ->
- case (catch couch_native_process:prompt(Pid, Json)) of
- {error, Reason} ->
- ok = write([error, Reason, Reason]);
- Resp ->
- ok = write(Resp),
- loop(Pid)
- end
- end.
-
-main([]) ->
- code:add_pathz("src/couchdb"),
- code:add_pathz("src/mochiweb"),
- {ok, Pid} = couch_native_process:start_link(),
- loop(Pid).
-
diff --git a/version.mk b/version.mk
deleted file mode 100644
index dd9cd4842..000000000
--- a/version.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-vsn_major=3
-vsn_minor=2
-vsn_patch=2